From 80e444edbd8dbd1cd9399d49958b6881be406116 Mon Sep 17 00:00:00 2001
From: Derek Brown <6845676+DerekTBrown@users.noreply.github.com>
Date: Wed, 20 Nov 2024 05:44:45 -0800
Subject: [PATCH 01/18] lint: fix docker build warnings (#13351)

Docker builds emit a warning because the casing of 'FROM' and 'as'
doesn't match. Fix this everywhere.

Signed-off-by: Derek Brown <6845676+DerekTBrown@users.noreply.github.com>
---
 Dockerfile-proxy             |  8 ++++----
 cli/Dockerfile               | 30 +++++++++++++++---------------
 controller/Dockerfile        |  4 ++--
 jaeger/injector/Dockerfile   |  4 ++--
 policy-controller/Dockerfile |  4 ++--
 viz/metrics-api/Dockerfile   |  4 ++--
 viz/tap/Dockerfile           |  4 ++--
 web/Dockerfile               |  6 +++---
 8 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/Dockerfile-proxy b/Dockerfile-proxy
index ff20bef42ad2c..1da0f7c0727de 100644
--- a/Dockerfile-proxy
+++ b/Dockerfile-proxy
@@ -2,7 +2,7 @@ ARG RUNTIME_IMAGE=gcr.io/distroless/cc-debian12
 ARG BUILDPLATFORM=linux/amd64
 
 # Precompile key slow-to-build dependencies
-FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps
+FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps
 WORKDIR /linkerd-build
 COPY go.mod go.sum ./
 COPY bin/install-deps bin/
@@ -10,7 +10,7 @@ RUN go mod download
 ARG TARGETARCH
 RUN ./bin/install-deps $TARGETARCH
 
-FROM --platform=$BUILDPLATFORM debian:bookworm-slim as fetch
+FROM --platform=$BUILDPLATFORM debian:bookworm-slim AS fetch
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get install -y curl jq && \
     rm -rf /var/lib/apt/lists/*
@@ -32,7 +32,7 @@ RUN bin/scurl -O https://github.com/linkerd/linkerd2-proxy-init/releases/downloa
 RUN tar -zxvf linkerd-network-validator-${LINKERD_VALIDATOR_VERSION}-${TARGETARCH}.tgz && mv linkerd-network-validator-${LINKERD_VALIDATOR_VERSION}-${TARGETARCH}/linkerd-network-validator .
 
 ## compile proxy-identity agent
-FROM go-deps as golang
+FROM go-deps AS golang
 WORKDIR /linkerd-build
 COPY pkg/util pkg/util
 COPY pkg/flags pkg/flags
@@ -43,7 +43,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH go build -mod=readonly ./pkg/...
COPY proxy-identity proxy-identity RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH go build -o /out/proxy-identity -mod=readonly -ldflags "-s -w" ./proxy-identity -FROM $RUNTIME_IMAGE as runtime +FROM $RUNTIME_IMAGE AS runtime LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY --from=fetch /build/target/proxy/LICENSE /usr/lib/linkerd/LICENSE COPY --from=fetch /build/proxy-version /usr/lib/linkerd/linkerd2-proxy-version.txt diff --git a/cli/Dockerfile b/cli/Dockerfile index b2745d52422d2..31288dbf16d46 100644 --- a/cli/Dockerfile +++ b/cli/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -9,7 +9,7 @@ RUN go mod download RUN ./bin/install-deps ## compile binaries -FROM go-deps as go-gen +FROM go-deps AS go-gen WORKDIR /linkerd-build COPY cli cli COPY charts charts @@ -31,39 +31,39 @@ RUN go generate -mod=readonly ./viz/static RUN mkdir -p /out -FROM go-gen as linux-amd64-full +FROM go-gen AS linux-amd64-full RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "${GO_LDFLAGS}" ./cli -FROM go-gen as linux-arm64-full +FROM go-gen AS linux-arm64-full RUN ./bin/install-deps arm64 RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "${GO_LDFLAGS}" ./cli -FROM go-gen as linux-arm-full +FROM go-gen AS linux-arm-full RUN ./bin/install-deps arm RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -o /out/linkerd -tags prod -mod=readonly -ldflags "${GO_LDFLAGS}" ./cli -FROM go-gen as darwin-full +FROM go-gen AS darwin-full RUN CGO_ENABLED=0 GOOS=darwin go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" RUN CGO_ENABLED=0 GOOS=darwin go build -o /out/linkerd -tags prod -mod=readonly -ldflags "${GO_LDFLAGS}" ./cli -FROM go-gen as darwin-arm64-full +FROM go-gen AS darwin-arm64-full RUN CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" RUN CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o /out/linkerd -tags prod -mod=readonly -ldflags "${GO_LDFLAGS}" ./cli -FROM go-gen as windows-full +FROM go-gen AS windows-full RUN CGO_ENABLED=0 GOOS=windows go build -o /out/linkerd -tags prod -mod=readonly -ldflags "-s -w" ./cli ARG LINKERD_VERSION ENV GO_LDFLAGS="-s -w -X github.com/linkerd/linkerd2/pkg/version.Version=${LINKERD_VERSION}" @@ -75,7 +75,7 @@ RUN CGO_ENABLED=0 
GOOS=windows go build -o /out/linkerd -tags prod -mod=readonly # depending on the host's OS and arch. # -FROM scratch as linux-amd64 +FROM scratch AS linux-amd64 LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=linux-amd64-full /out/linkerd /out/linkerd-linux-amd64 @@ -83,37 +83,37 @@ COPY --from=linux-amd64-full /out/linkerd /out/linkerd-linux-amd64 # response from daemon: No command specified." ENTRYPOINT ["/out/linkerd-linux-amd64"] -FROM scratch as linux-arm64 +FROM scratch AS linux-arm64 LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=linux-arm64-full /out/linkerd /out/linkerd-linux-arm64 ENTRYPOINT ["/out/linkerd-linux-arm64"] -FROM scratch as linux-arm +FROM scratch AS linux-arm LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=linux-arm64-full /out/linkerd /out/linkerd-linux-arm ENTRYPOINT ["/out/linkerd-linux-arm"] -FROM scratch as darwin +FROM scratch AS darwin LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=darwin-full /out/linkerd /out/linkerd-darwin ENTRYPOINT ["/out/linkerd-darwin"] -FROM scratch as darwin-arm64 +FROM scratch AS darwin-arm64 LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=darwin-arm64-full /out/linkerd /out/linkerd-darwin-arm64 ENTRYPOINT ["/out/linkerd-darwin-arm64"] -FROM scratch as windows +FROM scratch AS windows LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=windows-full /out/linkerd /out/linkerd-windows ENTRYPOINT ["/out/linkerd-windows"] -FROM scratch as multi-arch +FROM scratch AS multi-arch LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY LICENSE /linkerd/LICENSE COPY --from=linux-amd64-full /out/linkerd /out/linkerd-linux-amd64 diff --git a/controller/Dockerfile b/controller/Dockerfile index 8cf6db7cb2648..bf0b890edeea8 100644 --- a/controller/Dockerfile +++ b/controller/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -10,7 +10,7 @@ ARG TARGETARCH RUN ./bin/install-deps $TARGETARCH ## compile controller service -FROM go-deps as golang +FROM go-deps AS golang WORKDIR /linkerd-build COPY controller/gen controller/gen COPY pkg pkg diff --git a/jaeger/injector/Dockerfile b/jaeger/injector/Dockerfile index 36d7aa7d50dea..78d37af5f6146 100644 --- a/jaeger/injector/Dockerfile +++ b/jaeger/injector/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -10,7 +10,7 @@ ARG TARGETARCH RUN ./bin/install-deps $TARGETARCH ## compile controller service -FROM go-deps as golang +FROM go-deps AS golang WORKDIR /linkerd-build COPY jaeger jaeger COPY controller/gen controller/gen diff --git a/policy-controller/Dockerfile b/policy-controller/Dockerfile index 099b899013c8c..808a668af47e5 100644 --- a/policy-controller/Dockerfile +++ 
b/policy-controller/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM ghcr.io/linkerd/dev:v43-rust-musl as controller +FROM --platform=$BUILDPLATFORM ghcr.io/linkerd/dev:v43-rust-musl AS controller ARG BUILD_TYPE="release" WORKDIR /build RUN mkdir -p target/bin @@ -21,7 +21,7 @@ RUN --mount=type=cache,target=target \ just-cargo profile=$BUILD_TYPE target=$target build --package=linkerd-policy-controller && \ mv "target/$target/$BUILD_TYPE/linkerd-policy-controller" /tmp/ -FROM scratch as runtime +FROM scratch AS runtime LABEL org.opencontainers.image.source=https://github.com/linkerd/linkerd2 COPY --from=controller /tmp/linkerd-policy-controller /bin/ ENTRYPOINT ["/bin/linkerd-policy-controller"] diff --git a/viz/metrics-api/Dockerfile b/viz/metrics-api/Dockerfile index d2621b1352191..b3050bcfccd6b 100644 --- a/viz/metrics-api/Dockerfile +++ b/viz/metrics-api/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -10,7 +10,7 @@ ARG TARGETARCH RUN ./bin/install-deps $TARGETARCH ## compile metrics-apiservice -FROM go-deps as golang +FROM go-deps AS golang WORKDIR /linkerd-build COPY pkg pkg COPY controller controller diff --git a/viz/tap/Dockerfile b/viz/tap/Dockerfile index a3b4606ac06d1..a16c2c4d792b2 100644 --- a/viz/tap/Dockerfile +++ b/viz/tap/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -10,7 +10,7 @@ ARG TARGETARCH RUN ./bin/install-deps $TARGETARCH ## compile tap -FROM go-deps as golang +FROM go-deps AS golang WORKDIR /linkerd-build COPY pkg pkg # TODO: remove after https://github.com/linkerd/linkerd2/issues/5661 diff --git a/web/Dockerfile b/web/Dockerfile index 11bc8756f4d26..03013caad42c7 100644 --- a/web/Dockerfile +++ b/web/Dockerfile @@ -1,7 +1,7 @@ ARG BUILDPLATFORM=linux/amd64 # Precompile key slow-to-build dependencies -FROM --platform=$BUILDPLATFORM golang:1.22-alpine as go-deps +FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS go-deps WORKDIR /linkerd-build COPY go.mod go.sum ./ COPY bin/install-deps bin/ @@ -10,7 +10,7 @@ ARG TARGETARCH RUN ./bin/install-deps $TARGETARCH ## bundle web assets -FROM --platform=$BUILDPLATFORM node:20-bookworm as webpack-bundle +FROM --platform=$BUILDPLATFORM node:20-bookworm AS webpack-bundle RUN bin/scurl --retry=2 https://yarnpkg.com/install.sh | bash -s -- --version 1.22.10 --network-concurrency 1 ENV PATH /root/.yarn/bin:$PATH @@ -32,7 +32,7 @@ COPY web/app ./web/app RUN ./bin/web build ## compile go server -FROM go-deps as golang +FROM go-deps AS golang WORKDIR /linkerd-build RUN mkdir -p web COPY web/main.go web From bb9993823bec2a07235bb25c61cd5de402cdf954 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 08:45:35 -0500 Subject: [PATCH 02/18] build(deps): bump codecov/codecov-action from 5.0.2 to 5.0.4 (#13354) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.0.2 to 5.0.4. 
- [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/5c47607acb93fed5485fdbf7232e8a31425f672a...985343d70564a82044c1b7fcb84c2fa05405c1a2) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codecov.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index df6d158f7c3aa..bdf9e9518124f 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: go install gotest.tools/gotestsum@v0.4.2 - run: gotestsum -- -cover -coverprofile=coverage.out -v -mod=readonly ./... - - uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a + - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 with: files: ./coverage.out flags: unittests,golang @@ -41,7 +41,7 @@ jobs: export NODE_ENV=test bin/web --frozen-lockfile bin/web test --reporters="jest-progress-bar-reporter" --reporters="./gh_ann_reporter.js" --coverage - - uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a + - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 with: directory: ./web/app/coverage flags: unittests,javascript @@ -58,6 +58,6 @@ jobs: - shell: bash run: mkdir -p target && cd target && bin/scurl -v https://github.com/xd009642/tarpaulin/releases/download/0.27.3/cargo-tarpaulin-x86_64-unknown-linux-musl.tar.gz | tar zxvf - && chmod 755 cargo-tarpaulin - run: target/cargo-tarpaulin tarpaulin --workspace --out Xml - - uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a + - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 with: flags: unittests,rust From 4a424be313f4198a510c937bb2885533631fc2a0 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Wed, 20 Nov 2024 19:14:39 +0200 Subject: [PATCH 03/18] policy: add EgressNetwork integration tests for Http Grpc routes (#13342) This PR alters the policy-test crate so we can exercise the same set of tests for both `EgressNetwork` and `Service` parents. 
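For illustration, the crux of the change is a `Resource` enum that wraps
either parent kind so that a single helper body can drive both variants of
a test. This is a minimal sketch of the pattern only; the full definition
and its helper methods are in the `policy-test/src/lib.rs` diff below:

    // Sketch: one enum abstracts over the two parent kinds
    // (full version in policy-test/src/lib.rs below).
    #[derive(Clone, Debug)]
    pub enum Resource {
        Service(k8s::Service),
        EgressNetwork(k8s::policy::EgressNetwork),
    }

    // Each test body is factored into a shared helper that takes a
    // `Resource`, and thin #[tokio::test] wrappers call it once per
    // parent kind, e.g.:
    //
    //   parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await;
    //   parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await;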
Signed-off-by: Zahari Dichev --- .../k8s/index/src/outbound/index.rs | 15 +- policy-test/src/lib.rs | 151 +- policy-test/src/outbound_api.rs | 82 +- .../tests/outbound_api_egress_network.rs | 85 + policy-test/tests/outbound_api_gateway.rs | 2076 ++++++++++------- policy-test/tests/outbound_api_grpc.rs | 398 ++-- policy-test/tests/outbound_api_linkerd.rs | 2026 ++++++++++------ 7 files changed, 3031 insertions(+), 1802 deletions(-) create mode 100644 policy-test/tests/outbound_api_egress_network.rs diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index ead7f16472c33..4a8f53c17b238 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -363,16 +363,17 @@ impl kubert::index::IndexNamespacedResource for traffic_policy, }; + let ns = Arc::new(ns); self.namespaces .by_ns - .entry(ns.clone()) + .entry(ns.to_string()) .or_insert_with(|| Namespace { service_http_routes: Default::default(), service_grpc_routes: Default::default(), service_tls_routes: Default::default(), service_tcp_routes: Default::default(), resource_port_routes: Default::default(), - namespace: Arc::new(ns), + namespace: ns.clone(), }) .update_resource( egress_network.name_unchecked(), @@ -384,7 +385,7 @@ impl kubert::index::IndexNamespacedResource for .insert(egress_net_ref, egress_network_info); self.reindex_resources(); - self.reinitialize_egress_watches(); + self.reinitialize_egress_watches(&ns); self.reinitialize_fallback_watches() } @@ -398,7 +399,7 @@ impl kubert::index::IndexNamespacedResource for self.egress_networks_by_ref.remove(&egress_net_ref); self.reindex_resources(); - self.reinitialize_egress_watches(); + self.reinitialize_egress_watches(&egress_net_ref.namespace); self.reinitialize_fallback_watches() } } @@ -651,9 +652,11 @@ impl Index { } } - fn reinitialize_egress_watches(&mut self) { + fn reinitialize_egress_watches(&mut self, namespace: &str) { for ns in self.namespaces.by_ns.values_mut() { - ns.reinitialize_egress_watches(); + if namespace == *self.global_egress_network_namespace || namespace == *ns.namespace { + ns.reinitialize_egress_watches() + } } } } diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index 6c9dc181bd617..82745b71ad9dc 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -9,7 +9,9 @@ pub mod outbound_api; pub mod web; use linkerd_policy_controller_k8s_api::{ - self as k8s, policy::httproute::ParentReference, ResourceExt, + self as k8s, + policy::{httproute::ParentReference, EgressNetwork}, + ResourceExt, }; use maplit::{btreemap, convert_args}; use tokio::time; @@ -21,6 +23,12 @@ pub enum LinkerdInject { Disabled, } +#[derive(Clone, Debug)] +pub enum Resource { + Service(k8s::Service), + EgressNetwork(k8s::policy::EgressNetwork), +} + /// Creates a cluster-scoped resource. pub async fn create_cluster_scoped(client: &kube::Client, obj: T) -> T where @@ -237,6 +245,28 @@ pub async fn await_gateway_route_status( route_status } +// Waits until an EgressNet with the given namespace and name has a status set. 
+pub async fn await_egress_net_status(
+    client: &kube::Client,
+    ns: &str,
+    name: &str,
+) -> k8s::policy::egress_network::EgressNetworkStatus {
+    let egress_net_status = await_condition(
+        client,
+        ns,
+        name,
+        |obj: Option<&k8s::policy::EgressNetwork>| -> bool {
+            obj.and_then(|en| en.status.as_ref()).is_some()
+        },
+    )
+    .await
+    .expect("must fetch egress network")
+    .status
+    .expect("Egress net must contain a status representation");
+    tracing::trace!(?egress_net_status, name, ns, "got egress net status");
+    egress_net_status
+}
+
 // Waits until an GrpcRoute with the given namespace and name has a status set
 // on it, then returns the generic route status representation.
 pub async fn await_grpc_route_status(
@@ -288,6 +318,63 @@ pub async fn logs(client: &kube::Client, ns: &str, pod: &str, container: &str) {
     }
 }
 
+impl Resource {
+    pub fn group(&self) -> String {
+        match self {
+            Self::EgressNetwork(_) => "policy.linkerd.io".to_string(),
+            Self::Service(_) => "core".to_string(),
+        }
+    }
+
+    pub fn kind(&self) -> String {
+        match self {
+            Self::EgressNetwork(_) => "EgressNetwork".to_string(),
+            Self::Service(_) => "Service".to_string(),
+        }
+    }
+
+    pub fn name(&self) -> String {
+        match self {
+            Self::EgressNetwork(e) => e.name_unchecked(),
+            Self::Service(s) => s.name_unchecked(),
+        }
+    }
+
+    pub fn namespace(&self) -> String {
+        match self {
+            Self::EgressNetwork(e) => e.namespace().unwrap(),
+            Self::Service(s) => s.namespace().unwrap(),
+        }
+    }
+
+    pub fn ip(&self) -> String {
+        match self {
+            // For EgressNetwork, we can just return a non-private
+            // IP address as our default cluster setup dictates that
+            // all non-private networks are considered egress. Since
+            // we do not modify this setting in tests for the time being,
+            // returning 1.1.1.1 is fine.
+            Self::EgressNetwork(_) => "1.1.1.1".to_string(),
+            Self::Service(s) => s
+                .spec
+                .as_ref()
+                .expect("Service must have a spec")
+                .cluster_ip
+                .as_ref()
+                .expect("Service must have a cluster ip")
+                .clone(),
+        }
+    }
+}
+
+/// Creates a parent resource (either a Service or an EgressNetwork).
+pub async fn create_parent(client: &kube::Client, parent: Resource) -> Resource {
+    match parent {
+        Resource::Service(svc) => Resource::Service(create(client, svc).await),
+        Resource::EgressNetwork(enet) => Resource::EgressNetwork(create(client, enet).await),
+    }
+}
+
 /// Creates a service resource.
 pub async fn create_service(
     client: &kube::Client,
@@ -301,11 +388,7 @@ pub async fn create_service(
 }
 
 /// Creates an egress network resource.
-pub async fn create_egress_network(
-    client: &kube::Client,
-    ns: &str,
-    name: &str,
-) -> k8s::policy::EgressNetwork {
+pub async fn create_egress_network(client: &kube::Client, ns: &str, name: &str) -> EgressNetwork {
     let en = mk_egress_net(ns, name);
     create(client, en).await
 }
@@ -326,6 +409,22 @@ pub async fn create_opaque_service(
     create(client, svc).await
 }
 
+/// Creates an opaque egress network resource.
+pub async fn create_opaque_egress_network(
+    client: &kube::Client,
+    ns: &str,
+    name: &str,
+    port: i32,
+) -> k8s::policy::EgressNetwork {
+    let egress = mk_egress_net(ns, name);
+    let egress = annotate_egress_net(
+        egress,
+        std::iter::once(("config.linkerd.io/opaque-ports", port)),
+    );
+
+    create(client, egress).await
+}
+
 /// Creates a service resource with given annotations.
 pub async fn create_annotated_service(
     client: &kube::Client,
@@ -338,6 +437,17 @@ pub async fn create_annotated_service(
     create(client, svc).await
 }
 
+/// Creates an egress network resource with given annotations.
+pub async fn create_annotated_egress_network(
+    client: &kube::Client,
+    ns: &str,
+    name: &str,
+    annotations: std::collections::BTreeMap<String, String>,
+) -> k8s::policy::EgressNetwork {
+    let enet = annotate_egress_net(mk_egress_net(ns, name), annotations);
+    create(client, enet).await
+}
+
 pub fn annotate_service<K, V>(
     mut svc: k8s::Service,
     annotations: impl IntoIterator<Item = (K, V)>,
@@ -354,6 +464,22 @@ where
     svc
 }
 
+pub fn annotate_egress_net<K, V>(
+    mut egress_net: k8s::policy::EgressNetwork,
+    annotations: impl IntoIterator<Item = (K, V)>,
+) -> k8s::policy::EgressNetwork
+where
+    K: ToString,
+    V: ToString,
+{
+    egress_net.annotations_mut().extend(
+        annotations
+            .into_iter()
+            .map(|(k, v)| (k.to_string(), v.to_string())),
+    );
+    egress_net
+}
+
 pub fn mk_service(ns: &str, name: &str, port: i32) -> k8s::Service {
     k8s::Service {
         metadata: k8s::ObjectMeta {
@@ -388,16 +514,16 @@ pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork {
 }
 
 #[track_caller]
-pub fn assert_svc_meta(meta: &Option<grpc::meta::Metadata>, svc: &k8s::Service, port: u16) {
-    tracing::debug!(?meta, ?svc, port, "Asserting service metadata");
+pub fn assert_resource_meta(meta: &Option<grpc::meta::Metadata>, resource: &Resource, port: u16) {
+    tracing::debug!(?meta, ?resource, port, "Asserting resource metadata");
     assert_eq!(
         meta,
         &Some(grpc::meta::Metadata {
             kind: Some(grpc::meta::metadata::Kind::Resource(grpc::meta::Resource {
-                group: "core".to_string(),
-                kind: "Service".to_string(),
-                name: svc.name_unchecked(),
-                namespace: svc.namespace().unwrap(),
+                group: resource.group(),
+                kind: resource.kind(),
+                name: resource.name(),
+                namespace: resource.namespace(),
                 section: "".to_string(),
                 port: port.into()
             })),
diff --git a/policy-test/src/outbound_api.rs b/policy-test/src/outbound_api.rs
index de3a5f81f9c33..02c53d03e2515 100644
--- a/policy-test/src/outbound_api.rs
+++ b/policy-test/src/outbound_api.rs
@@ -1,26 +1,25 @@
-use crate::{assert_svc_meta, grpc};
+use crate::{assert_resource_meta, grpc, Resource};
 use kube::ResourceExt;
-use linkerd_policy_controller_k8s_api as k8s;
 use std::time::Duration;
 use tokio::time;
 
 pub async fn retry_watch_outbound_policy(
     client: &kube::Client,
     ns: &str,
-    svc: &k8s::Service,
+    resource: &Resource,
     port: u16,
 ) -> tonic::Streaming<grpc::outbound::OutboundPolicy> {
     // Port-forward to the control plane and start watching the service's
     // outbound policy.
let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(client).await; loop { - match policy_api.watch(ns, svc, port).await { + match policy_api.watch_ip(ns, &resource.ip(), port).await { Ok(rx) => return rx, Err(error) => { tracing::error!( ?error, ns, - svc = svc.name_unchecked(), + resource = resource.name(), "failed to watch outbound policy for port 4191" ); time::sleep(Duration::from_secs(1)).await; @@ -187,7 +186,7 @@ pub fn assert_backend_has_failure_filter( } #[track_caller] -pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, svc: &k8s::Service, port: u16) { +pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, parent: &Resource, port: u16) { let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); match kind { grpc::meta::metadata::Kind::Default(_) => {} @@ -198,7 +197,7 @@ pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, svc: &k8s::Ser let backends = route_backends_first_available(route); let backend = assert_singleton(backends); - assert_backend_matches_service(backend, svc, port); + assert_backend_matches_parent(backend, parent, port); let rule = assert_singleton(&route.rules); let route_match = assert_singleton(&rule.matches); @@ -210,35 +209,49 @@ pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, svc: &k8s::Ser } #[track_caller] -pub fn assert_backend_matches_service( +pub fn assert_backend_matches_parent( backend: &grpc::outbound::http_route::RouteBackend, - svc: &k8s::Service, + parent: &Resource, port: u16, ) { let backend = backend.backend.as_ref().unwrap(); - let dst = match backend.kind.as_ref().unwrap() { - grpc::outbound::backend::Kind::Balancer(balance) => { - let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); - match kind { - grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, - } + + match parent { + Resource::Service(svc) => { + let dst = match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Balancer(balance) => { + let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); + match kind { + grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, + } + } + grpc::outbound::backend::Kind::Forward(_) => { + panic!("service default route backend must be Balancer") + } + }; + assert_eq!( + *dst, + format!( + "{}.{}.svc.{}:{}", + svc.name_unchecked(), + svc.namespace().unwrap(), + "cluster.local", + port + ) + ); } - grpc::outbound::backend::Kind::Forward(_) => { - panic!("default route backend must be Balancer") + + Resource::EgressNetwork(_) => { + match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Forward(_) => {} + grpc::outbound::backend::Kind::Balancer(_) => { + panic!("egress net default route backend must be Forward") + } + }; } - }; - assert_eq!( - *dst, - format!( - "{}.{}.svc.{}:{}", - svc.name_unchecked(), - svc.namespace().unwrap(), - "cluster.local", - port - ) - ); + } - assert_svc_meta(&backend.metadata, svc, port) + assert_resource_meta(&backend.metadata, parent, port) } #[track_caller] @@ -247,6 +260,17 @@ pub fn assert_singleton(ts: &[T]) -> &T { ts.first().unwrap() } +#[track_caller] +pub fn assert_route_attached<'a, T>(routes: &'a [T], parent: &Resource) -> &'a T { + match parent { + Resource::EgressNetwork(_) => { + assert_eq!(routes.len(), 2); + routes.first().unwrap() + } + Resource::Service(_) => assert_singleton(routes), + } +} + #[track_caller] pub fn assert_route_name_eq(route: &grpc::outbound::HttpRoute, name: &str) { 
assert_name_eq(route.metadata.as_ref().unwrap(), name) diff --git a/policy-test/tests/outbound_api_egress_network.rs b/policy-test/tests/outbound_api_egress_network.rs new file mode 100644 index 0000000000000..99f46827453aa --- /dev/null +++ b/policy-test/tests/outbound_api_egress_network.rs @@ -0,0 +1,85 @@ +use futures::prelude::*; +use linkerd2_proxy_api::meta; +use linkerd_policy_test::{ + await_egress_net_status, create_egress_network, delete, grpc, with_temp_ns, +}; + +#[tokio::test(flavor = "current_thread")] +async fn egress_switches_to_fallback() { + with_temp_ns(|client, ns| async move { + let egress_net = create_egress_network(&client, &ns, "egress-net").await; + await_egress_net_status(&client, &ns, "egress-net").await; + + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: "policy.linkerd.io".to_string(), + port: 80, + kind: "EgressNetwork".to_string(), + name: "egress-net".to_string(), + namespace: ns.clone(), + section: "".to_string(), + })), + }; + + assert_eq!(meta, expected_meta); + + delete(&client, egress_net).await; + assert!(rsp.next().await.is_none()); + + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), + }; + assert_eq!(meta, expected_meta); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn fallback_switches_to_egress() { + with_temp_ns(|client, ns| async move { + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), + }; + assert_eq!(meta, expected_meta); + + let _egress_net = create_egress_network(&client, &ns, "egress-net").await; + await_egress_net_status(&client, &ns, "egress-net").await; + + // stream should fall apart now + assert!(rsp.next().await.is_none()); + + // we should switch to an egress net now + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: "policy.linkerd.io".to_string(), + port: 80, + kind: "EgressNetwork".to_string(), + name: "egress-net".to_string(), + namespace: ns.clone(), + section: "".to_string(), + })), + }; + + assert_eq!(meta, expected_meta); + }) + .await; +} diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index ecbb5899033dd..e466a83088bb2 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -1,12 +1,12 @@ use futures::prelude::*; use kube::ResourceExt; -use linkerd2_proxy_api::meta; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_svc_meta, await_gateway_route_status, create, - create_annotated_service, 
create_cluster_scoped, create_egress_network, create_opaque_service, - create_service, delete, delete_cluster_scoped, grpc, mk_service, outbound_api::*, update, - with_temp_ns, + assert_default_accrual_backoff, assert_resource_meta, await_egress_net_status, + await_gateway_route_status, create, create_annotated_egress_network, create_annotated_service, + create_cluster_scoped, create_egress_network, create_opaque_egress_network, + create_opaque_service, create_service, delete_cluster_scoped, grpc, mk_egress_net, mk_service, + outbound_api::*, update, with_temp_ns, Resource, }; use maplit::{btreemap, convert_args}; use std::{collections::BTreeMap, time::Duration}; @@ -34,250 +34,103 @@ async fn service_does_not_exist() { } #[tokio::test(flavor = "current_thread")] -async fn egress_switches_to_fallback() { +async fn service_with_no_http_routes() { with_temp_ns(|client, ns| async move { - let egress_net = create_egress_network(&client, &ns, "egress-net").await; - - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); - - let policy = rsp.next().await.unwrap().unwrap(); - let meta = policy.metadata.unwrap(); - - let expected_meta = meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "policy.linkerd.io".to_string(), - port: 80, - kind: "EgressNetwork".to_string(), - name: "egress-net".to_string(), - namespace: ns.clone(), - section: "".to_string(), - })), - }; - - assert_eq!(meta, expected_meta); - - delete(&client, egress_net).await; - assert!(rsp.next().await.is_none()); - - let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); - - let policy = rsp.next().await.unwrap().unwrap(); - let meta = policy.metadata.unwrap(); - let expected_meta = meta::Metadata { - kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), - }; - assert_eq!(meta, expected_meta); + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn fallback_switches_to_egress() { +async fn egress_net_with_no_http_routes() { with_temp_ns(|client, ns| async move { - let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; - let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - let policy = rsp.next().await.unwrap().unwrap(); - let meta = policy.metadata.unwrap(); - let expected_meta = meta::Metadata { - kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), - }; - assert_eq!(meta, expected_meta); - - let _egress_net = create_egress_network(&client, &ns, "egress-net").await; - // stream should fall apart now - assert!(rsp.next().await.is_none()); - - // we should switch to an egress net now - let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); - let policy = rsp.next().await.unwrap().unwrap(); - let meta = policy.metadata.unwrap(); - - let expected_meta = meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "policy.linkerd.io".to_string(), - port: 80, - kind: "EgressNetwork".to_string(), - name: "egress-net".to_string(), - namespace: ns.clone(), - section: "".to_string(), - })), - }; - - assert_eq!(meta, expected_meta); + 
parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_no_http_routes() { +async fn service_with_http_route_without_rules() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_route_without_rules() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); + parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_http_route_without_rules() { +async fn service_with_http_routes_without_backends() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let _route = create(&client, mk_empty_http_route(&ns, "foo-route", &svc, 4191)).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_routes_without_backends() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route with no rules. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_eq!(route.rules.len(), 0); - }); + parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_without_backends() { +async fn service_with_http_routes_with_backend() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let _route = create( + let backend_svc = create_service(&client, &ns, "backend", 8888).await; + parent_with_http_routes_with_backend( + Resource::Service(svc), + Resource::Service(backend_svc), &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), + &ns, ) .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend, &svc, 4191); - }); }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { +async fn egress_net_with_http_routes_with_backend() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backend_svc = create_service(&client, &ns, backend_name, 8888).await; - let backends = [backend_name]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route with a backend with no filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); + parent_with_http_routes_with_backend( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } @@ -286,7 +139,7 @@ async fn service_with_http_routes_with_backend() { async fn service_with_http_routes_with_cross_namespace_backend() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let config = rx @@ -296,7 +149,7 @@ async fn service_with_http_routes_with_cross_namespace_backend() { .expect("watch must return an initial config"); tracing::trace!(?config); - assert_svc_meta(&config.metadata, &svc, 4191); + assert_resource_meta(&config.metadata, &svc, 4191); // There should be a default route. detect_http_routes(&config, |routes| { @@ -320,8 +173,9 @@ async fn service_with_http_routes_with_cross_namespace_backend() { ) .await; let backend_name = "backend"; - let backend_svc = create_service(&client, &backend_ns_name, backend_name, 8888).await; - let backends = [backend_name]; + let backend_svc = + Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); + let backends = [backend_svc.clone()]; let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( Some(&backends), Some(backend_ns_name), @@ -337,14 +191,14 @@ async fn service_with_http_routes_with_cross_namespace_backend() { .expect("watch must return an updated config"); tracing::trace!(?config); - assert_svc_meta(&config.metadata, &svc, 4191); + assert_resource_meta(&config.metadata, &svc, 4191); // There should be a route with a backend with no filters. detect_http_routes(&config, |routes| { let route = assert_singleton(routes); let backends = route_backends_random_available(route); let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); let filters = &backend.backend.as_ref().unwrap().filters; assert_eq!(filters.len(), 0); }); @@ -360,48 +214,36 @@ async fn service_with_http_routes_with_invalid_backend() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend = mk_service(&ns, "invalid", 4191); - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backends = ["invalid-backend"]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; + parent_with_http_routes_with_invalid_backend( + Resource::Service(svc), + Resource::Service(backend), + &client, + &ns, + ) + .await; + }) + .await; +} - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); +// TODO: Test fails until handling of invalid backends is implemented. +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - assert_svc_meta(&config.metadata, &svc, 4191); + let backend = mk_egress_net(&ns, "invalid"); - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); + parent_with_http_routes_with_invalid_backend( + Resource::EgressNetwork(egress), + Resource::EgressNetwork(backend), + &client, + &ns, + ) + .await; }) .await; } @@ -413,66 +255,21 @@ async fn service_with_multiple_http_routes() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - &client, - mk_http_route(&ns, "a-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "a-route").await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - let _b_route = create( - &client, - mk_http_route(&ns, "b-route", &svc, Some(4191)).build(), - ) - .await; - await_gateway_route_status(&client, &ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +// TODO: Investigate why the policy controller is only returning one route in this +// case instead of two. 
+#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_multiple_http_routes() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be 2 routes, returned in order. - detect_http_routes(&config, |routes| { - assert_eq!(routes.len(), 2); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); + parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -509,40 +306,56 @@ async fn service_with_consecutive_failure_accrual() { ]), ) .await; + parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_consecutive_failure_accrual() { + with_temp_ns(|client, ns| async move { + let egress = create_annotated_egress_network( + &client, + &ns, + "consecutive-accrual-egress", + BTreeMap::from([ + ( + "balancer.linkerd.io/failure-accrual".to_string(), + "consecutive".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), + "8".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), + "10s".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), + "10m".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), + "1.0".to_string(), + ), + ]), + ) + .await; + await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); + parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults() { +async fn service_with_consecutive_failure_accrual_defaults_no_config() { with_temp_ns(|client, ns| async move { // Create a service configured to do consecutive failure accrual, but // with no additional configuration - let svc = create_annotated_service( + let svc_no_config = create_annotated_service( &client, &ns, "default-accrual-svc", @@ -554,27 +367,22 @@ async fn service_with_consecutive_failure_accrual_defaults() { ) .await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be 
configured")); - }); + parent_with_consecutive_failure_accrual_defaults_no_config( + Resource::Service(svc_no_config), + &client, + &ns, + ) + .await; + }) + .await; +} +#[tokio::test(flavor = "current_thread")] +async fn service_with_consecutive_failure_accrual_defaults_max_fails() { + with_temp_ns(|client, ns| async move { // Create a service configured to do consecutive failure accrual with // max number of failures and with default backoff - let svc = create_annotated_service( + let svc_max_fails = create_annotated_service( &client, &ns, "no-backoff-svc", @@ -592,27 +400,22 @@ async fn service_with_consecutive_failure_accrual_defaults() { ) .await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Expect default backoff and overridden max_failures - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_default_accrual_backoff!(consecutive - .backoff - .as_ref() - .expect("backoff must be configured")); - }); + parent_with_consecutive_failure_accrual_defaults_max_fails( + Resource::Service(svc_max_fails), + &client, + &ns, + ) + .await; + }) + .await; +} +#[tokio::test(flavor = "current_thread")] +async fn service_with_consecutive_failure_accrual_defaults_jitter() { + with_temp_ns(|client, ns| async move { // Create a service configured to do consecutive failure accrual with // only the jitter ratio configured in the backoff - let svc = create_annotated_service( + let svc_jitter = create_annotated_service( &client, &ns, "only-jitter-svc", @@ -630,59 +433,118 @@ async fn service_with_consecutive_failure_accrual_defaults() { ) .await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); + parent_with_consecutive_failure_accrual_defaults_max_jitter( + Resource::Service(svc_jitter), + &client, + &ns, + ) + .await; + }) + .await; +} - // Expect defaults for everything except for the jitter ratio - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() { + with_temp_ns(|client, ns| async move { + // Create a egress network configured to do consecutive failure accrual, but + // with no additional configuration + let egress_no_config = create_annotated_egress_network( + &client, + &ns, + "default-accrual-egress", + BTreeMap::from([( + "balancer.linkerd.io/failure-accrual".to_string(), + "consecutive".to_string(), + )]), + ) + .await; + await_egress_net_status(&client, &ns, "default-accrual-egress").await; + + parent_with_consecutive_failure_accrual_defaults_no_config( + Resource::EgressNetwork(egress_no_config), + &client, + &ns, + ) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_default_failure_accrual() { 
+async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() { with_temp_ns(|client, ns| async move { - // Default config for Service, no failure accrual - let svc = create_service(&client, &ns, "default-failure-accrual", 80).await; + // Create a egress network configured to do consecutive failure accrual with + // max number of failures and with default backoff + let egress_max_fails = create_annotated_egress_network( + &client, + &ns, + "no-backoff-egress", + BTreeMap::from([ + ( + "balancer.linkerd.io/failure-accrual".to_string(), + "consecutive".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), + "8".to_string(), + ), + ]), + ) + .await; + await_egress_net_status(&client, &ns, "no-backoff-egress").await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); + parent_with_consecutive_failure_accrual_defaults_max_fails( + Resource::EgressNetwork(egress_max_fails), + &client, + &ns, + ) + .await; + }) + .await; +} - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ); - }); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() { + with_temp_ns(|client, ns| async move { + // Create an egress net configured to do consecutive failure accrual with + // only the jitter ratio configured in the backoff + let egress_jitter = create_annotated_egress_network( + &client, + &ns, + "only-jitter-egress", + BTreeMap::from([ + ( + "balancer.linkerd.io/failure-accrual".to_string(), + "consecutive".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), + "1.0".to_string(), + ), + ]), + ) + .await; + await_egress_net_status(&client, &ns, "only-jitter-egress").await; + + parent_with_consecutive_failure_accrual_defaults_max_jitter( + Resource::EgressNetwork(egress_jitter), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_default_failure_accrual() { + with_temp_ns(|client, ns| async move { + // Default config for Service, no failure accrual + let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await; // Create Service with consecutive failure accrual config for // max_failures but no mode - let svc = create_annotated_service( + let svc_max_fails = create_annotated_service( &client, &ns, "default-max-failure-svc", @@ -694,21 +556,45 @@ async fn service_with_default_failure_accrual() { ) .await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); + parent_with_default_failure_accrual( + Resource::Service(svc_default), + Resource::Service(svc_max_fails), + &client, + &ns, + ) + .await; + }) + .await; +} - // Expect failure accrual config to be default (no failure accrual) - detect_failure_accrual(&config, |accrual| { - assert!( - accrual.is_none(), - "consecutive failure accrual should not be configured for service" - ) - }); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_default_failure_accrual() { + with_temp_ns(|client, ns| async 
move { + // Default config for EgressNetwork, no failure accrual + let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; + await_egress_net_status(&client, &ns, "default-failure-accrual").await; + + // Create EgressNetwork with consecutive failure accrual config for + // max_failures but no mode + let egress_max_fails = create_annotated_egress_network( + &client, + &ns, + "default-max-failure-egress", + BTreeMap::from([( + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), + "8".to_string(), + )]), + ) + .await; + await_egress_net_status(&client, &ns, "default-max-failure-egress").await; + + parent_with_default_failure_accrual( + Resource::EgressNetwork(egress_default), + Resource::EgressNetwork(egress_max_fails), + &client, + &ns, + ) + .await; }) .await; } @@ -718,246 +604,88 @@ async fn opaque_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; + opaque_parent(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // Proxy protocol should be opaque. - match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; +#[tokio::test(flavor = "current_thread")] +async fn opaque_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; + opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn route_with_filters() { +async fn route_with_filters_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend = mk_service(&ns, "backend", 4191); - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
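The failure-accrual tests above vary only in which balancer.linkerd.io annotations they set. As a quick reference, a builder like the following (hypothetical, the tests inline the map) produces exactly the annotation set they hand to create_annotated_egress_network or create_annotated_service:

use std::collections::BTreeMap;

// Hypothetical convenience, not part of the patch: builds the
// consecutive-failure-accrual annotation map used by these tests.
fn consecutive_accrual_annotations(
    max_failures: Option<u32>,
    jitter_ratio: Option<&str>, // e.g. "1.0"; kept as a string to match the annotation value
) -> BTreeMap<String, String> {
    let mut annotations = BTreeMap::from([(
        "balancer.linkerd.io/failure-accrual".to_string(),
        "consecutive".to_string(),
    )]);
    if let Some(max) = max_failures {
        annotations.insert(
            "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(),
            max.to_string(),
        );
    }
    if let Some(ratio) = jitter_ratio {
        annotations.insert(
            "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(),
            ratio.to_string(),
        );
    }
    annotations
}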
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backends = [backend_name]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(Some(&backends), None, None) - .with_filters(Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); + route_with_filters( + Resource::Service(svc), + Resource::Service(backend), + &client, + &ns, + ) + .await; + }) + .await; +} - // There should be a route with filters. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { replace: Some(linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix("/path".to_string())) }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); +#[tokio::test(flavor = "current_thread")] +async fn route_with_filters_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + route_with_filters( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn backend_with_filters() { +async fn backend_with_filters_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend_svc = 
create_service(&client, &ns, "backend", 8888).await; + backend_with_filters( + Resource::Service(svc), + Resource::Service(backend_svc), + &client, + &ns, + ) + .await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backend_svc = create_service(&client, &ns, backend_name, 8888).await; - let backends = [backend_name]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_backends(Some(&backends), None, Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(&client, route.build()).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn backend_with_filters_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { replace: Some(linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix("/path".to_string())) }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); + backend_with_filters( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } @@ -966,7 +694,7 @@ async fn backend_with_filters() { async fn http_route_with_no_port() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let config_4191 = rx_4191 @@ -1030,7 +758,7 @@ async fn http_route_with_no_port() { async fn producer_route() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let producer_config = producer_rx @@ -1102,7 +830,7 @@ async fn pre_existing_producer_route() { // a produce route already exists. with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); // A route created in the same namespace as its parent service is called // a producer route. 
It should be returned in outbound policy requests @@ -1149,7 +877,7 @@ async fn pre_existing_producer_route() { async fn consumer_route() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let consumer_ns_name = format!("{}-consumer", ns); let consumer_ns = create_cluster_scoped( @@ -1245,57 +973,23 @@ async fn consumer_route() { } #[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts() { +async fn http_route_retries_and_timeouts_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn http_route_retries_and_timeouts_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); + http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -1309,53 +1003,28 @@ async fn service_retries_and_timeouts() { .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); svc.annotations_mut() .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = create(&client, svc).await; + let svc = Resource::Service(create(&client, svc).await); - let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. 
- ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_gateway_route_status(&client, &ns, "foo-route").await; + retries_and_timeouts(svc, &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_retries_and_timeouts() { + with_temp_ns(|client, ns| async move { + // Create a egress net + let mut egress = mk_egress_net(&ns, "my-egress"); + egress + .annotations_mut() + .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); + egress + .annotations_mut() + .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); + let egress = Resource::EgressNetwork(create(&client, egress).await); + await_egress_net_status(&client, &ns, "my-egress").await; - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - // Retry config inherited from the service. - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); - }); + retries_and_timeouts(egress, &client, &ns).await; }) .await; } @@ -1365,77 +1034,19 @@ async fn service_http_route_reattachment() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + http_route_reattachment(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut route = create(&client, mk_empty_http_route(&ns, "foo-route", &svc, 4191)).await; - await_gateway_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // The route should be attached. - detect_http_routes(&config, |routes| { - let route: &grpc::outbound::HttpRoute = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(&client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // The route should be unattached and the default route should be present. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = svc.name_unchecked(); - update(&client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_http_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // The route should be attached again. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_name_eq(route, "foo-route"); - }); + http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -1444,7 +1055,7 @@ async fn service_http_route_reattachment() { struct HttpRouteBuilder(k8s_gateway_api::HttpRoute); -fn mk_http_route(ns: &str, name: &str, svc: &k8s::Service, port: Option<u16>) -> HttpRouteBuilder { +fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option<u16>) -> HttpRouteBuilder { use k8s_gateway_api as api; HttpRouteBuilder(api::HttpRoute { @@ -1456,10 +1067,10 @@ fn mk_http_route(ns: &str, name: &str, svc: &k8s::Service, port: Option<u16>) -> spec: api::HttpRouteSpec { inner: api::CommonRouteSpec { parent_refs: Some(vec![api::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), + group: Some(parent.group()), + kind: Some(parent.kind()), + namespace: Some(parent.namespace()), + name: parent.name(), section_name: None, port, }]), @@ -1485,22 +1096,22 @@ fn mk_http_route(ns: &str, name: &str, svc: &k8s::Service, port: Option<u16>) -> impl HttpRouteBuilder { fn with_backends( self, - backends: Option<&[&str]>, + backends: Option<&[Resource]>, backends_ns: Option<String>, backend_filters: Option<Vec<k8s_gateway_api::HttpRouteFilter>>, ) -> Self { let mut route = self.0; - let backend_refs = backends.map(|names| { - names + let backend_refs = backends.map(|backends| { + backends .iter() - .map(|name| k8s_gateway_api::HttpBackendRef { + .map(|backend| k8s_gateway_api::HttpBackendRef { backend_ref: Some(k8s_gateway_api::BackendRef { weight: None, inner: k8s_gateway_api::BackendObjectReference { - name: name.to_string(), + name: backend.name(), port: Some(8888), - group: None, - kind: None, + group: Some(backend.group()), + kind: Some(backend.kind()), namespace: backends_ns.clone(), }, }), @@ -1539,7 +1150,7 @@ impl HttpRouteBuilder { fn mk_empty_http_route( ns: &str, name: &str, - svc: &k8s::Service, + parent: &Resource, port: u16, ) -> k8s_gateway_api::HttpRoute { use k8s_gateway_api as api; @@ -1552,10 +1163,10 @@ fn mk_empty_http_route( spec: api::HttpRouteSpec { inner: api::CommonRouteSpec { parent_refs: Some(vec![api::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), + group: Some(parent.group()), + kind: Some(parent.kind()), + namespace: Some(parent.namespace()), + name: parent.name(), section_name: None, port: Some(port), }]), @@ -1566,3 +1177,832 @@ fn mk_empty_http_route( status: None, } } + +async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx =
retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); +} + +async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with no rules. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + assert_eq!(route.rules.len(), 0); + }); +} + +async fn parent_with_http_routes_without_backends( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), + ) + .await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with the logical backend. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let backends = route_backends_first_available(route); + let backend = assert_singleton(backends); + assert_backend_matches_parent(backend, &parent, 4191); + }); +} + +async fn parent_with_http_routes_with_backend( + parent: Resource, + rule_backend: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. 
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [rule_backend.clone()]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( + Some(&backends), + None, + None, + ); + let _route = create(client, route.build()).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with a backend with no filters. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let backends = route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); + let filters = &backend.backend.as_ref().unwrap().filters; + assert_eq!(filters.len(), 0); + }); +} + +async fn parent_with_http_routes_with_invalid_backend( + parent: Resource, + backend: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [backend]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( + Some(&backends), + None, + None, + ); + let _route = create(client, route.build()).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with a backend. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let backends = route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_backend_has_failure_filter(backend); + }); +} + +async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + // Routes should be returned in sorted order by creation timestamp then + name. To ensure that this test isn't timing dependent, routes should + be created in alphabetical order. + let _a_route = create( + client, + mk_http_route(ns, "a-route", &parent, Some(4191)).build(), + ) + .await; + await_gateway_route_status(client, ns, "a-route").await; + + // First route update.
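Every helper in this file repeats the same four-line watch idiom seen immediately below. Condensed as a hypothetical macro (not part of the patch; futures::prelude::* is already imported here), it reads:

macro_rules! next_config {
    // Expands inside an async fn; unwraps the stream item and then the
    // watch result, with the same expect messages the helpers use.
    ($rx:expr, $msg:expr) => {
        $rx.next()
            .await
            .expect("watch must not fail")
            .expect($msg)
    };
}

// e.g.: let config = next_config!(rx, "watch must return an updated config");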
+ let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let _b_route = create( + client, + mk_http_route(ns, "b-route", &parent, Some(4191)).build(), + ) + .await; + await_gateway_route_status(client, ns, "b-route").await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + detect_http_routes(&config, |routes| { + let num_routes = match parent { + Resource::EgressNetwork(_) => 3, // three routes for egress net 2 configured + 1 default + Resource::Service(_) => 2, // two routes for service + }; + assert_eq!(routes.len(), num_routes); + assert_eq!(route_name(&routes[0]), "a-route"); + assert_eq!(route_name(&routes[1]), "b-route"); + }); +} + +async fn parent_with_consecutive_failure_accrual( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); +} + +async fn parent_with_consecutive_failure_accrual_defaults_no_config( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect default max_failures and default backoff + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_default_accrual_backoff!(consecutive + .backoff + .as_ref() + .expect("backoff must be configured")); + }); +} + +async fn parent_with_consecutive_failure_accrual_defaults_max_fails( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect default backoff and overridden max_failures + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(8, consecutive.max_failures); + assert_default_accrual_backoff!(consecutive + .backoff + .as_ref() + .expect("backoff must be configured")); + }); +} + +async fn parent_with_consecutive_failure_accrual_defaults_max_jitter( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect defaults for everything except 
for the jitter ratio + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); +} + +async fn parent_with_default_failure_accrual( + parent_default_config: Resource, + parent_max_failures: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ); + }); + + let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ) + }); +} + +async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Proxy protocol should be opaque. + match config.protocol.unwrap().kind.unwrap() { + grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} + _ => panic!("proxy protocol must be Opaque"), + }; +} + +async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // There should be a default route. 
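Read together, the failure-accrual helpers above pin down the controller's consecutive-failure-accrual defaults whenever an annotation is omitted. Spelled out as constants (names illustrative, values exactly what the assertions encode):

use std::time::Duration;

// Defaults asserted above when the corresponding annotation is absent:
const DEFAULT_MAX_FAILURES: u32 = 7;
const DEFAULT_MIN_BACKOFF: Duration = Duration::from_secs(1);
const DEFAULT_MAX_BACKOFF: Duration = Duration::from_secs(60);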
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [backend.clone()]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_backends(Some(&backends), None, None) + .with_filters(Some(vec![ + k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + k8s_gateway_api::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ])); + let _route = create(client, route.build()).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + // There should be a route with filters. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + grpc::outbound::http_route::Filter { + kind: Some( + grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( + grpc::http_route::RequestHeaderModifier { + add: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + grpc::outbound::http_route::Filter { + kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( + grpc::http_route::RequestRedirect { + scheme: Some(grpc::http_types::Scheme { + r#type: Some(grpc::http_types::scheme::Type::Registered( + grpc::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); +} + +async fn backend_with_filters( + parent: Resource, + backend_for_parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // There should be a default route. 
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [backend_for_parent.clone()]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( + Some(&backends), + None, + Some(vec![ + k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + k8s_gateway_api::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]), + ); + let _route = create(client, route.build()).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + // There should be a route without rule filters. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + assert_eq!(rule.filters.len(), 0); + let backends = route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); + let filters = &backend.backend.as_ref().unwrap().filters; + assert_eq!( + *filters, + vec![ + grpc::outbound::http_route::Filter { + kind: Some( + grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( + grpc::http_route::RequestHeaderModifier { + add: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + grpc::outbound::http_route::Filter { + kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( + grpc::http_route::RequestRedirect { + scheme: Some(grpc::http_types::Scheme { + r#type: Some(grpc::http_types::scheme::Type::Registered( + grpc::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); +} + +async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + ("retry.linkerd.io/http".to_string(), "5xx".to_string()), + ("timeout.linkerd.io/response".to_string(), "10s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + + await_gateway_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = 
rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); + }); +} + +async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + // Route annotations override the timeout config specified + // on the service. + ("timeout.linkerd.io/request".to_string(), "5s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + await_gateway_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + // Retry config inherited from the service. + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + // Service timeout config overridden by route timeout config. + assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); +} + +async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { + let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; + await_gateway_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached. + detect_http_routes(&config, |routes| { + let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); + assert_route_name_eq(route, "foo-route"); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = "other".to_string(); + update(client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be unattached and the default route should be present. 
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = parent.name(); + update(client, route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached again. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + assert_route_name_eq(route, "foo-route"); + }); +} diff --git a/policy-test/tests/outbound_api_grpc.rs b/policy-test/tests/outbound_api_grpc.rs index ee5e9dabc61e8..5f31f30d13047 100644 --- a/policy-test/tests/outbound_api_grpc.rs +++ b/policy-test/tests/outbound_api_grpc.rs @@ -1,60 +1,31 @@ use futures::prelude::*; use kube::ResourceExt; -use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ - assert_svc_meta, await_grpc_route_status, create, create_service, mk_service, outbound_api::*, - update, with_temp_ns, + assert_resource_meta, await_egress_net_status, await_grpc_route_status, create, + create_egress_network, create_service, mk_egress_net, mk_service, outbound_api::*, update, + with_temp_ns, Resource, }; use std::collections::BTreeMap; #[tokio::test(flavor = "current_thread")] -async fn grpc_route_retries_and_timeouts() { +async fn service_grpc_route_retries_and_timeouts() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + grpc_route_retries_and_timeouts(svc, &client, &ns).await; + }) + .await; +} - let _route = create( - &client, - mk_grpc_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_grpc_route_retries_and_timeouts() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = + Resource::EgressNetwork(create_egress_network(&client, &ns, "my-egress").await); + await_egress_net_status(&client, &ns, "my-egress").await; - let routes = grpc_routes(&config); - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - assert!(conditions.internal); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); + grpc_route_retries_and_timeouts(egress, &client, &ns).await; }) .await; } @@ -68,49 +39,28 @@ async fn service_retries_and_timeouts() { .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); svc.annotations_mut() .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = create(&client, svc).await; - - let _route = create( - &client, - mk_grpc_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. - ("timeout.linkerd.io/request".to_string(), "5s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); + let svc = Resource::Service(create(&client, svc).await); - let routes = grpc_routes(&config); - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - // Retry config inherited from the service. - assert!(conditions.internal); - let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); - // Service timeout config overridden by route timeout config. - assert_eq!(timeouts.response, None); - let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); - assert_eq!(request_timeout.seconds, 5); + parent_retries_and_timeouts(svc, &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_retries_and_timeouts() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let mut egress = mk_egress_net(&ns, "my-egress"); + egress + .annotations_mut() + .insert("retry.linkerd.io/grpc".to_string(), "internal".to_string()); + egress + .annotations_mut() + .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); + let egress = Resource::EgressNetwork(create(&client, egress).await); + await_egress_net_status(&client, &ns, "my-egress").await; + + parent_retries_and_timeouts(egress, &client, &ns).await; }) .await; } @@ -120,85 +70,19 @@ async fn service_grpc_route_reattachment() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + grpc_route_reattachment(Resource::Service(svc), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_grpc_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - let mut route = create( - &client, - mk_grpc_route(&ns, "foo-route", &svc, Some(4191)).build(), - ) - .await; - await_grpc_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - { - // The route should be attached.
- let routes = grpc_routes(&config); - let route = assert_singleton(routes); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); - } - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = "other".to_string(); - update(&client, route.clone()).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // The grpc route should be unattached and the default (http) route - // should be present. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - route - .spec - .inner - .parent_refs - .as_mut() - .unwrap() - .first_mut() - .unwrap() - .name = svc.name_unchecked(); - update(&client, route).await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // The route should be attached again. - { - // The route should be attached. - let routes = grpc_routes(&config); - let route = assert_singleton(routes); - assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); - } + grpc_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -207,7 +91,7 @@ async fn service_grpc_route_reattachment() { struct GrpcRouteBuilder(k8s_gateway_api::GrpcRoute); -fn mk_grpc_route(ns: &str, name: &str, svc: &k8s::Service, port: Option) -> GrpcRouteBuilder { +fn mk_grpc_route(ns: &str, name: &str, parent: &Resource, port: Option) -> GrpcRouteBuilder { GrpcRouteBuilder(k8s_gateway_api::GrpcRoute { metadata: kube::api::ObjectMeta { namespace: Some(ns.to_string()), @@ -217,10 +101,10 @@ fn mk_grpc_route(ns: &str, name: &str, svc: &k8s::Service, port: Option) -> spec: k8s_gateway_api::GrpcRouteSpec { inner: k8s_gateway_api::CommonRouteSpec { parent_refs: Some(vec![k8s_gateway_api::ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), - namespace: svc.namespace(), - name: svc.name_unchecked(), + group: Some(parent.group()), + kind: Some(parent.kind()), + namespace: Some(parent.namespace()), + name: parent.name(), section_name: None, port, }]), @@ -253,3 +137,175 @@ impl GrpcRouteBuilder { self.0 } } + +async fn grpc_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { + let mut route = create( + client, + mk_grpc_route(ns, "foo-route", &parent, Some(4191)).build(), + ) + .await; + await_grpc_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + { + // The route should be attached. 
+ let routes = grpc_routes(&config); + let route = assert_route_attached(routes, &parent); + assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); + } + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = "other".to_string(); + update(client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The grpc route should be unattached and the default (http) route + // should be present. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = parent.name(); + update(client, route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached again. + { + // The route should be attached. + let routes = grpc_routes(&config); + let route = assert_route_attached(routes, &parent); + assert_name_eq(route.metadata.as_ref().unwrap(), "foo-route"); + } +} + +async fn grpc_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_grpc_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + ("retry.linkerd.io/grpc".to_string(), "internal".to_string()), + ("timeout.linkerd.io/response".to_string(), "10s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + await_grpc_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + let routes = grpc_routes(&config); + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + assert!(conditions.internal); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); +} + +async fn parent_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_grpc_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + // Route annotations override the timeout config specified + // on the service. 
+ ("timeout.linkerd.io/request".to_string(), "5s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + await_grpc_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + let routes = grpc_routes(&config); + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + // Retry config inherited from the service. + assert!(conditions.internal); + let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + // Parent timeout config overridden by route timeout config. + assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); +} diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 785788ec744dc..93e533b482621 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -4,9 +4,11 @@ use futures::prelude::*; use kube::ResourceExt; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ - assert_default_accrual_backoff, assert_svc_meta, await_route_status, create, - create_annotated_service, create_cluster_scoped, create_opaque_service, create_service, - delete_cluster_scoped, grpc, mk_service, outbound_api::*, update, with_temp_ns, + assert_default_accrual_backoff, assert_resource_meta, await_egress_net_status, + await_route_status, create, create_annotated_egress_network, create_annotated_service, + create_cluster_scoped, create_egress_network, create_opaque_egress_network, + create_opaque_service, create_service, delete_cluster_scoped, grpc, mk_egress_net, mk_service, + outbound_api::*, update, with_temp_ns, Resource, }; use maplit::{btreemap, convert_args}; @@ -37,22 +39,19 @@ async fn service_with_no_http_routes() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_no_http_routes(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_no_http_routes() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); + parent_with_no_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -62,40 +61,19 @@ async fn service_with_http_route_without_rules() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_http_route_without_rules(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let _route = create(&client, mk_empty_http_route(&ns, "foo-route", &svc, 4191)).await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_route_without_rules() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route with no rules. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_eq!(route.rules.len(), 0); - }); + parent_with_http_route_without_rules(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -105,101 +83,55 @@ async fn service_with_http_routes_without_backends() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_http_routes_without_backends(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_routes_without_backends() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); + parent_with_http_routes_without_backends(Resource::EgressNetwork(egress), &client, &ns) + .await; + }) + .await; +} - let _route = create( +#[tokio::test(flavor = "current_thread")] +async fn service_with_http_routes_with_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend_svc = create_service(&client, &ns, "backend", 8888).await; + parent_with_http_routes_with_backend( + Resource::Service(svc), + Resource::Service(backend_svc), &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)).build(), + &ns, ) .await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a route with the logical backend. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_first_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend, &svc, 4191); - }); }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_http_routes_with_backend() { +async fn egress_net_with_http_routes_with_backend() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backend_svc = create_service(&client, &ns, backend_name, 8888).await; - let backends = [backend_name]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(&client, route.build()).await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route with a backend with no filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!(filters.len(), 0); - }); + parent_with_http_routes_with_backend( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } @@ -208,7 +140,7 @@ async fn service_with_http_routes_with_backend() { async fn service_with_http_routes_with_cross_namespace_backend() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let config = rx @@ -218,7 +150,7 @@ async fn service_with_http_routes_with_cross_namespace_backend() { .expect("watch must return an initial config"); tracing::trace!(?config); - assert_svc_meta(&config.metadata, &svc, 4191); + assert_resource_meta(&config.metadata, &svc, 4191); // There should be a default route. detect_http_routes(&config, |routes| { @@ -242,8 +174,9 @@ async fn service_with_http_routes_with_cross_namespace_backend() { ) .await; let backend_name = "backend"; - let backend_svc = create_service(&client, &backend_ns_name, backend_name, 8888).await; - let backends = [backend_name]; + let backend_svc = + Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); + let backends = [backend_svc.clone()]; let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( Some(&backends), Some(backend_ns_name), @@ -259,14 +192,14 @@ async fn service_with_http_routes_with_cross_namespace_backend() { .expect("watch must return an updated config"); tracing::trace!(?config); - assert_svc_meta(&config.metadata, &svc, 4191); + assert_resource_meta(&config.metadata, &svc, 4191); // There should be a route with a backend with no filters. detect_http_routes(&config, |routes| { let route = assert_singleton(routes); let backends = route_backends_random_available(route); let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); let filters = &backend.backend.as_ref().unwrap().filters; assert_eq!(filters.len(), 0); }); @@ -282,48 +215,36 @@ async fn service_with_http_routes_with_invalid_backend() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend = mk_service(&ns, "invalid", 4191); - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backends = ["invalid-backend"]; - let route = mk_http_route(&ns, "foo-route", &svc, Some(4191)).with_backends( - Some(&backends), - None, - None, - ); - let _route = create(&client, route.build()).await; - await_route_status(&client, &ns, "foo-route").await; + parent_with_http_routes_with_invalid_backend( + Resource::Service(svc), + Resource::Service(backend), + &client, + &ns, + ) + .await; + }) + .await; +} - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); +// TODO: Test fails until handling of invalid backends is implemented. +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_http_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - assert_svc_meta(&config.metadata, &svc, 4191); + let backend = mk_egress_net(&ns, "invalid"); - // There should be a route with a backend. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_has_failure_filter(backend); - }); + parent_with_http_routes_with_invalid_backend( + Resource::EgressNetwork(egress), + Resource::EgressNetwork(backend), + &client, + &ns, + ) + .await; }) .await; } @@ -335,66 +256,21 @@ async fn service_with_multiple_http_routes() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_multiple_http_routes(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - // Routes should be returned in sorted order by creation timestamp then - // name. To ensure that this test isn't timing dependant, routes should - // be created in alphabetical order. - let _a_route = create( - &client, - mk_http_route(&ns, "a-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "a-route").await; - - // First route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - let _b_route = create( - &client, - mk_http_route(&ns, "b-route", &svc, Some(4191)).build(), - ) - .await; - await_route_status(&client, &ns, "b-route").await; - - // Second route update. - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +// TODO: Investigate why the policy controller is only returning one route in this +// case instead of two. 
+#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_multiple_http_routes() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be 2 routes, returned in order. - detect_http_routes(&config, |routes| { - assert_eq!(routes.len(), 2); - assert_eq!(route_name(&routes[0]), "a-route"); - assert_eq!(route_name(&routes[1]), "b-route"); - }); + parent_with_multiple_http_routes(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -431,42 +307,56 @@ async fn service_with_consecutive_failure_accrual() { ]), ) .await; + parent_with_consecutive_failure_accrual(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_consecutive_failure_accrual() { + with_temp_ns(|client, ns| async move { + let egress = create_annotated_egress_network( + &client, + &ns, + "consecutive-accrual-egress", + BTreeMap::from([ + ( + "balancer.linkerd.io/failure-accrual".to_string(), + "consecutive".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), + "8".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-min-penalty".to_string(), + "10s".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-max-penalty".to_string(), + "10m".to_string(), + ), + ( + "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(), + "1.0".to_string(), + ), + ]), + ) + .await; + await_egress_net_status(&client, &ns, "consecutive-accrual-egress").await; - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(8, consecutive.max_failures); - assert_eq!( - &grpc::outbound::ExponentialBackoff { - min_backoff: Some(Duration::from_secs(10).try_into().unwrap()), - max_backoff: Some(Duration::from_secs(600).try_into().unwrap()), - jitter_ratio: 1.0_f32, - }, - consecutive - .backoff - .as_ref() - .expect("backoff must be configured") - ); - }); + parent_with_consecutive_failure_accrual(Resource::EgressNetwork(egress), &client, &ns) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn service_with_consecutive_failure_accrual_defaults() { +async fn service_with_consecutive_failure_accrual_defaults_no_config() { with_temp_ns(|client, ns| async move { // Create a service configured to do consecutive failure accrual, but // with no additional configuration - let svc = create_annotated_service( + let svc_no_config = create_annotated_service( &client, &ns, "default-accrual-svc", @@ -478,29 +368,22 @@ async fn service_with_consecutive_failure_accrual_defaults() { ) .await; - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // Expect default max_failures and default backoff - detect_failure_accrual(&config, |accrual| { - let consecutive = failure_accrual_consecutive(accrual); - assert_eq!(7, consecutive.max_failures); - 
assert_default_accrual_backoff!(consecutive
- .backoff
- .as_ref()
- .expect("backoff must be configured"));
- });
+ parent_with_consecutive_failure_accrual_defaults_no_config(
+ Resource::Service(svc_no_config),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}

+#[tokio::test(flavor = "current_thread")]
+async fn service_with_consecutive_failure_accrual_defaults_max_fails() {
+ with_temp_ns(|client, ns| async move {
 // Create a service configured to do consecutive failure accrual with
 // max number of failures and with default backoff
- let svc = create_annotated_service(
+ let svc_max_fails = create_annotated_service(
 &client,
 &ns,
 "no-backoff-svc",
@@ -518,27 +401,22 @@
 )
 .await;

- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);

- // Expect default backoff and overridden max_failures
- detect_failure_accrual(&config, |accrual| {
- let consecutive = failure_accrual_consecutive(accrual);
- assert_eq!(8, consecutive.max_failures);
- assert_default_accrual_backoff!(consecutive
- .backoff
- .as_ref()
- .expect("backoff must be configured"));
- });
+ parent_with_consecutive_failure_accrual_defaults_max_fails(
+ Resource::Service(svc_max_fails),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}

+#[tokio::test(flavor = "current_thread")]
+async fn service_with_consecutive_failure_accrual_defaults_jitter() {
+ with_temp_ns(|client, ns| async move {
 // Create a service configured to do consecutive failure accrual with
 // only the jitter ratio configured in the backoff
- let svc = create_annotated_service(
+ let svc_jitter = create_annotated_service(
 &client,
 &ns,
 "only-jitter-svc",
@@ -556,59 +434,118 @@
 )
 .await;

- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);
+ parent_with_consecutive_failure_accrual_defaults_max_jitter(
+ Resource::Service(svc_jitter),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}

- // Expect defaults for everything except for the jitter ratio
- detect_failure_accrual(&config, |accrual| {
- let consecutive = failure_accrual_consecutive(accrual);
- assert_eq!(7, consecutive.max_failures);
- assert_eq!(
- &grpc::outbound::ExponentialBackoff {
- min_backoff: Some(Duration::from_secs(1).try_into().unwrap()),
- max_backoff: Some(Duration::from_secs(60).try_into().unwrap()),
- jitter_ratio: 1.0_f32,
- },
- consecutive
- .backoff
- .as_ref()
- .expect("backoff must be configured")
- );
- });
+#[tokio::test(flavor = "current_thread")]
+async fn egress_net_with_consecutive_failure_accrual_defaults_no_config() {
+ with_temp_ns(|client, ns| async move {
+ // Create an egress network configured to do consecutive failure accrual, but
+ // with no additional configuration
+ let egress_no_config = create_annotated_egress_network(
+ &client,
+ &ns,
+ "default-accrual-egress",
+ BTreeMap::from([(
+ "balancer.linkerd.io/failure-accrual".to_string(),
+ "consecutive".to_string(),
+ )]),
+ )
+ .await;
+ await_egress_net_status(&client, &ns, "default-accrual-egress").await;
+
+ parent_with_consecutive_failure_accrual_defaults_no_config(
+ Resource::EgressNetwork(egress_no_config),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}
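+
+// Note: the parent_with_consecutive_failure_accrual_defaults_* helpers assert
+// the controller's defaults (max_failures = 7 and the default backoff checked
+// by assert_default_accrual_backoff!).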
#[tokio::test(flavor = "current_thread")]
-async fn service_with_default_failure_accrual() {
+async fn egress_net_with_consecutive_failure_accrual_defaults_max_fails() {
 with_temp_ns(|client, ns| async move {
- // Default config for Service, no failure accrual
- let svc = create_service(&client, &ns, "default-failure-accrual", 80).await;
+ // Create an egress network configured to do consecutive failure accrual with
+ // max number of failures and with default backoff
+ let egress_max_fails = create_annotated_egress_network(
+ &client,
+ &ns,
+ "no-backoff-egress",
+ BTreeMap::from([
+ (
+ "balancer.linkerd.io/failure-accrual".to_string(),
+ "consecutive".to_string(),
+ ),
+ (
+ "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(),
+ "8".to_string(),
+ ),
+ ]),
+ )
+ .await;
+ await_egress_net_status(&client, &ns, "no-backoff-egress").await;

- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);
+ parent_with_consecutive_failure_accrual_defaults_max_fails(
+ Resource::EgressNetwork(egress_max_fails),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}

- // Expect failure accrual config to be default (no failure accrual)
- detect_failure_accrual(&config, |accrual| {
- assert!(
- accrual.is_none(),
- "consecutive failure accrual should not be configured for service"
- );
- });
+#[tokio::test(flavor = "current_thread")]
+async fn egress_net_with_consecutive_failure_accrual_defaults_jitter() {
+ with_temp_ns(|client, ns| async move {
+ // Create an egress net configured to do consecutive failure accrual with
+ // only the jitter ratio configured in the backoff
+ let egress_jitter = create_annotated_egress_network(
+ &client,
+ &ns,
+ "only-jitter-egress",
+ BTreeMap::from([
+ (
+ "balancer.linkerd.io/failure-accrual".to_string(),
+ "consecutive".to_string(),
+ ),
+ (
+ "balancer.linkerd.io/failure-accrual-consecutive-jitter-ratio".to_string(),
+ "1.0".to_string(),
+ ),
+ ]),
+ )
+ .await;
+ await_egress_net_status(&client, &ns, "only-jitter-egress").await;
+
+ parent_with_consecutive_failure_accrual_defaults_max_jitter(
+ Resource::EgressNetwork(egress_jitter),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}
+
+#[tokio::test(flavor = "current_thread")]
+async fn service_with_default_failure_accrual() {
+ with_temp_ns(|client, ns| async move {
+ // Default config for Service, no failure accrual
+ let svc_default = create_service(&client, &ns, "default-failure-accrual", 80).await;

 // Create Service with consecutive failure accrual config for
 // max_failures but no mode
- let svc = create_annotated_service(
+ let svc_max_fails = create_annotated_service(
 &client,
 &ns,
 "default-max-failure-svc",
@@ -620,21 +557,45 @@
 )
 .await;

- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);
+ parent_with_default_failure_accrual(
+ Resource::Service(svc_default),
+ Resource::Service(svc_max_fails),
+ &client,
+ &ns,
+ )
+ .await;
+ })
+ .await;
+}

- // Expect failure accrual config to be default (no failure accrual)
- detect_failure_accrual(&config, |accrual| {
- assert!(
- accrual.is_none(),
- "consecutive failure accrual should not be configured for service"
- )
- });
+#[tokio::test(flavor = 
"current_thread")] +async fn egress_net_with_default_failure_accrual() { + with_temp_ns(|client, ns| async move { + // Default config for EgressNetwork, no failure accrual + let egress_default = create_egress_network(&client, &ns, "default-failure-accrual").await; + await_egress_net_status(&client, &ns, "default-failure-accrual").await; + + // Create EgressNetwork with consecutive failure accrual config for + // max_failures but no mode + let egress_max_fails = create_annotated_egress_network( + &client, + &ns, + "default-max-failure-egress", + BTreeMap::from([( + "balancer.linkerd.io/failure-accrual-consecutive-max-failures".to_string(), + "8".to_string(), + )]), + ) + .await; + await_egress_net_status(&client, &ns, "default-max-failure-egress").await; + + parent_with_default_failure_accrual( + Resource::EgressNetwork(egress_default), + Resource::EgressNetwork(egress_max_fails), + &client, + &ns, + ) + .await; }) .await; } @@ -644,264 +605,88 @@ async fn opaque_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_opaque_service(&client, &ns, "my-svc", 4191).await; + opaque_parent(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // Proxy protocol should be opaque. - match config.protocol.unwrap().kind.unwrap() { - grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} - _ => panic!("proxy protocol must be Opaque"), - }; +#[tokio::test(flavor = "current_thread")] +async fn opaque_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_opaque_egress_network(&client, &ns, "my-svc", 4191).await; + opaque_parent(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn route_rule_with_filters() { +async fn route_with_filters_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend = mk_service(&ns, "backend", 4191); - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backends = [backend_name]; - let route = mk_http_route( - &ns, - "foo-route", - &svc, - Some(4191), - ).with_backends(Some(&backends), None, None).with_filters(Some(vec![ - k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s::policy::httproute::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create( + route_with_filters( + Resource::Service(svc), + Resource::Service(backend), &client, - route.build(), + &ns, ) .await; - await_route_status(&client, &ns, "foo-route").await; + }) + .await; +} - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn route_with_filters_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route with filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let filters = &rule.filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { replace: Some(linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix("/path".to_string())) }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); + route_with_filters( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } #[tokio::test(flavor = "current_thread")] -async fn backend_with_filters() { +async fn backend_with_filters_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); - - assert_svc_meta(&config.metadata, &svc, 4191); - - // There should be a default route. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - assert_route_is_default(route, &svc, 4191); - }); - - let backend_name = "backend"; - let backend_svc = create_service(&client, &ns, backend_name, 8888).await; - let backends = [backend_name]; - let route = mk_http_route( + let backend_svc = create_service(&client, &ns, "backend", 8888).await; + backend_with_filters( + Resource::Service(svc), + Resource::Service(backend_svc), + &client, &ns, - "foo-route", - &svc, - Some(4191) - ).with_backends(Some(&backends), None, Some(vec![ - k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { - request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { - set: Some(vec![k8s_gateway_api::HttpHeader { - name: "set".to_string(), - value: "set-value".to_string(), - }]), - add: Some(vec![k8s_gateway_api::HttpHeader { - name: "add".to_string(), - value: "add-value".to_string(), - }]), - remove: Some(vec!["remove".to_string()]), - }, - }, - k8s_gateway_api::HttpRouteFilter::RequestRedirect { - request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { - scheme: Some("http".to_string()), - hostname: Some("host".to_string()), - path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { - replace_prefix_match: "/path".to_string(), - }), - port: Some(5555), - status_code: Some(302), - }, - }, - ])); - let _route = create(&client, route.build()) + ) .await; - await_route_status(&client, &ns, "foo-route").await; - - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an updated config"); - tracing::trace!(?config); + }) + .await; +} - assert_svc_meta(&config.metadata, &svc, 4191); +#[tokio::test(flavor = "current_thread")] +async fn backend_with_filters_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - // There should be a route without rule filters. 
- detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - assert_eq!(rule.filters.len(), 0); - let backends = route_backends_random_available(route); - let backend = assert_singleton(backends); - assert_backend_matches_service(backend.backend.as_ref().unwrap(), &backend_svc, 8888); - let filters = &backend.backend.as_ref().unwrap().filters; - assert_eq!( - *filters, - vec![ - grpc::outbound::http_route::Filter { - kind: Some( - grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( - grpc::http_route::RequestHeaderModifier { - add: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "add".to_string(), - value: "add-value".into(), - }] - }), - set: Some(grpc::http_types::Headers { - headers: vec![grpc::http_types::headers::Header { - name: "set".to_string(), - value: "set-value".into(), - }] - }), - remove: vec!["remove".to_string()], - } - ) - ) - }, - grpc::outbound::http_route::Filter { - kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( - grpc::http_route::RequestRedirect { - scheme: Some(grpc::http_types::Scheme { - r#type: Some(grpc::http_types::scheme::Type::Registered( - grpc::http_types::scheme::Registered::Http.into(), - )) - }), - host: "host".to_string(), - path: Some(linkerd2_proxy_api::http_route::PathModifier { replace: Some(linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix("/path".to_string())) }), - port: 5555, - status: 302, - } - )) - } - ] - ); - }); + backend_with_filters( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; }) .await; } @@ -910,7 +695,7 @@ async fn backend_with_filters() { async fn http_route_with_no_port() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let config_4191 = rx_4191 @@ -920,8 +705,6 @@ async fn http_route_with_no_port() { .expect("watch must return an initial config"); tracing::trace!(?config_4191); - assert_svc_meta(&config_4191.metadata, &svc, 4191); - let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; let config_9999 = rx_9999 .next() @@ -930,8 +713,6 @@ async fn http_route_with_no_port() { .expect("watch must return an initial config"); tracing::trace!(?config_9999); - assert_svc_meta(&config_9999.metadata, &svc, 9999); - // There should be a default route. detect_http_routes(&config_4191, |routes| { let route = assert_singleton(routes); @@ -978,7 +759,7 @@ async fn http_route_with_no_port() { async fn producer_route() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; let producer_config = producer_rx @@ -1050,7 +831,7 @@ async fn pre_existing_producer_route() { // a produce route already exists. with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); // A route created in the same namespace as its parent service is called // a producer route. 
It should be returned in outbound policy requests @@ -1097,7 +878,7 @@ async fn pre_existing_producer_route() { async fn consumer_route() { with_temp_ns(|client, ns| async move { // Create a service - let svc = create_service(&client, &ns, "my-svc", 4191).await; + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); let consumer_ns_name = format!("{}-consumer", ns); let consumer_ns = create_cluster_scoped( @@ -1177,6 +958,7 @@ async fn consumer_route() { .expect("watch must not fail") .expect("watch must return an initial config"); tracing::trace!(?consumer_config); + detect_http_routes(&consumer_config, |routes| { let route = assert_singleton(routes); assert_route_name_eq(route, "foo-route"); @@ -1192,57 +974,23 @@ async fn consumer_route() { } #[tokio::test(flavor = "current_thread")] -async fn http_route_retries_and_timeouts() { +async fn http_route_retries_and_timeouts_service() { with_temp_ns(|client, ns| async move { // Create a service let svc = create_service(&client, &ns, "my-svc", 4191).await; + http_route_retries_and_timeouts(Resource::Service(svc), &client, &ns).await; + }) + .await; +} - let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - ("retry.linkerd.io/http".to_string(), "5xx".to_string()), - ("timeout.linkerd.io/response".to_string(), "10s".to_string()), - ] - .into_iter() - .collect(), - ) - .build(), - ) - .await; - await_route_status(&client, &ns, "foo-route").await; - - let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; - let config = rx - .next() - .await - .expect("watch must not fail") - .expect("watch must return an initial config"); - tracing::trace!(?config); +#[tokio::test(flavor = "current_thread")] +async fn http_route_retries_and_timeouts_egress_net() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; - detect_http_routes(&config, |routes| { - let route = assert_singleton(routes); - let rule = assert_singleton(&route.rules); - let conditions = rule - .retry - .as_ref() - .expect("retry config expected") - .conditions - .as_ref() - .expect("retry conditions expected"); - let status_range = assert_singleton(&conditions.status_ranges); - assert_eq!(status_range.start, 500); - assert_eq!(status_range.end, 599); - let timeout = rule - .timeouts - .as_ref() - .expect("timeouts expected") - .response - .as_ref() - .expect("response timeout expected"); - assert_eq!(timeout.seconds, 10); - }); + http_route_retries_and_timeouts(Resource::EgressNetwork(egress), &client, &ns).await; }) .await; } @@ -1256,53 +1004,28 @@ async fn service_retries_and_timeouts() { .insert("retry.linkerd.io/http".to_string(), "5xx".to_string()); svc.annotations_mut() .insert("timeout.linkerd.io/response".to_string(), "10s".to_string()); - let svc = create(&client, svc).await; + let svc = Resource::Service(create(&client, svc).await); - let _route = create( - &client, - mk_http_route(&ns, "foo-route", &svc, Some(4191)) - .with_annotations( - vec![ - // Route annotations override the timeout config specified - // on the service. 
- ("timeout.linkerd.io/request".to_string(), "5s".to_string()),
- ]
- .into_iter()
- .collect(),
- )
- .build(),
- )
- .await;
- await_route_status(&client, &ns, "foo-route").await;
+ retries_and_timeouts(svc, &client, &ns).await;
+ })
+ .await;
+}

- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);
+#[tokio::test(flavor = "current_thread")]
+async fn egress_net_retries_and_timeouts() {
+ with_temp_ns(|client, ns| async move {
+ // Create an egress net
+ let mut egress = mk_egress_net(&ns, "my-egress");
+ egress
+ .annotations_mut()
+ .insert("retry.linkerd.io/http".to_string(), "5xx".to_string());
+ egress
+ .annotations_mut()
+ .insert("timeout.linkerd.io/response".to_string(), "10s".to_string());
+ let egress = Resource::EgressNetwork(create(&client, egress).await);
+ await_egress_net_status(&client, &ns, "my-egress").await;

- detect_http_routes(&config, |routes| {
- let route = assert_singleton(routes);
- let rule = assert_singleton(&route.rules);
- let conditions = rule
- .retry
- .as_ref()
- .expect("retry config expected")
- .conditions
- .as_ref()
- .expect("retry conditions expected");
- let status_range = assert_singleton(&conditions.status_ranges);
- // Retry config inherited from the service.
- assert_eq!(status_range.start, 500);
- assert_eq!(status_range.end, 599);
- let timeouts = rule.timeouts.as_ref().expect("timeouts expected");
- // Service timeout config overridden by route timeout config.
- assert_eq!(timeouts.response, None);
- let request_timeout = timeouts.request.as_ref().expect("request timeout expected");
- assert_eq!(request_timeout.seconds, 5);
- });
+ retries_and_timeouts(egress, &client, &ns).await;
 })
 .await;
}
@@ -1312,77 +1035,19 @@ async fn service_http_route_reattachment() {
 with_temp_ns(|client, ns| async move {
 // Create a service
 let svc = create_service(&client, &ns, "my-svc", 4191).await;
+ http_route_reattachment(Resource::Service(svc), &client, &ns).await;
+ })
+ .await;
+}

- let mut route = create(&client, mk_empty_http_route(&ns, "foo-route", &svc, 4191)).await;
- await_route_status(&client, &ns, "foo-route").await;
-
- let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await;
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an initial config");
- tracing::trace!(?config);
-
- assert_svc_meta(&config.metadata, &svc, 4191);
-
- // The route should be attached.
- detect_http_routes(&config, |routes| {
- let route: &grpc::outbound::HttpRoute = assert_singleton(routes);
- assert_route_name_eq(route, "foo-route");
- });
-
- route
- .spec
- .inner
- .parent_refs
- .as_mut()
- .unwrap()
- .first_mut()
- .unwrap()
- .name = "other".to_string();
- update(&client, route.clone()).await;
-
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an updated config");
- tracing::trace!(?config);
-
- assert_svc_meta(&config.metadata, &svc, 4191);
-
- // The route should be unattached and the default route should be present.
- detect_http_routes(&config, |routes| {
- let route = assert_singleton(routes);
- assert_route_is_default(route, &svc, 4191);
- });
-
- route
- .spec
- .inner
- .parent_refs
- .as_mut()
- .unwrap()
- .first_mut()
- .unwrap()
- .name = svc.name_unchecked();
- update(&client, route).await;
-
- let config = rx
- .next()
- .await
- .expect("watch must not fail")
- .expect("watch must return an updated config");
- tracing::trace!(?config);
-
- assert_svc_meta(&config.metadata, &svc, 4191);
+#[tokio::test(flavor = "current_thread")]
+async fn egress_net_http_route_reattachment() {
+ with_temp_ns(|client, ns| async move {
+ // Create an egress net
+ let egress = create_egress_network(&client, &ns, "my-egress").await;
+ await_egress_net_status(&client, &ns, "my-egress").await;

- // The route should be attached again.
- detect_http_routes(&config, |routes| {
- let route = assert_singleton(routes);
- assert_route_name_eq(route, "foo-route");
- });
+ http_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await;
 })
 .await;
}
@@ -1391,7 +1056,7 @@

 struct HttpRouteBuilder(k8s::policy::HttpRoute);

-fn mk_http_route(ns: &str, name: &str, svc: &k8s::Service, port: Option<u16>) -> HttpRouteBuilder {
+fn mk_http_route(ns: &str, name: &str, parent: &Resource, port: Option<u16>) -> HttpRouteBuilder {
 use k8s::policy::httproute as api;

 HttpRouteBuilder(api::HttpRoute {
@@ -1403,10 +1068,10 @@
 spec: api::HttpRouteSpec {
 inner: api::CommonRouteSpec {
 parent_refs: Some(vec![api::ParentReference {
- group: Some("core".to_string()),
- kind: Some("Service".to_string()),
- namespace: svc.namespace(),
- name: svc.name_unchecked(),
+ group: Some(parent.group()),
+ kind: Some(parent.kind()),
+ namespace: Some(parent.namespace()),
+ name: parent.name(),
 section_name: None,
 port,
 }]),
@@ -1433,22 +1098,22 @@
 impl HttpRouteBuilder {
 fn with_backends(
 self,
- backends: Option<&[&str]>,
+ backends: Option<&[Resource]>,
 backends_ns: Option<String>,
 backend_filters: Option<Vec<k8s_gateway_api::HttpRouteFilter>>,
 ) -> Self {
 let mut route = self.0;
- let backend_refs = backends.map(|names| {
- names
+ let backend_refs = backends.map(|backends| {
+ backends
 .iter()
- .map(|name| k8s::policy::httproute::HttpBackendRef {
+ .map(|backend| k8s::policy::httproute::HttpBackendRef {
 backend_ref: Some(k8s_gateway_api::BackendRef {
 weight: None,
 inner: k8s_gateway_api::BackendObjectReference {
- name: name.to_string(),
+ name: backend.name(),
 port: Some(8888),
- group: None,
- kind: None,
+ group: Some(backend.group()),
+ kind: Some(backend.kind()),
 namespace: backends_ns.clone(),
 },
 }),
@@ -1487,7 +1152,7 @@
 fn mk_empty_http_route(
 ns: &str,
 name: &str,
- svc: &k8s::Service,
+ parent: &Resource,
 port: u16,
) -> k8s::policy::HttpRoute {
 use k8s::policy::httproute as api;
@@ -1500,10 +1165,10 @@
 spec: api::HttpRouteSpec {
 inner: api::CommonRouteSpec {
 parent_refs: Some(vec![api::ParentReference {
- group: Some("core".to_string()),
- kind: Some("Service".to_string()),
- namespace: svc.namespace(),
- name: svc.name_unchecked(),
+ group: Some(parent.group()),
+ kind: Some(parent.kind()),
+ namespace: Some(parent.namespace()),
+ name: parent.name(),
 section_name: None,
 port: Some(port),
 }]),
@@ -1514,3 +1179,832 @@
 status: None,
 }
}
+
+async fn parent_with_no_http_routes(parent: Resource, client: &kube::Client, ns: 
&str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); +} + +async fn parent_with_http_route_without_rules(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let _route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; + await_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with no rules. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + assert_eq!(route.rules.len(), 0); + }); +} + +async fn parent_with_http_routes_without_backends( + parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)).build(), + ) + .await; + await_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a route with the logical backend. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let backends = route_backends_first_available(route); + let backend = assert_singleton(backends); + assert_backend_matches_parent(backend, &parent, 4191); + }); +} + +async fn parent_with_http_routes_with_backend( + parent: Resource, + rule_backend: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // There should be a default route. 
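+    // (No HTTPRoutes are attached yet, so the policy controller is expected
+    // to synthesize a default route for the parent.)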
+ detect_http_routes(&config, |routes| {
+ let route = assert_singleton(routes);
+ assert_route_is_default(route, &parent, 4191);
+ });
+
+ let backends = [rule_backend.clone()];
+ let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends(
+ Some(&backends),
+ None,
+ None,
+ );
+ let _route = create(client, route.build()).await;
+ await_route_status(client, ns, "foo-route").await;
+
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an updated config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ // There should be a route with a backend with no filters.
+ detect_http_routes(&config, |routes| {
+ let route = assert_route_attached(routes, &parent);
+ let backends = route_backends_random_available(route);
+ let backend = assert_singleton(backends);
+ assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888);
+ let filters = &backend.backend.as_ref().unwrap().filters;
+ assert_eq!(filters.len(), 0);
+ });
+}
+
+async fn parent_with_http_routes_with_invalid_backend(
+ parent: Resource,
+ backend: Resource,
+ client: &kube::Client,
+ ns: &str,
+) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ // There should be a default route.
+ detect_http_routes(&config, |routes| {
+ let route = assert_singleton(routes);
+ assert_route_is_default(route, &parent, 4191);
+ });
+
+ let backends = [backend];
+ let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends(
+ Some(&backends),
+ None,
+ None,
+ );
+ let _route = create(client, route.build()).await;
+ await_route_status(client, ns, "foo-route").await;
+
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an updated config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ // There should be a route with a backend.
+ detect_http_routes(&config, |routes| {
+ let route = assert_route_attached(routes, &parent);
+ let backends = route_backends_random_available(route);
+ let backend = assert_singleton(backends);
+ assert_backend_has_failure_filter(backend);
+ });
+}
+
+async fn parent_with_multiple_http_routes(parent: Resource, client: &kube::Client, ns: &str) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ // There should be a default route.
+ detect_http_routes(&config, |routes| {
+ let route = assert_singleton(routes);
+ assert_route_is_default(route, &parent, 4191);
+ });
+
+ // Routes should be returned in sorted order by creation timestamp then
+ // name. To ensure that this test isn't timing dependent, routes should
+ // be created in alphabetical order.
+ let _a_route = create(
+ client,
+ mk_http_route(ns, "a-route", &parent, Some(4191)).build(),
+ )
+ .await;
+ await_route_status(client, ns, "a-route").await;
+
+ // First route update.
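+    // (Each route creation should surface exactly one updated config on the watch.)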
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an updated config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ let _b_route = create(
+ client,
+ mk_http_route(ns, "b-route", &parent, Some(4191)).build(),
+ )
+ .await;
+ await_route_status(client, ns, "b-route").await;
+
+ // Second route update.
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an updated config");
+ tracing::trace!(?config);
+
+ assert_resource_meta(&config.metadata, &parent, 4191);
+
+ detect_http_routes(&config, |routes| {
+ let num_routes = match parent {
+ Resource::EgressNetwork(_) => 3, // three routes for egress net: 2 configured + 1 default
+ Resource::Service(_) => 2, // two routes for service
+ };
+ assert_eq!(routes.len(), num_routes);
+ assert_eq!(route_name(&routes[0]), "a-route");
+ assert_eq!(route_name(&routes[1]), "b-route");
+ });
+}
+
+async fn parent_with_consecutive_failure_accrual(
+ parent: Resource,
+ client: &kube::Client,
+ ns: &str,
+) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ detect_failure_accrual(&config, |accrual| {
+ let consecutive = failure_accrual_consecutive(accrual);
+ assert_eq!(8, consecutive.max_failures);
+ assert_eq!(
+ &grpc::outbound::ExponentialBackoff {
+ min_backoff: Some(Duration::from_secs(10).try_into().unwrap()),
+ max_backoff: Some(Duration::from_secs(600).try_into().unwrap()),
+ jitter_ratio: 1.0_f32,
+ },
+ consecutive
+ .backoff
+ .as_ref()
+ .expect("backoff must be configured")
+ );
+ });
+}
+
+async fn parent_with_consecutive_failure_accrual_defaults_no_config(
+ parent: Resource,
+ client: &kube::Client,
+ ns: &str,
+) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ // Expect default max_failures and default backoff
+ detect_failure_accrual(&config, |accrual| {
+ let consecutive = failure_accrual_consecutive(accrual);
+ assert_eq!(7, consecutive.max_failures);
+ assert_default_accrual_backoff!(consecutive
+ .backoff
+ .as_ref()
+ .expect("backoff must be configured"));
+ });
+}
+
+async fn parent_with_consecutive_failure_accrual_defaults_max_fails(
+ parent: Resource,
+ client: &kube::Client,
+ ns: &str,
+) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ // Expect default backoff and overridden max_failures
+ detect_failure_accrual(&config, |accrual| {
+ let consecutive = failure_accrual_consecutive(accrual);
+ assert_eq!(8, consecutive.max_failures);
+ assert_default_accrual_backoff!(consecutive
+ .backoff
+ .as_ref()
+ .expect("backoff must be configured"));
+ });
+}
+
+async fn parent_with_consecutive_failure_accrual_defaults_max_jitter(
+ parent: Resource,
+ client: &kube::Client,
+ ns: &str,
+) {
+ let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await;
+ let config = rx
+ .next()
+ .await
+ .expect("watch must not fail")
+ .expect("watch must return an initial config");
+ tracing::trace!(?config);
+
+ // Expect defaults for everything except for the 
jitter ratio + detect_failure_accrual(&config, |accrual| { + let consecutive = failure_accrual_consecutive(accrual); + assert_eq!(7, consecutive.max_failures); + assert_eq!( + &grpc::outbound::ExponentialBackoff { + min_backoff: Some(Duration::from_secs(1).try_into().unwrap()), + max_backoff: Some(Duration::from_secs(60).try_into().unwrap()), + jitter_ratio: 1.0_f32, + }, + consecutive + .backoff + .as_ref() + .expect("backoff must be configured") + ); + }); +} + +async fn parent_with_default_failure_accrual( + parent_default_config: Resource, + parent_max_failures: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent_default_config, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ); + }); + + let mut rx = retry_watch_outbound_policy(client, ns, &parent_max_failures, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Expect failure accrual config to be default (no failure accrual) + detect_failure_accrual(&config, |accrual| { + assert!( + accrual.is_none(), + "consecutive failure accrual should not be configured for service" + ) + }); +} + +async fn opaque_parent(parent: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // Proxy protocol should be opaque. + match config.protocol.unwrap().kind.unwrap() { + grpc::outbound::proxy_protocol::Kind::Opaque(_) => {} + _ => panic!("proxy protocol must be Opaque"), + }; +} + +async fn route_with_filters(parent: Resource, backend: Resource, client: &kube::Client, ns: &str) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // There should be a default route. 
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [backend.clone()]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_backends(Some(&backends), None, None) + .with_filters(Some(vec![ + k8s::policy::httproute::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + k8s::policy::httproute::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ])); + let _route = create(client, route.build()).await; + await_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + // There should be a route with filters. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let filters = &rule.filters; + assert_eq!( + *filters, + vec![ + grpc::outbound::http_route::Filter { + kind: Some( + grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( + grpc::http_route::RequestHeaderModifier { + add: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + grpc::outbound::http_route::Filter { + kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( + grpc::http_route::RequestRedirect { + scheme: Some(grpc::http_types::Scheme { + r#type: Some(grpc::http_types::scheme::Type::Registered( + grpc::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); +} + +async fn backend_with_filters( + parent: Resource, + backend_for_parent: Resource, + client: &kube::Client, + ns: &str, +) { + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + // There should be a default route. 
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + let backends = [backend_for_parent.clone()]; + let route = mk_http_route(ns, "foo-route", &parent, Some(4191)).with_backends( + Some(&backends), + None, + Some(vec![ + k8s_gateway_api::HttpRouteFilter::RequestHeaderModifier { + request_header_modifier: k8s_gateway_api::HttpRequestHeaderFilter { + set: Some(vec![k8s_gateway_api::HttpHeader { + name: "set".to_string(), + value: "set-value".to_string(), + }]), + add: Some(vec![k8s_gateway_api::HttpHeader { + name: "add".to_string(), + value: "add-value".to_string(), + }]), + remove: Some(vec!["remove".to_string()]), + }, + }, + k8s_gateway_api::HttpRouteFilter::RequestRedirect { + request_redirect: k8s_gateway_api::HttpRequestRedirectFilter { + scheme: Some("http".to_string()), + hostname: Some("host".to_string()), + path: Some(k8s_gateway_api::HttpPathModifier::ReplacePrefixMatch { + replace_prefix_match: "/path".to_string(), + }), + port: Some(5555), + status_code: Some(302), + }, + }, + ]), + ); + let _route = create(client, route.build()).await; + await_route_status(client, ns, "foo-route").await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + // There should be a route without rule filters. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + assert_eq!(rule.filters.len(), 0); + let backends = route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_for_parent, 8888); + let filters = &backend.backend.as_ref().unwrap().filters; + assert_eq!( + *filters, + vec![ + grpc::outbound::http_route::Filter { + kind: Some( + grpc::outbound::http_route::filter::Kind::RequestHeaderModifier( + grpc::http_route::RequestHeaderModifier { + add: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "add".to_string(), + value: "add-value".into(), + }] + }), + set: Some(grpc::http_types::Headers { + headers: vec![grpc::http_types::headers::Header { + name: "set".to_string(), + value: "set-value".into(), + }] + }), + remove: vec!["remove".to_string()], + } + ) + ) + }, + grpc::outbound::http_route::Filter { + kind: Some(grpc::outbound::http_route::filter::Kind::Redirect( + grpc::http_route::RequestRedirect { + scheme: Some(grpc::http_types::Scheme { + r#type: Some(grpc::http_types::scheme::Type::Registered( + grpc::http_types::scheme::Registered::Http.into(), + )) + }), + host: "host".to_string(), + path: Some(linkerd2_proxy_api::http_route::PathModifier { + replace: Some( + linkerd2_proxy_api::http_route::path_modifier::Replace::Prefix( + "/path".to_string() + ) + ) + }), + port: 5555, + status: 302, + } + )) + } + ] + ); + }); +} + +async fn http_route_retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + ("retry.linkerd.io/http".to_string(), "5xx".to_string()), + ("timeout.linkerd.io/response".to_string(), "10s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + + await_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + 
.await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeout = rule + .timeouts + .as_ref() + .expect("timeouts expected") + .response + .as_ref() + .expect("response timeout expected"); + assert_eq!(timeout.seconds, 10); + }); +} + +async fn retries_and_timeouts(parent: Resource, client: &kube::Client, ns: &str) { + let _route = create( + client, + mk_http_route(ns, "foo-route", &parent, Some(4191)) + .with_annotations( + vec![ + // Route annotations override the timeout config specified + // on the service. + ("timeout.linkerd.io/request".to_string(), "5s".to_string()), + ] + .into_iter() + .collect(), + ) + .build(), + ) + .await; + await_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + let rule = assert_singleton(&route.rules); + let conditions = rule + .retry + .as_ref() + .expect("retry config expected") + .conditions + .as_ref() + .expect("retry conditions expected"); + let status_range = assert_singleton(&conditions.status_ranges); + // Retry config inherited from the service. + assert_eq!(status_range.start, 500); + assert_eq!(status_range.end, 599); + let timeouts = rule.timeouts.as_ref().expect("timeouts expected"); + // Service timeout config overridden by route timeout config. + assert_eq!(timeouts.response, None); + let request_timeout = timeouts.request.as_ref().expect("request timeout expected"); + assert_eq!(request_timeout.seconds, 5); + }); +} + +async fn http_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { + let mut route = create(client, mk_empty_http_route(ns, "foo-route", &parent, 4191)).await; + await_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached. + detect_http_routes(&config, |routes| { + let route: &grpc::outbound::HttpRoute = assert_route_attached(routes, &parent); + assert_route_name_eq(route, "foo-route"); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = "other".to_string(); + update(client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be unattached and the default route should be present. 
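+ // Re-pointing the parent_ref at a non-existent parent ("other") detaches + // the route from this parent, leaving only the synthesized default route.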
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = parent.name(); + update(client, route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached again. + detect_http_routes(&config, |routes| { + let route = assert_route_attached(routes, &parent); + assert_route_name_eq(route, "foo-route"); + }); +} From 451bacf33e563887bdc448b0df5d58e237cfe1ce Mon Sep 17 00:00:00 2001 From: l5d-bot <48604953+l5d-bot@users.noreply.github.com> Date: Wed, 20 Nov 2024 12:31:36 -0800 Subject: [PATCH 04/18] proxy: v2.266.0 (#13353) Release notes: https://github.com/linkerd/linkerd2-proxy/releases/tag/release/v2.266.0 Signed-off-by: l5d-bot <48604953+l5d-bot@users.noreply.github.com> Co-authored-by: l5d-bot <48604953+l5d-bot@users.noreply.github.com> --- .proxy-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.proxy-version b/.proxy-version index c5c4cea7b047e..977437ad70626 100644 --- a/.proxy-version +++ b/.proxy-version @@ -1 +1 @@ -v2.265.0 +v2.266.0 From e10855f0f49493ae36eeedaf180dd47039b1883f Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 21 Nov 2024 11:35:09 +0200 Subject: [PATCH 05/18] policy: Add TCP and TLS route API tests (#13348) This PR builds on #13342 to add TCP and TLS route tests for the outbound API. Signed-off-by: Zahari Dichev --- policy-test/src/lib.rs | 46 ++ policy-test/src/outbound_api.rs | 223 +++++++++ policy-test/tests/outbound_api_tcp.rs | 640 +++++++++++++++++++++++++ policy-test/tests/outbound_api_tls.rs | 646 ++++++++++++++++++++++++++ 4 files changed, 1555 insertions(+) create mode 100644 policy-test/tests/outbound_api_tcp.rs create mode 100644 policy-test/tests/outbound_api_tls.rs diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index 82745b71ad9dc..0b2b21ae72189 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -290,6 +290,52 @@ pub async fn await_grpc_route_status( route_status } +// Waits until a TlsRoute with the given namespace and name has a status set +// on it, then returns the generic route status representation. +pub async fn await_tls_route_status( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::gateway::TlsRouteStatus { + let route_status = await_condition( + client, + ns, + name, + |obj: Option<&k8s::gateway::TlsRoute>| -> bool { + obj.and_then(|route| route.status.as_ref()).is_some() + }, + ) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation"); + tracing::trace!(?route_status, name, ns, "got route status"); + route_status +} + +// Waits until a TcpRoute with the given namespace and name has a status set +// on it, then returns the generic route status representation. +pub async fn await_tcp_route_status( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::gateway::TcpRouteStatus { + let route_status = await_condition( + client, + ns, + name, + |obj: Option<&k8s::gateway::TcpRoute>| -> bool { + obj.and_then(|route| route.status.as_ref()).is_some() + }, + ) + .await + .expect("must fetch route") + .status + .expect("route must contain a status representation"); + tracing::trace!(?route_status, name, ns, "got route status"); + route_status +} + // Wait for the endpoints controller to populate the Endpoints resource.
pub fn endpoints_ready(obj: Option<&k8s::Endpoints>) -> bool { if let Some(ep) = obj { diff --git a/policy-test/src/outbound_api.rs b/policy-test/src/outbound_api.rs index 02c53d03e2515..d8ee90e2189a2 100644 --- a/policy-test/src/outbound_api.rs +++ b/policy-test/src/outbound_api.rs @@ -83,6 +83,44 @@ pub fn grpc_routes(config: &grpc::outbound::OutboundPolicy) -> &[grpc::outbound: } } +#[track_caller] +pub fn tls_routes(config: &grpc::outbound::OutboundPolicy) -> &[grpc::outbound::TlsRoute] { + let kind = config + .protocol + .as_ref() + .expect("must have proxy protocol") + .kind + .as_ref() + .expect("must have kind"); + if let grpc::outbound::proxy_protocol::Kind::Tls(grpc::outbound::proxy_protocol::Tls { + routes, + }) = kind + { + routes + } else { + panic!("proxy protocol must be Tls; actually got:\n{kind:#?}") + } +} + +#[track_caller] +pub fn tcp_routes(config: &grpc::outbound::OutboundPolicy) -> &[grpc::outbound::OpaqueRoute] { + let kind = config + .protocol + .as_ref() + .expect("must have proxy protocol") + .kind + .as_ref() + .expect("must have kind"); + if let grpc::outbound::proxy_protocol::Kind::Opaque(grpc::outbound::proxy_protocol::Opaque { + routes, + }) = kind + { + routes + } else { + panic!("proxy protocol must be Opaque; actually got:\n{kind:#?}") + } +} + #[track_caller] pub fn detect_failure_accrual(config: &grpc::outbound::OutboundPolicy, f: F) where @@ -149,6 +187,23 @@ pub fn route_backends_first_available( } } +#[track_caller] +pub fn tls_route_backends_first_available( + route: &grpc::outbound::TlsRoute, +) -> &[grpc::outbound::tls_route::RouteBackend] { + let kind = assert_singleton(&route.rules) + .backends + .as_ref() + .expect("Rule must have backends") + .kind + .as_ref() + .expect("Backend must have kind"); + match kind { + grpc::outbound::tls_route::distribution::Kind::FirstAvailable(fa) => &fa.backends, + _ => panic!("Distribution must be FirstAvailable"), + } +} + #[track_caller] pub fn route_backends_random_available( route: &grpc::outbound::HttpRoute, @@ -166,6 +221,40 @@ pub fn route_backends_random_available( } } +#[track_caller] +pub fn tls_route_backends_random_available( + route: &grpc::outbound::TlsRoute, +) -> &[grpc::outbound::tls_route::WeightedRouteBackend] { + let kind = assert_singleton(&route.rules) + .backends + .as_ref() + .expect("Rule must have backends") + .kind + .as_ref() + .expect("Backend must have kind"); + match kind { + grpc::outbound::tls_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, + _ => panic!("Distribution must be RandomAvailable"), + } +} + +#[track_caller] +pub fn tcp_route_backends_random_available( + route: &grpc::outbound::OpaqueRoute, +) -> &[grpc::outbound::opaque_route::WeightedRouteBackend] { + let kind = assert_singleton(&route.rules) + .backends + .as_ref() + .expect("Rule must have backends") + .kind + .as_ref() + .expect("Backend must have kind"); + match kind { + grpc::outbound::opaque_route::distribution::Kind::RandomAvailable(dist) => &dist.backends, + _ => panic!("Distribution must be RandomAvailable"), + } +} + #[track_caller] pub fn route_name(route: &grpc::outbound::HttpRoute) -> &str { match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { @@ -174,6 +263,22 @@ pub fn route_name(route: &grpc::outbound::HttpRoute) -> &str { } } +#[track_caller] +pub fn tls_route_name(route: &grpc::outbound::TlsRoute) -> &str { + match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { + grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. 
}) => name, + _ => panic!("route must be a resource kind"), + } +} + +#[track_caller] +pub fn tcp_route_name(route: &grpc::outbound::OpaqueRoute) -> &str { + match route.metadata.as_ref().unwrap().kind.as_ref().unwrap() { + grpc::meta::metadata::Kind::Resource(grpc::meta::Resource { ref name, .. }) => name, + _ => panic!("route must be a resource kind"), + } +} + #[track_caller] pub fn assert_backend_has_failure_filter( backend: &grpc::outbound::http_route::WeightedRouteBackend, @@ -208,6 +313,22 @@ pub fn assert_route_is_default(route: &grpc::outbound::HttpRoute, parent: &Resou ); } +#[track_caller] +pub fn assert_tls_route_is_default(route: &grpc::outbound::TlsRoute, parent: &Resource, port: u16) { + let kind = route.metadata.as_ref().unwrap().kind.as_ref().unwrap(); + match kind { + grpc::meta::metadata::Kind::Default(_) => {} + grpc::meta::metadata::Kind::Resource(r) => { + panic!("route expected to be default but got resource {r:?}") + } + } + + let backends = tls_route_backends_first_available(route); + let backend = assert_singleton(backends); + assert_tls_backend_matches_parent(backend, parent, port); + assert_singleton(&route.rules); +} + #[track_caller] pub fn assert_backend_matches_parent( backend: &grpc::outbound::http_route::RouteBackend, @@ -254,6 +375,98 @@ pub fn assert_backend_matches_parent( assert_resource_meta(&backend.metadata, parent, port) } +#[track_caller] +pub fn assert_tls_backend_matches_parent( + backend: &grpc::outbound::tls_route::RouteBackend, + parent: &Resource, + port: u16, +) { + let backend = backend.backend.as_ref().unwrap(); + + match parent { + Resource::Service(svc) => { + let dst = match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Balancer(balance) => { + let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); + match kind { + grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, + } + } + grpc::outbound::backend::Kind::Forward(_) => { + panic!("service default route backend must be Balancer") + } + }; + assert_eq!( + *dst, + format!( + "{}.{}.svc.{}:{}", + svc.name_unchecked(), + svc.namespace().unwrap(), + "cluster.local", + port + ) + ); + } + + Resource::EgressNetwork(_) => { + match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Forward(_) => {} + grpc::outbound::backend::Kind::Balancer(_) => { + panic!("egress net default route backend must be Forward") + } + }; + } + } + + assert_resource_meta(&backend.metadata, parent, port) +} + +#[track_caller] +pub fn assert_tcp_backend_matches_parent( + backend: &grpc::outbound::opaque_route::RouteBackend, + parent: &Resource, + port: u16, +) { + let backend = backend.backend.as_ref().unwrap(); + + match parent { + Resource::Service(svc) => { + let dst = match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Balancer(balance) => { + let kind = balance.discovery.as_ref().unwrap().kind.as_ref().unwrap(); + match kind { + grpc::outbound::backend::endpoint_discovery::Kind::Dst(dst) => &dst.path, + } + } + grpc::outbound::backend::Kind::Forward(_) => { + panic!("service default route backend must be Balancer") + } + }; + assert_eq!( + *dst, + format!( + "{}.{}.svc.{}:{}", + svc.name_unchecked(), + svc.namespace().unwrap(), + "cluster.local", + port + ) + ); + } + + Resource::EgressNetwork(_) => { + match backend.kind.as_ref().unwrap() { + grpc::outbound::backend::Kind::Forward(_) => {} + grpc::outbound::backend::Kind::Balancer(_) => { + panic!("egress net default route backend must be Forward") + } + }; + } + } + + 
assert_resource_meta(&backend.metadata, parent, port) +} + #[track_caller] pub fn assert_singleton<T>(ts: &[T]) -> &T { assert_eq!(ts.len(), 1); @@ -276,6 +489,16 @@ pub fn assert_route_name_eq(route: &grpc::outbound::HttpRoute, name: &str) { assert_name_eq(route.metadata.as_ref().unwrap(), name) } +#[track_caller] +pub fn assert_tls_route_name_eq(route: &grpc::outbound::TlsRoute, name: &str) { + assert_name_eq(route.metadata.as_ref().unwrap(), name) +} + +#[track_caller] +pub fn assert_tcp_route_name_eq(route: &grpc::outbound::OpaqueRoute, name: &str) { + assert_name_eq(route.metadata.as_ref().unwrap(), name) +} + #[track_caller] pub fn assert_name_eq(meta: &grpc::meta::Metadata, name: &str) { let kind = meta.kind.as_ref().unwrap(); diff --git a/policy-test/tests/outbound_api_tcp.rs b/policy-test/tests/outbound_api_tcp.rs new file mode 100644 index 0000000000000..52db12b6e5f17 --- /dev/null +++ b/policy-test/tests/outbound_api_tcp.rs @@ -0,0 +1,640 @@ +use futures::prelude::*; +use linkerd_policy_controller_k8s_api as k8s; +use linkerd_policy_test::{ + assert_resource_meta, await_egress_net_status, await_tcp_route_status, create, + create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, + mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +}; +use maplit::{btreemap, convert_args}; + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tcp_routes_with_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend_svc = create_service(&client, &ns, "backend", 8888).await; + parent_with_tcp_routes_with_backend( + Resource::Service(svc), + Resource::Service(backend_svc), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_tcp_routes_with_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + parent_with_tcp_routes_with_backend( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tcp_routes_with_cross_namespace_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let backend_ns_name = format!("{}-backend", ns); + let backend_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(backend_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + let backend_name = "backend"; + let backend_svc = + Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); + let backends = [backend_svc.clone()]; + let route = mk_tcp_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); + let _route = create(&client, route.build()).await; + await_tcp_route_status(&client, &ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata,
&svc, 4191); + + let routes = tcp_routes(&config); + let route = assert_singleton(routes); + let backends = tcp_route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + + delete_cluster_scoped(&client, backend_ns).await + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tcp_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend = mk_service(&ns, "invalid", 4191); + + parent_with_tcp_routes_with_invalid_backend( + Resource::Service(svc), + Resource::Service(backend), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_tcp_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + let backend = mk_egress_net(&ns, "invalid"); + + parent_with_tcp_routes_with_invalid_backend( + Resource::EgressNetwork(egress), + Resource::EgressNetwork(backend), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_multiple_tcp_routes() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_multiple_tcp_routes(Resource::Service(svc), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_multiple_tcp_routes() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + parent_with_multiple_tcp_routes(Resource::EgressNetwork(egress), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn tcp_route_with_no_port() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let _route = create( + &client, + mk_tcp_route(&ns, "foo-route", &svc, None) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tcp_route_status(&client, &ns, "foo-route").await; + + let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; + + let config_4191 = rx_4191 + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_4191); + + let routes = tcp_routes(&config_4191); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + + let config_9999 = rx_9999 + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_9999); + + let routes = tcp_routes(&config_9999); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn producer_route() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + // A route created in the same namespace as its parent service is called + // a producer 
route. It should be returned in outbound policy requests + // for that service from ALL namespaces. + let _route = create( + &client, + mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tcp_route_status(&client, &ns, "foo-route").await; + + let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?producer_config); + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let routes = tcp_routes(&producer_config); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + + let routes = tcp_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn pre_existing_producer_route() { + // We test the scenario where outbound policy watches are initiated after + // a producer route already exists. + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. + let _route = create( + &client, + mk_tcp_route(&ns, "foo-route", &svc, Some(4191)) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tcp_route_status(&client, &ns, "foo-route").await; + + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + + let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + // The route should be returned in queries from the producer namespace. + let routes = tcp_routes(&producer_config); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + + // The route should be returned in queries from a consumer namespace.
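+ // Both watches were started after the route already existed, so each + // initial snapshot includes the route without a further update.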
+ let routes = tcp_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consumer_route() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let consumer_ns_name = format!("{}-consumer", ns); + let consumer_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(consumer_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; + let other_config = other_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?other_config); + + // A route created in a different namespace than its parent service is + // called a consumer route. It should be returned in outbound policy + // requests for that service ONLY when the request comes from the + // consumer namespace. + let _route = create( + &client, + mk_tcp_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) + .with_backends(&[svc]) + .build(), + ) + .await; + await_tcp_route_status(&client, &consumer_ns_name, "foo-route").await; + + // The route should NOT be returned in queries from the producer namespace. + // There should be a default route. + assert!(producer_rx.next().now_or_never().is_none()); + + // The route should be returned in queries from the same consumer + // namespace. + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let routes = tcp_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tcp_route_name_eq(route, "foo-route"); + + // The route should NOT be returned in queries from a different consumer + // namespace.
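+ // FutureExt::now_or_never polls the watch exactly once without waiting, + // so None means no policy update was emitted for this namespace.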
+ assert!(other_rx.next().now_or_never().is_none()); + + delete_cluster_scoped(&client, consumer_ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_tcp_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + tcp_route_reattachment(Resource::Service(svc), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_tcp_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + tcp_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; + }) + .await; +} + +/* Helpers */ + +struct TcpRouteBuilder(k8s_gateway_api::TcpRoute); + +fn mk_tcp_route(ns: &str, name: &str, parent: &Resource, port: Option<u16>) -> TcpRouteBuilder { + use k8s_gateway_api as api; + + TcpRouteBuilder(api::TcpRoute { + metadata: kube::api::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: api::TcpRouteSpec { + inner: api::CommonRouteSpec { + parent_refs: Some(vec![api::ParentReference { + group: Some(parent.group()), + kind: Some(parent.kind()), + namespace: Some(parent.namespace()), + name: parent.name(), + section_name: None, + port, + }]), + }, + rules: vec![api::TcpRouteRule { + backend_refs: Vec::default(), + }], + }, + status: None, + }) +} + +impl TcpRouteBuilder { + fn with_backends(self, backends: &[Resource]) -> Self { + let mut route = self.0; + let backend_refs: Vec<_> = backends + .iter() + .map(|backend| k8s_gateway_api::BackendRef { + weight: None, + inner: k8s_gateway_api::BackendObjectReference { + name: backend.name(), + port: Some(8888), + group: Some(backend.group()), + kind: Some(backend.kind()), + namespace: Some(backend.namespace()), + }, + }) + .collect(); + route.spec.rules.iter_mut().for_each(|rule| { + rule.backend_refs = backend_refs.clone(); + }); + Self(route) + } + + fn build(self) -> k8s_gateway_api::TcpRoute { + self.0 + } +} + +async fn parent_with_tcp_routes_with_backend( + parent: Resource, + rule_backend: Resource, + client: &kube::Client, + ns: &str, +) { + let backends = [rule_backend.clone()]; + let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); + let _route = create(client, route.build()).await; + await_tcp_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tcp_routes(&config); + let route = assert_singleton(routes); + let backends = tcp_route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_tcp_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +} + +async fn parent_with_tcp_routes_with_invalid_backend( + parent: Resource, + backend: Resource, + client: &kube::Client, + ns: &str, +) { + let backends = [backend]; + let route = mk_tcp_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); + let _route = create(client, route.build()).await; + await_tcp_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns,
&parent, 4191).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tcp_routes(&config); + let route = assert_singleton(routes); + let backends = tcp_route_backends_random_available(route); + assert_singleton(backends); +} + +async fn parent_with_multiple_tcp_routes(parent: Resource, client: &kube::Client, ns: &str) { + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependent, routes should + // be created in alphabetical order. + let _a_route = create( + client, + mk_tcp_route(ns, "a-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tcp_route_status(client, ns, "a-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + + // First route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let _b_route = create( + client, + mk_tcp_route(ns, "b-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tcp_route_status(client, ns, "b-route").await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tcp_routes(&config); + assert_eq!(routes.len(), 1); + assert_eq!(tcp_route_name(&routes[0]), "a-route"); +} + +async fn tcp_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { + let mut route = create( + client, + mk_tcp_route(ns, "foo-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tcp_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached. + let routes = tcp_routes(&config); + let tcp_route = assert_singleton(routes); + assert_tcp_route_name_eq(tcp_route, "foo-route"); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = "other".to_string(); + update(client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be unattached and the default route should be present. + detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = parent.name(); + update(client, route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached again.
+ let routes = tcp_routes(&config); + let tcp_route = assert_singleton(routes); + assert_tcp_route_name_eq(tcp_route, "foo-route"); +} diff --git a/policy-test/tests/outbound_api_tls.rs b/policy-test/tests/outbound_api_tls.rs new file mode 100644 index 0000000000000..b2fd4d015cf57 --- /dev/null +++ b/policy-test/tests/outbound_api_tls.rs @@ -0,0 +1,646 @@ +use futures::prelude::*; +use linkerd_policy_controller_k8s_api as k8s; +use linkerd_policy_test::{ + assert_resource_meta, await_egress_net_status, await_tls_route_status, create, + create_cluster_scoped, create_egress_network, create_service, delete_cluster_scoped, grpc, + mk_egress_net, mk_service, outbound_api::*, update, with_temp_ns, Resource, +}; +use maplit::{btreemap, convert_args}; + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tls_routes_with_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + let backend_svc = create_service(&client, &ns, "backend", 8888).await; + parent_with_tls_routes_with_backend( + Resource::Service(svc), + Resource::Service(backend_svc), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_tls_routes_with_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + parent_with_tls_routes_with_backend( + Resource::EgressNetwork(egress.clone()), + Resource::EgressNetwork(egress), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tls_routes_with_cross_namespace_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let backend_ns_name = format!("{}-backend", ns); + let backend_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(backend_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + let backend_name = "backend"; + let backend_svc = + Resource::Service(create_service(&client, &backend_ns_name, backend_name, 8888).await); + let backends = [backend_svc.clone()]; + let route = mk_tls_route(&ns, "foo-route", &svc, Some(4191)).with_backends(&backends); + let _route = create(&client, route.build()).await; + await_tls_route_status(&client, &ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &svc, 4191); + + let routes = tls_routes(&config); + let route = assert_singleton(routes); + let backends = tls_route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &backend_svc, 8888); + + delete_cluster_scoped(&client, backend_ns).await + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_tls_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + let
backend = mk_service(&ns, "invalid", 4191); + + parent_with_tls_routes_with_invalid_backend( + Resource::Service(svc), + Resource::Service(backend), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_tls_routes_with_invalid_backend() { + with_temp_ns(|client, ns| async move { + // Create an egress network + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + let backend = mk_egress_net(&ns, "invalid"); + + parent_with_tls_routes_with_invalid_backend( + Resource::EgressNetwork(egress), + Resource::EgressNetwork(backend), + &client, + &ns, + ) + .await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_with_multiple_tls_routes() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + parent_with_multiple_tls_routes(Resource::Service(svc), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_with_multiple_tls_routes() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + parent_with_multiple_tls_routes(Resource::EgressNetwork(egress), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn tls_route_with_no_port() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let _route = create( + &client, + mk_tls_route(&ns, "foo-route", &svc, None) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tls_route_status(&client, &ns, "foo-route").await; + + let mut rx_4191 = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let mut rx_9999 = retry_watch_outbound_policy(&client, &ns, &svc, 9999).await; + + let config_4191 = rx_4191 + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_4191); + + let routes = tls_routes(&config_4191); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + + let config_9999 = rx_9999 + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config_9999); + + let routes = tls_routes(&config_9999); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn producer_route() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces.
+ let _route = create( + &client, + mk_tls_route(&ns, "foo-route", &svc, Some(4191)) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tls_route_status(&client, &ns, "foo-route").await; + + let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?producer_config); + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let routes = tls_routes(&producer_config); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + + let routes = tls_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn pre_existing_producer_route() { + // We test the scenario where outbound policy watches are initiated after + // a producer route already exists. + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + // A route created in the same namespace as its parent service is called + // a producer route. It should be returned in outbound policy requests + // for that service from ALL namespaces. + let _route = create( + &client, + mk_tls_route(&ns, "foo-route", &svc, Some(4191)) + .with_backends(&[svc.clone()]) + .build(), + ) + .await; + await_tls_route_status(&client, &ns, "foo-route").await; + + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + + let mut consumer_rx = retry_watch_outbound_policy(&client, "consumer_ns", &svc, 4191).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + // The route should be returned in queries from the producer namespace. + let routes = tls_routes(&producer_config); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + + // The route should be returned in queries from a consumer namespace.
+ let routes = tls_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn consumer_route() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = Resource::Service(create_service(&client, &ns, "my-svc", 4191).await); + + let consumer_ns_name = format!("{}-consumer", ns); + let consumer_ns = create_cluster_scoped( + &client, + k8s::Namespace { + metadata: k8s::ObjectMeta { + name: Some(consumer_ns_name.clone()), + labels: Some(convert_args!(btreemap!( + "linkerd-policy-test" => std::thread::current().name().unwrap_or(""), + ))), + ..Default::default() + }, + ..Default::default() + }, + ) + .await; + + let mut producer_rx = retry_watch_outbound_policy(&client, &ns, &svc, 4191).await; + let producer_config = producer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?producer_config); + + let mut consumer_rx = + retry_watch_outbound_policy(&client, &consumer_ns_name, &svc, 4191).await; + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let mut other_rx = retry_watch_outbound_policy(&client, "other_ns", &svc, 4191).await; + let other_config = other_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?other_config); + + // A route created in a different namespace than its parent service is + // called a consumer route. It should be returned in outbound policy + // requests for that service ONLY when the request comes from the + // consumer namespace. + let _route = create( + &client, + mk_tls_route(&consumer_ns_name, "foo-route", &svc, Some(4191)) + .with_backends(&[svc]) + .build(), + ) + .await; + await_tls_route_status(&client, &consumer_ns_name, "foo-route").await; + + // The route should NOT be returned in queries from the producer namespace. + // There should be a default route. + assert!(producer_rx.next().now_or_never().is_none()); + + // The route should be returned in queries from the same consumer + // namespace. + let consumer_config = consumer_rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?consumer_config); + + let routes = tls_routes(&consumer_config); + let route = assert_singleton(routes); + assert_tls_route_name_eq(route, "foo-route"); + + // The route should NOT be returned in queries from a different consumer + // namespace.
+ assert!(other_rx.next().now_or_never().is_none()); + + delete_cluster_scoped(&client, consumer_ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn service_tls_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create a service + let svc = create_service(&client, &ns, "my-svc", 4191).await; + tls_route_reattachment(Resource::Service(svc), &client, &ns).await; + }) + .await; +} + +#[tokio::test(flavor = "current_thread")] +async fn egress_net_tls_route_reattachment() { + with_temp_ns(|client, ns| async move { + // Create an egress net + let egress = create_egress_network(&client, &ns, "my-egress").await; + await_egress_net_status(&client, &ns, "my-egress").await; + + tls_route_reattachment(Resource::EgressNetwork(egress), &client, &ns).await; + }) + .await; +} + +/* Helpers */ + +struct TlsRouteBuilder(k8s_gateway_api::TlsRoute); + +fn mk_tls_route(ns: &str, name: &str, parent: &Resource, port: Option<u16>) -> TlsRouteBuilder { + use k8s_gateway_api as api; + + TlsRouteBuilder(api::TlsRoute { + metadata: kube::api::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: api::TlsRouteSpec { + inner: api::CommonRouteSpec { + parent_refs: Some(vec![api::ParentReference { + group: Some(parent.group()), + kind: Some(parent.kind()), + namespace: Some(parent.namespace()), + name: parent.name(), + section_name: None, + port, + }]), + }, + hostnames: None, + rules: vec![api::TlsRouteRule { + backend_refs: Vec::default(), + }], + }, + status: None, + }) +} + +impl TlsRouteBuilder { + fn with_backends(self, backends: &[Resource]) -> Self { + let mut route = self.0; + let backend_refs: Vec<_> = backends + .iter() + .map(|backend| k8s_gateway_api::BackendRef { + weight: None, + inner: k8s_gateway_api::BackendObjectReference { + name: backend.name(), + port: Some(8888), + group: Some(backend.group()), + kind: Some(backend.kind()), + namespace: Some(backend.namespace()), + }, + }) + .collect(); + route.spec.rules.iter_mut().for_each(|rule| { + rule.backend_refs = backend_refs.clone(); + }); + Self(route) + } + + fn build(self) -> k8s_gateway_api::TlsRoute { + self.0 + } +} + +async fn parent_with_tls_routes_with_backend( + parent: Resource, + rule_backend: Resource, + client: &kube::Client, + ns: &str, +) { + let backends = [rule_backend.clone()]; + let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); + let _route = create(client, route.build()).await; + await_tls_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tls_routes(&config); + let route = assert_route_attached(routes, &parent); + let backends = tls_route_backends_random_available(route); + let backend = assert_singleton(backends); + assert_tls_backend_matches_parent(backend.backend.as_ref().unwrap(), &rule_backend, 8888); +} + +async fn parent_with_tls_routes_with_invalid_backend( + parent: Resource, + backend: Resource, + client: &kube::Client, + ns: &str, +) { + let backends = [backend]; + let route = mk_tls_route(ns, "foo-route", &parent, Some(4191)).with_backends(&backends); + let _route = create(client, route.build()).await; + await_tls_route_status(client, ns, "foo-route").await; + + let mut rx =
retry_watch_outbound_policy(client, ns, &parent, 4191).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tls_routes(&config); + let route = assert_route_attached(routes, &parent); + let backends = tls_route_backends_random_available(route); + assert_singleton(backends); +} + +async fn parent_with_multiple_tls_routes(parent: Resource, client: &kube::Client, ns: &str) { + // Routes should be returned in sorted order by creation timestamp then + // name. To ensure that this test isn't timing dependent, routes should + // be created in alphabetical order. + let _a_route = create( + client, + mk_tls_route(ns, "a-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tls_route_status(client, ns, "a-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + + // First route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let _b_route = create( + client, + mk_tls_route(ns, "b-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tls_route_status(client, ns, "b-route").await; + + // Second route update. + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + let routes = tls_routes(&config); + let num_routes = match parent { + Resource::EgressNetwork(_) => 3, // three routes for egress net: 2 configured + 1 default + Resource::Service(_) => 2, // two routes for service + }; + assert_eq!(routes.len(), num_routes); + assert_eq!(tls_route_name(&routes[0]), "a-route"); + assert_eq!(tls_route_name(&routes[1]), "b-route"); +} + +async fn tls_route_reattachment(parent: Resource, client: &kube::Client, ns: &str) { + let mut route = create( + client, + mk_tls_route(ns, "foo-route", &parent, Some(4191)) + .with_backends(&[parent.clone()]) + .build(), + ) + .await; + await_tls_route_status(client, ns, "foo-route").await; + + let mut rx = retry_watch_outbound_policy(client, ns, &parent, 4191).await; + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an initial config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached. + let routes = tls_routes(&config); + let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); + assert_tls_route_name_eq(tls_route, "foo-route"); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = "other".to_string(); + update(client, route.clone()).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be unattached and the default route should be present.
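+ // With no TlsRoute attached, the parent falls back to protocol detection, + // so the synthesized default HTTP routes are asserted below.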
+ detect_http_routes(&config, |routes| { + let route = assert_singleton(routes); + assert_route_is_default(route, &parent, 4191); + }); + + route + .spec + .inner + .parent_refs + .as_mut() + .unwrap() + .first_mut() + .unwrap() + .name = parent.name(); + update(client, route).await; + + let config = rx + .next() + .await + .expect("watch must not fail") + .expect("watch must return an updated config"); + tracing::trace!(?config); + + assert_resource_meta(&config.metadata, &parent, 4191); + + // The route should be attached again. + let routes = tls_routes(&config); + let tls_route: &grpc::outbound::TlsRoute = assert_route_attached(routes, &parent); + assert_tls_route_name_eq(tls_route, "foo-route"); +} From 1d675330f0197b0505db29666392cf10f6ccf5be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:51:04 -0800 Subject: [PATCH 06/18] build(deps): bump itoa from 1.0.11 to 1.0.13 (#13357) Bumps [itoa](https://github.com/dtolnay/itoa) from 1.0.11 to 1.0.13. - [Release notes](https://github.com/dtolnay/itoa/releases) - [Commits](https://github.com/dtolnay/itoa/compare/1.0.11...1.0.13) --- updated-dependencies: - dependency-name: itoa dependency-type: indirect update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b85f0aef87b61..4fa7d8686b765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -883,9 +883,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" [[package]] name = "jemalloc-sys" From 68093a6bf7ec3df411ec099f6bb54b20c193d646 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:51:25 -0800 Subject: [PATCH 07/18] build(deps): bump proc-macro2 from 1.0.89 to 1.0.90 (#13358) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.89 to 1.0.90. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.89...1.0.90) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: indirect update-type: version-update:semver-patch ...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4fa7d8686b765..9eff4e381fb58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1648,9 +1648,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "d4e1ced3fe749df87a909c23e9607ab9a09c8f0bedb7e03b8146f4c08c298673" dependencies = [ "unicode-ident", ] From ef4f39f71b77ac8bd9dc2725921f7cbd6c09eb21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:51:48 -0800 Subject: [PATCH 08/18] build(deps): bump codecov/codecov-action from 5.0.4 to 5.0.7 (#13360) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.0.4 to 5.0.7. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/985343d70564a82044c1b7fcb84c2fa05405c1a2...015f24e6818733317a2da2edd6290ab26238649a) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codecov.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index bdf9e9518124f..066f13b103f76 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: go install gotest.tools/gotestsum@v0.4.2 - run: gotestsum -- -cover -coverprofile=coverage.out -v -mod=readonly ./... - - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 + - uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a with: files: ./coverage.out flags: unittests,golang @@ -41,7 +41,7 @@ jobs: export NODE_ENV=test bin/web --frozen-lockfile bin/web test --reporters="jest-progress-bar-reporter" --reporters="./gh_ann_reporter.js" --coverage - - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 + - uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a with: directory: ./web/app/coverage flags: unittests,javascript @@ -58,6 +58,6 @@ jobs: - shell: bash run: mkdir -p target && cd target && bin/scurl -v https://github.com/xd009642/tarpaulin/releases/download/0.27.3/cargo-tarpaulin-x86_64-unknown-linux-musl.tar.gz | tar zxvf - && chmod 755 cargo-tarpaulin - run: target/cargo-tarpaulin tarpaulin --workspace --out Xml - - uses: codecov/codecov-action@985343d70564a82044c1b7fcb84c2fa05405c1a2 + - uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a with: flags: unittests,rust From 91561810040a2ab74a03a17793c3914f472ad15e Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 21 Nov 2024 22:15:52 +0200 Subject: [PATCH 09/18] build(deps): update linkerd2-proxy-api to v0.15.0 Update opaq and tls policies to return filters rather than errors. 
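For illustration, a deny decision is now carried as a Forbidden filter on the route rule instead of a route-level error field. A minimal sketch of the new shape, using the generated types from linkerd2-proxy-api v0.15 as they appear in the diff below (the `backend` argument here is a placeholder):

    use linkerd2_proxy_api::{opaque_route, outbound};

    // Build an opaque-route rule that forwards to a single backend but marks
    // the traffic as forbidden via a rule-level filter (no `error` field).
    fn forbidden_rule(backend: outbound::Backend) -> outbound::opaque_route::Rule {
        outbound::opaque_route::Rule {
            backends: Some(outbound::opaque_route::Distribution {
                kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable(
                    outbound::opaque_route::distribution::FirstAvailable {
                        backends: vec![outbound::opaque_route::RouteBackend {
                            backend: Some(backend),
                            filters: Vec::new(),
                        }],
                    },
                )),
            }),
            filters: vec![outbound::opaque_route::Filter {
                kind: Some(outbound::opaque_route::filter::Kind::Forbidden(
                    opaque_route::Forbidden {},
                )),
            }],
        }
    }

Invalid backends follow the same pattern at the RouteBackend level, with filter::Kind::Invalid carrying a message instead of the old `invalid` field.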
Signed-off-by: Zahari Dichev --- Cargo.lock | 5 ++- Cargo.toml | 4 -- policy-controller/grpc/Cargo.toml | 2 +- policy-controller/grpc/src/outbound.rs | 13 +++---- policy-controller/grpc/src/outbound/tcp.rs | 45 +++++++++++----------- policy-controller/grpc/src/outbound/tls.rs | 32 ++++++++------- policy-test/Cargo.toml | 2 +- 7 files changed, 52 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9eff4e381fb58..2d5d5c0b2298f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,8 +1325,9 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" -version = "0.14.0" -source = "git+https://github.com/linkerd/linkerd2-proxy-api?branch=main#6c316cc41a3a0e194a70f22b5698ec21ce245e99" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4682c00263191a5bfa4fbe64f6d80b22ff2b49aaa294da5aac062f5abc6eb9e" dependencies = [ "http", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index 03f7db5ad8371..2bd3689b03f4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,3 @@ members = [ [profile.release] lto = "thin" - -[patch.crates-io] -# TODO(Zahari): switch released version once TLS protocol support is out. -linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', branch = 'main' } diff --git a/policy-controller/grpc/Cargo.toml b/policy-controller/grpc/Cargo.toml index 7c0828f8aaa4b..c8f2b765d319e 100644 --- a/policy-controller/grpc/Cargo.toml +++ b/policy-controller/grpc/Cargo.toml @@ -22,5 +22,5 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" [dependencies.linkerd2-proxy-api] -version = "0.14" +version = "0.15" features = ["inbound", "outbound"] diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index cba161908b4e3..1c1c76001020a 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -310,13 +310,13 @@ fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend.clone()), - invalid: None, + filters: Vec::new(), }], }, )), }), + filters: Vec::new(), }], - error: None, }], }; @@ -586,18 +586,15 @@ fn default_outbound_opaq_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend), - invalid: None, + filters: Vec::new(), }], }, )), }), + filters: Vec::new(), }]; - outbound::OpaqueRoute { - metadata, - rules, - error: None, - } + outbound::OpaqueRoute { metadata, rules } } } } diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs index a1e169ce6e2ec..afd1222136bdb 100644 --- a/policy-controller/grpc/src/outbound/tcp.rs +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -1,5 +1,5 @@ use super::{default_balancer_config, default_queue_config}; -use linkerd2_proxy_api::{destination, meta, outbound}; +use linkerd2_proxy_api::{self, destination, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{Backend, ParentInfo, TcpRoute, TrafficPolicy}, routes::GroupKindNamespaceName, @@ -71,7 +71,7 @@ fn convert_outbound_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend.clone()), - invalid: None, + filters: Vec::new(), }], }, ) @@ -83,13 +83,10 @@ fn convert_outbound_route( let rules = vec![outbound::opaque_route::Rule { backends: 
Some(outbound::opaque_route::Distribution { kind: Some(dist) }), + filters: Vec::new(), }]; - outbound::OpaqueRoute { - metadata, - rules, - error: None, - } + outbound::OpaqueRoute { metadata, rules } } fn convert_backend( @@ -116,7 +113,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), } } @@ -139,7 +136,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), }, Backend::Service(svc) => invalid_backend( @@ -173,7 +170,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), } } else { @@ -226,7 +223,12 @@ fn invalid_backend( queue: Some(default_queue_config()), kind: None, }), - invalid: Some(outbound::opaque_route::route_backend::Invalid { message }), + + filters: vec![outbound::opaque_route::Filter { + kind: Some(outbound::opaque_route::filter::Kind::Invalid( + linkerd2_proxy_api::opaque_route::Invalid { message }, + )), + }], }), } } @@ -235,12 +237,14 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, traffic_policy: &TrafficPolicy, ) -> outbound::OpaqueRoute { - let (error, name) = match traffic_policy { - TrafficPolicy::Allow => (None, "tcp-egress-allow"), + let (filters, name) = match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "tcp-egress-allow"), TrafficPolicy::Deny => ( - Some(outbound::opaque_route::RouteError { - kind: outbound::opaque_route::route_error::Kind::Forbidden as i32, - }), + vec![outbound::opaque_route::Filter { + kind: Some(outbound::opaque_route::filter::Kind::Forbidden( + linkerd2_proxy_api::opaque_route::Forbidden {}, + )), + }], "tcp-egress-deny", ), }; @@ -254,15 +258,12 @@ pub(crate) fn default_outbound_egress_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend), - invalid: None, + filters: Vec::new(), }], }, )), }), + filters, }]; - outbound::OpaqueRoute { - metadata, - rules, - error, - } + outbound::OpaqueRoute { metadata, rules } } diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs index a49d756c51863..86d8a29c8cb7a 100644 --- a/policy-controller/grpc/src/outbound/tls.rs +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -69,7 +69,7 @@ fn convert_outbound_route( outbound::tls_route::distribution::FirstAvailable { backends: vec![outbound::tls_route::RouteBackend { backend: Some(backend.clone()), - invalid: None, + filters: Vec::new(), }], }, ) @@ -81,13 +81,13 @@ fn convert_outbound_route( let rules = vec![outbound::tls_route::Rule { backends: Some(outbound::tls_route::Distribution { kind: Some(dist) }), + filters: Vec::new(), }]; outbound::TlsRoute { metadata, snis, rules, - error: None, } } @@ -115,7 +115,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), } } @@ -138,7 +138,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), }, Backend::Service(svc) => invalid_backend( @@ -172,7 +172,7 @@ fn convert_backend( }, )), }), - invalid: None, + filters: Vec::new(), }), } } else { @@ -225,7 +225,11 @@ fn invalid_backend( queue: Some(default_queue_config()), kind: None, }), - invalid: Some(outbound::tls_route::route_backend::Invalid { message }), + filters: vec![outbound::tls_route::Filter { + kind: Some(outbound::tls_route::filter::Kind::Invalid( + linkerd2_proxy_api::opaque_route::Invalid { message }, + )), + }], }), } } @@ -234,12 +238,14 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, traffic_policy: &TrafficPolicy, ) -> 
outbound::TlsRoute { - let (error, name) = match traffic_policy { - TrafficPolicy::Allow => (None, "tls-egress-allow"), + let (filters, name) = match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "tls-egress-allow"), TrafficPolicy::Deny => ( - Some(outbound::tls_route::RouteError { - kind: outbound::tls_route::route_error::Kind::Forbidden as i32, - }), + vec![outbound::tls_route::Filter { + kind: Some(outbound::tls_route::filter::Kind::Forbidden( + linkerd2_proxy_api::opaque_route::Forbidden {}, + )), + }], "tls-egress-deny", ), }; @@ -253,16 +259,16 @@ pub(crate) fn default_outbound_egress_route( outbound::tls_route::distribution::FirstAvailable { backends: vec![outbound::tls_route::RouteBackend { backend: Some(backend), - invalid: None, + filters: Vec::new(), }], }, )), }), + filters, }]; outbound::TlsRoute { metadata, rules, - error, ..Default::default() } } diff --git a/policy-test/Cargo.toml b/policy-test/Cargo.toml index 05718827e0d82..2789b51087cc6 100644 --- a/policy-test/Cargo.toml +++ b/policy-test/Cargo.toml @@ -31,7 +31,7 @@ default-features = false features = ["client", "openssl-tls", "runtime", "ws"] [dependencies.linkerd2-proxy-api] -version = "0.14" +version = "0.15" features = ["inbound", "outbound"] [dev-dependencies] From c0a5524bc9bdcce40e76ca3fb82f23ed17ffef90 Mon Sep 17 00:00:00 2001 From: l5d-bot <48604953+l5d-bot@users.noreply.github.com> Date: Thu, 21 Nov 2024 13:09:43 -0800 Subject: [PATCH 10/18] proxy: v2.267.0 (#13363) Release notes: https://github.com/linkerd/linkerd2-proxy/releases/tag/release/v2.267.0 Signed-off-by: l5d-bot Co-authored-by: l5d-bot --- .proxy-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.proxy-version b/.proxy-version index 977437ad70626..7b4fddd40548c 100644 --- a/.proxy-version +++ b/.proxy-version @@ -1 +1 @@ -v2.266.0 +v2.267.0 From 7d9072692184f2e2962ef153882bd94ef5257f45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Nov 2024 10:34:13 -0800 Subject: [PATCH 11/18] build(deps): bump cpufeatures from 0.2.15 to 0.2.16 (#13370) Bumps [cpufeatures](https://github.com/RustCrypto/utils) from 0.2.15 to 0.2.16. - [Commits](https://github.com/RustCrypto/utils/compare/cpufeatures-v0.2.15...cpufeatures-v0.2.16) --- updated-dependencies: - dependency-name: cpufeatures dependency-type: indirect update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d5d5c0b2298f..f69505829d910 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -326,9 +326,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] From 6647791e5b0da42fcd0cf78aa55da5504dfd9472 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Nov 2024 10:34:28 -0800 Subject: [PATCH 12/18] build(deps): bump proc-macro2 from 1.0.90 to 1.0.92 (#13369) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.90 to 1.0.92. 
- [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.90...1.0.92) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: indirect update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f69505829d910..a108503f51fc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1649,9 +1649,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.90" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e1ced3fe749df87a909c23e9607ab9a09c8f0bedb7e03b8146f4c08c298673" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] From 0ecc74c6477cbafb866bd50c652f2b3d0c474642 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Nov 2024 10:34:56 -0800 Subject: [PATCH 13/18] build(deps): bump k8s.io/kube-aggregator from 0.31.2 to 0.31.3 (#13365) Bumps [k8s.io/kube-aggregator](https://github.com/kubernetes/kube-aggregator) from 0.31.2 to 0.31.3. - [Commits](https://github.com/kubernetes/kube-aggregator/compare/v0.31.2...v0.31.3) --- updated-dependencies: - dependency-name: k8s.io/kube-aggregator dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 8ebd31b1c530f..cc76caa2d27eb 100644 --- a/go.mod +++ b/go.mod @@ -45,14 +45,14 @@ require ( google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.16.3 - k8s.io/api v0.31.2 + k8s.io/api v0.31.3 k8s.io/apiextensions-apiserver v0.31.2 - k8s.io/apimachinery v0.31.2 - k8s.io/client-go v0.31.2 - k8s.io/code-generator v0.31.2 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/code-generator v0.31.3 k8s.io/endpointslice v0.31.2 k8s.io/klog/v2 v2.130.1 - k8s.io/kube-aggregator v0.31.2 + k8s.io/kube-aggregator v0.31.3 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/gateway-api v0.8.1 sigs.k8s.io/yaml v1.4.0 @@ -160,7 +160,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cli-runtime v0.31.1 // indirect - k8s.io/component-base v0.31.2 // indirect + k8s.io/component-base v0.31.3 // indirect k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect oras.land/oras-go v1.2.5 // indirect diff --git a/go.sum b/go.sum index a043d65ed0ba8..a54051c96bda8 100644 --- a/go.sum +++ b/go.sum @@ -729,28 +729,28 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= -k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/api v0.31.3 
h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= -k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= -k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= -k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= -k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= -k8s.io/code-generator v0.31.2 h1:xLWxG0HEpMSHfcM//3u3Ro2Hmc6AyyLINQS//Z2GEOI= -k8s.io/code-generator v0.31.2/go.mod h1:eEQHXgBU/m7LDaToDoiz3t97dUUVyOblQdwOr8rivqc= -k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA= -k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/code-generator v0.31.3 h1:Pj0fYOBms+ZrsulLi4DMsCEx1jG8fWKRLy44onHsLBI= +k8s.io/code-generator v0.31.3/go.mod h1:/umCIlT84g1+Yu5ZXtP1KGSRTnGiIzzX5AzUAxsNlts= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= k8s.io/endpointslice v0.31.2 h1:FzelGpchzLs92ULniao/rFSq08ewMzAA3Clw+WOgl3U= k8s.io/endpointslice v0.31.2/go.mod h1:7dWUOenhr41s1hHvgfayR0PGgPnM2jcPkALOvex+tJw= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.31.2 h1:Uw1zUP2D/4wiSjKWVVzSOcCGLuW/+IdRwjjC0FJooYU= -k8s.io/kube-aggregator v0.31.2/go.mod h1:41/VIXH+/Qcg9ERNAY6bRF/WQR6xL1wFgYagdHac1X4= +k8s.io/kube-aggregator v0.31.3 h1:DqHPdTglJHgOfB884AaroyxrML/aL82ASYOh65m7MSk= +k8s.io/kube-aggregator v0.31.3/go.mod h1:Kx59Xjnf0SnY47qf9Or++4y3XCHQ3kR0xk1Di6KFiFU= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= From d5af3b0808a93b0a655e54bb51cec077406fb6b0 Mon Sep 17 00:00:00 2001 From: Oliver Gould Date: Fri, 22 Nov 2024 12:25:05 -0800 Subject: [PATCH 14/18] ci: update latest k8s version to 1.31 (#13374) This change updates the test matrix to exercise k8s v1.31. 
--- .github/workflows/integration.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 40c529f6f58b8..3e823cbb75679 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -164,7 +164,7 @@ jobs: matrix: k8s: - v1.22 - - v1.29 + - v1.31 steps: - uses: extractions/setup-just@dd310ad5a97d8e7b41793f8ef055398d51ad4de6 env: @@ -316,7 +316,7 @@ jobs: matrix: k8s: - v1.22 - - v1.29 + - v1.31 steps: - uses: extractions/setup-just@dd310ad5a97d8e7b41793f8ef055398d51ad4de6 env: From b67674a64cbd744006e914836224faf1ab379d9e Mon Sep 17 00:00:00 2001 From: l5d-bot <48604953+l5d-bot@users.noreply.github.com> Date: Fri, 22 Nov 2024 12:26:02 -0800 Subject: [PATCH 15/18] proxy: v2.268.0 (#13375) Release notes: https://github.com/linkerd/linkerd2-proxy/releases/tag/release/v2.268.0 Signed-off-by: l5d-bot Co-authored-by: l5d-bot --- .proxy-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.proxy-version b/.proxy-version index 7b4fddd40548c..bc37455324d77 100644 --- a/.proxy-version +++ b/.proxy-version @@ -1 +1 @@ -v2.267.0 +v2.268.0 From a3f1e29193472686e2476684e27d5c881714fabc Mon Sep 17 00:00:00 2001 From: Alex Leong Date: Fri, 22 Nov 2024 13:01:35 -0800 Subject: [PATCH 16/18] feat(cli): add check for link version (#13376) We add a linkerd.io/created-by annotation to Link resources that specifies the version of the CLI used to create the Link. This annotation is already used in this way by control plane components. This allows us to easily see what version of Linkerd was used to generate a Link. We add a check that inspects this value and warns if any Links don't match the current version of the CLI. Additionally, we fix an issue with the orphaned services check where it was incorrectly warning that federated services were orphaned because they don't have a specific target cluster. Signed-off-by: Alex Leong --- multicluster/cmd/check.go | 31 ++++++++++++++++++++++++++++++- pkg/multicluster/link.go | 5 +++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/multicluster/cmd/check.go b/multicluster/cmd/check.go index 622ef9cbe738c..7672e67b4ceba 100644 --- a/multicluster/cmd/check.go +++ b/multicluster/cmd/check.go @@ -176,6 +176,11 @@ func multiclusterCategory(hc *healthChecker, wait time.Duration) *healthcheck.Ca WithHintAnchor("l5d-multicluster-links-are-valid"). Fatal(). WithCheck(func(ctx context.Context) error { return hc.checkLinks(ctx) })) + checkers = append(checkers, + *healthcheck.NewChecker("Link and CLI versions match"). + WithHintAnchor("l5d-multicluster-links-version"). + Warning(). + WithCheck(func(ctx context.Context) error { return hc.checkLinkVersions() })) checkers = append(checkers, *healthcheck.NewChecker("remote cluster access credentials are valid"). WithHintAnchor("l5d-smc-target-clusters-access").
@@ -332,6 +337,30 @@ func (hc *healthChecker) checkLinks(ctx context.Context) error { return healthcheck.VerboseSuccess{Message: strings.Join(linkNames, "\n")} } +func (hc *healthChecker) checkLinkVersions() error { + errors := []error{} + links := []string{} + for _, link := range hc.links { + parts := strings.Split(link.CreatedBy, " ") + if len(parts) == 2 && parts[0] == "linkerd/cli" { + if parts[1] == version.Version { + links = append(links, fmt.Sprintf("\t* %s", link.TargetClusterName)) + } else { + errors = append(errors, fmt.Errorf("* %s: CLI version is %s but Link version is %s", link.TargetClusterName, version.Version, parts[1])) + } + } else { + errors = append(errors, fmt.Errorf("* %s: unable to determine version", link.TargetClusterName)) + } + } + if len(errors) > 0 { + return joinErrors(errors, 2) + } + if len(links) == 0 { + return healthcheck.SkipError{Reason: "no links"} + } + return healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")} +} + func (hc *healthChecker) checkRemoteClusterConnectivity(ctx context.Context) error { errors := []error{} links := []string{} @@ -668,7 +697,7 @@ func (hc *healthChecker) checkIfMirrorServicesHaveEndpoints(ctx context.Context) func (hc *healthChecker) checkForOrphanedServices(ctx context.Context) error { errors := []error{} - selector := fmt.Sprintf("%s, !%s", k8s.MirroredResourceLabel, k8s.MirroredGatewayLabel) + selector := fmt.Sprintf("%s, !%s, %s", k8s.MirroredResourceLabel, k8s.MirroredGatewayLabel, k8s.RemoteClusterNameLabel) mirrorServices, err := hc.KubeAPIClient().CoreV1().Services(metav1.NamespaceAll).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { return err diff --git a/pkg/multicluster/link.go b/pkg/multicluster/link.go index a54be7024eb9e..5fbe9c47fc111 100644 --- a/pkg/multicluster/link.go +++ b/pkg/multicluster/link.go @@ -39,6 +39,7 @@ type ( Link struct { Name string Namespace string + CreatedBy string TargetClusterName string TargetClusterDomain string TargetClusterLinkerdNamespace string @@ -177,6 +178,7 @@ func NewLink(u unstructured.Unstructured) (Link, error) { return Link{ Name: u.GetName(), Namespace: u.GetNamespace(), + CreatedBy: u.GetAnnotations()[k8s.CreatedByAnnotation], TargetClusterName: targetClusterName, TargetClusterDomain: targetClusterDomain, TargetClusterLinkerdNamespace: targetClusterLinkerdNamespace, @@ -260,6 +262,9 @@ func (l Link) ToUnstructured() (unstructured.Unstructured, error) { "metadata": map[string]interface{}{ "name": l.Name, "namespace": l.Namespace, + "annotations": map[string]string{ + k8s.CreatedByAnnotation: k8s.CreatedByAnnotationValue(), + }, }, "spec": spec, "status": map[string]interface{}{}, From abfdd691a0016be3f63cab45efe209fa95a2c3c5 Mon Sep 17 00:00:00 2001 From: Oliver Gould Date: Fri, 22 Nov 2024 13:32:04 -0800 Subject: [PATCH 17/18] fix(test): ensure that the controller sets rate limit status (#13377) The http_local_rate_limit_policy test creates a resource with a status already hydrated, but status setting is a job of the controller. This change updates the test to create a resource without a status and then to wait for the status to be set properly. This will hopefully help us to avoid race conditions in this test whereby the API lookup can occur before the controller observes the resource creation. 
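The shape of the fix, condensed (`mk_rate_limit` is a stand-in for the inline spec built in the diff below; `await_condition` is the existing helper from linkerd_policy_test):

    // Create the policy without a status, then block until the policy
    // controller reports it as Accepted before exercising the API.
    let rate_limit = create(&client, mk_rate_limit(&ns)).await;
    await_condition(
        &client,
        &ns,
        &rate_limit.name_unchecked(),
        |obj: Option<&k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy>| {
            obj.and_then(|obj| obj.status.as_ref()).map_or(false, |status| {
                status
                    .conditions
                    .iter()
                    .any(|c| c.type_ == "Accepted" && c.status == "True")
            })
        },
    )
    .await
    .expect("rate limit must get a status");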
--- policy-test/tests/inbound_api.rs | 49 ++++++++++++++++---------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/policy-test/tests/inbound_api.rs b/policy-test/tests/inbound_api.rs index 53004bf30b920..b8ab6f4cfbd1d 100644 --- a/policy-test/tests/inbound_api.rs +++ b/policy-test/tests/inbound_api.rs @@ -1,13 +1,10 @@ -use std::time::Duration; - use futures::prelude::*; -use k8s_openapi::chrono; use kube::ResourceExt; use linkerd_policy_controller_core::{Ipv4Net, Ipv6Net}; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ assert_default_all_unauthenticated_labels, assert_is_default_all_unauthenticated, - assert_protocol_detect, create, create_ready_pod, grpc, with_temp_ns, + assert_protocol_detect, await_condition, create, create_ready_pod, grpc, with_temp_ns, }; use maplit::{btreemap, convert_args, hashmap}; use tokio::time; @@ -332,7 +329,7 @@ async fn http_local_rate_limit_policy() { .await; // Create a rate-limit policy associated to the server - create( + let rate_limit = create( &client, k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy { metadata: k8s::ObjectMeta { @@ -356,25 +353,29 @@ async fn http_local_rate_limit_policy() { }], }]), }, - status: Some(k8s::policy::HttpLocalRateLimitPolicyStatus { - conditions: vec![k8s::Condition { - last_transition_time: k8s::Time(chrono::DateTime::::MIN_UTC), - message: "".to_string(), - observed_generation: None, - reason: "".to_string(), - status: "True".to_string(), - type_: "Accepted".to_string(), - }], - target_ref: k8s::policy::LocalTargetRef { - group: Some("policy.linkerd.io".to_string()), - kind: "Server".to_string(), - name: "linkerd-admin".to_string(), - }, - }), + status: None, }, ) .await; + await_condition( + &client, + &ns, + &rate_limit.name_unchecked(), + |obj: Option<&k8s::policy::ratelimit_policy::HttpLocalRateLimitPolicy>| { + obj.as_ref().map_or(false, |obj| { + obj.status.as_ref().map_or(false, |status| { + status + .conditions + .iter() + .any(|c| c.type_ == "Accepted" && c.status == "True") + }) + }) + }, + ) + .await + .expect("rate limit must get a status"); + let client_id = format!("sa-0.{}.serviceaccount.identity.linkerd.cluster.local", ns); let ratelimit_overrides = vec![(200, vec![client_id])]; let ratelimit = @@ -609,7 +610,7 @@ async fn http_routes_ordered_by_creation() { // Creation timestamps in Kubernetes only have second precision, so we // must wait a whole second between creating each of these routes in // order for them to have different creation timestamps. 
- tokio::time::sleep(Duration::from_secs(1)).await; + time::sleep(time::Duration::from_secs(1)).await; create( &client, mk_admin_route_with_path(ns.as_ref(), "a", "/ready"), ) .await; next_config(&mut rx).await; - tokio::time::sleep(Duration::from_secs(1)).await; + time::sleep(time::Duration::from_secs(1)).await; create( &client, mk_admin_route_with_path(ns.as_ref(), "c", "/shutdown"), ) .await; next_config(&mut rx).await; - tokio::time::sleep(Duration::from_secs(1)).await; + time::sleep(time::Duration::from_secs(1)).await; create( &client, mk_admin_route_with_path(ns.as_ref(), "b", "/proxy-log-level"), ) .await; @@ -815,7 +816,7 @@ async fn retry_watch_server( Ok(rx) => return rx, Err(error) => { tracing::error!(?error, ns, pod_name, "failed to watch policy for port 4191"); - time::sleep(Duration::from_secs(1)).await; + time::sleep(time::Duration::from_secs(1)).await; } } } From 3c91fc64ce61208e3be01f908abe178e3786616f Mon Sep 17 00:00:00 2001 From: Oliver Gould Date: Fri, 22 Nov 2024 15:21:09 -0800 Subject: [PATCH 18/18] fix(destination): avoid panic on missing managed fields timestamp (#13378) We received a report of a panic: runtime error: invalid memory address or nil pointer dereference panic({0x1edb860?, 0x37a6050?} /usr/local/go/src/runtime/panic.go:785 +0x132 github.com/linkerd/linkerd2/controller/api/destination/watcher.latestUpdated({0xc0006b2d80?, 0xc00051a540?, 0xc0008fa008?}) /linkerd-build/vendor/github.com/linkerd/linkerd2/controller/api/destination/watcher/endpoints_watcher.go:1612 +0x125 github.com/linkerd/linkerd2/controller/api/destination/watcher.(*OpaquePortsWatcher).updateService(0xc0007d5480, {0x21fd160?, 0xc000d71688?}, {0x21fd160, 0xc000d71688}) /linkerd-build/vendor/github.com/linkerd/linkerd2/controller/api/destination/watcher/opaque_ports_watcher.go:141 +0x68 The `latestUpdated` function does not properly handle the case where a time is omitted from a `ManagedFieldsEntry`. type ManagedFieldsEntry struct { // Time is the timestamp of when the ManagedFields entry was added. The // timestamp will also be updated if a field is added, the manager // changes any of the owned fields value or removes a field. The // timestamp does not update when a field is removed from the entry // because another manager took it over. // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` This change adds a check to avoid the nil dereference. --- controller/api/destination/watcher/endpoints_watcher.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/controller/api/destination/watcher/endpoints_watcher.go b/controller/api/destination/watcher/endpoints_watcher.go index 800035beaa315..0a7276a36cbc5 100644 --- a/controller/api/destination/watcher/endpoints_watcher.go +++ b/controller/api/destination/watcher/endpoints_watcher.go @@ -1607,6 +1607,9 @@ func SetToServerProtocolExternalWorkload(k8sAPI *k8s.API, address *Address) erro func latestUpdated(managedFields []metav1.ManagedFieldsEntry) time.Time { var latest time.Time for _, field := range managedFields { + if field.Time == nil { + continue + } if field.Operation == metav1.ManagedFieldsOperationUpdate { if latest.IsZero() || field.Time.After(latest) { latest = field.Time.Time