Skip to content

Commit

Permalink
resolve conflict
Browse files Browse the repository at this point in the history
Signed-off-by: Lukasz Dziedziak <[email protected]>
  • Loading branch information
lukidzi committed Dec 19, 2024
1 parent 4edd87d commit eca3e8e
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 75 deletions.
26 changes: 5 additions & 21 deletions test/framework/envs/kubernetes/env.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,34 +21,18 @@ func SetupAndGetState() []byte {
framework.GatewayAPICRDs,
)).To(Succeed())

<<<<<<< HEAD
kumaOptions := append([]framework.KumaDeploymentOption{
// Occasionally CP will lose a leader in the E2E test just because of this deadline,
// which does not make sense in such controlled environment (one k3d node, one instance of the CP).
// 100s and 80s are values that we also use in mesh-perf when we put a lot of pressure on the CP.
framework.WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_LEASE_DURATION", "100s"),
framework.WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_RENEW_DEADLINE", "80s"),
framework.WithCtlOpts(map[string]string{
"--experimental-gatewayapi": "true",
}),
framework.WithEgress(),
},
framework.KumaDeploymentOptionsFromConfig(framework.Config.KumaCpConfig.Standalone.Kubernetes)...)
=======
kumaOptions := append(
[]framework.KumaDeploymentOption{
framework.WithEgress(),
framework.WithEgressEnvoyAdminTunnel(),
framework.WithCtlOpts(map[string]string{
"--set": "controlPlane.supportGatewaySecretsInAllNamespaces=true", // needed for test/e2e_env/kubernetes/gateway/gatewayapi.go:470
}),
// Occasionally CP will lose a leader in the E2E test just because of this deadline,
// which does not make sense in such controlled environment (one k3d node, one instance of the CP).
// 100s and 80s are values that we also use in mesh-perf when we put a lot of pressure on the CP.
framework.WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_LEASE_DURATION", "100s"),
framework.WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_RENEW_DEADLINE", "80s"),
},
framework.KumaDeploymentOptionsFromConfig(framework.Config.KumaCpConfig.Standalone.Kubernetes)...,
)
if framework.Config.KumaExperimentalSidecarContainers {
kumaOptions = append(kumaOptions, framework.WithEnv("KUMA_EXPERIMENTAL_SIDECAR_CONTAINERS", "true"))
}
>>>>>>> bb904d04e (test(e2e): increase leader election lease and renew duration (#11796))

Eventually(func() error {
return Cluster.Install(framework.Kuma(core.Zone, kumaOptions...))
Expand Down
64 changes: 10 additions & 54 deletions test/framework/envs/multizone/env.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,60 +50,6 @@ type State struct {
KubeZone2 K8sNetworkingState
}

<<<<<<< HEAD
=======
// setupKubeZone starts the deployment of a Kubernetes zone control plane on the
// k3d cluster identified by clusterName and returns the cluster handle
// immediately; the actual install runs in a background goroutine (asserted via
// Gomega) and is registered in wg so the caller can wait for completion.
// extraOptions are appended after the defaults and therefore take precedence.
func setupKubeZone(wg *sync.WaitGroup, clusterName string, extraOptions ...framework.KumaDeploymentOption) *K8sCluster {
	wg.Add(1)
	options := []framework.KumaDeploymentOption{
		WithEnv("KUMA_MULTIZONE_ZONE_KDS_NACK_BACKOFF", "1s"),
		WithIngress(),
		WithIngressEnvoyAdminTunnel(),
		WithEgress(),
		WithEgressEnvoyAdminTunnel(),
		WithGlobalAddress(Global.GetKuma().GetKDSServerAddress()),
		// Occasionally CP will lose a leader in the E2E test just because of this deadline,
		// which does not make sense in such controlled environment (one k3d node, one instance of the CP).
		// 100s and 80s are values that we also use in mesh-perf when we put a lot of pressure on the CP.
		// NOTE: unqualified like the sibling options above — this file dot-imports
		// the framework package, so the `framework.` prefix was redundant and inconsistent.
		WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_LEASE_DURATION", "100s"),
		WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_RENEW_DEADLINE", "80s"),
	}
	options = append(options, extraOptions...)
	zone := NewK8sCluster(NewTestingT(), clusterName, Verbose)
	go func() {
		defer ginkgo.GinkgoRecover()
		defer wg.Done()
		Expect(zone.Install(Kuma(core.Zone, options...))).To(Succeed())
	}()
	return zone
}

// setupUniZone kicks off the deployment of a universal (VM-mode) zone named
// clusterName, attached to the global control plane. The returned cluster
// handle is available right away; installation of the CP plus one ingress and
// one egress happens in a background goroutine tracked by wg.
// extraOptions follow the defaults, so callers can override them.
func setupUniZone(wg *sync.WaitGroup, clusterName string, extraOptions ...framework.KumaDeploymentOption) *UniversalCluster {
	wg.Add(1)
	defaults := []framework.KumaDeploymentOption{
		WithGlobalAddress(Global.GetKuma().GetKDSServerAddress()),
		WithEgressEnvoyAdminTunnel(),
		WithIngressEnvoyAdminTunnel(),
		WithEnv("KUMA_XDS_DATAPLANE_DEREGISTRATION_DELAY", "0s"), // we have only 1 Kuma CP instance so there is no risk setting this to 0
		WithEnv("KUMA_MULTIZONE_ZONE_KDS_NACK_BACKOFF", "1s"),
	}
	deployOptions := append(defaults, extraOptions...)
	zone := NewUniversalCluster(NewTestingT(), clusterName, Silent)
	go func() {
		defer ginkgo.GinkgoRecover()
		defer wg.Done()
		setupErr := NewClusterSetup().
			Install(Kuma(core.Zone, deployOptions...)).
			Install(IngressUniversal(Global.GetKuma().GenerateZoneIngressToken)).
			Install(EgressUniversal(Global.GetKuma().GenerateZoneEgressToken, WithConcurrency(1))).
			Setup(zone)
		Expect(setupErr).ToNot(HaveOccurred())
	}()
	return zone
}

>>>>>>> bb904d04e (test(e2e): increase leader election lease and renew duration (#11796))
// SetupAndGetState to be used with Ginkgo SynchronizedBeforeSuite
func SetupAndGetState() []byte {
Global = NewUniversalCluster(NewTestingT(), Kuma3, Silent)
Expand All @@ -120,6 +66,11 @@ func SetupAndGetState() []byte {

kubeZone1Options := append(
[]framework.KumaDeploymentOption{
// Occasionally CP will lose a leader in the E2E test just because of this deadline,
// which does not make sense in such controlled environment (one k3d node, one instance of the CP).
// 100s and 80s are values that we also use in mesh-perf when we put a lot of pressure on the CP.
WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_LEASE_DURATION", "100s"),
WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_RENEW_DEADLINE", "80s"),
WithEnv("KUMA_STORE_UNSAFE_DELETE", "true"),
WithEnv("KUMA_MULTIZONE_ZONE_KDS_NACK_BACKOFF", "1s"),
WithIngress(),
Expand Down Expand Up @@ -147,6 +98,11 @@ func SetupAndGetState() []byte {

kubeZone2Options := append(
[]framework.KumaDeploymentOption{
// Occasionally CP will lose a leader in the E2E test just because of this deadline,
// which does not make sense in such controlled environment (one k3d node, one instance of the CP).
// 100s and 80s are values that we also use in mesh-perf when we put a lot of pressure on the CP.
WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_LEASE_DURATION", "100s"),
WithEnv("KUMA_RUNTIME_KUBERNETES_LEADER_ELECTION_RENEW_DEADLINE", "80s"),
WithEnv("KUMA_MULTIZONE_ZONE_KDS_NACK_BACKOFF", "1s"),
WithIngress(),
WithIngressEnvoyAdminTunnel(),
Expand Down

0 comments on commit eca3e8e

Please sign in to comment.