Skip to content

Commit

Permalink
k8s status fixes (#240)
Browse files Browse the repository at this point in the history
The k8s status POST endpoint no longer blocks while waiting for a Ready node.

The k8s status command previously performed work on the client side before hitting the POST endpoint; these steps have been moved to the server side.
---------

Co-authored-by: Angelos Kolaitis <[email protected]>
  • Loading branch information
eaudetcobello and neoaggelos authored Mar 19, 2024
1 parent 5d34762 commit dea3716
Show file tree
Hide file tree
Showing 9 changed files with 21 additions and 56 deletions.
10 changes: 0 additions & 10 deletions src/k8s/cmd/k8s/k8s_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,16 +27,6 @@ func newStatusCmd(env cmdutil.ExecutionEnvironment) *cobra.Command {
return
}

// TODO(neoaggelos): this must be done on the server side
// fail fast if we're not explicitly waiting and we can't get kube-apiserver endpoints
if !opts.waitReady {
if !client.IsKubernetesAPIServerReady(cmd.Context()) {
cmd.PrintErrln("Error: There are no active kube-apiserver endpoints, cluster status is unavailable")
env.Exit(1)
return
}
}

status, err := client.ClusterStatus(cmd.Context(), opts.waitReady)
if err != nil {
cmd.PrintErrf("Error: Failed to retrieve the cluster status.\n\nThe error was: %v\n", err)
Expand Down
14 changes: 0 additions & 14 deletions src/k8s/pkg/k8s/client/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ import (

apiv1 "github.com/canonical/k8s/api/v1"
"github.com/canonical/k8s/pkg/utils/control"
"github.com/canonical/k8s/pkg/utils/k8s"
"github.com/canonical/lxd/shared/api"
)

Expand Down Expand Up @@ -66,16 +65,3 @@ func (c *k8sdClient) KubeConfig(ctx context.Context, request apiv1.GetKubeConfig
}
return response.KubeConfig, nil
}

// IsKubernetesAPIServerReady checks if kube-apiserver is reachable.
// It returns false both when a Kubernetes client cannot be created and
// when the kube-apiserver endpoints cannot be retrieved.
func (c *k8sdClient) IsKubernetesAPIServerReady(ctx context.Context) bool {
	kc, err := k8s.NewClient(c.snap)
	if err != nil {
		return false
	}
	// The endpoint list itself is not needed; a successful call is enough
	// to show that kube-apiserver is reachable.
	// (Original ended with `return err == nil`, which is unconditionally
	// true after the error guard — simplified to `return true`.)
	if _, err := kc.GetKubeAPIServerEndpoints(ctx); err != nil {
		return false
	}
	return true
}
2 changes: 0 additions & 2 deletions src/k8s/pkg/k8s/client/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,6 @@ import (
type Client interface {
// Bootstrap initializes a new cluster member using the provided bootstrap configuration.
Bootstrap(ctx context.Context, name string, address string, bootstrapConfig apiv1.BootstrapConfig) (apiv1.NodeStatus, error)
// IsKubernetesAPIServerReady checks if kube-apiserver is reachable.
IsKubernetesAPIServerReady(ctx context.Context) bool
// IsBootstrapped checks whether the current node is already bootstrapped.
IsBootstrapped(ctx context.Context) bool
// CleanupNode performs cleanup operations for a specific node in the cluster.
Expand Down
13 changes: 4 additions & 9 deletions src/k8s/pkg/k8s/client/mock/mock.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,10 @@ type Client struct {
Address string
BootstrapConfig apiv1.BootstrapConfig
}
BootstrapClusterMember apiv1.NodeStatus
BootstrapErr error
IsBootstrappedReturn bool
IsKubernetesAPIServerReadyReturn bool
CleanupNodeCalledWith struct {
BootstrapClusterMember apiv1.NodeStatus
BootstrapErr error
IsBootstrappedReturn bool
CleanupNodeCalledWith struct {
Ctx context.Context
NodeName string
}
Expand Down Expand Up @@ -62,10 +61,6 @@ func (c *Client) Bootstrap(ctx context.Context, name string, address string, boo
return c.BootstrapClusterMember, c.BootstrapErr
}

// IsKubernetesAPIServerReady returns the mock's pre-configured
// IsKubernetesAPIServerReadyReturn value; ctx is ignored.
func (c *Client) IsKubernetesAPIServerReady(ctx context.Context) bool {
	return c.IsKubernetesAPIServerReadyReturn
}

// IsBootstrapped returns the mock's pre-configured IsBootstrappedReturn
// value; ctx is ignored.
func (c *Client) IsBootstrapped(ctx context.Context) bool {
	return c.IsBootstrappedReturn
}
Expand Down
20 changes: 8 additions & 12 deletions src/k8s/pkg/k8sd/api/impl/k8sd.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,28 +17,24 @@ import (
func GetClusterStatus(ctx context.Context, s *state.State) (apiv1.ClusterStatus, error) {
snap := snap.SnapFromContext(s.Context)

client, err := k8s.NewClient(snap)
members, err := GetClusterMembers(ctx, s)
if err != nil {
return apiv1.ClusterStatus{}, fmt.Errorf("failed to create k8s client: %w", err)
}

if err := client.WaitApiServerReady(ctx); err != nil {
return apiv1.ClusterStatus{}, fmt.Errorf("k8s api server did not become ready in time: %w", err)
return apiv1.ClusterStatus{}, fmt.Errorf("failed to get cluster members: %w", err)
}

ready, err := client.ClusterReady(ctx)
config, err := utils.GetUserFacingClusterConfig(ctx, s)
if err != nil {
return apiv1.ClusterStatus{}, fmt.Errorf("failed to get cluster components: %w", err)
return apiv1.ClusterStatus{}, fmt.Errorf("failed to get user-facing cluster config: %w", err)
}

members, err := GetClusterMembers(ctx, s)
client, err := k8s.NewClient(snap)
if err != nil {
return apiv1.ClusterStatus{}, fmt.Errorf("failed to get cluster members: %w", err)
return apiv1.ClusterStatus{}, fmt.Errorf("failed to create k8s client: %w", err)
}

config, err := utils.GetUserFacingClusterConfig(ctx, s)
ready, err := client.HasReadyNodes(ctx)
if err != nil {
return apiv1.ClusterStatus{}, fmt.Errorf("failed to get user-facing cluster config: %w", err)
return apiv1.ClusterStatus{}, fmt.Errorf("failed to check if cluster has ready nodes: %w", err)
}

return apiv1.ClusterStatus{
Expand Down
2 changes: 1 addition & 1 deletion src/k8s/pkg/k8sd/app/hooks_bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -321,7 +321,7 @@ func onBootstrapControlPlane(s *state.State, initConfig map[string]string) error
}

if err := client.WaitApiServerReady(s.Context); err != nil {
return fmt.Errorf("k8s api server did not become ready in time: %w", err)
return fmt.Errorf("kube-apiserver did not become ready in time: %w", err)
}

if cfg.Network.Enabled != nil {
Expand Down
2 changes: 1 addition & 1 deletion src/k8s/pkg/k8sd/app/hooks_join.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ func onPostJoin(s *state.State, initConfig map[string]string) error {
}

if err := client.WaitApiServerReady(s.Context); err != nil {
return fmt.Errorf("kube-apiserver did not become ready: %w", err)
return fmt.Errorf("failed to wait for kube-apiserver to become ready: %w", err)
}

return nil
Expand Down
10 changes: 5 additions & 5 deletions src/k8s/pkg/utils/k8s/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,18 @@ import (
func (c *Client) WaitApiServerReady(ctx context.Context) error {
return control.WaitUntilReady(ctx, func() (bool, error) {
// TODO: use the /readyz endpoint instead

_, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
// We want to retry if an error occurs (=API server not ready)
// returning the error would abort, thus checking for nil
_, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
return err == nil, nil
})
}

// ClusterReady checks the status of all nodes in the Kubernetes cluster.
// If at least one node is in READY state it will return true.
func (c *Client) ClusterReady(ctx context.Context) (bool, error) {
// HasReadyNodes checks the status of all nodes in the Kubernetes cluster.
// HasReadyNodes returns true if there is at least one Ready node in the cluster, false otherwise.
func (c *Client) HasReadyNodes(ctx context.Context) (bool, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})

if err != nil {
return false, fmt.Errorf("failed to list nodes: %v", err)
}
Expand Down
4 changes: 2 additions & 2 deletions src/k8s/pkg/utils/k8s/status_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
)

func TestClusterReady(t *testing.T) {
func TestClusterHasReadyNodes(t *testing.T) {
tests := []struct {
name string
nodes []runtime.Object
Expand Down Expand Up @@ -91,7 +91,7 @@ func TestClusterReady(t *testing.T) {
clientset := fake.NewSimpleClientset(tt.nodes...)
client := &Client{Interface: clientset}

ready, err := client.ClusterReady(context.Background())
ready, err := client.HasReadyNodes(context.Background())

g.Expect(err).To(BeNil())
g.Expect(ready).To(Equal(tt.expectedReady))
Expand Down

0 comments on commit dea3716

Please sign in to comment.