From 7e9f6dc182908770fc8c70f6acd8caf50d179e26 Mon Sep 17 00:00:00 2001 From: Andy Huang Date: Fri, 27 Sep 2024 16:03:26 +0900 Subject: [PATCH] chore: remove unused code around node operations (#118) * chore: remove unused code around node operations * chore: set the static cache to be expired after 7 days --- shibuya/api/main.go | 17 +-- shibuya/config/init.go | 2 - shibuya/controller/gcp.go | 135 ------------------ shibuya/controller/main.go | 25 ---- shibuya/install/shibuya/Chart.yaml | 2 +- .../install/shibuya/templates/configmap.yaml | 1 - shibuya/install/shibuya/values.yaml | 1 - shibuya/main.go | 9 +- shibuya/scheduler/cloudrun.go | 7 +- shibuya/scheduler/k8s.go | 40 ------ shibuya/scheduler/types.go | 1 - shibuya/ui/handler.go | 3 +- shibuya/ui/static/js/collection.js | 18 --- shibuya/ui/templates/app.html | 28 +--- 14 files changed, 13 insertions(+), 276 deletions(-) delete mode 100644 shibuya/controller/gcp.go diff --git a/shibuya/api/main.go b/shibuya/api/main.go index 3dfb8d50..fea408fd 100644 --- a/shibuya/api/main.go +++ b/shibuya/api/main.go @@ -241,10 +241,6 @@ func (s *ShibuyaAPI) collectionAdminGetHandler(w http.ResponseWriter, r *http.Re } acr := new(AdminCollectionResponse) acr.RunningCollections = collections - if config.SC.ExecutorConfig.Cluster.OnDemand { - // we ignore errors here for simplicity - acr.NodePools, _ = s.ctr.Scheduler.GetAllNodesInfo() - } s.jsonise(w, http.StatusOK, acr) } @@ -298,6 +294,7 @@ func (s *ShibuyaAPI) planDeleteHandler(w http.ResponseWriter, r *http.Request, p if err != nil { s.handleErrors(w, err) return + } if using { s.handleErrors(w, makeInvalidRequestError("plan is being used")) @@ -432,14 +429,6 @@ func (s *ShibuyaAPI) collectionDeleteHandler(w http.ResponseWriter, r *http.Requ s.handleErrors(w, err) return } - if config.SC.ExecutorConfig.Cluster.OnDemand { - operator := controller.NewGCPOperator(collection.ID, 0) - pool := operator.GetNodePool() - if pool != nil { - s.handleErrors(w, makeInvalidRequestError("You cannot delete collection when you have nodes launched")) - return - } - } if s.ctr.Scheduler.PodReadyCount(collection.ID) > 0 { s.handleErrors(w, makeInvalidRequestError("You cannot launch engines when there are engines already deployed")) return @@ -518,10 +507,6 @@ func (s *ShibuyaAPI) collectionUploadHandler(w http.ResponseWriter, r *http.Requ s.handleErrors(w, makeInvalidRequestError(err.Error())) return } - if e == nil { - s.handleErrors(w, makeInvalidResourceError("YAML file")) - return - } if e.Content.CollectionID != collection.ID { s.handleErrors(w, makeInvalidRequestError("collection ID mismatch")) return diff --git a/shibuya/config/init.go b/shibuya/config/init.go index c7bad19f..4c967695 100644 --- a/shibuya/config/init.go +++ b/shibuya/config/init.go @@ -43,8 +43,6 @@ type ClusterConfig struct { ClusterID string `json:"cluster_id"` Kind string `json:"kind"` APIEndpoint string `json:"api_endpoint"` - NodeCPUSpec int `json:"node_cpu_spec"` - OnDemand bool `json:"on_demand"` GCDuration float64 `json:"gc_duration"` // in minutes ServiceType string `json:"service_type"` } diff --git a/shibuya/controller/gcp.go b/shibuya/controller/gcp.go deleted file mode 100644 index 2d962d47..00000000 --- a/shibuya/controller/gcp.go +++ /dev/null @@ -1,135 +0,0 @@ -package controller - -import ( - "context" - "fmt" - - "github.com/rakutentech/shibuya/shibuya/config" - "github.com/rakutentech/shibuya/shibuya/scheduler" - smodel "github.com/rakutentech/shibuya/shibuya/scheduler/model" - log "github.com/sirupsen/logrus" - google 
"google.golang.org/api/container/v1" -) - -type GCPOperator struct { - collectionID int64 - nodesRequired int64 - collectionIDStr string - service *google.Service - *config.ClusterConfig -} - -func NewGCPOperator(collectionID, nodesRequired int64) *GCPOperator { - ctx := context.Background() - service, err := google.NewService(ctx) - if err != nil { - log.Error(err) - } - return &GCPOperator{ - collectionID: collectionID, - nodesRequired: nodesRequired, - collectionIDStr: fmt.Sprintf("%d", collectionID), - service: service, - ClusterConfig: config.SC.ExecutorConfig.Cluster, - } -} - -func (o *GCPOperator) makePoolName() string { - return fmt.Sprintf("pool-api-%s", o.collectionIDStr) -} - -func (o *GCPOperator) makeCreateNodePoolRequest(nodePool *google.NodePool) *google.CreateNodePoolRequest { - return &google.CreateNodePoolRequest{ - NodePool: nodePool, - } -} - -func (o *GCPOperator) GetNodePool() *google.NodePool { - nodePoolService := o.service.Projects.Zones.Clusters.NodePools - currentNodePool, err := nodePoolService.Get(o.Project, o.Zone, o.ClusterID, o.makePoolName()).Do() - if err != nil { - return nil - } - return currentNodePool -} - -func (o *GCPOperator) GetNodesSize() (int, error) { - kcm := scheduler.NewK8sClientManager(config.SC.ExecutorConfig.Cluster) - nodes, err := kcm.GetNodesByCollection(o.collectionIDStr) - if err != nil { - return 0, err - } - return len(nodes), nil -} - -type GCPNodesInfo struct { - smodel.NodesInfo - Status string -} - -func (o *GCPOperator) GCPNodesInfo() *GCPNodesInfo { - pool := o.GetNodePool() - if pool != nil { - info := new(GCPNodesInfo) - info.Status = pool.Status - info.Size = int(pool.InitialNodeCount) - if size, err := o.GetNodesSize(); err == nil && size > 0 { - info.Size = size - } - return info - } - return nil -} - -func (o *GCPOperator) prepareNodes() error { - nodePoolService := o.service.Projects.Zones.Clusters.NodePools - currentNodePool := o.GetNodePool() - // If we already have nodes provisioned, we don't need to do anything - t, err := o.GetNodesSize() - if err != nil { - return err - } - poolSize := int64(t) - if poolSize >= o.nodesRequired { - return nil - } - if currentNodePool != nil && poolSize < o.nodesRequired { - currentNodePool.InitialNodeCount = o.nodesRequired - setPoolRequest := &google.SetNodePoolSizeRequest{ - NodeCount: o.nodesRequired, - } - _, err := nodePoolService.SetSize(o.Project, o.Zone, o.ClusterID, o.makePoolName(), setPoolRequest).Do() - if err != nil { - return err - } - return nil - } - nodePool := &google.NodePool{ - Config: &google.NodeConfig{ - MachineType: "n1-highcpu-32", - OauthScopes: []string{ - "https://www.googleapis.com/auth/devstorage.read_only", - }, - MinCpuPlatform: "Intel Skylake", - }, - } - nodePool.Config.Labels = map[string]string{ - "collection_id": o.collectionIDStr, - } - nodePool.InitialNodeCount = o.nodesRequired - nodePool.Name = o.makePoolName() - request := o.makeCreateNodePoolRequest(nodePool) - _, err = nodePoolService.Create(o.Project, o.Zone, o.ClusterID, request).Do() - if err != nil { - return err - } - return nil -} - -func (o *GCPOperator) destroyNodes() error { - nodePoolService := o.service.Projects.Zones.Clusters.NodePools - if _, err := nodePoolService.Delete(o.Project, o.Zone, o.ClusterID, o.makePoolName()).Do(); err != nil { - return err - } - return nil -} diff --git a/shibuya/controller/main.go b/shibuya/controller/main.go index 1e0c8ab3..98ddcd54 100644 --- a/shibuya/controller/main.go +++ b/shibuya/controller/main.go @@ -1,7 +1,6 @@ package controller 
import ( - "math" "net/http" "strconv" "sync" @@ -182,13 +181,6 @@ func (c *Controller) readConnectedEngines() { } } -func (c *Controller) calNodesRequired(enginesNum int) int64 { - masterCPU, _ := strconv.ParseFloat(config.SC.ExecutorConfig.JmeterContainer.CPU, 64) - enginePerNode := math.Floor(float64(config.SC.ExecutorConfig.Cluster.NodeCPUSpec) / masterCPU) - nodesRequired := math.Ceil(float64(enginesNum) / enginePerNode) - return int64(nodesRequired) -} - func (c *Controller) DeployCollection(collection *model.Collection) error { eps, err := collection.GetExecutionPlans() if err != nil { @@ -201,14 +193,6 @@ func (c *Controller) DeployCollection(collection *model.Collection) error { enginesCount += e.Engines vu += e.Engines * e.Concurrency } - if config.SC.ExecutorConfig.Cluster.OnDemand { - nodesCount = c.calNodesRequired(enginesCount) - operator := NewGCPOperator(collection.ID, nodesCount) - err := operator.prepareNodes() - if err != nil { - return err - } - } sid := "" if project, err := model.GetProject(collection.ProjectID); err == nil { sid = project.SID @@ -255,15 +239,6 @@ func (c *Controller) CollectionStatus(collection *model.Collection) (*smodel.Col if err != nil { return nil, err } - if config.SC.ExecutorConfig.Cluster.OnDemand { - operator := NewGCPOperator(collection.ID, 0) - info := operator.GCPNodesInfo() - cs.PoolStatus = "LAUNCHED" - if info != nil { - cs.PoolSize = info.Size - cs.PoolStatus = info.Status - } - } if config.SC.DevMode { cs.PoolSize = 100 cs.PoolStatus = "running" diff --git a/shibuya/install/shibuya/Chart.yaml b/shibuya/install/shibuya/Chart.yaml index 34390b9f..9a0d152e 100644 --- a/shibuya/install/shibuya/Chart.yaml +++ b/shibuya/install/shibuya/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: v0.1.2 +version: v0.1.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to diff --git a/shibuya/install/shibuya/templates/configmap.yaml b/shibuya/install/shibuya/templates/configmap.yaml index 14ccb1ca..f57ba6a9 100644 --- a/shibuya/install/shibuya/templates/configmap.yaml +++ b/shibuya/install/shibuya/templates/configmap.yaml @@ -29,7 +29,6 @@ data: }, "executors": { "cluster": { - "on_demand": {{ .Values.runtime.executors.cluster.on_demand }}, "service_type": {{ .Values.runtime.executors.cluster.service_type | quote }}, "gc_duration": {{ .Values.runtime.executors.cluster.gc_duration }} }, diff --git a/shibuya/install/shibuya/values.yaml b/shibuya/install/shibuya/values.yaml index 5ace1e3b..7ed62d46 100644 --- a/shibuya/install/shibuya/values.yaml +++ b/shibuya/install/shibuya/values.yaml @@ -67,7 +67,6 @@ runtime: keypairs: "" executors: cluster: - on_demand: false project: "" zone: "" cluster_id: "" diff --git a/shibuya/main.go b/shibuya/main.go index 31a1286e..b1ea374c 100644 --- a/shibuya/main.go +++ b/shibuya/main.go @@ -24,6 +24,13 @@ func main() { r.Handle(route.Method, route.Path, route.HandlerFunc) } r.Handler("GET", "/metrics", promhttp.Handler()) - r.ServeFiles("/static/*filepath", http.Dir("/static")) + + fileServer := http.FileServer(http.Dir("/static")) + r.GET("/static/*filepath", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + req.URL.Path = ps.ByName("filepath") + // Set the cache expiration time to 7 days + w.Header().Set("Cache-Control", "public, max-age=604800") + fileServer.ServeHTTP(w, req) + }) log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", 8080), context.ClearHandler(r))) } diff --git a/shibuya/scheduler/cloudrun.go b/shibuya/scheduler/cloudrun.go index adfa3ae5..5af1f994 100644 --- a/shibuya/scheduler/cloudrun.go +++ b/shibuya/scheduler/cloudrun.go @@ -339,17 +339,12 @@ func (cr *CloudRun) GetDeployedCollections() (map[int64]time.Time, error) { return deployCollections, nil } -func (cr *CloudRun) GetAllNodesInfo() (smodel.AllNodesInfo, error) { - // For cloud run, nodes info is not needed - return nil, nil -} - func (cr *CloudRun) GetPodsMetrics(collectionID, planID int64) (map[string]apiv1.ResourceList, error) { // For cloud run, pod metrics is not supported return nil, FeatureUnavailable } -//TODO: what we need is actually get the deployed engines account, not only ready ones. +// TODO: what we need is actually get the deployed engines account, not only ready ones. 
// We also need to change this in k8s.go func (cr *CloudRun) PodReadyCount(collectionID int64) int { items, err := cr.getEnginesByCollection(collectionID) diff --git a/shibuya/scheduler/k8s.go b/shibuya/scheduler/k8s.go index e742d650..8f41c3b9 100644 --- a/shibuya/scheduler/k8s.go +++ b/shibuya/scheduler/k8s.go @@ -108,10 +108,6 @@ func collectionPodAffinity(collectionID int64) *apiv1.PodAffinity { func prepareAffinity(collectionID int64) *apiv1.Affinity { affinity := &apiv1.Affinity{} - if config.SC.ExecutorConfig.Cluster.OnDemand { - affinity.NodeAffinity = collectionNodeAffinity(collectionID) - return affinity - } affinity.PodAffinity = collectionPodAffinity(collectionID) na := config.SC.ExecutorConfig.NodeAffinity if len(na) > 0 { @@ -900,42 +896,6 @@ func (kcm *K8sClientManager) CreateIngress(ingressClass, ingressName, serviceNam return nil } -func (kcm *K8sClientManager) GetNodesByCollection(collectionID string) ([]apiv1.Node, error) { - opts := metav1.ListOptions{ - LabelSelector: fmt.Sprintf("collection_id=%s", collectionID), - } - return kcm.getNodes(opts) -} - -func (kcm *K8sClientManager) getNodes(opts metav1.ListOptions) ([]apiv1.Node, error) { - nodeList, err := kcm.client.CoreV1().Nodes().List(context.TODO(), opts) - if err != nil { - return nil, err - } - return nodeList.Items, nil -} - -func (kcm *K8sClientManager) GetAllNodesInfo() (smodel.AllNodesInfo, error) { - opts := metav1.ListOptions{} - nodes, err := kcm.getNodes(opts) - if err != nil { - return nil, err - } - r := make(smodel.AllNodesInfo) - for _, node := range nodes { - nodeInfo := r[node.ObjectMeta.Labels["collection_id"]] - if nodeInfo == nil { - nodeInfo = &smodel.NodesInfo{} - r[node.ObjectMeta.Labels["collection_id"]] = nodeInfo - } - nodeInfo.Size++ - if nodeInfo.LaunchTime.IsZero() || nodeInfo.LaunchTime.After(node.ObjectMeta.CreationTimestamp.Time) { - nodeInfo.LaunchTime = node.ObjectMeta.CreationTimestamp.Time - } - } - return r, nil -} - func (kcm *K8sClientManager) GetDeployedCollections() (map[int64]time.Time, error) { labelSelector := fmt.Sprintf("kind=executor") pods, err := kcm.GetPods(labelSelector, "") diff --git a/shibuya/scheduler/types.go b/shibuya/scheduler/types.go index c7222567..7dc16699 100644 --- a/shibuya/scheduler/types.go +++ b/shibuya/scheduler/types.go @@ -18,7 +18,6 @@ type EngineScheduler interface { FetchEngineUrlsByPlan(collectionID, planID int64, opts *smodel.EngineOwnerRef) ([]string, error) PurgeCollection(collectionID int64) error GetDeployedCollections() (map[int64]time.Time, error) - GetAllNodesInfo() (smodel.AllNodesInfo, error) GetPodsMetrics(collectionID, planID int64) (map[string]apiv1.ResourceList, error) PodReadyCount(collectionID int64) int DownloadPodLog(collectionID, planID int64) (string, error) diff --git a/shibuya/ui/handler.go b/shibuya/ui/handler.go index d8692c44..7b946d5b 100644 --- a/shibuya/ui/handler.go +++ b/shibuya/ui/handler.go @@ -29,7 +29,6 @@ type HomeResp struct { Account string BackgroundColour string Context string - OnDemandCluster bool IsAdmin bool ResultDashboard string EnableSid bool @@ -56,7 +55,7 @@ func (u *UI) homeHandler(w http.ResponseWriter, r *http.Request, params httprout sc := config.SC gcDuration := config.SC.ExecutorConfig.Cluster.GCDuration template.Execute(w, &HomeResp{account.Name, sc.BackgroundColour, sc.Context, - config.SC.ExecutorConfig.Cluster.OnDemand, IsAdmin, resultDashboardURL, enableSid, + IsAdmin, resultDashboardURL, enableSid, engineHealthDashboardURL, sc.ProjectHome, sc.UploadFileHelp, gcDuration}) } diff --git 
a/shibuya/ui/static/js/collection.js b/shibuya/ui/static/js/collection.js index 4278756a..c65c709d 100644 --- a/shibuya/ui/static/js/collection.js +++ b/shibuya/ui/static/js/collection.js @@ -23,7 +23,6 @@ var Collection = Vue.component("collection", { trigger_in_progress: false, stop_in_progress: false, purge_in_progress: false, - on_demand_cluster: on_demand_cluster, upload_file_help: upload_file_help, showing_log: false, showing_engines_detail: false, @@ -34,12 +33,6 @@ var Collection = Vue.component("collection", { } }, computed: { - nodes_plural: function () { - if (this.collection_status.pool_size > 1) { - return "nodes"; - } - return "node"; - }, verb: function () { if (this.collection_status.pool_size > 1) { return "are"; @@ -359,17 +352,6 @@ var Collection = Vue.component("collection", { engineHealthGrafanaUrl: function () { return engine_health_dashboard + "?var-collectionID=" + this.collection_id; }, - purgeNodes: function () { - var url = "collections/" + this.collection_id + "/nodes"; - this.$http.delete(url).then( - function (resp) { - alert("Deleting nodes in process...This will take some time."); - }, - function (resp) { - alert(resp.body.message); - } - ); - }, makeUploadURL: function (path) { switch (path) { case "yaml": diff --git a/shibuya/ui/templates/app.html b/shibuya/ui/templates/app.html index 508406bc..110e116f 100644 --- a/shibuya/ui/templates/app.html +++ b/shibuya/ui/templates/app.html @@ -12,7 +12,6 @@ var result_dashboard = {{ .ResultDashboard }}; var enable_sid = {{ .EnableSid }} var engine_health_dashboard = {{ .EngineHealthDashboard }}; - var on_demand_cluster = {{ .OnDemandCluster }} var is_admin = {{ .IsAdmin }} var project_home = {{ .ProjectHome }} var upload_file_help = {{ .UploadFileHelp }} @@ -104,10 +103,6 @@
                 Collection ID: ${collection.id}
                 Tests are being started
                 Tests are being stopped
                 Engines are being purged
-
-                ${collection_status.pool_size} ${nodes_plural} ${verb} ${collection_status.pool_status} in ${running_context}
-
-
@@ -381,27 +376,6 @@
-
-
-                Nodes provisoned
-                <table>
-                    <thead>
-                        <tr>
-                            <th>Collection</th>
-                            <th>Nodes</th>
-                            <th>Launched Since</th>
-                        </tr>
-                    </thead>
-                    <tbody>
-                        <tr>
-                            <td>${collection_id}</td>
-                            <td>${nodes.size}</td>
-                            <td>${toLocalTZ(nodes.launch_time)}</td>
-                        </tr>
-                    </tbody>
-                </table>
-
-
@@ -418,4 +392,4 @@
 {{ end }}
-</html>
\ No newline at end of file
+</html>
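
The static-asset change in main.go swaps httprouter's ServeFiles for an explicit http.FileServer wrapper, because ServeFiles offers no hook for setting response headers. A self-contained sketch of the same pattern; the package name, port, and directory here are illustrative, not taken from this repository:

    package main

    import (
        "log"
        "net/http"

        "github.com/julienschmidt/httprouter"
    )

    func main() {
        r := httprouter.New()
        fileServer := http.FileServer(http.Dir("/static"))
        r.GET("/static/*filepath", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
            // ServeFiles performs this rewrite internally; doing it by hand
            // lets us set headers before delegating to the file server.
            req.URL.Path = ps.ByName("filepath")
            // 7 days = 7 * 24 * 60 * 60 = 604800 seconds.
            w.Header().Set("Cache-Control", "public, max-age=604800")
            fileServer.ServeHTTP(w, req)
        })
        log.Fatal(http.ListenAndServe(":8080", r))
    }

One caveat of the 7-day max-age: browsers may keep serving stale JS/CSS after a deploy unless asset URLs are versioned.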
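
On the scheduler side, GetAllNodesInfo is dropped from the EngineScheduler interface in types.go, and both implementations shrink accordingly. A cheap guard against an implementation drifting from the trimmed interface is a compile-time assertion; a minimal sketch, assuming it sits in the scheduler package next to types.go:

    package scheduler

    // Compile-time checks: the build fails if either scheduler
    // stops satisfying the trimmed EngineScheduler interface.
    var (
        _ EngineScheduler = (*K8sClientManager)(nil)
        _ EngineScheduler = (*CloudRun)(nil)
    )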