From 57f89b8fb64e2aed159c06349b31e83f63d546e1 Mon Sep 17 00:00:00 2001 From: Dimitri Saridakis Date: Mon, 20 Feb 2023 10:42:23 +0000 Subject: [PATCH] refactor: Refactor ocm methods out of register cmd file (#1807) * feat: addition of descriptions and examples to dedicated * feat: Allows the CLI to install stage env addons when terraforming customer-cloud dataplane clusters * refactor: extraction of clustermgmt funcs from register to own dir * refactor: addresses cluster pagination and refactor of ocm functions --- .../rhoas_dedicated_register-cluster.md | 2 + docs/commands/rhoas_kafka_create.md | 2 +- pkg/cmd/dedicated/register/registercluster.go | 158 +++--------------- .../localize/locales/en/cmd/dedicated.en.toml | 10 +- .../connection/api/clustermgmt/ocm_utils.go | 114 +++++++++++++ 5 files changed, 151 insertions(+), 135 deletions(-) create mode 100644 pkg/shared/connection/api/clustermgmt/ocm_utils.go diff --git a/docs/commands/rhoas_dedicated_register-cluster.md b/docs/commands/rhoas_dedicated_register-cluster.md index 719e6589f..3ee98abd0 100644 --- a/docs/commands/rhoas_dedicated_register-cluster.md +++ b/docs/commands/rhoas_dedicated_register-cluster.md @@ -29,6 +29,8 @@ rhoas cluster register-cluster --cluster-id 1234-5678-90ab-cdef --access-token string The access token to use to authenticate with the OpenShift Cluster Management API. --cluster-id string The ID of the OpenShift cluster to register: --cluster-mgmt-api-url string The API URL of the OpenShift Cluster Management API. + --page-number int The page number to use when listing clusters. (default 1) + --page-size int The page size to use when listing clusters. (default 100) ``` ### Options inherited from parent commands diff --git a/docs/commands/rhoas_kafka_create.md b/docs/commands/rhoas_kafka_create.md index 819cf456f..cbde33e1a 100644 --- a/docs/commands/rhoas_kafka_create.md +++ b/docs/commands/rhoas_kafka_create.md @@ -31,7 +31,7 @@ $ rhoas kafka create -o yaml ``` --billing-model string Billing model to be used - --cluster-id string ID of the Customer-Cloud dataplane cluster to create the Kafka instance on. + --cluster-id string ID of the Customer-Cloud data plane cluster to create the Kafka instance on. 
--dry-run Validate all user provided arguments without creating the Kafka instance --marketplace string Name of the marketplace where the instance is purchased on --marketplace-account-id string Cloud Account ID for the marketplace diff --git a/pkg/cmd/dedicated/register/registercluster.go b/pkg/cmd/dedicated/register/registercluster.go index aab503da0..3aa49ed55 100644 --- a/pkg/cmd/dedicated/register/registercluster.go +++ b/pkg/cmd/dedicated/register/registercluster.go @@ -3,10 +3,11 @@ package register import ( "context" "fmt" - "strings" - "github.com/redhat-developer/app-services-cli/internal/build" + "github.com/redhat-developer/app-services-cli/pkg/core/cmdutil" "github.com/redhat-developer/app-services-cli/pkg/core/config" + "github.com/redhat-developer/app-services-cli/pkg/shared/connection/api/clustermgmt" + "strings" "github.com/AlecAivazis/survey/v2" clustersmgmtv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" @@ -27,7 +28,8 @@ type options struct { selectedClusterMachinePool clustersmgmtv1.MachinePool requestedMachinePoolNodeCount int accessKafkasViaPrivateNetwork bool - // newMachinePool clustersmgmtv1.MachinePool + pageNumber int + pageSize int f *factory.Factory } @@ -68,20 +70,22 @@ func NewRegisterClusterCommand(f *factory.Factory) *cobra.Command { flags.StringVar(&opts.clusterManagementApiUrl, "cluster-mgmt-api-url", "", f.Localizer.MustLocalize("dedicated.registerCluster.flag.clusterMgmtApiUrl.description")) flags.StringVar(&opts.accessToken, "access-token", "", f.Localizer.MustLocalize("dedicated.registercluster.flag.accessToken.description")) flags.StringVar(&opts.selectedClusterId, "cluster-id", "", f.Localizer.MustLocalize("dedicated.registerCluster.flag.clusterId.description")) + flags.IntVar(&opts.pageNumber, "page-number", int(cmdutil.ConvertPageValueToInt32(build.DefaultPageNumber)), f.Localizer.MustLocalize("dedicated.registerCluster.flag.pageNumber.description")) + flags.IntVar(&opts.pageSize, "page-size", 100, f.Localizer.MustLocalize("dedicated.registerCluster.flag.pageSize.description")) return cmd } func runRegisterClusterCmd(opts *options) (err error) { - // Set the base URL for the cluster management API - err = setListClusters(opts) + opts.pageNumber = int(cmdutil.ConvertPageValueToInt32(build.DefaultPageNumber)) + err = getPaginatedClusterList(opts) if err != nil { return err } if len(opts.clusterList) == 0 { return opts.f.Localizer.MustLocalizeError("dedicated.registerCluster.run.noClusterFound") } - // TO-DO if client has supplied a cluster id, validate it and set it as the selected cluster without listing getting all clusters + if opts.selectedClusterId == "" { err = runClusterSelectionInteractivePrompt(opts) if err != nil { @@ -112,37 +116,15 @@ func runRegisterClusterCmd(opts *options) (err error) { } -func getClusterList(opts *options) (*clustersmgmtv1.ClusterList, error) { - conn, err := opts.f.Connection() - if err != nil { - return nil, err - } - client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken) - if err != nil { - return nil, err - } - defer cc() - // TO-DO deal with pagination, validate clusters -- must be multi AZ and ready. 
- resource := client.Clusters().List() - response, err := resource.Send() - if err != nil { - return nil, err - } - clusters := response.Items() - return clusters, nil -} - -func setListClusters(opts *options) error { - clusters, err := getClusterList(opts) +func getPaginatedClusterList(opts *options) error { + cl, err := clustermgmt.GetClusterList(opts.f, opts.accessToken, opts.clusterManagementApiUrl, opts.pageNumber, opts.pageSize) if err != nil { + opts.f.Localizer.MustLocalizeError("dedicated.registerCluster.run.errorGettingClusterList") return err } - var cls = []clustersmgmtv1.Cluster{} - cls = validateClusters(clusters, cls) - opts.clusterList = cls + opts.clusterList = validateClusters(cl, opts.clusterList) return nil } - func validateClusters(clusters *clustersmgmtv1.ClusterList, cls []clustersmgmtv1.Cluster) []clustersmgmtv1.Cluster { for _, cluster := range clusters.Slice() { if cluster.State() == clusterReadyState && cluster.MultiAZ() == true { @@ -153,14 +135,15 @@ func validateClusters(clusters *clustersmgmtv1.ClusterList, cls []clustersmgmtv1 } func runClusterSelectionInteractivePrompt(opts *options) error { - // TO-DO handle in case of empty cluster list, must be cleared up with UX etc. + if len(opts.clusterList) == 0 { + return opts.f.Localizer.MustLocalizeError("dedicated.registerCluster.run.noClusterFound") + } clusterStringList := make([]string, 0) for i := range opts.clusterList { cluster := opts.clusterList[i] clusterStringList = append(clusterStringList, cluster.Name()) } - // TO-DO add page size prompt := &survey.Select{ Message: opts.f.Localizer.MustLocalize("dedicated.registerCluster.prompt.selectCluster.message"), Options: clusterStringList, @@ -199,7 +182,7 @@ func parseDNSURL(opts *options) (string, error) { func getOrCreateMachinePoolList(opts *options) error { // ocm client connection - response, err := getMachinePoolList(opts) + response, err := clustermgmt.GetMachinePoolList(opts.f, opts.clusterManagementApiUrl, opts.accessToken, opts.selectedCluster.ID()) if err != nil { return err } @@ -220,24 +203,6 @@ func getOrCreateMachinePoolList(opts *options) error { return nil } -func getMachinePoolList(opts *options) (*clustersmgmtv1.MachinePoolsListResponse, error) { - conn, err := opts.f.Connection() - if err != nil { - return nil, err - } - client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken) - if err != nil { - return nil, err - } - defer cc() - resource := client.Clusters().Cluster(opts.selectedCluster.ID()).MachinePools().List() - response, err := resource.Send() - if err != nil { - return nil, err - } - return response, nil -} - func checkForValidMachinePoolLabels(machinePool *clustersmgmtv1.MachinePool) bool { labels := machinePool.Labels() for key, value := range labels { @@ -295,30 +260,12 @@ func createMachinePoolRequestForDedicated(machinePoolNodeCount int) (*clustersmg return machinePool, nil } -// TO-DO this function should be moved to an ocm client / provider area -func createMachinePool(opts *options, mprequest *clustersmgmtv1.MachinePool) error { - conn, err := opts.f.Connection() - if err != nil { - return err - } - client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken) - if err != nil { - return err - } - defer cc() - response, err := client.Clusters().Cluster(opts.selectedCluster.ID()).MachinePools().Add().Body(mprequest).Send() - if err != nil { - return err - } - opts.selectedClusterMachinePool = *response.Body() - return nil -} - func 
createMachinePoolInteractivePrompt(opts *options) error { validator := &dedicatedcmdutil.Validator{ Localizer: opts.f.Localizer, Connection: opts.f.Connection, } + // TO-DO add page size and better help message promptNodeCount := &survey.Input{ Message: opts.f.Localizer.MustLocalize("dedicated.registerCluster.prompt.createMachinePoolNodeCount.message"), @@ -334,10 +281,12 @@ func createMachinePoolInteractivePrompt(opts *options) error { if err != nil { return err } - err = createMachinePool(opts, dedicatedMachinePool) + mp := &clustersmgmtv1.MachinePool{} + mp, err = clustermgmt.CreateMachinePool(opts.f, opts.clusterManagementApiUrl, opts.accessToken, dedicatedMachinePool, opts.selectedCluster.ID()) if err != nil { return err } + opts.selectedClusterMachinePool = *mp return nil } @@ -347,7 +296,7 @@ func validateMachinePoolNodes(opts *options) error { machinePool := opts.existingMachinePoolList[i] - nodeCount := getMachinePoolNodeCount(&machinePool) + nodeCount := clustermgmt.GetMachinePoolNodeCount(&machinePool) if validateMachinePoolNodeCount(nodeCount) && checkForValidMachinePoolLabels(&machinePool) && @@ -365,20 +314,6 @@ func validateMachinePoolNodes(opts *options) error { return nil } -func getMachinePoolNodeCount(machinePool *clustersmgmtv1.MachinePool) int { - var nodeCount int - replicas, ok := machinePool.GetReplicas() - if ok { - nodeCount = replicas - } else { - autoscaledReplicas, ok := machinePool.GetAutoscaling() - if ok { - nodeCount = autoscaledReplicas.MaxReplicas() - } - } - return nodeCount -} - func selectAccessPrivateNetworkInteractivePrompt(opts *options) error { prompt := &survey.Confirm{ Message: opts.f.Localizer.MustLocalize("dedicated.registerCluster.prompt.selectPublicNetworkAccess.message"), @@ -394,47 +329,6 @@ func selectAccessPrivateNetworkInteractivePrompt(opts *options) error { return nil } -func newAddonParameterListBuilder(params *[]kafkamgmtclient.FleetshardParameter) *clustersmgmtv1.AddOnInstallationParameterListBuilder { - if params == nil { - return nil - } - var items []*clustersmgmtv1.AddOnInstallationParameterBuilder - for _, p := range *params { - pb := clustersmgmtv1.NewAddOnInstallationParameter().ID(*p.Id).Value(*p.Value) - items = append(items, pb) - } - return clustersmgmtv1.NewAddOnInstallationParameterList().Items(items...) 
-} - -func createAddonWithParams(opts *options, addonId string, params *[]kafkamgmtclient.FleetshardParameter) error { - // create a new addon via ocm - conn, err := opts.f.Connection() - if err != nil { - return err - } - client, cc, err := conn.API().OCMClustermgmt(opts.clusterManagementApiUrl, opts.accessToken) - if err != nil { - return err - } - defer cc() - addon := clustersmgmtv1.NewAddOn().ID(addonId) - addonParameters := newAddonParameterListBuilder(params) - addonInstallationBuilder := clustersmgmtv1.NewAddOnInstallation().Addon(addon) - if addonParameters != nil { - addonInstallationBuilder = addonInstallationBuilder.Parameters(addonParameters) - } - addonInstallation, err := addonInstallationBuilder.Build() - if err != nil { - return err - } - _, err = client.Clusters().Cluster(opts.selectedCluster.ID()).Addons().Add().Body(addonInstallation).Send() - if err != nil { - return err - } - - return nil -} - func getStrimziAddonIdByEnv(con *config.Config) string { if con.APIUrl == build.ProductionAPIURL { return strimziAddonId @@ -456,7 +350,7 @@ func registerClusterWithKasFleetManager(opts *options) error { return err } - nodeCount := getMachinePoolNodeCount(&opts.selectedClusterMachinePool) + nodeCount := clustermgmt.GetMachinePoolNodeCount(&opts.selectedClusterMachinePool) kfmPayload := kafkamgmtclient.EnterpriseOsdClusterPayload{ AccessKafkasViaPrivateNetwork: opts.accessKafkasViaPrivateNetwork, ClusterId: opts.selectedCluster.ID(), @@ -479,11 +373,11 @@ func registerClusterWithKasFleetManager(opts *options) error { if err != nil { return err } - err = createAddonWithParams(opts, getStrimziAddonIdByEnv(con), nil) + err = clustermgmt.CreateAddonWithParams(opts.f, opts.clusterManagementApiUrl, opts.accessToken, getStrimziAddonIdByEnv(con), response.FleetshardParameters, opts.selectedCluster.ID()) if err != nil { return err } - err = createAddonWithParams(opts, getKafkaFleetShardAddonIdByEnv(con), response.FleetshardParameters) + err = clustermgmt.CreateAddonWithParams(opts.f, opts.clusterManagementApiUrl, opts.accessToken, getKafkaFleetShardAddonIdByEnv(con), response.FleetshardParameters, opts.selectedCluster.ID()) if err != nil { return err } diff --git a/pkg/core/localize/locales/en/cmd/dedicated.en.toml b/pkg/core/localize/locales/en/cmd/dedicated.en.toml index 64d1cfd95..4455a4719 100644 --- a/pkg/core/localize/locales/en/cmd/dedicated.en.toml +++ b/pkg/core/localize/locales/en/cmd/dedicated.en.toml @@ -26,7 +26,7 @@ one = 'The ID of the OpenShift cluster to register:' one = 'Select the ready cluster to register' [dedicated.registerCluster.prompt.selectPublicNetworkAccess.message] -one = 'Would you like your Kafkas to be accessible via a public network?' +one = 'Would you like your Kafka instances to be accessible via a public network?' [dedicated.registerCluster.prompt.selectPublicNetworkAccess.help] one = 'If you select yes, your Kafka will be accessible via a public network' @@ -72,4 +72,10 @@ one = 'The cluster has already been registered with Red Hat OpenShift Streams fo one = 'The API URL of the OpenShift Cluster Management API.' [dedicated.registercluster.flag.accessToken.description] -one = 'The access token to use to authenticate with the OpenShift Cluster Management API.' \ No newline at end of file +one = 'The access token to use to authenticate with the OpenShift Cluster Management API.' + +[dedicated.registerCluster.flag.pageNumber.description] +one = 'The page number to use when listing clusters.' 
+ +[dedicated.registerCluster.flag.pageSize.description] +one = 'The page size to use when listing clusters.' diff --git a/pkg/shared/connection/api/clustermgmt/ocm_utils.go b/pkg/shared/connection/api/clustermgmt/ocm_utils.go new file mode 100644 index 000000000..1ad957a12 --- /dev/null +++ b/pkg/shared/connection/api/clustermgmt/ocm_utils.go @@ -0,0 +1,114 @@ +package clustermgmt + +import ( + "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/redhat-developer/app-services-cli/pkg/shared/factory" + "github.com/redhat-developer/app-services-sdk-go/kafkamgmt/apiv1/client" +) + +func clustermgmtConnection(f *factory.Factory, accessToken string, clustermgmturl string) (*v1.Client, func(), error) { + conn, err := f.Connection() + if err != nil { + return nil, nil, err + } + client, closeConnection, err := conn.API().OCMClustermgmt(clustermgmturl, accessToken) + if err != nil { + return nil, nil, err + } + return client, closeConnection, nil +} + +func GetClusterList(f *factory.Factory, accessToken string, clustermgmturl string, pageNumber int, pageLimit int) (*v1.ClusterList, error) { + client, closeConnection, err := clustermgmtConnection(f, accessToken, clustermgmturl) + if err != nil { + return nil, err + } + defer closeConnection() + + resource := client.Clusters().List() + resource = resource.Page(pageNumber) + resource = resource.Size(pageLimit) + response, err := resource.Send() + if err != nil { + return nil, err + } + clusters := response.Items() + return clusters, nil +} + +func GetMachinePoolList(f *factory.Factory, clustermgmturl string, accessToken string, clusterId string) (*v1.MachinePoolsListResponse, error) { + client, closeConnection, err := clustermgmtConnection(f, accessToken, clustermgmturl) + if err != nil { + return nil, err + } + defer closeConnection() + resource := client.Clusters().Cluster(clusterId).MachinePools().List() + response, err := resource.Send() + if err != nil { + return nil, err + } + return response, nil +} + +func CreateAddonWithParams(f *factory.Factory, clustermgmturl string, accessToken string, addonId string, params *[]kafkamgmtclient.FleetshardParameter, clusterId string) error { + client, closeConnection, err := clustermgmtConnection(f, accessToken, clustermgmturl) + if err != nil { + return err + } + defer closeConnection() + addon := v1.NewAddOn().ID(addonId) + addonParameters := newAddonParameterListBuilder(params) + addonInstallationBuilder := v1.NewAddOnInstallation().Addon(addon) + if addonParameters != nil { + addonInstallationBuilder = addonInstallationBuilder.Parameters(addonParameters) + } + addonInstallation, err := addonInstallationBuilder.Build() + if err != nil { + return err + } + _, err = client.Clusters().Cluster(clusterId).Addons().Add().Body(addonInstallation).Send() + if err != nil { + return err + } + + return nil +} + +func newAddonParameterListBuilder(params *[]kafkamgmtclient.FleetshardParameter) *v1.AddOnInstallationParameterListBuilder { + if params == nil { + return nil + } + var items []*v1.AddOnInstallationParameterBuilder + for _, p := range *params { + pb := v1.NewAddOnInstallationParameter().ID(*p.Id).Value(*p.Value) + items = append(items, pb) + } + return v1.NewAddOnInstallationParameterList().Items(items...) 
+} + +func CreateMachinePool(f *factory.Factory, clustermgmturl string, accessToken string, mprequest *v1.MachinePool, clusterId string) (*v1.MachinePool, error) { + client, closeConnection, err := clustermgmtConnection(f, accessToken, clustermgmturl) + if err != nil { + return nil, err + } + defer closeConnection() + response, err := client.Clusters().Cluster(clusterId).MachinePools().Add().Body(mprequest).Send() + if err != nil { + return nil, err + } + return response.Body(), nil +} + +func GetMachinePoolNodeCount(machinePool *v1.MachinePool) int { + var nodeCount int + replicas, ok := machinePool.GetReplicas() + if ok { + nodeCount = replicas + } else { + autoscaledReplicas, ok := machinePool.GetAutoscaling() + if ok { + nodeCount = autoscaledReplicas.MaxReplicas() + } + } + return nodeCount +}