From 08d5676188c127b048fd6eba2e627321f5ef73b0 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Wed, 8 May 2019 09:21:53 -0400 Subject: [PATCH 1/9] Initial commit of moving OCP to wrapanapi 3 --- requirements.txt | 3 +- wrapanapi/systems/__init__.py | 5 +- wrapanapi/systems/openshift.py | 1742 ++++++++++++++++++++++++++++++++ 3 files changed, 1747 insertions(+), 3 deletions(-) create mode 100644 wrapanapi/systems/openshift.py diff --git a/requirements.txt b/requirements.txt index 2fe6cd70..399c255d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,8 @@ inflection miq-version>=0.1.6 oauth2client ovirt-engine-sdk-python~=4.3 -openshift==0.3.4 +#openshift==0.3.4 +openshift==0.8.8 packaging pyvmomi>=6.5.0.2017.5.post1 python-cinderclient diff --git a/wrapanapi/systems/__init__.py b/wrapanapi/systems/__init__.py index 1313df16..6ba6bed3 100644 --- a/wrapanapi/systems/__init__.py +++ b/wrapanapi/systems/__init__.py @@ -13,9 +13,10 @@ from .scvmm import SCVMMSystem from .vcloud import VmwareCloudSystem from .virtualcenter import VMWareSystem +from .openshift import Openshift __all__ = [ 'EC2System', 'GoogleCloudSystem', 'HawkularSystem', 'LenovoSystem', - 'AzureSystem', 'NuageSystem', 'OpenstackSystem', 'OpenstackInfraSystem', 'RedfishSystem', - 'RHEVMSystem', 'SCVMMSystem', 'VmwareCloudSystem', 'VMWareSystem' + 'AzureSystem', 'NuageSystem', 'OpenShift', 'OpenstackSystem', 'OpenstackInfraSystem', + 'RedfishSystem', 'RHEVMSystem', 'SCVMMSystem', 'VmwareCloudSystem', 'VMWareSystem' ] diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py new file mode 100644 index 00000000..f4ebcfad --- /dev/null +++ b/wrapanapi/systems/openshift.py @@ -0,0 +1,1742 @@ +from __future__ import absolute_import + +import copy +import json +import string +import yaml +from collections import Iterable +from functools import partial, wraps +from random import choice + +import inflection +import six +from kubernetes import client as kubeclient +from kubernetes import config as kubeclientconfig +from openshift.dynamic import DynamicClient +from kubernetes.client.rest import ApiException +from miq_version import TemplateName, Version +from openshift import client as ociclient +from wait_for import TimedOutError, wait_for + +from wrapanapi.systems.base import System + + +# this service allows to access db outside of openshift +common_service = """ +{ + "api_version": "v1", + "kind": "Service", + "metadata": { + "name": "common-service" + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "port": "5432" + } + ], + "type": "LoadBalancer", + "selector": { + "name": "postgresql" + } + } +} +""" + +# since 5.10 CloudForms doesn't allow to override image repo url and tag in template +# so, this information has to be stored during template deployment somewhere in project +image_repo_cm_template = """ +api_version: v1 +kind: ConfigMap +metadata: + name: "image-repo-data" +data: + tags: | + {tags} +""" + + +def reconnect(decorator): + def decorate(cls): + for attr in cls.__dict__: + if callable(getattr(cls, attr)) and not attr.startswith('_'): + setattr(cls, attr, decorator(getattr(cls, attr))) + return cls + return decorate + + +def unauthenticated_error_handler(method): + """Fixes issue with 401 error by restoring connection. + Sometimes connection to openshift api endpoint gets expired and openshift returns 401. + As a result tasks in some applications like sprout fail. 
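+    On an 'Unauthorized' ApiException the connection is re-established via _connect()
+    and the call is retried, up to three attempts, before one final unguarded attempt.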
+ """ + @wraps(method) + def wrap(*args, **kwargs): + attempts = 3 + for _ in range(attempts): + try: + return method(*args, **kwargs) + except ApiException as e: + if e.reason == 'Unauthorized': + args[0]._connect() + else: + raise e + return method(*args, **kwargs) + return wrap + + +@reconnect(unauthenticated_error_handler) +class Openshift(System): + + _stats_available = { + 'num_container': lambda self: len(self.list_container()), + 'num_pod': lambda self: len(self.list_pods()), + 'num_service': lambda self: len(self.list_service()), + 'num_replication_controller': + lambda self: len(self.list_replication_controller()), + 'num_image': lambda self: len(self.list_image_id()), + 'num_node': lambda self: len(self.list_node()), + 'num_image_registry': lambda self: len(self.list_image_registry()), + 'num_project': lambda self: len(self.list_project()), + 'num_route': lambda self: len(self.list_route()), + 'num_template': lambda self: len(self.list_template()) + } + + stream2template_tags_mapping59 = { + 'cfme-openshift-httpd': {'tag': 'HTTPD_IMG_TAG', 'url': 'HTTPD_IMG_NAME'}, + 'cfme-openshift-app': {'tag': 'BACKEND_APPLICATION_IMG_TAG', + 'url': 'BACKEND_APPLICATION_IMG_NAME'}, + 'cfme-openshift-app-ui': {'tag': 'FRONTEND_APPLICATION_IMG_TAG', + 'url': 'FRONTEND_APPLICATION_IMG_NAME'}, + 'cfme-openshift-embedded-ansible': {'tag': 'ANSIBLE_IMG_TAG', 'url': 'ANSIBLE_IMG_NAME'}, + 'cfme-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, + 'cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, + } + + stream2template_tags_mapping58 = { + 'cfme58-openshift-app': {'tag': 'APPLICATION_IMG_TAG', 'url': 'APPLICATION_IMG_NAME'}, + 'cfme58-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, + 'cfme58-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, + } + + scc_user_mapping59 = ( + {'scc': 'anyuid', 'user': 'cfme-anyuid'}, + {'scc': 'anyuid', 'user': 'cfme-orchestrator'}, + {'scc': 'anyuid', 'user': 'cfme-httpd'}, + {'scc': 'privileged', 'user': 'cfme-privileged'}, + ) + + scc_user_mapping58 = ( + {'scc': 'anyuid', 'user': 'cfme-anyuid'}, + {'scc': 'privileged', 'user': 'default'}, + ) + + default_namespace = 'openshift' + required_project_pods = ('httpd', 'memcached', 'postgresql', + 'cloudforms', 'cloudforms-backend') + required_project_pods58 = ('memcached', 'postgresql', 'cloudforms') + not_required_project_pods = ('cloudforms-backend', 'ansible') + + def __init__(self, hostname, protocol="https", port=8443, debug=False, + verify_ssl=False, **kwargs): + super(Openshift, self).__init__(kwargs) + self.hostname = hostname + self.protocol = protocol + self.port = port + self.username = kwargs.get('username', '') + self.password = kwargs.get('password', '') + self.base_url = kwargs.get('base_url', None) + self.token = kwargs.get('token', '') + self.auth = self.token if self.token else (self.username, self.password) + self.debug = debug + self.verify_ssl = verify_ssl + + self._connect() + + def _identifying_attrs(self): + """ + Return a dict with key, value pairs for each kwarg that is used to + uniquely identify this system. 
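+        For OpenShift this is the hostname and port.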
+ """ + return {'hostname': self.hostname, 'port': self.port} + + def _connect(self): + url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, + port=self.port) + + token = 'Bearer {token}'.format(token=self.token) + config = kubeclientconfig.new_client_from_config() + #config = ociclient.Configuration() + config.host = url + config.verify_ssl = self.verify_ssl + config.debug = self.debug + config.configuration.api_key['authorization'] = token + self.dyn_client = DynamicClient(config) + + # self.ociclient = ociclient + # self.kclient = kubeclient + # self.oapi_client = ociclient.ApiClient(config=config) + # self.kapi_client = kubeclient.ApiClient(config=config) + # self.o_api = ociclient.OapiApi(api_client=self.oapi_client) + # self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) + # self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) + # self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api + + def info(self): + url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, + port=self.port) + return "rhopenshift {}".format(url) + + def list_route(self, namespace=None): + """Returns list of routes""" + if namespace: + routes = self.o_api.list_namespaced_route(namespace=namespace).items + else: + routes = self.o_api.list_route_for_all_namespaces().items + return routes + + def list_image_streams(self, namespace=None): + """Returns list of image streams""" + if namespace: + image_streams = self.o_api.list_namespaced_image_stream(namespace=namespace).items + else: + image_streams = self.o_api.list_image_stream_for_all_namespaces().items + return image_streams + + def list_project(self): + """Returns list of projects""" + return self.o_api.list_project().items + + def list_template(self, namespace=None): + """Returns list of templates""" + if namespace: + return [t.metadata.name for t in self.o_api.list_namespaced_template(namespace).items] + else: + return [t.metadata.name for t in self.o_api.list_template_for_all_namespaces().items] + + # fixme: get rid of this mapping + list_templates = list_template + + def list_image_stream_images(self): + """Returns list of images (Docker registry only)""" + return [item for item in self.o_api.list_image().items + if item.docker_image_reference is not None] + + def list_deployment_config(self, namespace=None): + """Returns list of deployment configs""" + if namespace: + dc = self.o_api.list_namespaced_deployment_config(namespace=namespace).items + else: + dc = self.o_api.list_deployment_config_for_all_namespaces().items + return dc + + def list_service(self, namespace=None): + """Returns list of services.""" + if namespace: + svc = self.k_api.list_namespaced_service(namespace=namespace).items + else: + svc = self.k_api.list_service_for_all_namespaces().items + return svc + + def list_replication_controller(self, namespace=None): + """Returns list of replication controllers""" + if namespace: + rc = self.k_api.list_namespaced_replication_controller(namespace=namespace).items + else: + rc = self.k_api.list_replication_controller_for_all_namespaces().items + return rc + + def list_node(self): + """Returns list of nodes""" + nodes = self.k_api.list_node().items + return nodes + + def cluster_info(self): + """Returns information about the cluster - number of CPUs and memory in GB""" + aggregate_cpu, aggregate_mem = 0, 0 + for node in self.list_node(): + aggregate_cpu += int(node.status.capacity['cpu']) + # converting KiB to GB. 
1KiB = 1.024E-6 GB + aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400)) + + return {'cpu': aggregate_cpu, 'memory': aggregate_mem} + + def list_persistent_volume(self): + """Returns list of persistent volumes""" + pv = self.k_api.list_persistent_volume().items + return pv + + def list_pods(self, namespace=None): + """Returns list of container groups (pods). + If project_name is passed, only the pods under the selected project will be returned""" + if namespace: + pods = self.k_api.list_namespaced_pod(namespace=namespace).items + else: + pods = self.k_api.list_pod_for_all_namespaces().items + return pods + + def list_container(self, namespace=None): + """Returns list of containers (derived from pods) + If project_name is passed, only the containers under the selected project will be returned + """ + pods = self.list_pods(namespace=namespace) + return [pod.spec.containers for pod in pods] + + def list_image_id(self, namespace=None): + """Returns list of unique image ids (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.status.container_statuses: + statuses.append(status) + return sorted(set([status.image_id for status in statuses])) + + def list_image_registry(self, namespace=None): + """Returns list of image registries (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.status.container_statuses: + statuses.append(status) + # returns only the image registry name, without the port number in case of local registry + return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) + + def expose_db_ip(self, namespace): + """Creates special service in appliance project (namespace) which makes internal appliance + db be available outside. + + Args: + namespace: (str) openshift namespace + Returns: ip + """ + # creating common service with external ip and extracting assigned ip + service_obj = self.kclient.V1Service(**json.loads(common_service)) + self.k_api.create_namespaced_service(namespace=namespace, body=service_obj) + # external ip isn't assigned immediately, so, we have to wait until it is assigned + + return self.get_ip_address(namespace) + + def deploy_template(self, template, tags=None, password='smartvm', **kwargs): + """Deploy a VM from a template + + Args: + template: (str) The name of the template to deploy + tags: (dict) dict with tags if some tag isn't passed it is set to 'latest' + vm_name: (str) is used as project name if passed. otherwise, name is generated (sprout) + progress_callback: (func) function to return current progress (sprout) + template_params: (dict) parameters to override during template deployment + running_pods: (list) checks that passed pods are running instead of default set + since input tags are image stream tags whereas template expects its own tags. + So, input tags should match stream2template_tags_mapping. 
+ password: this password will be set as default everywhere + Returns: dict with parameters necessary for appliance setup or None if deployment failed + """ + self.logger.info("starting template %s deployment", template) + self.wait_template_exist(namespace=self.default_namespace, name=template) + + if not self.base_url: + raise ValueError("base url isn't provided") + + version = Version(TemplateName.parse_template(template).version) + + if version >= '5.9': + tags_mapping = self.stream2template_tags_mapping59 + else: + tags_mapping = self.stream2template_tags_mapping58 + + prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} + if tags: + not_found_tags = [t for t in tags.keys() if t not in tags_mapping.keys()] + if not_found_tags: + raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) + for tag, value in tags.items(): + prepared_tags[tags_mapping[tag]['url']] = value['url'] + prepared_tags[tags_mapping[tag]['tag']] = value['tag'] + + # create project + # assuming this is cfme installation and generating project name + proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) + + # for sprout + if 'vm_name' in kwargs: + proj_name = kwargs['vm_name'] + else: + proj_name = "{t}-project-{proj_id}".format(t=template, proj_id=proj_id) + + template_params = kwargs.pop('template_params', {}) + running_pods = kwargs.pop('running_pods', ()) + proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) + self.logger.info("unique id %s, project name %s", proj_id, proj_name) + + default_progress_callback = partial(self._progress_log_callback, self.logger, template, + proj_name) + progress_callback = kwargs.get('progress_callback', default_progress_callback) + + self.create_project(name=proj_name, description=template) + progress_callback("Created Project `{}`".format(proj_name)) + + # grant rights according to scc + self.logger.info("granting rights to project %s sa", proj_name) + scc_user_mapping = self.scc_user_mapping59 if version >= '5.9' else self.scc_user_mapping58 + + self.logger.info("granting required rights to project's service accounts") + for mapping in scc_user_mapping: + self.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, sa=mapping['user']) + progress_callback("Added service accounts to appropriate scc") + + # appliances prior 5.9 don't need such rights + # and those rights are embedded into templates since 5.9.2.2 + if version >= '5.9' and version < '5.9.2.2': + # grant roles to orchestrator + self.logger.info("assigning additional roles to cfme-orchestrator") + auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) + orchestrator_sa = self.kclient.V1ObjectReference(name='cfme-orchestrator', + kind='ServiceAccount', + namespace=proj_name) + + view_role = self.kclient.V1ObjectReference(name='view') + view_role_binding_name = self.kclient.V1ObjectMeta(name='view') + view_role_binding = self.ociclient.V1RoleBinding(role_ref=view_role, + subjects=[orchestrator_sa], + metadata=view_role_binding_name) + self.logger.debug("creating 'view' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + auth_api.create_namespaced_role_binding(namespace=proj_name, body=view_role_binding) + + edit_role = self.kclient.V1ObjectReference(name='edit') + edit_role_binding_name = self.kclient.V1ObjectMeta(name='edit') + edit_role_binding = self.ociclient.V1RoleBinding(role_ref=edit_role, + subjects=[orchestrator_sa], + metadata=edit_role_binding_name) + 
self.logger.debug("creating 'edit' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + auth_api.create_namespaced_role_binding(namespace=proj_name, body=edit_role_binding) + + self.logger.info("project sa created via api have no some mandatory roles. adding them") + self._restore_missing_project_role_bindings(namespace=proj_name) + progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) + + # creating common service with external ip + ext_ip = self.expose_db_ip(proj_name) + progress_callback("Common Service has been added") + + # adding config map with image stream urls and tags + image_repo_cm = image_repo_cm_template.format(tags=json.dumps(tags)) + self.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) + + # creating pods and etc + processing_params = {'DATABASE_PASSWORD': password, + 'APPLICATION_DOMAIN': proj_url} + processing_params.update(prepared_tags) + + # updating template parameters + processing_params.update(template_params) + self.logger.info(("processing template and passed params in order to " + "prepare list of required project entities")) + template_entities = self.process_template(name=template, namespace=self.default_namespace, + parameters=processing_params) + self.logger.debug("template entities:\n %r", template_entities) + progress_callback("Template has been processed") + self.create_template_entities(namespace=proj_name, entities=template_entities) + progress_callback("All template entities have been created") + + self.logger.info("verifying that all created entities are up and running") + progress_callback("Waiting for all pods to be ready and running") + try: + wait_for(self.is_vm_running, num_sec=600, + func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) + self.logger.info("all pods look up and running") + progress_callback("Everything has been deployed w/o errors") + return {'url': proj_url, + 'external_ip': ext_ip, + 'project': proj_name, + } + except TimedOutError: + self.logger.error("deployment failed. Please check failed pods details") + # todo: return and print all failed pod details + raise + + def create_template_entities(self, namespace, entities): + """Creates entities from openshift template. + + Since there is no methods in openshift/kubernetes rest api for app deployment from template, + it is necessary to create template entities one by one using respective entity api. + + Args: + namespace: (str) openshift namespace + entities: (list) openshift entities + + Returns: None + """ + self.logger.debug("passed template entities:\n %r", entities) + kinds = set([e['kind'] for e in entities]) + entity_names = {e: inflection.underscore(e) for e in kinds} + proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} + + for entity in entities: + if entity['kind'] in kinds: + procedure = getattr(self, proc_names[entity['kind']], None) + obtained_entity = procedure(namespace=namespace, **entity) + self.logger.debug(obtained_entity) + else: + self.logger.error("some entity %s isn't present in entity creation list", entity) + + def start_vm(self, vm_name): + """Starts a vm. 
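+        For a podified appliance this means scaling every required deployment
+        config or stateful set in the project up to one replica.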
+ + Args: + vm_name: name of the vm to be started + Returns: whether vm action has been initiated properly + """ + self.logger.info("starting vm/project %s", vm_name) + if self.does_project_exist(vm_name): + for pod in self.get_required_pods(vm_name): + self.scale_entity(name=pod, namespace=vm_name, replicas=1) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + + def stop_vm(self, vm_name): + """Stops a vm. + + Args: + vm_name: name of the vm to be stopped + Returns: whether vm action has been initiated properly + """ + self.logger.info("stopping vm/project %s", vm_name) + if self.does_project_exist(vm_name): + for pod in self.get_required_pods(vm_name): + self.scale_entity(name=pod, namespace=vm_name, replicas=0) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + + def delete_vm(self, vm_name): + """Deletes a vm. + + Args: + vm_name: name of the vm to be deleted + Returns: whether vm action has been initiated properly + """ + self.logger.info("removing vm/project %s", vm_name) + self.delete_project(name=vm_name) + return True + + def does_vm_exist(self, vm_name): + """Does VM exist? + + Args: + vm_name: The name of the VM + Returns: whether vm exists + """ + return self.does_project_exist(vm_name) + + @staticmethod + def _update_template_parameters(template, **params): + """Updates openshift template parameters. + Since Openshift REST API doesn't provide any api to change default parameter values as + it is implemented in `oc process`. This method implements such a parameter replacement. + + Args: + template: Openshift's template object + params: bunch of key=value parameters + Returns: updated template + """ + template = copy.deepcopy(template) + if template.parameters: + new_parameters = template.parameters + for new_param, new_value in params.items(): + for index, old_param in enumerate(new_parameters): + if old_param['name'] == new_param: + old_param = new_parameters.pop(index) + if 'generate' in old_param: + old_param['generate'] = None + old_param['_from'] = None + + old_param['value'] = new_value + new_parameters.append(old_param) + template.parameters = new_parameters + return template + + def process_template(self, name, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + + Args: + name: (str) template name + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 + raw_response = self.o_api.read_namespaced_template(name=name, namespace=namespace, + _preload_content=False) + raw_data = json.loads(raw_response.data) + + return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) + + def process_raw_template(self, body, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + It does two functions + 1. parametrized templates have to be processed in order to replace parameters with values. + 2. templates consist of list of objects. Those objects have to be extracted + before creation accordingly. 
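+        The resulting object list can be passed directly to create_template_entities.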
+ + Args: + body: (dict) template body + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + updated_data = self.rename_structure(body) + read_template = self.ociclient.V1Template(**updated_data) + if parameters: + updated_template = self._update_template_parameters(template=read_template, + **parameters) + else: + updated_template = read_template + raw_response = self.o_api.create_namespaced_processed_template(namespace=namespace, + body=updated_template, + _preload_content=False) + raw_data = json.loads(raw_response.data) + updated_data = self.rename_structure(raw_data) + processed_template = self.ociclient.V1Template(**updated_data) + return processed_template.objects + + def rename_structure(self, struct): + """Fixes inconsistency in input/output data of openshift python client methods + + Args: + struct: data to process and rename + Return: updated data + """ + if not isinstance(struct, six.string_types) and isinstance(struct, Iterable): + if isinstance(struct, dict): + for key in struct.keys(): + # we shouldn't rename something under data or spec + if key == 'stringData': + # this key has to be renamed but its contents should be left intact + struct[inflection.underscore(key)] = struct.pop(key) + elif key in ('spec', 'data', 'string_data', 'annotations'): + # these keys and data should be left intact + pass + else: + # all this data should be processed and updated + val = self.rename_structure(struct.pop(key)) + struct[inflection.underscore(key)] = val + return struct + else: + for index, item in enumerate(struct): + struct[index] = self.rename_structure(item) + return struct + else: + return struct + + def create_config_map(self, namespace, **kwargs): + """Creates ConfigMap entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: ConfigMap data + Return: data if entity was created w/o errors + """ + conf_map = self.kclient.V1ConfigMap(**kwargs) + conf_map_name = conf_map.to_dict()['metadata']['name'] + self.logger.info("creating config map %s", conf_map_name) + output = self.k_api.create_namespaced_config_map(namespace=namespace, body=conf_map) + self.wait_config_map_exist(namespace=namespace, name=conf_map_name) + return output + + def replace_config_map(self, namespace, **kwargs): + """Replace ConfigMap entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: ConfigMap data + Return: data if entity was created w/o errors + """ + conf_map = self.kclient.V1ConfigMap(**kwargs) + conf_map_name = conf_map.to_dict()['metadata']['name'] + self.logger.info("replacing config map %s", conf_map_name) + output = self.k_api.replace_namespaced_config_map(namespace=namespace, + name=conf_map_name, + body=conf_map) + return output + + def create_stateful_set(self, namespace, **kwargs): + """Creates StatefulSet entity using REST API. 
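+        Uses the AppsV1beta1Api client and waits until the created StatefulSet is
+        visible through the API before returning.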
+ + Args: + namespace: openshift namespace where entity has to be created + kwargs: StatefulSet data + Return: data if entity was created w/o errors + """ + st = self.kclient.V1beta1StatefulSet(**kwargs) + st_name = st.to_dict()['metadata']['name'] + self.logger.info("creating stateful set %s", st_name) + api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) + output = api.create_namespaced_stateful_set(namespace=namespace, body=st) + self.wait_stateful_set_exist(namespace=namespace, name=st_name) + return output + + def create_service(self, namespace, **kwargs): + """Creates Service entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Service data + Return: data if entity was created w/o errors + """ + service = self.kclient.V1Service(**kwargs) + service_name = service.to_dict()['metadata']['name'] + self.logger.info("creating service %s", service_name) + output = self.k_api.create_namespaced_service(namespace=namespace, body=service) + self.wait_service_exist(namespace=namespace, name=service_name) + return output + + def create_endpoints(self, namespace, **kwargs): + """Creates Endpoints entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Endpoints data + Return: data if entity was created w/o errors + """ + endpoints = self.kclient.V1Endpoints(**kwargs) + endpoints_name = endpoints.to_dict()['metadata']['name'] + self.logger.info("creating endpoints %s", endpoints_name) + output = self.k_api.create_namespaced_endpoints(namespace=namespace, body=endpoints) + self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) + return output + + def create_route(self, namespace, **kwargs): + """Creates Route entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Route data + Return: data if entity was created w/o errors + """ + route = self.ociclient.V1Route(**kwargs) + route_name = route.to_dict()['metadata']['name'] + self.logger.info("creating route %s", route_name) + output = self.o_api.create_namespaced_route(namespace=namespace, body=route) + self.wait_route_exist(namespace=namespace, name=route_name) + return output + + def create_service_account(self, namespace, **kwargs): + """Creates Service Account entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Service Account data + Return: data if entity was created w/o errors + """ + sa = self.kclient.V1ServiceAccount(**kwargs) + sa_name = sa.to_dict()['metadata']['name'] + self.logger.info("creating service account %s", sa_name) + output = self.k_api.create_namespaced_service_account(namespace=namespace, body=sa) + self.wait_service_account_exist(namespace=namespace, name=sa_name) + return output + + def create_role_binding(self, namespace, **kwargs): + """Creates RoleBinding entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: RoleBinding data + Return: data if entity was created w/o errors + """ + ObjectRef = self.kclient.V1ObjectReference # noqa + auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) + # there is some version mismatch in api. 
so, it would be better to remove version + kwargs.pop('api_version', None) + role_binding_name = kwargs['metadata']['name'] + + # role and subjects data should be turned into objects before passing them to RoleBinding + role_name = kwargs.pop('role_ref')['name'] + role = ObjectRef(name=role_name) + subjects = [ObjectRef(namespace=namespace, **subj) for subj in kwargs.pop('subjects')] + role_binding = self.ociclient.V1RoleBinding(role_ref=role, subjects=subjects, **kwargs) + self.logger.debug("creating role binding %s in project %s", role_binding_name, namespace) + output = auth_api.create_namespaced_role_binding(namespace=namespace, + body=role_binding) + self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) + return output + + def create_image_stream(self, namespace, **kwargs): + """Creates Image Stream entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Image Stream data + Return: data if entity was created w/o errors + """ + image_stream = self.ociclient.V1ImageStream(**kwargs) + is_name = image_stream.to_dict()['metadata']['name'] + self.logger.info("creating image stream %s", is_name) + output = self.o_api.create_namespaced_image_stream(namespace=namespace, body=image_stream) + self.wait_image_stream_exist(namespace=namespace, name=is_name) + return output + + def create_secret(self, namespace, **kwargs): + """Creates Secret entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Secret data + Return: data if entity was created w/o errors + """ + secret = self.kclient.V1Secret(**kwargs) + secret_name = secret.to_dict()['metadata']['name'] + self.logger.info("creating secret %s", secret_name) + output = self.k_api.create_namespaced_secret(namespace=namespace, body=secret) + self.wait_secret_exist(namespace=namespace, name=secret_name) + return output + + def create_deployment_config(self, namespace, **kwargs): + """Creates Deployment Config entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Deployment Config data + Return: data if entity was created w/o errors + """ + dc = self.ociclient.V1DeploymentConfig(**kwargs) + dc_name = dc.to_dict()['metadata']['name'] + self.logger.info("creating deployment config %s", dc_name) + output = self.o_api.create_namespaced_deployment_config(namespace=namespace, body=dc) + self.wait_deployment_config_exist(namespace=namespace, + name=dc_name) + return output + + def create_persistent_volume_claim(self, namespace, **kwargs): + """Creates Persistent Volume Claim entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Persistent Volume Claim data + Return: data if entity was created w/o errors + """ + pv_claim = self.kclient.V1PersistentVolumeClaim(**kwargs) + pv_claim_name = pv_claim.to_dict()['metadata']['name'] + self.logger.info("creating persistent volume claim %s", pv_claim_name) + output = self.k_api.create_namespaced_persistent_volume_claim(namespace=namespace, + body=pv_claim) + self.wait_persistent_volume_claim_exist(namespace=namespace, + name=pv_claim_name) + return output + + def create_project(self, name, description=None): + """Creates Project(namespace) using REST API. + + Args: + name: openshift namespace name + description: project description. 
it is necessary to store appliance version + Return: data if entity was created w/o errors + """ + proj = self.ociclient.V1Project() + proj.metadata = {'name': name, 'annotations': {}} + if description: + proj.metadata['annotations'] = {'openshift.io/description': description} + self.logger.info("creating new project with name %s", name) + output = self.o_api.create_project(body=proj) + self.wait_project_exist(name=name) + return output + + def run_job(self, namespace, body): + """Creates job from passed template, runs it and waits for the job to be accomplished + + Args: + namespace: openshift namespace name + body: yaml job template + Return: True/False + """ + body = self.rename_structure(body) + job_name = body['metadata']['name'] + self.batch_api.create_namespaced_job(namespace=namespace, body=body) + + return self.wait_job_finished(namespace, job_name) + + def wait_job_finished(self, namespace, name, wait='15m'): + """Waits for job to accomplish + + Args: + namespace: openshift namespace name + name: job name + wait: stop waiting after "wait" time + Return: True/False + """ + def job_wait_accomplished(): + try: + job = self.batch_api.read_namespaced_job_status(name=name, + namespace=namespace) + # todo: replace with checking final statuses + return bool(job.status.succeeded) + except KeyError: + return False + return wait_for(job_wait_accomplished, num_sec=wait)[0] + + def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): + """Waits until pvc gets some particular status. + For example: Bound. + + Args: + namespace: openshift namespace name + name: job name + status: pvc status + wait: stop waiting after "wait" time + Return: True/False + """ + def pvc_wait_status(): + try: + pvc = self.k_api.read_namespaced_persistent_volume_claim(name=name, + namespace=namespace) + return pvc.status.phase == status + except KeyError: + return False + + return wait_for(pvc_wait_status, num_sec=wait)[0] + + def wait_project_exist(self, name, wait=60): + """Checks whether Project exists within some time. + + Args: + name: openshift namespace name + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.o_api.read_project, 'name': name})[0] + + def wait_config_map_exist(self, namespace, name, wait=60): + """Checks whether Config Map exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_config_map, + 'name': name, + 'namespace': namespace})[0] + + def wait_stateful_set_exist(self, namespace, name, wait=900): + """Checks whether StatefulSet exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) + read_st = api.read_namespaced_stateful_set + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': read_st, + 'name': name, + 'namespace': namespace})[0] + + def wait_service_exist(self, namespace, name, wait=60): + """Checks whether Service exists within some time. 
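+        Polls read_namespaced_service via _does_exist until the Service appears or
+        the timeout expires.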
+ + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_service, + 'name': name, + 'namespace': namespace})[0] + + def wait_endpoints_exist(self, namespace, name, wait=60): + """Checks whether Endpoints exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_endpoints, + 'name': name, + 'namespace': namespace})[0] + + def wait_route_exist(self, namespace, name, wait=60): + """Checks whether Route exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.o_api.read_namespaced_route, + 'name': name, + 'namespace': namespace})[0] + + def wait_service_account_exist(self, namespace, name, wait=60): + """Checks whether Service Account exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_service_account, + 'name': name, + 'namespace': namespace})[0] + + def wait_image_stream_exist(self, namespace, name, wait=60): + """Checks whether Image Stream exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.o_api.read_namespaced_image_stream, + 'name': name, + 'namespace': namespace})[0] + + def wait_role_binding_exist(self, namespace, name, wait=60): + """Checks whether RoleBinding exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': auth_api.read_namespaced_role_binding, + 'name': name, + 'namespace': namespace})[0] + + def wait_secret_exist(self, namespace, name, wait=90): + """Checks whether Secret exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_secret, + 'name': name, + 'namespace': namespace})[0] + + def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): + """Checks whether Persistent Volume Claim exists within some time. 
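+        This only waits for the claim object to appear; use
+        wait_persistent_volume_claim_status to wait for a particular phase such as Bound.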
+ + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.k_api.read_namespaced_persistent_volume_claim, + 'name': name, + 'namespace': namespace})[0] + + def wait_deployment_config_exist(self, namespace, name, wait=600): + """Checks whether Deployment Config exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + read_dc = self.o_api.read_namespaced_deployment_config + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': read_dc, + 'name': name, + 'namespace': namespace})[0] + + def wait_template_exist(self, namespace, name, wait=60): + """Checks whether Template exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.o_api.read_namespaced_template, + 'name': name, + 'namespace': namespace})[0] + + def _does_exist(self, func, **kwargs): + try: + func(**kwargs) + return True + except ApiException as e: + self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) + return False + + def _restore_missing_project_role_bindings(self, namespace): + """Fixes one of issues in Openshift REST API + create project doesn't add necessary roles to default sa, probably bug, this is workaround + + Args: + namespace: openshift namespace where roles are absent + Return: None + """ + # adding builder role binding + auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) + builder_role = self.kclient.V1ObjectReference(name='system:image-builder') + builder_sa = self.kclient.V1ObjectReference(name='builder', + kind='ServiceAccount', + namespace=namespace) + builder_role_binding_name = self.kclient.V1ObjectMeta(name='builder-binding') + builder_role_binding = self.ociclient.V1RoleBinding(role_ref=builder_role, + subjects=[builder_sa], + metadata=builder_role_binding_name) + auth_api.create_namespaced_role_binding(namespace=namespace, body=builder_role_binding) + + # adding deployer role binding + deployer_role = self.kclient.V1ObjectReference(name='system:deployer') + deployer_sa = self.kclient.V1ObjectReference(name='deployer', + kind='ServiceAccount', + namespace=namespace) + deployer_role_binding_name = self.kclient.V1ObjectMeta(name='deployer-binding') + deployer_role_binding = self.ociclient.V1RoleBinding(role_ref=deployer_role, + subjects=[deployer_sa], + metadata=deployer_role_binding_name) + auth_api.create_namespaced_role_binding(namespace=namespace, body=deployer_role_binding) + + # adding admin role binding + admin_role = self.kclient.V1ObjectReference(name='admin') + admin_user = self.kclient.V1ObjectReference(name='admin', + kind='User', + namespace=namespace) + admin_role_binding_name = self.kclient.V1ObjectMeta(name='admin-binding') + admin_role_binding = self.ociclient.V1RoleBinding(role_ref=admin_role, + subjects=[admin_user], + metadata=admin_role_binding_name) + auth_api.create_namespaced_role_binding(namespace=namespace, body=admin_role_binding) + + # adding image-puller role binding + puller_role = 
self.kclient.V1ObjectReference(name='system:image-puller') + group_name = 'system:serviceaccounts:{proj}'.format(proj=namespace) + puller_group = self.kclient.V1ObjectReference(name=group_name, + kind='SystemGroup', + namespace=namespace) + role_binding_name = self.kclient.V1ObjectMeta(name='image-puller-binding') + puller_role_binding = self.ociclient.V1RoleBinding(role_ref=puller_role, + subjects=[puller_group], + metadata=role_binding_name) + auth_api.create_namespaced_role_binding(namespace=namespace, body=puller_role_binding) + + def delete_project(self, name, wait=300): + """Removes project(namespace) and all entities in it. + + Args: + name: project name + wait: within this time project should disappear + Return: None + """ + self.logger.info("removing project %s", name) + if self.does_project_exist(name=name): + self.o_api.delete_project(name=name) + try: + wait_for(lambda name: not self.does_project_exist(name=name), num_sec=wait, + func_kwargs={'name': name}) + except TimedOutError: + raise TimedOutError('project {n} was not removed within {w} sec'.format(n=name, + w=wait)) + + def scale_entity(self, namespace, name, replicas, wait=60): + """Allows to scale up/down entities. + One of cases when this is necessary is emulation of stopping/starting appliance + + Args: + namespace: openshift namespace + name: entity name. it can be either stateless Pod from DeploymentConfig or StatefulSet + replicas: number of replicas 0..N + wait: time to wait for scale up/down + Return: None + """ + # only dc and statefulsets can be scaled + st_api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) + + scale_val = self.kclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas)) + if self.is_deployment_config(name=name, namespace=namespace): + self.o_api.patch_namespaced_deployment_config_scale(name=name, namespace=namespace, + body=scale_val) + + def check_scale_value(): + got_scale = self.o_api.read_namespaced_deployment_config_scale(name=name, + namespace=namespace) + return int(got_scale.spec.replicas or 0) + + elif self.is_stateful_set(name=name, namespace=namespace): + # replace this code with stateful_set_scale when kubernetes shipped with openshift + # client gets upgraded + st_spec = self.kclient.V1beta1StatefulSetSpec + st = self.kclient.V1beta1StatefulSet(spec=st_spec(replicas=replicas)) + st_api.patch_namespaced_stateful_set(name=name, namespace=namespace, body=st) + + def check_scale_value(): + got_scale = st_api.read_namespaced_stateful_set(name=name, namespace=namespace) + return int(got_scale.spec.replicas or 0) + else: + raise ValueError("This name %s is not found among " + "deployment configs or stateful sets", name) + self.logger.info("scaling entity %s to %s replicas", name, replicas) + wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) + + def get_project_by_name(self, project_name): + """Returns only the selected Project object""" + return next(proj for proj in self.list_project() if proj.metadata.name == project_name) + + def get_scc(self, name): + """Returns Security Context Constraint by name + + Args: + name: security context constraint name + Returns: security context constraint object + """ + return self.security_api.read_security_context_constraints(name) + + def create_scc(self, body): + """Creates Security Context Constraint from passed structure. + Main aim is to create scc from read and parsed yaml file. 
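+        A minimal usage sketch (the file name and the `system` instance are illustrative):
+
+            scc_body = yaml.safe_load(open('my-scc.yaml'))
+            system.create_scc(body=scc_body)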
+ + Args: + body: security context constraint structure + Returns: security context constraint object + """ + raw_scc = self.rename_structure(body) + if raw_scc.get('api_version') == 'v1': + # there is inconsistency between api and some scc files. v1 is not accepted by api now + raw_scc.pop('api_version') + scc = self.ociclient.V1SecurityContextConstraints(**raw_scc) + return self.security_api.create_security_context_constraints(body=scc) + + def append_sa_to_scc(self, scc_name, namespace, sa): + """Appends Service Account to respective Security Constraint + + Args: + scc_name: security context constraint name + namespace: service account's namespace + sa: service account's name + Returns: updated security context constraint object + """ + user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, + usr=sa) + if self.get_scc(scc_name).users is None: + # ocp 3.6 has None for users if there is no sa in it + update_scc_cmd = [ + {"op": "add", + "path": "/users", + "value": [user]}] + else: + update_scc_cmd = [ + {"op": "add", + "path": "/users/-", + "value": user}] + self.logger.debug("adding user %r to scc %r", user, scc_name) + return self.security_api.patch_security_context_constraints(name=scc_name, + body=update_scc_cmd) + + def remove_sa_from_scc(self, scc_name, namespace, sa): + """Removes Service Account from respective Security Constraint + + Args: + scc_name: security context constraint name + namespace: service account's namespace + sa: service account's name + Returns: updated security context constraint object + """ + user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, + usr=sa) + # json patch's remove works only with indexes. so we have to figure out index + try: + index = next(val[0] for val in enumerate(self.get_scc(scc_name).users) + if val[1] == user) + except StopIteration: + raise ValueError("No such sa {} in scc {}".format(user, scc_name)) + update_scc_cmd = [ + {"op": "remove", + "path": "/users/{}".format(index)}] + self.logger.debug("removing user %r from scc %s with index %s", user, scc_name, index) + return self.security_api.patch_security_context_constraints(name=scc_name, + body=update_scc_cmd) + + def is_vm_running(self, vm_name, running_pods=()): + """Emulates check is vm(appliance) up and running + + Args: + vm_name: (str) project(namespace) name + running_pods: (list) checks only passed number of pods. otherwise, default set. 
+ Return: True/False + """ + if not self.does_vm_exist(vm_name): + return False + self.logger.info("checking all pod statuses for vm name %s", vm_name) + + for pod_name in running_pods or self.get_required_pods(vm_name): + if self.is_pod_running(namespace=vm_name, name=pod_name): + continue + else: + return False + + # todo: check url is available + db is accessable + return True + + def list_deployment_config_names(self, namespace): + """Extracts and returns list of Deployment Config names + + Args: + namespace: project(namespace) name + Return: (list) deployment config names + """ + dcs = self.o_api.list_namespaced_deployment_config(namespace=namespace) + return [dc.metadata.name for dc in dcs.items] + + def list_stateful_set_names(self, namespace): + """Returns list of Stateful Set names + + Args: + namespace: project(namespace) name + Return: (list) stateful set names + """ + st_api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) + sts = st_api.list_namespaced_stateful_set(namespace=namespace) + return [st.metadata.name for st in sts.items] + + def is_deployment_config(self, namespace, name): + """Checks whether passed name belongs to deployment configs in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_deployment_config_names(namespace=namespace) + + def is_stateful_set(self, namespace, name): + """Checks whether passed name belongs to Stateful Sets in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_stateful_set_names(namespace=namespace) + + def does_project_exist(self, name): + """Checks whether Project exists. + + Args: + name: openshift namespace name + Return: True/False + """ + return self._does_exist(func=self.o_api.read_project, name=name) + + def is_vm_stopped(self, vm_name): + """Check whether vm isn't running. + There is no such state stopped for vm in openshift therefore + it just checks that vm isn't running + + Args: + vm_name: project name + Return: True/False + """ + pods = self.k_api.list_namespaced_pod(namespace=vm_name).items + if pods: + self.logger.info(("some pods are still " + "running: {}").format([pod.metadata.name for pod in pods])) + return not bool(pods) + + def wait_vm_running(self, vm_name, num_sec=900): + """Checks whether all project pods are in ready state. + + Args: + vm_name: project name + num_sec: all pods should get ready for this time then - True, otherwise False + Return: True/False + """ + wait_for(self.is_vm_running, [vm_name], num_sec=num_sec) + return True + + def wait_vm_stopped(self, vm_name, num_sec=600): + """Checks whether all project pods are stopped. 
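+        A project counts as stopped once no pods are left in it (see is_vm_stopped).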
+ + Args: + vm_name: project name + num_sec: all pods should not be ready for this time then - True, otherwise False + Return: True/False + """ + wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec) + return True + + def current_ip_address(self, vm_name): + """Tries to retrieve project's external ip + + Args: + vm_name: project name + Return: ip address or None + """ + try: + common_svc = self.k_api.read_namespaced_service(name='common-service', + namespace=vm_name) + return common_svc.spec.external_i_ps[0] + except Exception: + return None + + def is_vm_suspended(self, vm_name): + """There is no such state in openshift + + Args: + vm_name: project name + Return: False + """ + return False + + def in_steady_state(self, vm_name): + """Return whether the specified virtual machine is in steady state + + Args: + vm_name: VM name + Returns: True/False + """ + return (self.is_vm_running(vm_name) + or self.is_vm_stopped(vm_name) + or self.is_vm_suspended(vm_name)) + + @property + def can_rename(self): + return hasattr(self, "rename_vm") + + def list_project_names(self): + """Obtains project names + + Returns: list of project names + """ + projects = self.o_api.list_project().items + return [proj.metadata.name for proj in projects] + + list_vms = list_vm = list_project_names + + def get_appliance_version(self, vm_name): + """Returns appliance version if it is possible + + Args: + vm_name: the openshift project name of the podified appliance + Returns: version + """ + try: + proj = self.o_api.read_project(vm_name) + description = proj.metadata.annotations['openshift.io/description'] + return Version(TemplateName.parse_template(description).version) + except (ApiException, KeyError, ValueError): + try: + return Version(TemplateName.parse_template(vm_name).version) + except ValueError: + return None + + def delete_template(self, template_name, namespace='openshift'): + """Deletes template + + Args: + template_name: stored openshift template name + namespace: project name + Returns: result of delete operation + """ + options = self.kclient.V1DeleteOptions() + return self.o_api.delete_namespaced_template(name=template_name, namespace=namespace, + body=options) + + def get_meta_value(self, instance, key): + raise NotImplementedError( + 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + + def set_meta_value(self, instance, key): + raise NotImplementedError( + 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + + def vm_status(self, vm_name): + """Returns current vm/appliance state + + Args: + vm_name: the openshift project name of the podified appliance + Returns: up/down or exception if vm doesn't exist + """ + if not self.does_vm_exist(vm_name): + raise ValueError("Vm {} doesn't exist".format(vm_name)) + return 'up' if self.is_vm_running(vm_name) else 'down' + + def vm_creation_time(self, vm_name): + """Returns time when vm/appliance was created + + Args: + vm_name: the openshift project name of the podified appliance + Return: datetime obj + """ + if not self.does_vm_exist(vm_name): + raise ValueError("Vm {} doesn't exist".format(vm_name)) + projects = self.o_api.list_project().items + project = next(proj for proj in projects if proj.metadata.name == vm_name) + return project.metadata.creation_timestamp + + @staticmethod + def _progress_log_callback(logger, source, destination, progress): + logger.info("Provisioning progress {}->{}: {}".format( + source, destination, str(progress))) + + def vm_hardware_configuration(self, vm_name): + 
"""Collects project's cpu and ram usage + + Args: + vm_name: openshift's data + Returns: collected data + """ + hw_config = {'ram': 0, + 'cpu': 0} + if not self.does_vm_exist(vm_name): + return hw_config + + proj_pods = self.k_api.list_namespaced_pod(vm_name) + for pod in proj_pods.items: + for container in pod.spec.containers: + cpu = container.resources.requests['cpu'] + hw_config['cpu'] += float(cpu[:-1]) / 1000 if cpu.endswith('m') else float(cpu) + + ram = container.resources.requests['memory'] + if ram.endswith('Mi'): + hw_config['ram'] += float(ram[:-2]) + elif ram.endswith('Gi'): + hw_config['ram'] += float(ram[:-2]) * 1024 + elif ram.endswith('Ki'): + hw_config['ram'] += float(ram[:-2]) / 1024 + else: + hw_config['ram'] += ram + return hw_config + + def usage_and_quota(self): + installed_ram = 0 + installed_cpu = 0 + used_ram = 0 + used_cpu = 0 + # todo: finish this method later + return { + # RAM + 'ram_used': used_ram, + 'ram_total': installed_ram, + 'ram_limit': None, + # CPU + 'cpu_used': used_cpu, + 'cpu_total': installed_cpu, + 'cpu_limit': None, + } + + def get_required_pods(self, vm_name): + """Provides list of pods which should be present in appliance + + Args: + vm_name: openshift project name + Returns: list + """ + version = self.get_appliance_version(vm_name) + if version and version < '5.9': + return self.required_project_pods58 + else: + return self.required_project_pods + + def get_ip_address(self, vm_name, timeout=600): + """ Returns the IP address for the selected appliance. + + Args: + vm_name: The name of the vm to obtain the IP for. + timeout: The IP address wait timeout. + Returns: A string containing the first found IP that isn't the device. + """ + try: + ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), + fail_condition=None, + delay=5, + num_sec=timeout, + message="get_ip_address from openshift") + except TimedOutError: + ip_address = None + return ip_address + + def disconnect(self): + pass + + def get_appliance_tags(self, name): + """Returns appliance tags stored in appropriate config map if it exists. 
+ + Args: + name: appliance project name + Returns: dict with tags and urls + """ + try: + read_data = self.k_api.read_namespaced_config_map(name='image-repo-data', + namespace=name) + return json.loads(read_data.data['tags']) + except ApiException: + return {} + + def get_appliance_url(self, name): + """Returns appliance url assigned by Openshift + + Args: + name: appliance project name + Returns: url or None + """ + try: + route = self.o_api.list_namespaced_route(name) + return route.items[0].spec.host + except (ApiException, IndexError): + return None + + def get_appliance_uuid(self, name): + """Returns appliance uuid assigned by Openshift + + Args: + name: appliance project name + Returns: uuid + """ + return self.get_project_by_name(name).metadata.uid + + def is_appliance(self, name): + """Checks whether passed vm/project is appliance + + Args: + name: appliance project name + Returns: True/False + """ + return bool(self.get_appliance_tags(name)) + + def find_job_pods(self, namespace, name): + """Finds and returns all remaining job pods + + Args: + namespace: project(namespace) name + name: job name + Returns: list of pods + """ + pods = [] + for pod in self.list_pods(namespace=namespace): + if pod.metadata.labels.get('job-name', '') == name: + pods.append(pod) + return pods + + def read_pod_log(self, namespace, name): + """Reads and returns pod log + + Args: + namespace: project(namespace) name + name: pod name + Returns: list of pods + """ + return self.k_api.read_namespaced_pod_log(name=name, namespace=namespace) + + def delete_pod(self, namespace, name, options=None): + """Tries to remove passed pod + + Args: + namespace: project(namespace) name + name: pod name + options: delete options like force delete and etc + Returns: Pod + """ + return self.k_api.delete_namespaced_pod(namespace=namespace, name=name, + body=options or self.kclient.V1DeleteOptions()) + + def is_pod_running(self, namespace, name): + """Checks whether pod is running + + Args: + namespace: (str) project(namespace) name + name: (str) pod name + Return: True/False + """ + self.logger.info("checking pod status %s", name) + + if self.is_deployment_config(name=name, namespace=namespace): + dc = self.o_api.read_namespaced_deployment_config(name=name, namespace=namespace) + status = dc.status.ready_replicas + elif self.is_stateful_set(name=name, namespace=namespace): + pods = self.k_api.list_namespaced_pod(namespace=namespace, + label_selector='name={n}'.format(n=name)) + pod_stats = [pod.status.container_statuses[-1].ready for pod in pods.items] + status = all(pod_stats) + else: + raise ValueError("No such pod name among StatefulSets or Stateless Pods") + + if status and int(status) > 0: + self.logger.debug("pod %s looks up and running", name) + return True + else: + self.logger.debug("pod %s isn't up yet", name) + return False + + def wait_pod_running(self, namespace, name, num_sec=300): + """Waits for pod to switch to ready state + + Args: + namespace: project name + name: pod name + num_sec: all pods should get ready for this time then - True, otherwise TimeoutError + Return: True/False + """ + wait_for(self.is_pod_running, [namespace, name], fail_condition=False, num_sec=num_sec) + return True + + def is_pod_stopped(self, namespace, name): + """Check whether pod isn't running. 
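
(Editor's note) On the readiness helpers above: is_pod_running checks DeploymentConfigs through ready_replicas and StatefulSets through per-pod container statuses, and wait_pod_running just polls it with wait_for. A minimal sketch of calling that polling pattern with a timeout guard (the system object, project and pod names are placeholders):

    from wait_for import TimedOutError, wait_for

    def ensure_pod_ready(system, namespace, name, num_sec=300):
        # Returns True once the pod reports ready, False if num_sec elapses first.
        try:
            wait_for(system.is_pod_running, [namespace, name],
                     fail_condition=False, delay=5, num_sec=num_sec)
            return True
        except TimedOutError:
            return False
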
+ + Args: + namespace: (str) project(namespace) name + name: (str) pod name + Return: True/False + """ + pods = self.k_api.list_namespaced_pod(namespace=namespace).items + return not bool([pod for pod in pods if name == pod.metadata.name]) + + def wait_pod_stopped(self, namespace, name, num_sec=300): + """Waits for pod to stop + + Args: + namespace: project name + name: pod name + num_sec: all pods should disappear - True, otherwise TimeoutError + Return: True/False + """ + wait_for(self.is_pod_stopped, [namespace, name], num_sec=num_sec) + return True + + def run_command(self, namespace, name, cmd, **kwargs): + """Connects to pod and tries to run + + Args: + namespace: (str) project name + name: (str) pod name + cmd: (list) command to run + Return: command output + """ + # there are some limitations and this code isn't robust enough due to + # https://github.com/kubernetes-client/python/issues/58 + return self.k_api.connect_post_namespaced_pod_exec(namespace=namespace, + name=name, + command=cmd, + stdout=True, + stderr=True, + **kwargs) From 4c08f571ee12b167919aaa2b8d9f2f9f1a84d7b1 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Wed, 15 May 2019 15:22:28 -0400 Subject: [PATCH 2/9] Updating _connect method --- requirements.txt | 3 ++- wrapanapi/systems/openshift.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 399c255d..840a10b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,7 +32,8 @@ six tzlocal vspk==5.3.2 wait_for -websocket_client +#websocket_client +websocket_client==0.56.0 # suds jurko supports python3, suds is only used on python2 suds-jurko; python_version > '3.0' diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index f4ebcfad..a74492ac 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -168,9 +168,10 @@ def _connect(self): token = 'Bearer {token}'.format(token=self.token) config = kubeclientconfig.new_client_from_config() #config = ociclient.Configuration() - config.host = url - config.verify_ssl = self.verify_ssl - config.debug = self.debug + config.configuration.host = url + #config.configuration.verify_ssl = self.verify_ssl + config.configuration.verify_ssl = False + config.configuration.debug = self.debug config.configuration.api_key['authorization'] = token self.dyn_client = DynamicClient(config) From 5407db2acf441cdbd5651b55a81c6ee0f70e45b2 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Tue, 21 May 2019 16:31:10 -0400 Subject: [PATCH 3/9] Adding all list methods --- wrapanapi/entities/vm.py | 5 + wrapanapi/systems/openshift.py | 1760 +++++--------------------------- 2 files changed, 253 insertions(+), 1512 deletions(-) diff --git a/wrapanapi/entities/vm.py b/wrapanapi/entities/vm.py index 058865c6..d0bd1ea8 100644 --- a/wrapanapi/entities/vm.py +++ b/wrapanapi/entities/vm.py @@ -32,6 +32,11 @@ class VmState(object): UNKNOWN = 'VmState.UNKNOWN' SHELVED = 'VmState.SHELVED' SHELVED_OFFLOADED = 'VmState.SHELVED_OFFLOADED' + # Openshift Pod States + PENDING = 'VmState.PENDING' + SUCCEEDED = 'VmState.SUCCEEDED' + FAILED = 'VmState.FAILED' + UNKNOWN = 'VmState.UNKNOWN' @classmethod def valid_states(cls): diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index a74492ac..a4ace867 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +import re import copy import json import string @@ -18,6 +19,7 @@ from openshift import client 
as ociclient from wait_for import TimedOutError, wait_for +from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin, VmState) from wrapanapi.systems.base import System @@ -86,1079 +88,81 @@ def wrap(*args, **kwargs): return wrap -@reconnect(unauthenticated_error_handler) -class Openshift(System): - - _stats_available = { - 'num_container': lambda self: len(self.list_container()), - 'num_pod': lambda self: len(self.list_pods()), - 'num_service': lambda self: len(self.list_service()), - 'num_replication_controller': - lambda self: len(self.list_replication_controller()), - 'num_image': lambda self: len(self.list_image_id()), - 'num_node': lambda self: len(self.list_node()), - 'num_image_registry': lambda self: len(self.list_image_registry()), - 'num_project': lambda self: len(self.list_project()), - 'num_route': lambda self: len(self.list_route()), - 'num_template': lambda self: len(self.list_template()) +class Pod(Vm): + state_map = { + 'pending': VmState.PENDING, + 'running': VmState.RUNNING, + 'succeeded': VmState.SUCCEEDED, + 'failed': VmState.FAILED, + 'unknown': VmState.UNKNOWN } - stream2template_tags_mapping59 = { - 'cfme-openshift-httpd': {'tag': 'HTTPD_IMG_TAG', 'url': 'HTTPD_IMG_NAME'}, - 'cfme-openshift-app': {'tag': 'BACKEND_APPLICATION_IMG_TAG', - 'url': 'BACKEND_APPLICATION_IMG_NAME'}, - 'cfme-openshift-app-ui': {'tag': 'FRONTEND_APPLICATION_IMG_TAG', - 'url': 'FRONTEND_APPLICATION_IMG_NAME'}, - 'cfme-openshift-embedded-ansible': {'tag': 'ANSIBLE_IMG_TAG', 'url': 'ANSIBLE_IMG_NAME'}, - 'cfme-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, - 'cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, - } - - stream2template_tags_mapping58 = { - 'cfme58-openshift-app': {'tag': 'APPLICATION_IMG_TAG', 'url': 'APPLICATION_IMG_NAME'}, - 'cfme58-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, - 'cfme58-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, - } - - scc_user_mapping59 = ( - {'scc': 'anyuid', 'user': 'cfme-anyuid'}, - {'scc': 'anyuid', 'user': 'cfme-orchestrator'}, - {'scc': 'anyuid', 'user': 'cfme-httpd'}, - {'scc': 'privileged', 'user': 'cfme-privileged'}, - ) - - scc_user_mapping58 = ( - {'scc': 'anyuid', 'user': 'cfme-anyuid'}, - {'scc': 'privileged', 'user': 'default'}, - ) - - default_namespace = 'openshift' - required_project_pods = ('httpd', 'memcached', 'postgresql', - 'cloudforms', 'cloudforms-backend') - required_project_pods58 = ('memcached', 'postgresql', 'cloudforms') - not_required_project_pods = ('cloudforms-backend', 'ansible') - - def __init__(self, hostname, protocol="https", port=8443, debug=False, - verify_ssl=False, **kwargs): - super(Openshift, self).__init__(kwargs) - self.hostname = hostname - self.protocol = protocol - self.port = port - self.username = kwargs.get('username', '') - self.password = kwargs.get('password', '') - self.base_url = kwargs.get('base_url', None) - self.token = kwargs.get('token', '') - self.auth = self.token if self.token else (self.username, self.password) - self.debug = debug - self.verify_ssl = verify_ssl - - self._connect() - - def _identifying_attrs(self): - """ - Return a dict with key, value pairs for each kwarg that is used to - uniquely identify this system. 
- """ - return {'hostname': self.hostname, 'port': self.port} - - def _connect(self): - url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, - port=self.port) - - token = 'Bearer {token}'.format(token=self.token) - config = kubeclientconfig.new_client_from_config() - #config = ociclient.Configuration() - config.configuration.host = url - #config.configuration.verify_ssl = self.verify_ssl - config.configuration.verify_ssl = False - config.configuration.debug = self.debug - config.configuration.api_key['authorization'] = token - self.dyn_client = DynamicClient(config) - - # self.ociclient = ociclient - # self.kclient = kubeclient - # self.oapi_client = ociclient.ApiClient(config=config) - # self.kapi_client = kubeclient.ApiClient(config=config) - # self.o_api = ociclient.OapiApi(api_client=self.oapi_client) - # self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) - # self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) - # self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api - - def info(self): - url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, - port=self.port) - return "rhopenshift {}".format(url) - - def list_route(self, namespace=None): - """Returns list of routes""" - if namespace: - routes = self.o_api.list_namespaced_route(namespace=namespace).items - else: - routes = self.o_api.list_route_for_all_namespaces().items - return routes - - def list_image_streams(self, namespace=None): - """Returns list of image streams""" - if namespace: - image_streams = self.o_api.list_namespaced_image_stream(namespace=namespace).items - else: - image_streams = self.o_api.list_image_stream_for_all_namespaces().items - return image_streams - - def list_project(self): - """Returns list of projects""" - return self.o_api.list_project().items - - def list_template(self, namespace=None): - """Returns list of templates""" - if namespace: - return [t.metadata.name for t in self.o_api.list_namespaced_template(namespace).items] - else: - return [t.metadata.name for t in self.o_api.list_template_for_all_namespaces().items] - - # fixme: get rid of this mapping - list_templates = list_template - - def list_image_stream_images(self): - """Returns list of images (Docker registry only)""" - return [item for item in self.o_api.list_image().items - if item.docker_image_reference is not None] - - def list_deployment_config(self, namespace=None): - """Returns list of deployment configs""" - if namespace: - dc = self.o_api.list_namespaced_deployment_config(namespace=namespace).items - else: - dc = self.o_api.list_deployment_config_for_all_namespaces().items - return dc - - def list_service(self, namespace=None): - """Returns list of services.""" - if namespace: - svc = self.k_api.list_namespaced_service(namespace=namespace).items - else: - svc = self.k_api.list_service_for_all_namespaces().items - return svc - - def list_replication_controller(self, namespace=None): - """Returns list of replication controllers""" - if namespace: - rc = self.k_api.list_namespaced_replication_controller(namespace=namespace).items - else: - rc = self.k_api.list_replication_controller_for_all_namespaces().items - return rc - - def list_node(self): - """Returns list of nodes""" - nodes = self.k_api.list_node().items - return nodes - - def cluster_info(self): - """Returns information about the cluster - number of CPUs and memory in GB""" - aggregate_cpu, aggregate_mem = 0, 0 - for node in self.list_node(): - 
aggregate_cpu += int(node.status.capacity['cpu']) - # converting KiB to GB. 1KiB = 1.024E-6 GB - aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400)) - - return {'cpu': aggregate_cpu, 'memory': aggregate_mem} - - def list_persistent_volume(self): - """Returns list of persistent volumes""" - pv = self.k_api.list_persistent_volume().items - return pv - - def list_pods(self, namespace=None): - """Returns list of container groups (pods). - If project_name is passed, only the pods under the selected project will be returned""" - if namespace: - pods = self.k_api.list_namespaced_pod(namespace=namespace).items - else: - pods = self.k_api.list_pod_for_all_namespaces().items - return pods - - def list_container(self, namespace=None): - """Returns list of containers (derived from pods) - If project_name is passed, only the containers under the selected project will be returned - """ - pods = self.list_pods(namespace=namespace) - return [pod.spec.containers for pod in pods] - - def list_image_id(self, namespace=None): - """Returns list of unique image ids (derived from pods)""" - pods = self.list_pods(namespace=namespace) - statuses = [] - for pod in pods: - for status in pod.status.container_statuses: - statuses.append(status) - return sorted(set([status.image_id for status in statuses])) - - def list_image_registry(self, namespace=None): - """Returns list of image registries (derived from pods)""" - pods = self.list_pods(namespace=namespace) - statuses = [] - for pod in pods: - for status in pod.status.container_statuses: - statuses.append(status) - # returns only the image registry name, without the port number in case of local registry - return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) - - def expose_db_ip(self, namespace): - """Creates special service in appliance project (namespace) which makes internal appliance - db be available outside. - - Args: - namespace: (str) openshift namespace - Returns: ip - """ - # creating common service with external ip and extracting assigned ip - service_obj = self.kclient.V1Service(**json.loads(common_service)) - self.k_api.create_namespaced_service(namespace=namespace, body=service_obj) - # external ip isn't assigned immediately, so, we have to wait until it is assigned - - return self.get_ip_address(namespace) - - def deploy_template(self, template, tags=None, password='smartvm', **kwargs): - """Deploy a VM from a template - - Args: - template: (str) The name of the template to deploy - tags: (dict) dict with tags if some tag isn't passed it is set to 'latest' - vm_name: (str) is used as project name if passed. otherwise, name is generated (sprout) - progress_callback: (func) function to return current progress (sprout) - template_params: (dict) parameters to override during template deployment - running_pods: (list) checks that passed pods are running instead of default set - since input tags are image stream tags whereas template expects its own tags. - So, input tags should match stream2template_tags_mapping. 
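
(Editor's note) To make the tag handling concrete: each supplied image-stream tag is translated through stream2template_tags_mapping into the template's *_IMG_NAME/*_IMG_TAG parameters, and anything not supplied defaults to 'latest'. A small sketch of that step (the registry URL and version below are made-up values; the mapping entry mirrors the 5.9 table):

    tags_mapping = {
        'cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'},
    }
    tags = {'cfme-openshift-postgresql': {'url': 'registry.example.com/cfme/postgresql',
                                          'tag': '5.9.1.2'}}

    prepared_tags = {entry['tag']: 'latest' for entry in tags_mapping.values()}
    for stream, value in tags.items():
        prepared_tags[tags_mapping[stream]['url']] = value['url']
        prepared_tags[tags_mapping[stream]['tag']] = value['tag']
    # prepared_tags == {'POSTGRESQL_IMG_TAG': '5.9.1.2',
    #                   'POSTGRESQL_IMG_NAME': 'registry.example.com/cfme/postgresql'}
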
- password: this password will be set as default everywhere - Returns: dict with parameters necessary for appliance setup or None if deployment failed - """ - self.logger.info("starting template %s deployment", template) - self.wait_template_exist(namespace=self.default_namespace, name=template) - - if not self.base_url: - raise ValueError("base url isn't provided") - - version = Version(TemplateName.parse_template(template).version) - - if version >= '5.9': - tags_mapping = self.stream2template_tags_mapping59 - else: - tags_mapping = self.stream2template_tags_mapping58 - - prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} - if tags: - not_found_tags = [t for t in tags.keys() if t not in tags_mapping.keys()] - if not_found_tags: - raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) - for tag, value in tags.items(): - prepared_tags[tags_mapping[tag]['url']] = value['url'] - prepared_tags[tags_mapping[tag]['tag']] = value['tag'] - - # create project - # assuming this is cfme installation and generating project name - proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) - - # for sprout - if 'vm_name' in kwargs: - proj_name = kwargs['vm_name'] - else: - proj_name = "{t}-project-{proj_id}".format(t=template, proj_id=proj_id) - - template_params = kwargs.pop('template_params', {}) - running_pods = kwargs.pop('running_pods', ()) - proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) - self.logger.info("unique id %s, project name %s", proj_id, proj_name) - - default_progress_callback = partial(self._progress_log_callback, self.logger, template, - proj_name) - progress_callback = kwargs.get('progress_callback', default_progress_callback) - - self.create_project(name=proj_name, description=template) - progress_callback("Created Project `{}`".format(proj_name)) - - # grant rights according to scc - self.logger.info("granting rights to project %s sa", proj_name) - scc_user_mapping = self.scc_user_mapping59 if version >= '5.9' else self.scc_user_mapping58 - - self.logger.info("granting required rights to project's service accounts") - for mapping in scc_user_mapping: - self.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, sa=mapping['user']) - progress_callback("Added service accounts to appropriate scc") - - # appliances prior 5.9 don't need such rights - # and those rights are embedded into templates since 5.9.2.2 - if version >= '5.9' and version < '5.9.2.2': - # grant roles to orchestrator - self.logger.info("assigning additional roles to cfme-orchestrator") - auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - orchestrator_sa = self.kclient.V1ObjectReference(name='cfme-orchestrator', - kind='ServiceAccount', - namespace=proj_name) - - view_role = self.kclient.V1ObjectReference(name='view') - view_role_binding_name = self.kclient.V1ObjectMeta(name='view') - view_role_binding = self.ociclient.V1RoleBinding(role_ref=view_role, - subjects=[orchestrator_sa], - metadata=view_role_binding_name) - self.logger.debug("creating 'view' role binding " - "for cfme-orchestrator sa in project %s", proj_name) - auth_api.create_namespaced_role_binding(namespace=proj_name, body=view_role_binding) - - edit_role = self.kclient.V1ObjectReference(name='edit') - edit_role_binding_name = self.kclient.V1ObjectMeta(name='edit') - edit_role_binding = self.ociclient.V1RoleBinding(role_ref=edit_role, - subjects=[orchestrator_sa], - metadata=edit_role_binding_name) - 
self.logger.debug("creating 'edit' role binding " - "for cfme-orchestrator sa in project %s", proj_name) - auth_api.create_namespaced_role_binding(namespace=proj_name, body=edit_role_binding) - - self.logger.info("project sa created via api have no some mandatory roles. adding them") - self._restore_missing_project_role_bindings(namespace=proj_name) - progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) - - # creating common service with external ip - ext_ip = self.expose_db_ip(proj_name) - progress_callback("Common Service has been added") - - # adding config map with image stream urls and tags - image_repo_cm = image_repo_cm_template.format(tags=json.dumps(tags)) - self.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) - - # creating pods and etc - processing_params = {'DATABASE_PASSWORD': password, - 'APPLICATION_DOMAIN': proj_url} - processing_params.update(prepared_tags) - - # updating template parameters - processing_params.update(template_params) - self.logger.info(("processing template and passed params in order to " - "prepare list of required project entities")) - template_entities = self.process_template(name=template, namespace=self.default_namespace, - parameters=processing_params) - self.logger.debug("template entities:\n %r", template_entities) - progress_callback("Template has been processed") - self.create_template_entities(namespace=proj_name, entities=template_entities) - progress_callback("All template entities have been created") - - self.logger.info("verifying that all created entities are up and running") - progress_callback("Waiting for all pods to be ready and running") - try: - wait_for(self.is_vm_running, num_sec=600, - func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) - self.logger.info("all pods look up and running") - progress_callback("Everything has been deployed w/o errors") - return {'url': proj_url, - 'external_ip': ext_ip, - 'project': proj_name, - } - except TimedOutError: - self.logger.error("deployment failed. Please check failed pods details") - # todo: return and print all failed pod details - raise - - def create_template_entities(self, namespace, entities): - """Creates entities from openshift template. - - Since there is no methods in openshift/kubernetes rest api for app deployment from template, - it is necessary to create template entities one by one using respective entity api. - - Args: - namespace: (str) openshift namespace - entities: (list) openshift entities - - Returns: None - """ - self.logger.debug("passed template entities:\n %r", entities) - kinds = set([e['kind'] for e in entities]) - entity_names = {e: inflection.underscore(e) for e in kinds} - proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} - - for entity in entities: - if entity['kind'] in kinds: - procedure = getattr(self, proc_names[entity['kind']], None) - obtained_entity = procedure(namespace=namespace, **entity) - self.logger.debug(obtained_entity) - else: - self.logger.error("some entity %s isn't present in entity creation list", entity) - - def start_vm(self, vm_name): - """Starts a vm. 
- - Args: - vm_name: name of the vm to be started - Returns: whether vm action has been initiated properly - """ - self.logger.info("starting vm/project %s", vm_name) - if self.does_project_exist(vm_name): - for pod in self.get_required_pods(vm_name): - self.scale_entity(name=pod, namespace=vm_name, replicas=1) - else: - raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) - - def stop_vm(self, vm_name): - """Stops a vm. - - Args: - vm_name: name of the vm to be stopped - Returns: whether vm action has been initiated properly - """ - self.logger.info("stopping vm/project %s", vm_name) - if self.does_project_exist(vm_name): - for pod in self.get_required_pods(vm_name): - self.scale_entity(name=pod, namespace=vm_name, replicas=0) - else: - raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) - - def delete_vm(self, vm_name): - """Deletes a vm. - - Args: - vm_name: name of the vm to be deleted - Returns: whether vm action has been initiated properly - """ - self.logger.info("removing vm/project %s", vm_name) - self.delete_project(name=vm_name) - return True - - def does_vm_exist(self, vm_name): - """Does VM exist? - - Args: - vm_name: The name of the VM - Returns: whether vm exists - """ - return self.does_project_exist(vm_name) - - @staticmethod - def _update_template_parameters(template, **params): - """Updates openshift template parameters. - Since Openshift REST API doesn't provide any api to change default parameter values as - it is implemented in `oc process`. This method implements such a parameter replacement. - - Args: - template: Openshift's template object - params: bunch of key=value parameters - Returns: updated template - """ - template = copy.deepcopy(template) - if template.parameters: - new_parameters = template.parameters - for new_param, new_value in params.items(): - for index, old_param in enumerate(new_parameters): - if old_param['name'] == new_param: - old_param = new_parameters.pop(index) - if 'generate' in old_param: - old_param['generate'] = None - old_param['_from'] = None - - old_param['value'] = new_value - new_parameters.append(old_param) - template.parameters = new_parameters - return template - - def process_template(self, name, namespace, parameters=None): - """Implements template processing mechanism similar to `oc process`. - - Args: - name: (str) template name - namespace: (str) openshift namespace - parameters: parameters and values to replace default ones - Return: list of objects stored in template - """ - # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 - raw_response = self.o_api.read_namespaced_template(name=name, namespace=namespace, - _preload_content=False) - raw_data = json.loads(raw_response.data) - - return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) - - def process_raw_template(self, body, namespace, parameters=None): - """Implements template processing mechanism similar to `oc process`. - It does two functions - 1. parametrized templates have to be processed in order to replace parameters with values. - 2. templates consist of list of objects. Those objects have to be extracted - before creation accordingly. 
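
(Editor's note) To illustrate how template processing and entity creation fit together in deploy_template (the template name, project name and parameter values below are placeholders):

    params = {'DATABASE_PASSWORD': 'smartvm', 'APPLICATION_DOMAIN': 'cfme.apps.example.com'}
    entities = system.process_template(name='cfme-template', namespace='openshift',
                                       parameters=params)
    # each returned entity carries a 'kind'; create_template_entities dispatches it to the
    # matching create_<kind> helper (e.g. DeploymentConfig -> create_deployment_config)
    system.create_template_entities(namespace='my-project', entities=entities)
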
- - Args: - body: (dict) template body - namespace: (str) openshift namespace - parameters: parameters and values to replace default ones - Return: list of objects stored in template - """ - updated_data = self.rename_structure(body) - read_template = self.ociclient.V1Template(**updated_data) - if parameters: - updated_template = self._update_template_parameters(template=read_template, - **parameters) - else: - updated_template = read_template - raw_response = self.o_api.create_namespaced_processed_template(namespace=namespace, - body=updated_template, - _preload_content=False) - raw_data = json.loads(raw_response.data) - updated_data = self.rename_structure(raw_data) - processed_template = self.ociclient.V1Template(**updated_data) - return processed_template.objects - - def rename_structure(self, struct): - """Fixes inconsistency in input/output data of openshift python client methods - - Args: - struct: data to process and rename - Return: updated data - """ - if not isinstance(struct, six.string_types) and isinstance(struct, Iterable): - if isinstance(struct, dict): - for key in struct.keys(): - # we shouldn't rename something under data or spec - if key == 'stringData': - # this key has to be renamed but its contents should be left intact - struct[inflection.underscore(key)] = struct.pop(key) - elif key in ('spec', 'data', 'string_data', 'annotations'): - # these keys and data should be left intact - pass - else: - # all this data should be processed and updated - val = self.rename_structure(struct.pop(key)) - struct[inflection.underscore(key)] = val - return struct - else: - for index, item in enumerate(struct): - struct[index] = self.rename_structure(item) - return struct - else: - return struct - - def create_config_map(self, namespace, **kwargs): - """Creates ConfigMap entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: ConfigMap data - Return: data if entity was created w/o errors - """ - conf_map = self.kclient.V1ConfigMap(**kwargs) - conf_map_name = conf_map.to_dict()['metadata']['name'] - self.logger.info("creating config map %s", conf_map_name) - output = self.k_api.create_namespaced_config_map(namespace=namespace, body=conf_map) - self.wait_config_map_exist(namespace=namespace, name=conf_map_name) - return output - - def replace_config_map(self, namespace, **kwargs): - """Replace ConfigMap entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: ConfigMap data - Return: data if entity was created w/o errors - """ - conf_map = self.kclient.V1ConfigMap(**kwargs) - conf_map_name = conf_map.to_dict()['metadata']['name'] - self.logger.info("replacing config map %s", conf_map_name) - output = self.k_api.replace_namespaced_config_map(namespace=namespace, - name=conf_map_name, - body=conf_map) - return output - - def create_stateful_set(self, namespace, **kwargs): - """Creates StatefulSet entity using REST API. 
- - Args: - namespace: openshift namespace where entity has to be created - kwargs: StatefulSet data - Return: data if entity was created w/o errors - """ - st = self.kclient.V1beta1StatefulSet(**kwargs) - st_name = st.to_dict()['metadata']['name'] - self.logger.info("creating stateful set %s", st_name) - api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) - output = api.create_namespaced_stateful_set(namespace=namespace, body=st) - self.wait_stateful_set_exist(namespace=namespace, name=st_name) - return output - - def create_service(self, namespace, **kwargs): - """Creates Service entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Service data - Return: data if entity was created w/o errors - """ - service = self.kclient.V1Service(**kwargs) - service_name = service.to_dict()['metadata']['name'] - self.logger.info("creating service %s", service_name) - output = self.k_api.create_namespaced_service(namespace=namespace, body=service) - self.wait_service_exist(namespace=namespace, name=service_name) - return output - - def create_endpoints(self, namespace, **kwargs): - """Creates Endpoints entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Endpoints data - Return: data if entity was created w/o errors - """ - endpoints = self.kclient.V1Endpoints(**kwargs) - endpoints_name = endpoints.to_dict()['metadata']['name'] - self.logger.info("creating endpoints %s", endpoints_name) - output = self.k_api.create_namespaced_endpoints(namespace=namespace, body=endpoints) - self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) - return output - - def create_route(self, namespace, **kwargs): - """Creates Route entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Route data - Return: data if entity was created w/o errors - """ - route = self.ociclient.V1Route(**kwargs) - route_name = route.to_dict()['metadata']['name'] - self.logger.info("creating route %s", route_name) - output = self.o_api.create_namespaced_route(namespace=namespace, body=route) - self.wait_route_exist(namespace=namespace, name=route_name) - return output - - def create_service_account(self, namespace, **kwargs): - """Creates Service Account entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Service Account data - Return: data if entity was created w/o errors - """ - sa = self.kclient.V1ServiceAccount(**kwargs) - sa_name = sa.to_dict()['metadata']['name'] - self.logger.info("creating service account %s", sa_name) - output = self.k_api.create_namespaced_service_account(namespace=namespace, body=sa) - self.wait_service_account_exist(namespace=namespace, name=sa_name) - return output - - def create_role_binding(self, namespace, **kwargs): - """Creates RoleBinding entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: RoleBinding data - Return: data if entity was created w/o errors - """ - ObjectRef = self.kclient.V1ObjectReference # noqa - auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - # there is some version mismatch in api. 
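
(Editor's note) A side note on the create_* helpers in this block: they all share one shape, build the client model from the processed template dict, create it in the target namespace, then block on the matching wait_*_exist check. A condensed sketch of that shared pattern (the helper name and its callable arguments are made up for illustration):

    from kubernetes.client.rest import ApiException
    from wait_for import wait_for

    def create_and_wait(create, read, namespace, body, wait=60):
        # 'create'/'read' are the paired namespaced calls for one kind,
        # e.g. k_api.create_namespaced_service / k_api.read_namespaced_service
        name = body['metadata']['name']
        result = create(namespace=namespace, body=body)

        def exists():
            try:
                read(name=name, namespace=namespace)
                return True
            except ApiException:
                return False

        wait_for(exists, num_sec=wait)
        return result
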
so, it would be better to remove version - kwargs.pop('api_version', None) - role_binding_name = kwargs['metadata']['name'] - - # role and subjects data should be turned into objects before passing them to RoleBinding - role_name = kwargs.pop('role_ref')['name'] - role = ObjectRef(name=role_name) - subjects = [ObjectRef(namespace=namespace, **subj) for subj in kwargs.pop('subjects')] - role_binding = self.ociclient.V1RoleBinding(role_ref=role, subjects=subjects, **kwargs) - self.logger.debug("creating role binding %s in project %s", role_binding_name, namespace) - output = auth_api.create_namespaced_role_binding(namespace=namespace, - body=role_binding) - self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) - return output - - def create_image_stream(self, namespace, **kwargs): - """Creates Image Stream entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Image Stream data - Return: data if entity was created w/o errors - """ - image_stream = self.ociclient.V1ImageStream(**kwargs) - is_name = image_stream.to_dict()['metadata']['name'] - self.logger.info("creating image stream %s", is_name) - output = self.o_api.create_namespaced_image_stream(namespace=namespace, body=image_stream) - self.wait_image_stream_exist(namespace=namespace, name=is_name) - return output - - def create_secret(self, namespace, **kwargs): - """Creates Secret entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Secret data - Return: data if entity was created w/o errors - """ - secret = self.kclient.V1Secret(**kwargs) - secret_name = secret.to_dict()['metadata']['name'] - self.logger.info("creating secret %s", secret_name) - output = self.k_api.create_namespaced_secret(namespace=namespace, body=secret) - self.wait_secret_exist(namespace=namespace, name=secret_name) - return output - - def create_deployment_config(self, namespace, **kwargs): - """Creates Deployment Config entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Deployment Config data - Return: data if entity was created w/o errors - """ - dc = self.ociclient.V1DeploymentConfig(**kwargs) - dc_name = dc.to_dict()['metadata']['name'] - self.logger.info("creating deployment config %s", dc_name) - output = self.o_api.create_namespaced_deployment_config(namespace=namespace, body=dc) - self.wait_deployment_config_exist(namespace=namespace, - name=dc_name) - return output - - def create_persistent_volume_claim(self, namespace, **kwargs): - """Creates Persistent Volume Claim entity using REST API. - - Args: - namespace: openshift namespace where entity has to be created - kwargs: Persistent Volume Claim data - Return: data if entity was created w/o errors - """ - pv_claim = self.kclient.V1PersistentVolumeClaim(**kwargs) - pv_claim_name = pv_claim.to_dict()['metadata']['name'] - self.logger.info("creating persistent volume claim %s", pv_claim_name) - output = self.k_api.create_namespaced_persistent_volume_claim(namespace=namespace, - body=pv_claim) - self.wait_persistent_volume_claim_exist(namespace=namespace, - name=pv_claim_name) - return output - - def create_project(self, name, description=None): - """Creates Project(namespace) using REST API. - - Args: - name: openshift namespace name - description: project description. 
it is necessary to store appliance version - Return: data if entity was created w/o errors - """ - proj = self.ociclient.V1Project() - proj.metadata = {'name': name, 'annotations': {}} - if description: - proj.metadata['annotations'] = {'openshift.io/description': description} - self.logger.info("creating new project with name %s", name) - output = self.o_api.create_project(body=proj) - self.wait_project_exist(name=name) - return output - - def run_job(self, namespace, body): - """Creates job from passed template, runs it and waits for the job to be accomplished - - Args: - namespace: openshift namespace name - body: yaml job template - Return: True/False - """ - body = self.rename_structure(body) - job_name = body['metadata']['name'] - self.batch_api.create_namespaced_job(namespace=namespace, body=body) - - return self.wait_job_finished(namespace, job_name) - - def wait_job_finished(self, namespace, name, wait='15m'): - """Waits for job to accomplish - - Args: - namespace: openshift namespace name - name: job name - wait: stop waiting after "wait" time - Return: True/False - """ - def job_wait_accomplished(): - try: - job = self.batch_api.read_namespaced_job_status(name=name, - namespace=namespace) - # todo: replace with checking final statuses - return bool(job.status.succeeded) - except KeyError: - return False - return wait_for(job_wait_accomplished, num_sec=wait)[0] - - def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): - """Waits until pvc gets some particular status. - For example: Bound. - - Args: - namespace: openshift namespace name - name: job name - status: pvc status - wait: stop waiting after "wait" time - Return: True/False - """ - def pvc_wait_status(): - try: - pvc = self.k_api.read_namespaced_persistent_volume_claim(name=name, - namespace=namespace) - return pvc.status.phase == status - except KeyError: - return False - - return wait_for(pvc_wait_status, num_sec=wait)[0] - - def wait_project_exist(self, name, wait=60): - """Checks whether Project exists within some time. - - Args: - name: openshift namespace name - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.o_api.read_project, 'name': name})[0] - - def wait_config_map_exist(self, namespace, name, wait=60): - """Checks whether Config Map exists within some time. - - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_config_map, - 'name': name, - 'namespace': namespace})[0] - - def wait_stateful_set_exist(self, namespace, name, wait=900): - """Checks whether StatefulSet exists within some time. - - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False + def __init__(self, system, raw=None, **kwargs): """ - api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) - read_st = api.read_namespaced_stateful_set - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': read_st, - 'name': name, - 'namespace': namespace})[0] - - def wait_service_exist(self, namespace, name, wait=60): - """Checks whether Service exists within some time. 
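
(Editor's note) A usage sketch for the Pod entity introduced in this hunk: it can be built either from a raw API object or from name plus namespace, and uuid falls back to the name when no raw object is attached. Assumes the get_pod_by_name helper referenced by refresh(); the system object, project and pod names are placeholders:

    pod = Pod(system, name='postgresql-1-abcde', namespace='my-project')
    pod.refresh()              # fetches the raw object via system.get_pod_by_name
    print(pod.uuid, pod.ip)    # uid and pod IP once the raw object is attached
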
- - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_service, - 'name': name, - 'namespace': namespace})[0] - - def wait_endpoints_exist(self, namespace, name, wait=60): - """Checks whether Endpoints exists within some time. - - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_endpoints, - 'name': name, - 'namespace': namespace})[0] - - def wait_route_exist(self, namespace, name, wait=60): - """Checks whether Route exists within some time. - - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.o_api.read_namespaced_route, - 'name': name, - 'namespace': namespace})[0] - - def wait_service_account_exist(self, namespace, name, wait=60): - """Checks whether Service Account exists within some time. - - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_service_account, - 'name': name, - 'namespace': namespace})[0] - - def wait_image_stream_exist(self, namespace, name, wait=60): - """Checks whether Image Stream exists within some time. + Construct a VMWareVirtualMachine instance Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False + system: instance of VMWareSystem + raw: pyVmomi.vim.VirtualMachine object + name: name of VM """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.o_api.read_namespaced_image_stream, - 'name': name, - 'namespace': namespace})[0] + super(Pod, self).__init__(system, raw, **kwargs) + self._name = raw.metadata.name if raw else kwargs.get('name') + self._namespace = raw.metadata.namespace if raw else kwargs.get('namespace') + if not self._name: + raise ValueError("missing required kwarg 'name'") - def wait_role_binding_exist(self, namespace, name, wait=60): - """Checks whether RoleBinding exists within some time. + @property + def _identifying_attrs(self): + return {'name': self._name} - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': auth_api.read_namespaced_role_binding, - 'name': name, - 'namespace': namespace})[0] + @property + def name(self): + return self._name - def wait_secret_exist(self, namespace, name, wait=90): - """Checks whether Secret exists within some time. 
+ @property + def uuid(self): + try: + return str(self.raw.metadata.uid) + except AttributeError: + return self.name - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_secret, - 'name': name, - 'namespace': namespace})[0] + @property + def namespace(self): + return self._namespace - def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): - """Checks whether Persistent Volume Claim exists within some time. + @property + def ip(self): + ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + #self.refresh() + try: + return self.raw.status.podIP + except (AttributeError): + # AttributeError: vm doesn't have an ip address yet + return None - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.k_api.read_namespaced_persistent_volume_claim, - 'name': name, - 'namespace': namespace})[0] + def _get_state(self): + return self.raw.status.phase - def wait_deployment_config_exist(self, namespace, name, wait=600): - """Checks whether Deployment Config exists within some time. + def is_stateful_set(self, namespace, name): + """Checks whether passed name belongs to Stateful Sets in appropriate namespace Args: + namespace: project(namespace) name name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False Return: True/False """ - read_dc = self.o_api.read_namespaced_deployment_config - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': read_dc, - 'name': name, - 'namespace': namespace})[0] + return name in self.list_stateful_set_names(namespace=namespace) - def wait_template_exist(self, namespace, name, wait=60): - """Checks whether Template exists within some time. 
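
(Editor's note) On the Pod state handling in this hunk: _get_state hands back the raw pod phase, which state_map then normalizes onto wrapanapi's VmState constants. A small sketch of that lookup, assuming the phase string is lower-cased before the lookup (the sample phase is arbitrary):

    from wrapanapi.entities import VmState

    state_map = {
        'pending': VmState.PENDING,
        'running': VmState.RUNNING,
        'succeeded': VmState.SUCCEEDED,
        'failed': VmState.FAILED,
        'unknown': VmState.UNKNOWN,
    }

    phase = 'Running'  # e.g. pod.status.phase
    state = state_map.get(phase.lower(), VmState.UNKNOWN)
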
+ def is_deployment_config(self, namespace, name): + """Checks whether passed name belongs to deployment configs in appropriate namespace Args: + namespace: project(namespace) name name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.o_api.read_namespaced_template, - 'name': name, - 'namespace': namespace})[0] - - def _does_exist(self, func, **kwargs): - try: - func(**kwargs) - return True - except ApiException as e: - self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) - return False - - def _restore_missing_project_role_bindings(self, namespace): - """Fixes one of issues in Openshift REST API - create project doesn't add necessary roles to default sa, probably bug, this is workaround - - Args: - namespace: openshift namespace where roles are absent - Return: None - """ - # adding builder role binding - auth_api = self.ociclient.AuthorizationOpenshiftIoV1Api(api_client=self.oapi_client) - builder_role = self.kclient.V1ObjectReference(name='system:image-builder') - builder_sa = self.kclient.V1ObjectReference(name='builder', - kind='ServiceAccount', - namespace=namespace) - builder_role_binding_name = self.kclient.V1ObjectMeta(name='builder-binding') - builder_role_binding = self.ociclient.V1RoleBinding(role_ref=builder_role, - subjects=[builder_sa], - metadata=builder_role_binding_name) - auth_api.create_namespaced_role_binding(namespace=namespace, body=builder_role_binding) - - # adding deployer role binding - deployer_role = self.kclient.V1ObjectReference(name='system:deployer') - deployer_sa = self.kclient.V1ObjectReference(name='deployer', - kind='ServiceAccount', - namespace=namespace) - deployer_role_binding_name = self.kclient.V1ObjectMeta(name='deployer-binding') - deployer_role_binding = self.ociclient.V1RoleBinding(role_ref=deployer_role, - subjects=[deployer_sa], - metadata=deployer_role_binding_name) - auth_api.create_namespaced_role_binding(namespace=namespace, body=deployer_role_binding) - - # adding admin role binding - admin_role = self.kclient.V1ObjectReference(name='admin') - admin_user = self.kclient.V1ObjectReference(name='admin', - kind='User', - namespace=namespace) - admin_role_binding_name = self.kclient.V1ObjectMeta(name='admin-binding') - admin_role_binding = self.ociclient.V1RoleBinding(role_ref=admin_role, - subjects=[admin_user], - metadata=admin_role_binding_name) - auth_api.create_namespaced_role_binding(namespace=namespace, body=admin_role_binding) - - # adding image-puller role binding - puller_role = self.kclient.V1ObjectReference(name='system:image-puller') - group_name = 'system:serviceaccounts:{proj}'.format(proj=namespace) - puller_group = self.kclient.V1ObjectReference(name=group_name, - kind='SystemGroup', - namespace=namespace) - role_binding_name = self.kclient.V1ObjectMeta(name='image-puller-binding') - puller_role_binding = self.ociclient.V1RoleBinding(role_ref=puller_role, - subjects=[puller_group], - metadata=role_binding_name) - auth_api.create_namespaced_role_binding(namespace=namespace, body=puller_role_binding) - - def delete_project(self, name, wait=300): - """Removes project(namespace) and all entities in it. 
- - Args: - name: project name - wait: within this time project should disappear - Return: None - """ - self.logger.info("removing project %s", name) - if self.does_project_exist(name=name): - self.o_api.delete_project(name=name) - try: - wait_for(lambda name: not self.does_project_exist(name=name), num_sec=wait, - func_kwargs={'name': name}) - except TimedOutError: - raise TimedOutError('project {n} was not removed within {w} sec'.format(n=name, - w=wait)) + return name in self.list_deployment_config_names(namespace=namespace) def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. @@ -1200,544 +204,276 @@ def check_scale_value(): self.logger.info("scaling entity %s to %s replicas", name, replicas) wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) - def get_project_by_name(self, project_name): - """Returns only the selected Project object""" - return next(proj for proj in self.list_project() if proj.metadata.name == project_name) - - def get_scc(self, name): - """Returns Security Context Constraint by name - - Args: - name: security context constraint name - Returns: security context constraint object - """ - return self.security_api.read_security_context_constraints(name) - - def create_scc(self, body): - """Creates Security Context Constraint from passed structure. - Main aim is to create scc from read and parsed yaml file. - - Args: - body: security context constraint structure - Returns: security context constraint object - """ - raw_scc = self.rename_structure(body) - if raw_scc.get('api_version') == 'v1': - # there is inconsistency between api and some scc files. v1 is not accepted by api now - raw_scc.pop('api_version') - scc = self.ociclient.V1SecurityContextConstraints(**raw_scc) - return self.security_api.create_security_context_constraints(body=scc) + def start(self): + self.logger.info("starting vm/project %s", self.name) + if self.does_project_exist(self.name): + for pod in self.get_required_pods(self.name): + self.scale_entity(name=pod, namespace=self.name, replicas=1) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) - def append_sa_to_scc(self, scc_name, namespace, sa): - """Appends Service Account to respective Security Constraint + def stop(self): + """Stops a vm. 
- Args: - scc_name: security context constraint name - namespace: service account's namespace - sa: service account's name - Returns: updated security context constraint object + Args: + vm_name: name of the vm to be stopped + Returns: whether vm action has been initiated properly """ - user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, - usr=sa) - if self.get_scc(scc_name).users is None: - # ocp 3.6 has None for users if there is no sa in it - update_scc_cmd = [ - {"op": "add", - "path": "/users", - "value": [user]}] + self.logger.info("stopping vm/project %s", self.name) + if self.does_project_exist(self.name): + for pod in self.get_required_pods(self.name): + self.scale_entity(name=pod, namespace=self.name, replicas=0) else: - update_scc_cmd = [ - {"op": "add", - "path": "/users/-", - "value": user}] - self.logger.debug("adding user %r to scc %r", user, scc_name) - return self.security_api.patch_security_context_constraints(name=scc_name, - body=update_scc_cmd) + raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) - def remove_sa_from_scc(self, scc_name, namespace, sa): - """Removes Service Account from respective Security Constraint - - Args: - scc_name: security context constraint name - namespace: service account's namespace - sa: service account's name - Returns: updated security context constraint object - """ - user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, - usr=sa) - # json patch's remove works only with indexes. so we have to figure out index - try: - index = next(val[0] for val in enumerate(self.get_scc(scc_name).users) - if val[1] == user) - except StopIteration: - raise ValueError("No such sa {} in scc {}".format(user, scc_name)) - update_scc_cmd = [ - {"op": "remove", - "path": "/users/{}".format(index)}] - self.logger.debug("removing user %r from scc %s with index %s", user, scc_name, index) - return self.security_api.patch_security_context_constraints(name=scc_name, - body=update_scc_cmd) - - def is_vm_running(self, vm_name, running_pods=()): - """Emulates check is vm(appliance) up and running + def restart(self): + raise NotImplementedError - Args: - vm_name: (str) project(namespace) name - running_pods: (list) checks only passed number of pods. otherwise, default set. 
- Return: True/False - """ - if not self.does_vm_exist(vm_name): - return False - self.logger.info("checking all pod statuses for vm name %s", vm_name) + def delete(self): + self.ocp_client.resources.get(api_version='v1', kind='Pod').delete(name=self.name, + namespace=self.namespace) + def refresh(self): + self.raw = self.system.get_pod_by_name(name=self.name, namespace=self.namespace).raw + return self.raw - for pod_name in running_pods or self.get_required_pods(vm_name): - if self.is_pod_running(namespace=vm_name, name=pod_name): - continue - else: - return False + def cleanup(self): + return self.delete() - # todo: check url is available + db is accessable - return True - def list_deployment_config_names(self, namespace): - """Extracts and returns list of Deployment Config names - Args: - namespace: project(namespace) name - Return: (list) deployment config names - """ - dcs = self.o_api.list_namespaced_deployment_config(namespace=namespace) - return [dc.metadata.name for dc in dcs.items] + @property + def creation_time(self): + """Detect the vm_creation_time either via uptime if non-zero, or by last boot time - def list_stateful_set_names(self, namespace): - """Returns list of Stateful Set names + The API provides no sensible way to actually get this value. The only way in which + vcenter API MAY have this is by filtering through events - Args: - namespace: project(namespace) name - Return: (list) stateful set names + Return tz-naive datetime object """ - st_api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) - sts = st_api.list_namespaced_stateful_set(namespace=namespace) - return [st.metadata.name for st in sts.items] - - def is_deployment_config(self, namespace, name): - """Checks whether passed name belongs to deployment configs in appropriate namespace + raise NotImplementedError - Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.list_deployment_config_names(namespace=namespace) - def is_stateful_set(self, namespace, name): - """Checks whether passed name belongs to Stateful Sets in appropriate namespace +@reconnect(unauthenticated_error_handler) +class Openshift(System, VmMixin): - Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.list_stateful_set_names(namespace=namespace) + _stats_available = { + 'num_container': lambda self: len(self.list_container()), + 'num_pod': lambda self: len(self.list_pods()), + 'num_service': lambda self: len(self.list_service()), + 'num_replication_controller': + lambda self: len(self.list_replication_controller()), + 'num_image': lambda self: len(self.list_image_id()), + 'num_node': lambda self: len(self.list_node()), + 'num_image_registry': lambda self: len(self.list_image_registry()), + 'num_project': lambda self: len(self.list_project()), + 'num_route': lambda self: len(self.list_route()), + 'num_template': lambda self: len(self.list_template()) + } - def does_project_exist(self, name): - """Checks whether Project exists. 
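
(Editor's note) On the dynamic client call used in Pod.delete above: with openshift.dynamic, a resource type is first looked up by api_version and kind, then read, listed or deleted by name and namespace. The same pattern for fetching instead of deleting, as a sketch (pod and project names are placeholders):

    v1_pods = ocp_client.resources.get(api_version='v1', kind='Pod')

    pod = v1_pods.get(name='postgresql-1-abcde', namespace='my-project')   # single pod
    pods = v1_pods.get(namespace='my-project')                             # list in one project
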
+ stream2template_tags_mapping59 = { + 'cfme-openshift-httpd': {'tag': 'HTTPD_IMG_TAG', 'url': 'HTTPD_IMG_NAME'}, + 'cfme-openshift-app': {'tag': 'BACKEND_APPLICATION_IMG_TAG', + 'url': 'BACKEND_APPLICATION_IMG_NAME'}, + 'cfme-openshift-app-ui': {'tag': 'FRONTEND_APPLICATION_IMG_TAG', + 'url': 'FRONTEND_APPLICATION_IMG_NAME'}, + 'cfme-openshift-embedded-ansible': {'tag': 'ANSIBLE_IMG_TAG', 'url': 'ANSIBLE_IMG_NAME'}, + 'cfme-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, + 'cfme-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, + } - Args: - name: openshift namespace name - Return: True/False - """ - return self._does_exist(func=self.o_api.read_project, name=name) + stream2template_tags_mapping58 = { + 'cfme58-openshift-app': {'tag': 'APPLICATION_IMG_TAG', 'url': 'APPLICATION_IMG_NAME'}, + 'cfme58-openshift-memcached': {'tag': 'MEMCACHED_IMG_TAG', 'url': 'MEMCACHED_IMG_NAME'}, + 'cfme58-openshift-postgresql': {'tag': 'POSTGRESQL_IMG_TAG', 'url': 'POSTGRESQL_IMG_NAME'}, + } - def is_vm_stopped(self, vm_name): - """Check whether vm isn't running. - There is no such state stopped for vm in openshift therefore - it just checks that vm isn't running + scc_user_mapping59 = ( + {'scc': 'anyuid', 'user': 'cfme-anyuid'}, + {'scc': 'anyuid', 'user': 'cfme-orchestrator'}, + {'scc': 'anyuid', 'user': 'cfme-httpd'}, + {'scc': 'privileged', 'user': 'cfme-privileged'}, + ) - Args: - vm_name: project name - Return: True/False - """ - pods = self.k_api.list_namespaced_pod(namespace=vm_name).items - if pods: - self.logger.info(("some pods are still " - "running: {}").format([pod.metadata.name for pod in pods])) - return not bool(pods) + scc_user_mapping58 = ( + {'scc': 'anyuid', 'user': 'cfme-anyuid'}, + {'scc': 'privileged', 'user': 'default'}, + ) - def wait_vm_running(self, vm_name, num_sec=900): - """Checks whether all project pods are in ready state. + default_namespace = 'openshift' + required_project_pods = ('httpd', 'memcached', 'postgresql', + 'cloudforms', 'cloudforms-backend') + required_project_pods58 = ('memcached', 'postgresql', 'cloudforms') + not_required_project_pods = ('cloudforms-backend', 'ansible') - Args: - vm_name: project name - num_sec: all pods should get ready for this time then - True, otherwise False - Return: True/False - """ - wait_for(self.is_vm_running, [vm_name], num_sec=num_sec) - return True + can_suspend = True + can_pause = False - def wait_vm_stopped(self, vm_name, num_sec=600): - """Checks whether all project pods are stopped. 
+ def __init__(self, hostname, protocol="https", port=8443, debug=False, + verify_ssl=False, **kwargs): + super(Openshift, self).__init__(kwargs) + self.hostname = hostname + self.protocol = protocol + self.port = port + self.username = kwargs.get('username', '') + self.password = kwargs.get('password', '') + self.base_url = kwargs.get('base_url', None) + self.token = kwargs.get('token', '') + self.auth = self.token if self.token else (self.username, self.password) + self.debug = debug + self.verify_ssl = verify_ssl + self.ssl_ca_cert = kwargs.get('ssl_ca_cert', '') - Args: - vm_name: project name - num_sec: all pods should not be ready for this time then - True, otherwise False - Return: True/False - """ - wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec) - return True + self.k8s_client = self._k8s_client_connect() - def current_ip_address(self, vm_name): - """Tries to retrieve project's external ip + self.ocp_client = DynamicClient(self.k8s_client) - Args: - vm_name: project name - Return: ip address or None - """ - try: - common_svc = self.k_api.read_namespaced_service(name='common-service', - namespace=vm_name) - return common_svc.spec.external_i_ps[0] - except Exception: - return None + def _k8s_client_connect(self): - def is_vm_suspended(self, vm_name): - """There is no such state in openshift + aToken = self.token - Args: - vm_name: project name - Return: False - """ - return False + url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, + port=self.port) - def in_steady_state(self, vm_name): - """Return whether the specified virtual machine is in steady state + aConfiguration = kubeclient.Configuration() - Args: - vm_name: VM name - Returns: True/False - """ - return (self.is_vm_running(vm_name) - or self.is_vm_stopped(vm_name) - or self.is_vm_suspended(vm_name)) + aConfiguration.host = url - @property - def can_rename(self): - return hasattr(self, "rename_vm") + # Security part. 
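        # The Configuration object below carries the TLS settings and the bearer token.
        # As a hedged aside, when a kubeconfig file is available on disk,
        # kubeclientconfig.new_client_from_config() could build an equivalent ApiClient,
        # but the explicit Configuration built here also covers token-only logins.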
+ aConfiguration.verify_ssl = self.verify_ssl + aConfiguration.ssl_ca_cert = self.ssl_ca_cert - def list_project_names(self): - """Obtains project names + aConfiguration.api_key = {"authorization": "Bearer " + aToken} - Returns: list of project names - """ - projects = self.o_api.list_project().items - return [proj.metadata.name for proj in projects] + # Create a ApiClient with our config + return kubeclient.ApiClient(aConfiguration) - list_vms = list_vm = list_project_names + # def _connect(self): + # + # self.dyn_client = DynamicClient(self.k8s_client) - def get_appliance_version(self, vm_name): - """Returns appliance version if it is possible + # self.ociclient = ociclient + # self.kclient = kubeclient + # self.oapi_client = ociclient.ApiClient(config=config) + # self.kapi_client = kubeclient.ApiClient(config=config) + # self.o_api = ociclient.OapiApi(api_client=self.oapi_client) + # self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) + # self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) + # self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api - Args: - vm_name: the openshift project name of the podified appliance - Returns: version + @property + def _identifying_attrs(self): """ - try: - proj = self.o_api.read_project(vm_name) - description = proj.metadata.annotations['openshift.io/description'] - return Version(TemplateName.parse_template(description).version) - except (ApiException, KeyError, ValueError): - try: - return Version(TemplateName.parse_template(vm_name).version) - except ValueError: - return None - - def delete_template(self, template_name, namespace='openshift'): - """Deletes template - - Args: - template_name: stored openshift template name - namespace: project name - Returns: result of delete operation + Return a dict with key, value pairs for each kwarg that is used to + uniquely identify this system. 
""" - options = self.kclient.V1DeleteOptions() - return self.o_api.delete_namespaced_template(name=template_name, namespace=namespace, - body=options) - - def get_meta_value(self, instance, key): - raise NotImplementedError( - 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) - - def set_meta_value(self, instance, key): - raise NotImplementedError( - 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) - - def vm_status(self, vm_name): - """Returns current vm/appliance state + return {'hostname': self.hostname, 'port': self.port} - Args: - vm_name: the openshift project name of the podified appliance - Returns: up/down or exception if vm doesn't exist - """ - if not self.does_vm_exist(vm_name): - raise ValueError("Vm {} doesn't exist".format(vm_name)) - return 'up' if self.is_vm_running(vm_name) else 'down' + def info(self): + url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, + port=self.port) + return "rhopenshift {}".format(url) - def vm_creation_time(self, vm_name): - """Returns time when vm/appliance was created + @property + def can_suspend(self): + return True - Args: - vm_name: the openshift project name of the podified appliance - Return: datetime obj - """ - if not self.does_vm_exist(vm_name): - raise ValueError("Vm {} doesn't exist".format(vm_name)) - projects = self.o_api.list_project().items - project = next(proj for proj in projects if proj.metadata.name == vm_name) - return project.metadata.creation_timestamp + @property + def can_pause(self): + return False - @staticmethod - def _progress_log_callback(logger, source, destination, progress): - logger.info("Provisioning progress {}->{}: {}".format( - source, destination, str(progress))) + def get_ocp_obj_list(self, resource_type, namespace): - def vm_hardware_configuration(self, vm_name): - """Collects project's cpu and ram usage + return self.ocp_client.resources.get(api_version='v1', kind=resource_type).get( + namespace=namespace) - Args: - vm_name: openshift's data - Returns: collected data - """ - hw_config = {'ram': 0, - 'cpu': 0} - if not self.does_vm_exist(vm_name): - return hw_config - - proj_pods = self.k_api.list_namespaced_pod(vm_name) - for pod in proj_pods.items: - for container in pod.spec.containers: - cpu = container.resources.requests['cpu'] - hw_config['cpu'] += float(cpu[:-1]) / 1000 if cpu.endswith('m') else float(cpu) - - ram = container.resources.requests['memory'] - if ram.endswith('Mi'): - hw_config['ram'] += float(ram[:-2]) - elif ram.endswith('Gi'): - hw_config['ram'] += float(ram[:-2]) * 1024 - elif ram.endswith('Ki'): - hw_config['ram'] += float(ram[:-2]) / 1024 - else: - hw_config['ram'] += ram - return hw_config - - def usage_and_quota(self): - installed_ram = 0 - installed_cpu = 0 - used_ram = 0 - used_cpu = 0 - # todo: finish this method later - return { - # RAM - 'ram_used': used_ram, - 'ram_total': installed_ram, - 'ram_limit': None, - # CPU - 'cpu_used': used_cpu, - 'cpu_total': installed_cpu, - 'cpu_limit': None, - } - - def get_required_pods(self, vm_name): - """Provides list of pods which should be present in appliance + def get_ocp_obj(self, resource_type, name, namespace): + ocp_obj = None + for item in self.get_ocp_obj_list(resource_type=resource_type, namespace=namespace).items: + if item.metadata.name == name: + ocp_obj = item + break + return ocp_obj - Args: - vm_name: openshift project name - Returns: list + def get_pod_by_name(self, name, namespace=None): """ - version = 
self.get_appliance_version(vm_name) - if version and version < '5.9': - return self.required_project_pods58 - else: - return self.required_project_pods + Get a VM based on name - def get_ip_address(self, vm_name, timeout=600): - """ Returns the IP address for the selected appliance. + Passes args to find_vms to search for matches Args: - vm_name: The name of the vm to obtain the IP for. - timeout: The IP address wait timeout. - Returns: A string containing the first found IP that isn't the device. - """ - try: - ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), - fail_condition=None, - delay=5, - num_sec=timeout, - message="get_ip_address from openshift") - except TimedOutError: - ip_address = None - return ip_address + name (str) + namespace (str): Openshift namespace - def disconnect(self): - pass + Returns: + single PodInstance object - def get_appliance_tags(self, name): - """Returns appliance tags stored in appropriate config map if it exists. - - Args: - name: appliance project name - Returns: dict with tags and urls + Raises: + ValueError -- no name provided """ - try: - read_data = self.k_api.read_namespaced_config_map(name='image-repo-data', - namespace=name) - return json.loads(read_data.data['tags']) - except ApiException: - return {} + pod = self.get_ocp_obj(resource_type='Pod', name=name, namespace=namespace) - def get_appliance_url(self, name): - """Returns appliance url assigned by Openshift + return Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) - Args: - name: appliance project name - Returns: url or None - """ - try: - route = self.o_api.list_namespaced_route(name) - return route.items[0].spec.host - except (ApiException, IndexError): - return None + get_vm = get_pod_by_name - def get_appliance_uuid(self, name): - """Returns appliance uuid assigned by Openshift + def create_vm(self, name, **kwargs): + raise NotImplementedError('This function has not yet been implemented.') - Args: - name: appliance project name - Returns: uuid + def list_pods(self, namespace=None): """ - return self.get_project_by_name(name).metadata.uid - - def is_appliance(self, name): - """Checks whether passed vm/project is appliance + List the Pods on system. Pods are treated as 'VMs' . 
+ If project_name is passed, only the pods under the selected project will be returned Args: - name: appliance project name - Returns: True/False + namespace (str): Openshift namespace + + Returns: + list of wrapanapi.entities.Vm """ - return bool(self.get_appliance_tags(name)) + return self.get_ocp_obj_list(resource_type='Pod', namespace=namespace) - def find_job_pods(self, namespace, name): - """Finds and returns all remaining job pods + list_vms = list_pods - Args: - namespace: project(namespace) name - name: job name - Returns: list of pods - """ - pods = [] - for pod in self.list_pods(namespace=namespace): - if pod.metadata.labels.get('job-name', '') == name: - pods.append(pod) - return pods + def list_project(self, namespace=None): - def read_pod_log(self, namespace, name): - """Reads and returns pod log + return self.get_ocp_obj_list(resource_type='Project', namespace=namespace) - Args: - namespace: project(namespace) name - name: pod name - Returns: list of pods - """ - return self.k_api.read_namespaced_pod_log(name=name, namespace=namespace) + def list_routes(self, namespace=None): - def delete_pod(self, namespace, name, options=None): - """Tries to remove passed pod + return self.get_ocp_obj_list(resource_type='Route', namespace=namespace) - Args: - namespace: project(namespace) name - name: pod name - options: delete options like force delete and etc - Returns: Pod - """ - return self.k_api.delete_namespaced_pod(namespace=namespace, name=name, - body=options or self.kclient.V1DeleteOptions()) + def list_image_streams(self, namespace=None): - def is_pod_running(self, namespace, name): - """Checks whether pod is running + return self.get_ocp_obj_list(resource_type='ImageStreamList', namespace=namespace) - Args: - namespace: (str) project(namespace) name - name: (str) pod name - Return: True/False - """ - self.logger.info("checking pod status %s", name) + def list_image_stream_imagess(self, namespace=None): - if self.is_deployment_config(name=name, namespace=namespace): - dc = self.o_api.read_namespaced_deployment_config(name=name, namespace=namespace) - status = dc.status.ready_replicas - elif self.is_stateful_set(name=name, namespace=namespace): - pods = self.k_api.list_namespaced_pod(namespace=namespace, - label_selector='name={n}'.format(n=name)) - pod_stats = [pod.status.container_statuses[-1].ready for pod in pods.items] - status = all(pod_stats) - else: - raise ValueError("No such pod name among StatefulSets or Stateless Pods") + return self.get_ocp_obj_list(resource_type='ImageStreamImageList', namespace=namespace) - if status and int(status) > 0: - self.logger.debug("pod %s looks up and running", name) - return True - else: - self.logger.debug("pod %s isn't up yet", name) - return False + def list_templates(self, namespace=None): + return self.get_ocp_obj_list(resource_type='Template', namespace=namespace) - def wait_pod_running(self, namespace, name, num_sec=300): - """Waits for pod to switch to ready state + def list_deployment_config(self, namespace=None): + return self.get_ocp_obj_list(resource_type='DeploymentConfig', namespace=namespace) - Args: - namespace: project name - name: pod name - num_sec: all pods should get ready for this time then - True, otherwise TimeoutError - Return: True/False - """ - wait_for(self.is_pod_running, [namespace, name], fail_condition=False, num_sec=num_sec) - return True + def list_services(self, namespace=None): + return self.get_ocp_obj_list(resource_type='Service', namespace=namespace) - def is_pod_stopped(self, namespace, name): - 
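# A short usage sketch, assuming `ocp` is a connected system object: pods double as the
# 'VMs' of this provider, so the generic entity API applies to them.  The pod and
# project names below are made up.
vm = ocp.get_vm(name='postgresql-1-abcde', namespace='my-appliance')
print(vm.name, vm.namespace)
vm.refresh()     # re-read the raw object from the API
vm.delete()      # remove the pod through the v1 Pod resource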
"""Check whether pod isn't running. + def list_replication_controller(self, namespace=None): + return self.get_ocp_obj_list(resource_type='ReplicationController', namespace=namespace) - Args: - namespace: (str) project(namespace) name - name: (str) pod name - Return: True/False - """ - pods = self.k_api.list_namespaced_pod(namespace=namespace).items - return not bool([pod for pod in pods if name == pod.metadata.name]) + def list_node(self, namespace=None): + return self.get_ocp_obj_list(resource_type='Node', namespace=namespace) - def wait_pod_stopped(self, namespace, name, num_sec=300): - """Waits for pod to stop + def list_persistent_volume(self, namespace=None): + return self.get_ocp_obj_list(resource_type='PersistentVolume', namespace=namespace) - Args: - namespace: project name - name: pod name - num_sec: all pods should disappear - True, otherwise TimeoutError - Return: True/False - """ - wait_for(self.is_pod_stopped, [namespace, name], num_sec=num_sec) - return True + def list_container(self, namespace=None): + return self.get_ocp_obj_list(resource_type='', namespace=namespace) - def run_command(self, namespace, name, cmd, **kwargs): - """Connects to pod and tries to run + def list_image_registry(self, namespace=None): + return self.get_ocp_obj_list(resource_type='', namespace=namespace) + + def find_vms(self, *args, **kwargs): + raise NotImplementedError - Args: - namespace: (str) project name - name: (str) pod name - cmd: (list) command to run - Return: command output - """ - # there are some limitations and this code isn't robust enough due to - # https://github.com/kubernetes-client/python/issues/58 - return self.k_api.connect_post_namespaced_pod_exec(namespace=namespace, - name=name, - command=cmd, - stdout=True, - stderr=True, - **kwargs) From d4dae6192a486ffee64394751314d14d33c61bd9 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Thu, 13 Jun 2019 10:44:22 -0400 Subject: [PATCH 4/9] more changes --- wrapanapi/entities/__init__.py | 1 + wrapanapi/entities/project.py | 85 +++++++++++++++ wrapanapi/systems/openshift.py | 186 +++++++++++++++++++++++++-------- 3 files changed, 228 insertions(+), 44 deletions(-) create mode 100644 wrapanapi/entities/project.py diff --git a/wrapanapi/entities/__init__.py b/wrapanapi/entities/__init__.py index 79cd1e8c..edae9d21 100644 --- a/wrapanapi/entities/__init__.py +++ b/wrapanapi/entities/__init__.py @@ -7,6 +7,7 @@ from .vm import Vm, VmState, VmMixin from .instance import Instance from .physical_container import PhysicalContainer +from .project import Project, ProjectMixin from .stack import Stack, StackMixin from .server import Server, ServerState diff --git a/wrapanapi/entities/project.py b/wrapanapi/entities/project.py new file mode 100644 index 00000000..4308f314 --- /dev/null +++ b/wrapanapi/entities/project.py @@ -0,0 +1,85 @@ +""" +wrapanapi.entities.project + +Methods/classes pertaining to performing actions on a template +""" +import six + +from abc import ABCMeta, abstractmethod, abstractproperty + +from wrapanapi.entities.base import Entity, EntityMixin +from wrapanapi.exceptions import MultipleItemsError, NotFoundError + + +class Project(six.with_metaclass(ABCMeta, Entity)): + """ + Represents a project on a system + """ + @abstractproperty + def get_quota(self): + """ + Deploy a VM/instance with name 'vm_name' using this template + + Returns: an implementation of a BaseVM object + """ + + +class ProjectMixin(six.with_metaclass(ABCMeta, EntityMixin)): + """ + Defines methods a wrapanapi.systems.System that manages Projects 
should have + """ + @abstractmethod + def get_project(self, name, **kwargs): + """ + Get template from system with name 'name' + + This should return only ONE matching entity. If multiple entities match + the criteria, a MultipleItemsError should be raised + + Returns: + wrapanapi.entities.Template if it exists + Raises: + wrapanapi.exceptions.MultipleItemsError if multiple matches are found + """ + + @abstractmethod + def create_project(self, name, **kwargs): + """ + Create template on system with name 'name' + + Returns: + wrapanapi.entities.Template for newly created templated + """ + + @abstractmethod + def list_project(self, **kwargs): + """ + List templates on system + + Returns: + list of wrapanapi.entities.Template + """ + + @abstractmethod + def find_projects(self, name, **kwargs): + """ + Find templates on system based on name or other filters in kwargs + + Should return an empty list if no matches were found + + Returns: + list of wrapanapi.entities.Template for matches found + """ + + def does_project_exist(self, name): + """ + Checks if a template with 'name' exists on the system + + If multiple templates with the same name exists, this still returns 'True' + """ + try: + return bool(self.get_project(name)) + except MultipleItemsError: + return True + except NotFoundError: + return False diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index a4ace867..db196a1d 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -11,6 +11,7 @@ import inflection import six +from cached_property import cached_property from kubernetes import client as kubeclient from kubernetes import config as kubeclientconfig from openshift.dynamic import DynamicClient @@ -19,7 +20,8 @@ from openshift import client as ociclient from wait_for import TimedOutError, wait_for -from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin, VmState) +from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin, VmState, ProjectMixin, + Project) from wrapanapi.systems.base import System @@ -88,6 +90,68 @@ def wrap(*args, **kwargs): return wrap +class Project(Project): + + def __init__(self, system, raw=None, **kwargs): + """ + Construct a VMWareVirtualMachine instance + + Args: + system: instance of VMWareSystem + raw: pyVmomi.vim.VirtualMachine object + name: name of VM + """ + super(Project, self).__init__(system, raw, **kwargs) + self._name = raw.metadata.name if raw else kwargs.get('name') + if not self._name: + raise ValueError("missing required kwarg 'name'") + self.v1_project = self.system.ocp_client.resources.get( + api_version='project.openshift.io/v1', kind='Project') + + @property + def get_quota(self): + return self.system.ocp_client.resources.get(api_version='v1', kind='ResourceQuota').get( + namespace=self.name) + + @property + def _identifying_attrs(self): + return {'name': self._name} + + @property + def name(self): + return self._name + + @property + def uuid(self): + try: + return str(self.raw.metadata.uid) + except AttributeError: + return self.name + + @property + def ip(self): + raise NotImplementedError + + def start(self): + raise NotImplementedError + + def stop(self): + raise NotImplementedError + + def restart(self): + raise NotImplementedError + + def delete(self): + self.v1_project.delete(name=self.name) + + def refresh(self): + self.raw = self.system.get_project(name=self.name).raw + return self.raw + + def cleanup(self): + return self.delete() + + class Pod(Vm): state_map = { 'pending': VmState.PENDING, @@ -111,6 
+175,7 @@ def __init__(self, system, raw=None, **kwargs): self._namespace = raw.metadata.namespace if raw else kwargs.get('namespace') if not self._name: raise ValueError("missing required kwarg 'name'") + self.v1_pod = self.system.ocp_client.resources.get(api_version='v1', kind='Pod') @property def _identifying_attrs(self): @@ -230,17 +295,15 @@ def restart(self): raise NotImplementedError def delete(self): - self.ocp_client.resources.get(api_version='v1', kind='Pod').delete(name=self.name, - namespace=self.namespace) + self.v1_pod.delete(name=self.name, namespace=self.namespace) + def refresh(self): - self.raw = self.system.get_pod_by_name(name=self.name, namespace=self.namespace).raw + self.raw = self.system.get_pod(name=self.name, namespace=self.namespace).raw return self.raw def cleanup(self): return self.delete() - - @property def creation_time(self): """Detect the vm_creation_time either via uptime if non-zero, or by last boot time @@ -254,7 +317,7 @@ def creation_time(self): @reconnect(unauthenticated_error_handler) -class Openshift(System, VmMixin): +class Openshift(System, VmMixin, ProjectMixin): _stats_available = { 'num_container': lambda self: len(self.list_container()), @@ -323,6 +386,8 @@ def __init__(self, hostname, protocol="https", port=8443, debug=False, self.verify_ssl = verify_ssl self.ssl_ca_cert = kwargs.get('ssl_ca_cert', '') + self.ociclient = ociclient + self.k8s_client = self._k8s_client_connect() self.ocp_client = DynamicClient(self.k8s_client) @@ -373,6 +438,19 @@ def info(self): port=self.port) return "rhopenshift {}".format(url) + @cached_property + def v1_project(self): + return self.ocp_client.resources.get(api_version='project.openshift.io/v1', kind='Project') + + @cached_property + def v1_pod(self): + return self.ocp_client.resources.get(api_version='v1', kind='Pod') + + @cached_property + def v1_route(self): + return self.ocp_client.resources.get(api_version='route.openshift.io/v1', kind='Route') + + @property def can_suspend(self): return True @@ -381,20 +459,15 @@ def can_suspend(self): def can_pause(self): return False - def get_ocp_obj_list(self, resource_type, namespace): - - return self.ocp_client.resources.get(api_version='v1', kind=resource_type).get( - namespace=namespace) - - def get_ocp_obj(self, resource_type, name, namespace): + def get_ocp_obj(self, resource_type, name, namespace=None): ocp_obj = None - for item in self.get_ocp_obj_list(resource_type=resource_type, namespace=namespace).items: + for item in getattr(self, resource_type).get(namespace=namespace).items: if item.metadata.name == name: ocp_obj = item break return ocp_obj - def get_pod_by_name(self, name, namespace=None): + def get_pod(self, name, namespace=None): """ Get a VM based on name @@ -414,7 +487,7 @@ def get_pod_by_name(self, name, namespace=None): return Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) - get_vm = get_pod_by_name + get_vm = get_pod def create_vm(self, name, **kwargs): raise NotImplementedError('This function has not yet been implemented.') @@ -430,49 +503,74 @@ def list_pods(self, namespace=None): Returns: list of wrapanapi.entities.Vm """ - return self.get_ocp_obj_list(resource_type='Pod', namespace=namespace) + return [ + Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) + for pod in self.v1_pod.get(namespace=namespace).items] list_vms = list_pods - def list_project(self, namespace=None): - - return self.get_ocp_obj_list(resource_type='Project', namespace=namespace) - - def 
list_routes(self, namespace=None): - - return self.get_ocp_obj_list(resource_type='Route', namespace=namespace) + def create_project(self, name, description=None, **kwargs): - def list_image_streams(self, namespace=None): + proj = self.ociclient.V1Project() + proj.metadata = {'name': name, 'annotations': {}} + if description: + proj.metadata['annotations'] = {'openshift.io/description': description} - return self.get_ocp_obj_list(resource_type='ImageStreamList', namespace=namespace) + self.logger.info("creating new project with name %s", name) - def list_image_stream_imagess(self, namespace=None): + project = self.v1_project.create(body=proj) - return self.get_ocp_obj_list(resource_type='ImageStreamImageList', namespace=namespace) + return Project(system=self, name=project.metadata.name, raw=project) - def list_templates(self, namespace=None): - return self.get_ocp_obj_list(resource_type='Template', namespace=namespace) + def find_projects(self, *args, **kwargs): + raise NotImplementedError - def list_deployment_config(self, namespace=None): - return self.get_ocp_obj_list(resource_type='DeploymentConfig', namespace=namespace) + def get_project(self, name): + project = self.get_ocp_obj(resource_type='v1_project', name=name) - def list_services(self, namespace=None): - return self.get_ocp_obj_list(resource_type='Service', namespace=namespace) + return Project(system=self, name=project.metadata.name, raw=project) - def list_replication_controller(self, namespace=None): - return self.get_ocp_obj_list(resource_type='ReplicationController', namespace=namespace) + def list_project(self, namespace=None): - def list_node(self, namespace=None): - return self.get_ocp_obj_list(resource_type='Node', namespace=namespace) + return [ + Project(system=self, name=project.metadata.name, raw=project) + for project in self.v1_project.get(namespace=namespace).items] - def list_persistent_volume(self, namespace=None): - return self.get_ocp_obj_list(resource_type='PersistentVolume', namespace=namespace) + def list_routes(self, namespace=None): - def list_container(self, namespace=None): - return self.get_ocp_obj_list(resource_type='', namespace=namespace) + return self.v1_route.get(namespace=namespace) - def list_image_registry(self, namespace=None): - return self.get_ocp_obj_list(resource_type='', namespace=namespace) + # def list_image_streams(self, namespace=None): + # + # return self.get_ocp_obj_list(resource_type='ImageStreamList', namespace=namespace) + # + # def list_image_stream_imagess(self, namespace=None): + # + # return self.get_ocp_obj_list(resource_type='ImageStreamImageList', namespace=namespace) + # + # def list_templates(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='Template', namespace=namespace) + # + # def list_deployment_config(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='DeploymentConfig', namespace=namespace) + # + # def list_services(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='Service', namespace=namespace) + # + # def list_replication_controller(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='ReplicationController', namespace=namespace) + # + # def list_node(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='Node', namespace=namespace) + # + # def list_persistent_volume(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='PersistentVolume', namespace=namespace) + # + # def list_container(self, namespace=None): + # return 
self.get_ocp_obj_list(resource_type='', namespace=namespace) + # + # def list_image_registry(self, namespace=None): + # return self.get_ocp_obj_list(resource_type='', namespace=namespace) def find_vms(self, *args, **kwargs): raise NotImplementedError From 42d7bf28e8022515debdfcd4100c1cbafead9220 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Thu, 27 Jun 2019 11:21:06 -0400 Subject: [PATCH 5/9] All methods added and comments on which have moved in old rhopenshift --- wrapanapi/systems/container/rhopenshift.py | 99 +- wrapanapi/systems/openshift.py | 1519 +++++++++++++++++++- 2 files changed, 1551 insertions(+), 67 deletions(-) diff --git a/wrapanapi/systems/container/rhopenshift.py b/wrapanapi/systems/container/rhopenshift.py index 8400f134..1ca3b47a 100644 --- a/wrapanapi/systems/container/rhopenshift.py +++ b/wrapanapi/systems/container/rhopenshift.py @@ -179,11 +179,13 @@ def _connect(self): self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api + # TODO DONE def info(self): url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, port=self.port) return "rhopenshift {}".format(url) + # TODO DONE def list_route(self, namespace=None): """Returns list of routes""" if namespace: @@ -192,6 +194,7 @@ def list_route(self, namespace=None): routes = self.o_api.list_route_for_all_namespaces().items return routes + # TODO DONE def list_image_streams(self, namespace=None): """Returns list of image streams""" if namespace: @@ -200,10 +203,12 @@ def list_image_streams(self, namespace=None): image_streams = self.o_api.list_image_stream_for_all_namespaces().items return image_streams + # TODO DONE def list_project(self): """Returns list of projects""" return self.o_api.list_project().items + # TODO DONE def list_template(self, namespace=None): """Returns list of templates""" if namespace: @@ -214,11 +219,13 @@ def list_template(self, namespace=None): # fixme: get rid of this mapping list_templates = list_template + # TODO DONE def list_image_stream_images(self): """Returns list of images (Docker registry only)""" return [item for item in self.o_api.list_image().items if item.docker_image_reference is not None] + # TODO DONE def list_deployment_config(self, namespace=None): """Returns list of deployment configs""" if namespace: @@ -227,6 +234,7 @@ def list_deployment_config(self, namespace=None): dc = self.o_api.list_deployment_config_for_all_namespaces().items return dc + # TODO DONE def list_service(self, namespace=None): """Returns list of services.""" if namespace: @@ -235,6 +243,7 @@ def list_service(self, namespace=None): svc = self.k_api.list_service_for_all_namespaces().items return svc + # TODO DONE def list_replication_controller(self, namespace=None): """Returns list of replication controllers""" if namespace: @@ -243,11 +252,13 @@ def list_replication_controller(self, namespace=None): rc = self.k_api.list_replication_controller_for_all_namespaces().items return rc + # TODO DONE def list_node(self): """Returns list of nodes""" nodes = self.k_api.list_node().items return nodes + # TODO DONE def cluster_info(self): """Returns information about the cluster - number of CPUs and memory in GB""" aggregate_cpu, aggregate_mem = 0, 0 @@ -258,11 +269,13 @@ def cluster_info(self): return {'cpu': aggregate_cpu, 'memory': aggregate_mem} + # TODO DONE def list_persistent_volume(self): """Returns list of persistent volumes""" pv = 
self.k_api.list_persistent_volume().items return pv + # TODO DONE def list_pods(self, namespace=None): """Returns list of container groups (pods). If project_name is passed, only the pods under the selected project will be returned""" @@ -272,6 +285,7 @@ def list_pods(self, namespace=None): pods = self.k_api.list_pod_for_all_namespaces().items return pods + # TODO DONE def list_container(self, namespace=None): """Returns list of containers (derived from pods) If project_name is passed, only the containers under the selected project will be returned @@ -279,6 +293,7 @@ def list_container(self, namespace=None): pods = self.list_pods(namespace=namespace) return [pod.spec.containers for pod in pods] + # TODO DONE def list_image_id(self, namespace=None): """Returns list of unique image ids (derived from pods)""" pods = self.list_pods(namespace=namespace) @@ -288,6 +303,7 @@ def list_image_id(self, namespace=None): statuses.append(status) return sorted(set([status.image_id for status in statuses])) + # TODO DONE def list_image_registry(self, namespace=None): """Returns list of image registries (derived from pods)""" pods = self.list_pods(namespace=namespace) @@ -298,6 +314,7 @@ def list_image_registry(self, namespace=None): # returns only the image registry name, without the port number in case of local registry return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) + # TODO DONE def expose_db_ip(self, namespace): """Creates special service in appliance project (namespace) which makes internal appliance db be available outside. @@ -313,6 +330,7 @@ def expose_db_ip(self, namespace): return self.get_ip_address(namespace) + # TODO DONE def deploy_template(self, template, tags=None, password='smartvm', **kwargs): """Deploy a VM from a template @@ -453,6 +471,7 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): # todo: return and print all failed pod details raise + # TODO DONE def create_template_entities(self, namespace, entities): """Creates entities from openshift template. @@ -478,6 +497,7 @@ def create_template_entities(self, namespace, entities): else: self.logger.error("some entity %s isn't present in entity creation list", entity) + # TODO DONE def start_vm(self, vm_name): """Starts a vm. @@ -492,6 +512,7 @@ def start_vm(self, vm_name): else: raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + # TODO DONE def stop_vm(self, vm_name): """Stops a vm. @@ -506,6 +527,7 @@ def stop_vm(self, vm_name): else: raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + # TODO DONE def delete_vm(self, vm_name): """Deletes a vm. @@ -517,6 +539,7 @@ def delete_vm(self, vm_name): self.delete_project(name=vm_name) return True + # TODO DONE def does_vm_exist(self, vm_name): """Does VM exist? @@ -526,6 +549,7 @@ def does_vm_exist(self, vm_name): """ return self.does_project_exist(vm_name) + # TODO DONE @staticmethod def _update_template_parameters(template, **params): """Updates openshift template parameters. @@ -553,6 +577,7 @@ def _update_template_parameters(template, **params): template.parameters = new_parameters return template + # TODO DONE def process_template(self, name, namespace, parameters=None): """Implements template processing mechanism similar to `oc process`. 
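# A hedged reconstruction of the idea behind _update_template_parameters and
# process_template: parameter values are overridden by name before the template is sent
# for processing, mirroring `oc process -p NAME=value`.  The data shapes below are
# simplified placeholders, not the client's real template objects.
def override_parameters(parameters, **overrides):
    """Return a copy of the template parameters with matching names replaced."""
    updated = []
    for param in parameters:
        name = param.get('name')
        if name in overrides:
            param = dict(param, value=overrides[name])
        updated.append(param)
    return updated

# e.g. override_parameters([{'name': 'APPLICATION_DOMAIN', 'value': ''}],
#                          APPLICATION_DOMAIN='cfme.example.com')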
@@ -569,6 +594,7 @@ def process_template(self, name, namespace, parameters=None): return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) + # TODO DONE def process_raw_template(self, body, namespace, parameters=None): """Implements template processing mechanism similar to `oc process`. It does two functions @@ -597,6 +623,7 @@ def process_raw_template(self, body, namespace, parameters=None): processed_template = self.ociclient.V1Template(**updated_data) return processed_template.objects + # TODO DONE def rename_structure(self, struct): """Fixes inconsistency in input/output data of openshift python client methods @@ -626,6 +653,7 @@ def rename_structure(self, struct): else: return struct + # TODO DONE def create_config_map(self, namespace, **kwargs): """Creates ConfigMap entity using REST API. @@ -641,6 +669,7 @@ def create_config_map(self, namespace, **kwargs): self.wait_config_map_exist(namespace=namespace, name=conf_map_name) return output + # TODO DONE def replace_config_map(self, namespace, **kwargs): """Replace ConfigMap entity using REST API. @@ -657,6 +686,7 @@ def replace_config_map(self, namespace, **kwargs): body=conf_map) return output + # TODO DONE def create_stateful_set(self, namespace, **kwargs): """Creates StatefulSet entity using REST API. @@ -673,6 +703,8 @@ def create_stateful_set(self, namespace, **kwargs): self.wait_stateful_set_exist(namespace=namespace, name=st_name) return output + + # TODO DONE def create_service(self, namespace, **kwargs): """Creates Service entity using REST API. @@ -688,6 +720,7 @@ def create_service(self, namespace, **kwargs): self.wait_service_exist(namespace=namespace, name=service_name) return output + # TODO DONE def create_endpoints(self, namespace, **kwargs): """Creates Endpoints entity using REST API. @@ -703,6 +736,7 @@ def create_endpoints(self, namespace, **kwargs): self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) return output + # TODO DONE def create_route(self, namespace, **kwargs): """Creates Route entity using REST API. @@ -718,6 +752,7 @@ def create_route(self, namespace, **kwargs): self.wait_route_exist(namespace=namespace, name=route_name) return output + # TODO DONE def create_service_account(self, namespace, **kwargs): """Creates Service Account entity using REST API. @@ -733,6 +768,7 @@ def create_service_account(self, namespace, **kwargs): self.wait_service_account_exist(namespace=namespace, name=sa_name) return output + # TODO DONE def create_role_binding(self, namespace, **kwargs): """Creates RoleBinding entity using REST API. @@ -758,6 +794,7 @@ def create_role_binding(self, namespace, **kwargs): self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) return output + # TODO DONE def create_image_stream(self, namespace, **kwargs): """Creates Image Stream entity using REST API. @@ -773,6 +810,7 @@ def create_image_stream(self, namespace, **kwargs): self.wait_image_stream_exist(namespace=namespace, name=is_name) return output + # TODO DONE def create_secret(self, namespace, **kwargs): """Creates Secret entity using REST API. @@ -788,6 +826,7 @@ def create_secret(self, namespace, **kwargs): self.wait_secret_exist(namespace=namespace, name=secret_name) return output + # TODO DONE def create_deployment_config(self, namespace, **kwargs): """Creates Deployment Config entity using REST API. 
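# The create_* helpers annotated above share one shape: POST the object through the REST
# layer, then wait until it can be read back.  A condensed, illustrative version using
# the dynamic client (kind, namespace and body are placeholders, not real payloads):
from wait_for import wait_for

def create_and_wait(system, kind, namespace, body, wait=60):
    resource = system.ocp_client.resources.get(api_version='v1', kind=kind)
    created = resource.create(namespace=namespace, body=body)

    def present():
        items = resource.get(namespace=namespace).items
        return any(item.metadata.name == body['metadata']['name'] for item in items)

    wait_for(present, num_sec=wait)
    return created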
@@ -804,6 +843,7 @@ def create_deployment_config(self, namespace, **kwargs): name=dc_name) return output + # TODO DONE def create_persistent_volume_claim(self, namespace, **kwargs): """Creates Persistent Volume Claim entity using REST API. @@ -821,6 +861,7 @@ def create_persistent_volume_claim(self, namespace, **kwargs): name=pv_claim_name) return output + # TODO DONE def create_project(self, name, description=None): """Creates Project(namespace) using REST API. @@ -838,6 +879,7 @@ def create_project(self, name, description=None): self.wait_project_exist(name=name) return output + # TODO DONE def run_job(self, namespace, body): """Creates job from passed template, runs it and waits for the job to be accomplished @@ -852,6 +894,7 @@ def run_job(self, namespace, body): return self.wait_job_finished(namespace, job_name) + # TODO DONE def wait_job_finished(self, namespace, name, wait='15m'): """Waits for job to accomplish @@ -871,6 +914,7 @@ def job_wait_accomplished(): return False return wait_for(job_wait_accomplished, num_sec=wait)[0] + # TODO DONE def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): """Waits until pvc gets some particular status. For example: Bound. @@ -892,6 +936,7 @@ def pvc_wait_status(): return wait_for(pvc_wait_status, num_sec=wait)[0] + # TODO DONE def wait_project_exist(self, name, wait=60): """Checks whether Project exists within some time. @@ -903,6 +948,7 @@ def wait_project_exist(self, name, wait=60): return wait_for(self._does_exist, num_sec=wait, func_kwargs={'func': self.o_api.read_project, 'name': name})[0] + # TODO DONE def wait_config_map_exist(self, namespace, name, wait=60): """Checks whether Config Map exists within some time. @@ -916,7 +962,7 @@ def wait_config_map_exist(self, namespace, name, wait=60): func_kwargs={'func': self.k_api.read_namespaced_config_map, 'name': name, 'namespace': namespace})[0] - + # TODO DONE def wait_stateful_set_exist(self, namespace, name, wait=900): """Checks whether StatefulSet exists within some time. @@ -933,6 +979,7 @@ def wait_stateful_set_exist(self, namespace, name, wait=900): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_service_exist(self, namespace, name, wait=60): """Checks whether Service exists within some time. @@ -947,6 +994,7 @@ def wait_service_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_endpoints_exist(self, namespace, name, wait=60): """Checks whether Endpoints exists within some time. @@ -961,6 +1009,7 @@ def wait_endpoints_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_route_exist(self, namespace, name, wait=60): """Checks whether Route exists within some time. @@ -975,6 +1024,7 @@ def wait_route_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_service_account_exist(self, namespace, name, wait=60): """Checks whether Service Account exists within some time. @@ -989,6 +1039,7 @@ def wait_service_account_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_image_stream_exist(self, namespace, name, wait=60): """Checks whether Image Stream exists within some time. @@ -1003,6 +1054,7 @@ def wait_image_stream_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_role_binding_exist(self, namespace, name, wait=60): """Checks whether RoleBinding exists within some time. 
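# Usage sketch, assuming `ocp` is a connected system: the wait_*_exist helpers block
# until the named object becomes readable, so deployment code can chain them safely.
# The project and object names here are invented for illustration.
ocp.wait_service_exist(namespace='my-appliance', name='postgresql', wait=60)
ocp.wait_route_exist(namespace='my-appliance', name='httpd', wait=60)
ocp.wait_service_account_exist(namespace='my-appliance', name='cfme-orchestrator', wait=60)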
@@ -1018,6 +1070,7 @@ def wait_role_binding_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_secret_exist(self, namespace, name, wait=90): """Checks whether Secret exists within some time. @@ -1032,6 +1085,7 @@ def wait_secret_exist(self, namespace, name, wait=90): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): """Checks whether Persistent Volume Claim exists within some time. @@ -1046,6 +1100,7 @@ def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_deployment_config_exist(self, namespace, name, wait=600): """Checks whether Deployment Config exists within some time. @@ -1061,6 +1116,7 @@ def wait_deployment_config_exist(self, namespace, name, wait=600): 'name': name, 'namespace': namespace})[0] + # TODO DONE def wait_template_exist(self, namespace, name, wait=60): """Checks whether Template exists within some time. @@ -1075,6 +1131,7 @@ def wait_template_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] + # TODO DONE def _does_exist(self, func, **kwargs): try: func(**kwargs) @@ -1083,6 +1140,7 @@ def _does_exist(self, func, **kwargs): self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) return False + # TODO DONE def _restore_missing_project_role_bindings(self, namespace): """Fixes one of issues in Openshift REST API create project doesn't add necessary roles to default sa, probably bug, this is workaround @@ -1137,6 +1195,7 @@ def _restore_missing_project_role_bindings(self, namespace): metadata=role_binding_name) auth_api.create_namespaced_role_binding(namespace=namespace, body=puller_role_binding) + # TODO DONE def delete_project(self, name, wait=300): """Removes project(namespace) and all entities in it. @@ -1155,6 +1214,7 @@ def delete_project(self, name, wait=300): raise TimedOutError('project {n} was not removed within {w} sec'.format(n=name, w=wait)) + # TODO DONE def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. One of cases when this is necessary is emulation of stopping/starting appliance @@ -1195,10 +1255,12 @@ def check_scale_value(): self.logger.info("scaling entity %s to %s replicas", name, replicas) wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) + # TODO DONE def get_project_by_name(self, project_name): """Returns only the selected Project object""" return next(proj for proj in self.list_project() if proj.metadata.name == project_name) + # TODO DONE def get_scc(self, name): """Returns Security Context Constraint by name @@ -1208,6 +1270,7 @@ def get_scc(self, name): """ return self.security_api.read_security_context_constraints(name) + # TODO DONE def create_scc(self, body): """Creates Security Context Constraint from passed structure. Main aim is to create scc from read and parsed yaml file. 
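# scale_entity's docstring notes that one of its uses is emulating stop/start of an
# appliance by scaling replicas down to zero and back.  A hedged sketch of that use
# (`ocp` and the project name are placeholders):
project = 'my-appliance'
for dc_name in ocp.list_deployment_config_names(project):
    ocp.scale_entity(namespace=project, name=dc_name, replicas=0)   # "stop"
for dc_name in ocp.list_deployment_config_names(project):
    ocp.scale_entity(namespace=project, name=dc_name, replicas=1)   # "start"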
@@ -1223,6 +1286,7 @@ def create_scc(self, body): scc = self.ociclient.V1SecurityContextConstraints(**raw_scc) return self.security_api.create_security_context_constraints(body=scc) + # TODO DONE def append_sa_to_scc(self, scc_name, namespace, sa): """Appends Service Account to respective Security Constraint @@ -1249,6 +1313,7 @@ def append_sa_to_scc(self, scc_name, namespace, sa): return self.security_api.patch_security_context_constraints(name=scc_name, body=update_scc_cmd) + # TODO DONE def remove_sa_from_scc(self, scc_name, namespace, sa): """Removes Service Account from respective Security Constraint @@ -1273,6 +1338,7 @@ def remove_sa_from_scc(self, scc_name, namespace, sa): return self.security_api.patch_security_context_constraints(name=scc_name, body=update_scc_cmd) + # TODO DONE def is_vm_running(self, vm_name, running_pods=()): """Emulates check is vm(appliance) up and running @@ -1294,6 +1360,7 @@ def is_vm_running(self, vm_name, running_pods=()): # todo: check url is available + db is accessable return True + # TODO DONE def list_deployment_config_names(self, namespace): """Extracts and returns list of Deployment Config names @@ -1304,6 +1371,7 @@ def list_deployment_config_names(self, namespace): dcs = self.o_api.list_namespaced_deployment_config(namespace=namespace) return [dc.metadata.name for dc in dcs.items] + # TODO DONE def list_stateful_set_names(self, namespace): """Returns list of Stateful Set names @@ -1315,6 +1383,7 @@ def list_stateful_set_names(self, namespace): sts = st_api.list_namespaced_stateful_set(namespace=namespace) return [st.metadata.name for st in sts.items] + # TODO DONE def is_deployment_config(self, namespace, name): """Checks whether passed name belongs to deployment configs in appropriate namespace @@ -1325,6 +1394,7 @@ def is_deployment_config(self, namespace, name): """ return name in self.list_deployment_config_names(namespace=namespace) + # TODO DONE def is_stateful_set(self, namespace, name): """Checks whether passed name belongs to Stateful Sets in appropriate namespace @@ -1335,6 +1405,7 @@ def is_stateful_set(self, namespace, name): """ return name in self.list_stateful_set_names(namespace=namespace) + # TODO DONE def does_project_exist(self, name): """Checks whether Project exists. @@ -1344,6 +1415,7 @@ def does_project_exist(self, name): """ return self._does_exist(func=self.o_api.read_project, name=name) + # TODO DONE def is_vm_stopped(self, vm_name): """Check whether vm isn't running. There is no such state stopped for vm in openshift therefore @@ -1359,6 +1431,7 @@ def is_vm_stopped(self, vm_name): "running: {}").format([pod.metadata.name for pod in pods])) return not bool(pods) + # TODO DONE def wait_vm_running(self, vm_name, num_sec=900): """Checks whether all project pods are in ready state. @@ -1370,6 +1443,7 @@ def wait_vm_running(self, vm_name, num_sec=900): wait_for(self.is_vm_running, [vm_name], num_sec=num_sec) return True + # TODO DONE def wait_vm_stopped(self, vm_name, num_sec=600): """Checks whether all project pods are stopped. 
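# is_vm_running accepts an optional override of which pods must be ready, falling back
# to get_required_pods() when none is given.  A usage sketch with invented names:
ocp.is_vm_running('my-appliance')                                  # check every required pod
ocp.is_vm_running('my-appliance', running_pods=('postgresql',))    # only check the database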
@@ -1381,6 +1455,7 @@ def wait_vm_stopped(self, vm_name, num_sec=600): wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec) return True + # TODO DONE def current_ip_address(self, vm_name): """Tries to retrieve project's external ip @@ -1395,6 +1470,7 @@ def current_ip_address(self, vm_name): except Exception: return None + # TODO DONE def is_vm_suspended(self, vm_name): """There is no such state in openshift @@ -1404,6 +1480,7 @@ def is_vm_suspended(self, vm_name): """ return False + # TODO DONE def in_steady_state(self, vm_name): """Return whether the specified virtual machine is in steady state @@ -1415,10 +1492,12 @@ def in_steady_state(self, vm_name): or self.is_vm_stopped(vm_name) or self.is_vm_suspended(vm_name)) + # TODO DONE @property def can_rename(self): return hasattr(self, "rename_vm") + # TODO DONE def list_project_names(self): """Obtains project names @@ -1429,6 +1508,7 @@ def list_project_names(self): list_vms = list_vm = list_project_names + # TODO DONE def get_appliance_version(self, vm_name): """Returns appliance version if it is possible @@ -1446,6 +1526,7 @@ def get_appliance_version(self, vm_name): except ValueError: return None + # TODO DONE def delete_template(self, template_name, namespace='openshift'): """Deletes template @@ -1458,14 +1539,17 @@ def delete_template(self, template_name, namespace='openshift'): return self.o_api.delete_namespaced_template(name=template_name, namespace=namespace, body=options) + # TODO DONE def get_meta_value(self, instance, key): raise NotImplementedError( 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + # TODO DONE def set_meta_value(self, instance, key): raise NotImplementedError( 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + # TODO DONE def vm_status(self, vm_name): """Returns current vm/appliance state @@ -1477,6 +1561,7 @@ def vm_status(self, vm_name): raise ValueError("Vm {} doesn't exist".format(vm_name)) return 'up' if self.is_vm_running(vm_name) else 'down' + # TODO DONE def vm_creation_time(self, vm_name): """Returns time when vm/appliance was created @@ -1490,11 +1575,13 @@ def vm_creation_time(self, vm_name): project = next(proj for proj in projects if proj.metadata.name == vm_name) return project.metadata.creation_timestamp + # TODO DONE @staticmethod def _progress_log_callback(logger, source, destination, progress): logger.info("Provisioning progress {}->{}: {}".format( source, destination, str(progress))) + # TODO DONE def vm_hardware_configuration(self, vm_name): """Collects project's cpu and ram usage @@ -1524,6 +1611,7 @@ def vm_hardware_configuration(self, vm_name): hw_config['ram'] += ram return hw_config + # TODO DONE def usage_and_quota(self): installed_ram = 0 installed_cpu = 0 @@ -1541,6 +1629,7 @@ def usage_and_quota(self): 'cpu_limit': None, } + # TODO DONE def get_required_pods(self, vm_name): """Provides list of pods which should be present in appliance @@ -1554,6 +1643,7 @@ def get_required_pods(self, vm_name): else: return self.required_project_pods + # TODO DONE def get_ip_address(self, vm_name, timeout=600): """ Returns the IP address for the selected appliance. @@ -1572,9 +1662,11 @@ def get_ip_address(self, vm_name, timeout=600): ip_address = None return ip_address + # TODO DONE def disconnect(self): pass + # TODO DONE def get_appliance_tags(self, name): """Returns appliance tags stored in appropriate config map if it exists. 
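# get_appliance_tags reads the 'image-repo-data' ConfigMap kept in the appliance project
# and decodes its 'tags' JSON; a hedged dynamic-client equivalent (the project name is
# made up, v1_config_map is the cached resource defined later in this module):
import json

cm = ocp.v1_config_map.get(name='image-repo-data', namespace='my-appliance')
tags = json.loads(cm.data['tags'])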
@@ -1589,6 +1681,7 @@ def get_appliance_tags(self, name): except ApiException: return {} + # TODO DONE def get_appliance_url(self, name): """Returns appliance url assigned by Openshift @@ -1602,6 +1695,7 @@ def get_appliance_url(self, name): except (ApiException, IndexError): return None + # TODO DONE def get_appliance_uuid(self, name): """Returns appliance uuid assigned by Openshift @@ -1611,6 +1705,7 @@ def get_appliance_uuid(self, name): """ return self.get_project_by_name(name).metadata.uid + # TODO DONE def is_appliance(self, name): """Checks whether passed vm/project is appliance @@ -1620,6 +1715,7 @@ def is_appliance(self, name): """ return bool(self.get_appliance_tags(name)) + # TODO DONE def find_job_pods(self, namespace, name): """Finds and returns all remaining job pods @@ -1634,6 +1730,7 @@ def find_job_pods(self, namespace, name): pods.append(pod) return pods + # TODO DONE def read_pod_log(self, namespace, name): """Reads and returns pod log diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index db196a1d..68db0336 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -105,8 +105,6 @@ def __init__(self, system, raw=None, **kwargs): self._name = raw.metadata.name if raw else kwargs.get('name') if not self._name: raise ValueError("missing required kwarg 'name'") - self.v1_project = self.system.ocp_client.resources.get( - api_version='project.openshift.io/v1', kind='Project') @property def get_quota(self): @@ -142,7 +140,7 @@ def restart(self): raise NotImplementedError def delete(self): - self.v1_project.delete(name=self.name) + self.system.v1_project.delete(name=self.name) def refresh(self): self.raw = self.system.get_project(name=self.name).raw @@ -154,11 +152,11 @@ def cleanup(self): class Pod(Vm): state_map = { - 'pending': VmState.PENDING, - 'running': VmState.RUNNING, - 'succeeded': VmState.SUCCEEDED, - 'failed': VmState.FAILED, - 'unknown': VmState.UNKNOWN + 'Pending': VmState.PENDING, + 'Running': VmState.RUNNING, + 'Succeeded': VmState.SUCCEEDED, + 'Failed': VmState.FAILED, + 'Unknown': VmState.UNKNOWN } def __init__(self, system, raw=None, **kwargs): @@ -207,7 +205,8 @@ def ip(self): return None def _get_state(self): - return self.raw.status.phase + self.refresh() + return self._api_state_to_vmstate(str(self.raw.status.phase)) def is_stateful_set(self, namespace, name): """Checks whether passed name belongs to Stateful Sets in appropriate namespace @@ -217,7 +216,7 @@ def is_stateful_set(self, namespace, name): name: entity name Return: True/False """ - return name in self.list_stateful_set_names(namespace=namespace) + return name in self.system.list_stateful_set_names(namespace=namespace) def is_deployment_config(self, namespace, name): """Checks whether passed name belongs to deployment configs in appropriate namespace @@ -227,8 +226,10 @@ def is_deployment_config(self, namespace, name): name: entity name Return: True/False """ - return name in self.list_deployment_config_names(namespace=namespace) + return name in self.system.list_deployment_config_names(namespace=namespace) + + # JUWATTS TODO def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. 
One of cases when this is necessary is emulation of stopping/starting appliance @@ -241,27 +242,27 @@ def scale_entity(self, namespace, name, replicas, wait=60): Return: None """ # only dc and statefulsets can be scaled - st_api = self.kclient.AppsV1beta1Api(api_client=self.kapi_client) + #st_api = self.system.kubeclient.AppsV1beta1Api(api_client=self.kapi_client) - scale_val = self.kclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas)) + scale_val = self.system.kubeclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas)) if self.is_deployment_config(name=name, namespace=namespace): - self.o_api.patch_namespaced_deployment_config_scale(name=name, namespace=namespace, + self.system.v1_deployment_config.scale.patch(name=name, namespace=namespace, body=scale_val) def check_scale_value(): - got_scale = self.o_api.read_namespaced_deployment_config_scale(name=name, - namespace=namespace) + got_scale = \ + self.system.v1_deployment_config.scale.get(name=name, namespace=namespace) return int(got_scale.spec.replicas or 0) elif self.is_stateful_set(name=name, namespace=namespace): # replace this code with stateful_set_scale when kubernetes shipped with openshift # client gets upgraded - st_spec = self.kclient.V1beta1StatefulSetSpec - st = self.kclient.V1beta1StatefulSet(spec=st_spec(replicas=replicas)) - st_api.patch_namespaced_stateful_set(name=name, namespace=namespace, body=st) + st_spec = self.systmem.kubeclient.V1beta1StatefulSetSpec + st = self.system.kubeclient.V1beta1StatefulSet(spec=st_spec(replicas=replicas)) + self.system.v1_stateful_sets.patch_(name=name, namespace=namespace,body=st) def check_scale_value(): - got_scale = st_api.read_namespaced_stateful_set(name=name, namespace=namespace) + got_scale = self.system.v1_stateful_sets.get(name=name, namespace=namespace) return int(got_scale.spec.replicas or 0) else: raise ValueError("This name %s is not found among " @@ -320,17 +321,17 @@ def creation_time(self): class Openshift(System, VmMixin, ProjectMixin): _stats_available = { - 'num_container': lambda self: len(self.list_container()), + 'num_container': lambda self: len(self.list_containers()), 'num_pod': lambda self: len(self.list_pods()), - 'num_service': lambda self: len(self.list_service()), + 'num_service': lambda self: len(self.list_services()), 'num_replication_controller': - lambda self: len(self.list_replication_controller()), - 'num_image': lambda self: len(self.list_image_id()), - 'num_node': lambda self: len(self.list_node()), - 'num_image_registry': lambda self: len(self.list_image_registry()), + lambda self: len(self.list_replication_controllers()), + 'num_image': lambda self: len(self.list_image_ids()), + 'num_node': lambda self: len(self.list_nodes()), + 'num_image_registry': lambda self: len(self.list_image_registries()), 'num_project': lambda self: len(self.list_project()), - 'num_route': lambda self: len(self.list_route()), - 'num_template': lambda self: len(self.list_template()) + 'num_route': lambda self: len(self.list_routes()), + 'num_template': lambda self: len(self.list_templates()) } stream2template_tags_mapping59 = { @@ -387,6 +388,7 @@ def __init__(self, hostname, protocol="https", port=8443, debug=False, self.ssl_ca_cert = kwargs.get('ssl_ca_cert', '') self.ociclient = ociclient + self.kubeclient = kubeclient self.k8s_client = self._k8s_client_connect() @@ -450,6 +452,75 @@ def v1_pod(self): def v1_route(self): return self.ocp_client.resources.get(api_version='route.openshift.io/v1', kind='Route') + @cached_property + def 
v1_deployment_config(self): + return self.ocp_client.resources.get(api_version='v1', kind='DeploymentConfig') + + @cached_property + def v1_stateful_sets(self): + return self.ocp_client.resources.get(api_version='apps/v1beta1', kind='StatefulSet') + + @cached_property + def v1_template(self): + return self.ocp_client.resources.get(api_version='template.openshift.io/v1', + kind='Template') + + @cached_property + def v1_image_stream(self): + return self.ocp_client.resources.get(api_version='v1', kind='ImageStream') + + @cached_property + def v1_image_stream_images(self): + return self.ocp_client.resources.get(api_version='v1', kind='ImageStreamImage') + + @cached_property + def v1_service(self): + return self.ocp_client.resources.get(api_version='v1', kind='Service') + + @cached_property + def v1_replication_controller(self): + return self.ocp_client.resources.get(api_version='v1', kind='ReplicationController') + + @cached_property + def v1_node(self): + return self.ocp_client.resources.get(api_version='v1', kind='Node') + + @cached_property + def v1_persistent_volume(self): + return self.ocp_client.resources.get(api_version='v1', kind='PersistentVolume') + + @cached_property + def v1_image_registry(self): + return self.ocp_client.resources.get(api_version='v1', kind='PersistentVolume') + + @cached_property + def v1_config_map(self): + return self.ocp_client.resources.get(api_version='v1', kind='ConfigMap') + + @cached_property + def v1_endpoint(self): + return self.ocp_client.resources.get(api_version='v1', kind='Endpoints') + + @cached_property + def v1_service_account(self): + return self.ocp_client.resources.get(api_version='v1', kind='ServiceAccount') + + @cached_property + def v1_role_binding(self): + return self.ocp_client.resources.get(api_version='authorization.openshift.io/v1', + kind='RoleBinding') + + @cached_property + def v1_secret(self): + return self.ocp_client.resources.get(api_version='v1', kind='Secret') + + @cached_property + def v1_job(self): + return self.ocp_client.resources.get(api_version='batch/v1', kind='Job') + + @cached_property + def v1_scc(self): + return self.ocp_client.resources.get(api_version='v1', kind='SecurityContextConstraints') @property def can_suspend(self): @@ -459,9 +530,102 @@ def can_suspend(self): def can_pause(self): return False + def _does_exist(self, func, **kwargs): + try: + func(**kwargs) + return True + except ApiException as e: + self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) + return + + def _restore_missing_project_role_bindings(self, namespace): + """Fixes one of issues in Openshift REST API + create project doesn't add necessary roles to default sa, probably bug, this is workaround + + Args: + namespace: openshift namespace where roles are absent + Return: None + """ + # adding builder role binding + builder_role = self.kubeclient.V1ObjectReference(name='system:image-builder') + builder_sa = self.kubeclient.V1ObjectReference(name='builder', + kind='ServiceAccount', + namespace=namespace) + builder_role_binding_name = self.kubeclient.V1ObjectMeta(name='builder-binding') + builder_role_binding = self.ociclient.V1RoleBinding(role_ref=builder_role, + subjects=[builder_sa], + metadata=builder_role_binding_name) + self.v1_role_binding.create(namespace=namespace, body=builder_role_binding) + + # adding deployer role binding + deployer_role = self.kubeclient.V1ObjectReference(name='system:deployer') + deployer_sa = self.kubeclient.V1ObjectReference(name='deployer', + kind='ServiceAccount', + 
namespace=namespace) + deployer_role_binding_name = self.kubeclient.V1ObjectMeta(name='deployer-binding') + deployer_role_binding = self.ociclient.V1RoleBinding(role_ref=deployer_role, + subjects=[deployer_sa], + metadata=deployer_role_binding_name) + self.v1_role_binding.create(namespace=namespace, body=deployer_role_binding) + + # adding admin role binding + admin_role = self.kubeclient.V1ObjectReference(name='admin') + admin_user = self.kubeclient.V1ObjectReference(name='admin', + kind='User', + namespace=namespace) + admin_role_binding_name = self.kubeclient.V1ObjectMeta(name='admin-binding') + admin_role_binding = self.ociclient.V1RoleBinding(role_ref=admin_role, + subjects=[admin_user], + metadata=admin_role_binding_name) + self.v1_role_binding.create(namespace=namespace, body=admin_role_binding) + + # adding image-puller role binding + puller_role = self.kubeclient.V1ObjectReference(name='system:image-puller') + group_name = 'system:serviceaccounts:{proj}'.format(proj=namespace) + puller_group = self.kubeclient.V1ObjectReference(name=group_name, + kind='SystemGroup', + namespace=namespace) + role_binding_name = self.kubeclient.V1ObjectMeta(name='image-puller-binding') + puller_role_binding = self.ociclient.V1RoleBinding(role_ref=puller_role, + subjects=[puller_group], + metadata=role_binding_name) + self.v1_role_binding.create(namespace=namespace, body=puller_role_binding) + + def current_ip_address(self, vm_name): + """Tries to retrieve project's external ip + + Args: + vm_name: project name + Return: ip address or None + """ + try: + common_svc = self.v1_service.get(name='common-service', namespace=vm_name) + return common_svc.spec.external_i_ps[0] + except Exception: + return None + + def get_ip_address(self, vm_name, timeout=600): + """ Returns the IP address for the selected appliance. + + Args: + vm_name: The name of the vm to obtain the IP for. + timeout: The IP address wait timeout. + Returns: A string containing the first found IP that isn't the device. + """ + try: + ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), + fail_condition=None, + delay=5, + num_sec=timeout, + message="get_ip_address from openshift") + except TimedOutError: + ip_address = None + return ip_address + def get_ocp_obj(self, resource_type, name, namespace=None): ocp_obj = None - for item in getattr(self, resource_type).get(namespace=namespace).items: + # for item in getattr(self, resource_type).get(namespace=namespace).items: + for item in resource_type.get(namespace=namespace).items: if item.metadata.name == name: ocp_obj = item break @@ -483,11 +647,13 @@ def get_pod(self, name, namespace=None): Raises: ValueError -- no name provided """ - pod = self.get_ocp_obj(resource_type='Pod', name=name, namespace=namespace) + if namespace: + pod = self.v1_pod.get(name=name, namespace=namespace) + else: + pod = self.get_ocp_obj(resource_type=self.v1_pod, name=name) return Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) - get_vm = get_pod def create_vm(self, name, **kwargs): raise NotImplementedError('This function has not yet been implemented.') @@ -507,7 +673,18 @@ def list_pods(self, namespace=None): Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) for pod in self.v1_pod.get(namespace=namespace).items] - list_vms = list_pods + + def wait_project_exist(self, name, wait=60): + """Checks whether Project exists within some time. 
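Example: a minimal usage sketch for this helper, assuming a connected Openshift instance `system`; the project name is hypothetical.

            # Blocks until the project is visible through the API (up to `wait` seconds).
            system.wait_project_exist(name='cfme-project-abc123', wait=120)
            proj = system.get_project(name='cfme-project-abc123')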
+ + Args: + name: openshift namespace name + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.get_project(), + 'name': name})[0] def create_project(self, name, description=None, **kwargs): @@ -519,59 +696,1269 @@ def create_project(self, name, description=None, **kwargs): self.logger.info("creating new project with name %s", name) project = self.v1_project.create(body=proj) - + self.wait_project_exist(name=name) return Project(system=self, name=project.metadata.name, raw=project) def find_projects(self, *args, **kwargs): raise NotImplementedError def get_project(self, name): - project = self.get_ocp_obj(resource_type='v1_project', name=name) + project = self.v1_project.get(name=name) return Project(system=self, name=project.metadata.name, raw=project) + get_vm = get_project + def list_project(self, namespace=None): return [ Project(system=self, name=project.metadata.name, raw=project) for project in self.v1_project.get(namespace=namespace).items] + list_vms = list_project + + def list_project_names(self): + """Obtains project names + + Returns: list of project names + """ + projects = self.list_project() + return [proj.name for proj in projects] + def list_routes(self, namespace=None): - return self.v1_route.get(namespace=namespace) + return self.v1_route.get(namespace=namespace).items - # def list_image_streams(self, namespace=None): - # - # return self.get_ocp_obj_list(resource_type='ImageStreamList', namespace=namespace) - # - # def list_image_stream_imagess(self, namespace=None): - # - # return self.get_ocp_obj_list(resource_type='ImageStreamImageList', namespace=namespace) - # - # def list_templates(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='Template', namespace=namespace) - # - # def list_deployment_config(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='DeploymentConfig', namespace=namespace) - # - # def list_services(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='Service', namespace=namespace) - # - # def list_replication_controller(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='ReplicationController', namespace=namespace) - # - # def list_node(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='Node', namespace=namespace) - # - # def list_persistent_volume(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='PersistentVolume', namespace=namespace) - # - # def list_container(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='', namespace=namespace) - # - # def list_image_registry(self, namespace=None): - # return self.get_ocp_obj_list(resource_type='', namespace=namespace) + def list_image_streams(self, namespace=None): + + return self.v1_image_stream.get(namespace=namespace).items + + def list_image_stream_images(self, namespace=None): + + return self.v1_image_stream_image.get(namespace=namespace).items + + def list_templates(self, namespace=None): + return self.v1_template.get(namespace=namespace) + + def list_deployment_configs(self, namespace=None): + return self.v1_deployment_config.get(namespace=namespace).items + + def wait_service_exist(self, namespace, name, wait=60): + """Checks whether Service exists within some time. 
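Example: `create_service` below takes the raw Service body as keyword arguments and then waits for the object with this helper. A minimal sketch, assuming a connected Openshift instance `system`; the service name, selector and port are hypothetical.

            service_body = {
                'metadata': {'name': 'demo-service'},
                'spec': {
                    'ports': [{'name': 'http', 'port': 8080}],
                    'selector': {'name': 'demo-app'},
                },
            }
            system.create_service(namespace='demo-project', **service_body)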
+ + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_service.get, + 'name': name, + 'namespace': namespace})[0] + + def create_service(self, namespace, **kwargs): + """Creates Service entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Service data + Return: data if entity was created w/o errors + """ + service = self.kubeclient.V1Service(**kwargs) + service_name = service.to_dict()['metadata']['name'] + self.logger.info("creating service %s", service_name) + output = self.v1_service.create(namespace=namespace, body=service) + self.wait_service_exist(namespace=namespace, name=service_name) + return output + + def list_services(self, namespace=None): + return self.v1_service.get(namespace=namespace).items + + def list_replication_controller(self, namespace=None): + return self.v1_replication_controller.get(namespace=namespace).items + + def list_nodes(self, namespace=None): + return self.v1_node.get(namespace=namespace).items + + def list_persistent_volumes(self, namespace=None): + return self.v1_persistent_volume.get(namespace=namespace).items + + def list_containers(self, namespace=None): + """Returns list of containers (derived from pods) + If project_name is passed, only the containers under the selected project will be returned + """ + pods = self.list_pods(namespace=namespace) + return [pod.raw.spec.containers for pod in pods] + + def list_image_ids(self, namespace=None): + """Returns list of unique image ids (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.raw.status.containerStatuses: + statuses.append(status) + return sorted(set([status.imageID for status in statuses])) + + def list_image_registries(self, namespace=None): + """Returns list of image registries (derived from pods)""" + pods = self.list_pods(namespace=namespace) + statuses = [] + for pod in pods: + for status in pod.raw.status.containerStatuses: + statuses.append(status) + # returns only the image registry name, without the port number in case of local registry + return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) def find_vms(self, *args, **kwargs): raise NotImplementedError + def list_deployment_config_names(self , namespace=None): + + deployment_configs = self.v1_deployment_config.get(namespace=namespace) + + return [dc.metadata.name for dc in deployment_configs.items] + + def list_stateful_set_names(self, namespace=None): + + stateful_sets = self.v1_stateful_sets.get(namespace=namespace) + + return [ss.metadata.name for ss in stateful_sets.items] + + def cluster_info(self): + """Returns information about the cluster - number of CPUs and memory in GB""" + aggregate_cpu, aggregate_mem = 0, 0 + for node in self.list_nodes(): + aggregate_cpu += int(node.status.capacity['cpu']) + # converting KiB to GB. 1KiB = 1.024E-6 GB + aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) * 0.00000102400)) + + return {'cpu': aggregate_cpu, 'memory': aggregate_mem} + + def expose_db_ip(self, namespace): + """Creates special service in appliance project (namespace) which makes internal appliance + db be available outside. 
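Example: a short sketch of how this is driven during appliance deployment (deploy_template below calls it the same way), assuming a connected `system`; the project name is hypothetical.

            # Creates the 'common-service' LoadBalancer service in the project and waits
            # until Openshift assigns an external IP (see get_ip_address above).
            db_ip = system.expose_db_ip(namespace='cfme-project-abc123')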
+ + Args: + namespace: (str) openshift namespace + Returns: ip + """ + # creating common service with external ip and extracting assigned ip + service_obj = self.kubeclient.V1Service(**json.loads(common_service)) + self.v1_service.create(body=service_obj, namespace=namespace) + # external ip isn't assigned immediately, so, we have to wait until it is assigned + + return self.get_ip_address(namespace) + + def wait_config_map_exist(self, namespace, name, wait=60): + """Checks whether Config Map exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_config_map.get, + 'name': name, + 'namespace': namespace})[0] + + def create_config_map(self, namespace, **kwargs): + """Creates ConfigMap entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: ConfigMap data + Return: data if entity was created w/o errors + """ + conf_map = self.kubeclient.V1ConfigMap(**kwargs) + conf_map_name = conf_map.to_dict()['metadata']['name'] + self.logger.info("creating config map %s", conf_map_name) + output = self.v1_config_map.create(body=conf_map, namespace=namespace) + self.wait_config_map_exist(namespace=namespace, name=conf_map_name) + return output + + def replace_config_map(self, namespace, **kwargs): + """Replace ConfigMap entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: ConfigMap data + Return: data if entity was created w/o errors + """ + conf_map = self.kubeclient.V1ConfigMap(**kwargs) + conf_map_name = conf_map.to_dict()['metadata']['name'] + self.logger.info("replacing config map %s", conf_map_name) + output = self.v1_config_map.replace(namespace=namespace, name=conf_map_name, body=conf_map) + + return output + + def list_config_maps(self, namespace=None): + return self.v1_config_map.get(namespace=namespace).items + + def get_config_maps(self, name, namespacee): + return self.v1_config_map.get(name=name, namespace=namespace) + + def wait_stateful_set_exist(self, namespace, name, wait=900): + """Checks whether StatefulSet exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + read_st = self.v1_stateful_sets.get + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': read_st, + 'name': name, + 'namespace': namespace})[0] + + def create_stateful_set(self, namespace, **kwargs): + """Creates StatefulSet entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: StatefulSet data + Return: data if entity was created w/o errors + """ + st = self.kubeclient.V1beta1StatefulSet(**kwargs) + st_name = st.to_dict()['metadata']['name'] + self.logger.info("creating stateful set %s", st_name) + output = self.v1_stateful_sets.create(body=st, namespace=namespace) + self.wait_stateful_set_exist(namespace=namespace, name=st_name) + return output + + def wait_endpoints_exist(self, namespace, name, wait=60): + """Checks whether Endpoints exists within some time. 
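Example: the ConfigMap helpers above follow the same create-then-wait pattern. A minimal sketch, assuming a connected `system`; the data payload is hypothetical.

            cm_body = {
                'metadata': {'name': 'image-repo-data'},
                'data': {'tags': '{}'},
            }
            system.create_config_map(namespace='cfme-project-abc123', **cm_body)
            # replace_config_map() accepts the same kwargs when the map has to be updated in place.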
+ + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_endpoint.get, + 'name': name, + 'namespace': namespace})[0] + + def create_endpoints(self, namespace, **kwargs): + """Creates Endpoints entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Endpoints data + Return: data if entity was created w/o errors + """ + endpoints = self.kubeclient.V1Endpoints(**kwargs) + endpoints_name = endpoints.to_dict()['metadata']['name'] + self.logger.info("creating endpoints %s", endpoints_name) + output = self.v1_endpoint.create(namespace=namespace, body=endpoints) + self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) + return output + + def wait_route_exist(self, namespace, name, wait=60): + """Checks whether Route exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_route.get, + 'name': name, + 'namespace': namespace})[0] + + def create_route(self, namespace, **kwargs): + """Creates Route entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Route data + Return: data if entity was created w/o errors + """ + route = self.ociclient.V1Route(**kwargs) + route_name = route.to_dict()['metadata']['name'] + self.logger.info("creating route %s", route_name) + output = self.v1_route.create(namespace=namespace, body=route) + self.wait_route_exist(namespace=namespace, name=route_name) + return output + + def wait_service_account_exist(self, namespace, name, wait=60): + """Checks whether Service Account exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_service_account.get, + 'name': name, + 'namespace': namespace})[0] + + def create_service_account(self, namespace, **kwargs): + """Creates Service Account entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Service Account data + Return: data if entity was created w/o errors + """ + sa = self.kubeclient.V1ServiceAccount(**kwargs) + sa_name = sa.to_dict()['metadata']['name'] + self.logger.info("creating service account %s", sa_name) + output = self.v1_service_account.create(namespace=namespace, body=sa) + self.wait_service_account_exist(namespace=namespace, name=sa_name) + return output + + def wait_role_binding_exist(self, namespace, name, wait=60): + """Checks whether RoleBinding exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_role_binding.get, + 'name': name, + 'namespace': namespace})[0] + + def create_role_binding(self, namespace, **kwargs): + """Creates RoleBinding entity using REST API. 
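Example: a sketch of the kwargs shape this method expects, matching how role_ref and subjects are unpacked below; the binding and project names are hypothetical, while the 'view' role and the cfme-orchestrator service account are the ones used elsewhere in this module.

            system.create_role_binding(
                namespace='cfme-project-abc123',
                metadata={'name': 'view-binding'},
                role_ref={'name': 'view'},
                subjects=[{'kind': 'ServiceAccount', 'name': 'cfme-orchestrator'}],
            )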
+ + Args: + namespace: openshift namespace where entity has to be created + kwargs: RoleBinding data + Return: data if entity was created w/o errors + """ + ObjectRef = self.kubeclient.V1ObjectReference # noqa + # there is some version mismatch in api. so, it would be better to remove version + # TODO Testing needed if this comment is still necessary, removing for now + # kwargs.pop('api_version', None) + role_binding_name = kwargs['metadata']['name'] + + # role and subjects data should be turned into objects before passing them to RoleBinding + role_name = kwargs.pop('role_ref')['name'] + role = ObjectRef(name=role_name) + subjects = [ObjectRef(namespace=namespace, **subj) for subj in kwargs.pop('subjects')] + role_binding = self.ociclient.V1RoleBinding(role_ref=role, subjects=subjects, **kwargs) + self.logger.debug("creating role binding %s in project %s", role_binding_name, namespace) + output = self.v1_role_binding.create(namespace=namespace, body=role_binding) + self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) + return output + + def wait_image_stream_exist(self, namespace, name, wait=60): + """Checks whether Image Stream exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_image_stream.get, + 'name': name, + 'namespace': namespace})[0] + + def create_image_stream(self, namespace, **kwargs): + """Creates Image Stream entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Image Stream data + Return: data if entity was created w/o errors + """ + image_stream = self.ociclient.V1ImageStream(**kwargs) + is_name = image_stream.to_dict()['metadata']['name'] + self.logger.info("creating image stream %s", is_name) + output = self.v1_image_stream.create(namespace=namespace, body=image_stream) + self.wait_image_stream_exist(namespace=namespace, name=is_name) + return output + + def wait_secret_exist(self, namespace, name, wait=90): + """Checks whether Secret exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_secret.get, + 'name': name, + 'namespace': namespace})[0] + + def create_secret(self, namespace, **kwargs): + """Creates Secret entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Secret data + Return: data if entity was created w/o errors + """ + secret = self.kubeclient.V1Secret(**kwargs) + secret_name = secret.to_dict()['metadata']['name'] + self.logger.info("creating secret %s", secret_name) + output = self.v1_secret.create(namespace=namespace, body=secret) + self.wait_secret_exist(namespace=namespace, name=secret_name) + return output + + def wait_deployment_config_exist(self, namespace, name, wait=600): + """Checks whether Deployment Config exists within some time. 
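Example: `create_secret` above takes the body the same way. A small sketch, assuming a connected `system`; the secret name and contents are hypothetical.

            system.create_secret(
                namespace='cfme-project-abc123',
                metadata={'name': 'demo-secret'},
                string_data={'pg-password': 'smartvm'},
            )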
+ + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_deployment_config.get, + 'name': name, + 'namespace': namespace})[0] + + def create_deployment_config(self, namespace, **kwargs): + """Creates Deployment Config entity using REST API. + + Args: + namespace: openshift namespace where entity has to be created + kwargs: Deployment Config data + Return: data if entity was created w/o errors + """ + dc = self.ociclient.V1DeploymentConfig(**kwargs) + dc_name = dc.to_dict()['metadata']['name'] + self.logger.info("creating deployment config %s", dc_name) + output = self.v1_deployment_config.create(namespace=namespace, body=dc) + self.wait_deployment_config_exist(namespace=namespace, + name=dc_name) + return output + + def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): + """Waits until pvc gets some particular status. + For example: Bound. + + Args: + namespace: openshift namespace name + name: job name + status: pvc status + wait: stop waiting after "wait" time + Return: True/False + """ + def pvc_wait_status(): + try: + pvc = self.v1_persistent_volume.get(name=name, namespace=namespace) + return pvc.status.phase == status + except KeyError: + return False + + return wait_for(pvc_wait_status, num_sec=wait)[0] + + def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): + """Checks whether Persistent Volume Claim exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_persistent_volume.get, + 'name': name, + 'namespace': namespace})[0] + + def create_persistent_volume_claim(self, namespace, **kwargs): + """Creates Persistent Volume Claim entity using REST API. 
+ + Args: + namespace: openshift namespace where entity has to be created + kwargs: Persistent Volume Claim data + Return: data if entity was created w/o errors + """ + pv_claim = self.kubeclient.V1PersistentVolumeClaim(**kwargs) + pv_claim_name = pv_claim.to_dict()['metadata']['name'] + self.logger.info("creating persistent volume claim %s", pv_claim_name) + output = self.v1_persistent_volume.create(namespace=namespace, body=pv_claim) + self.wait_persistent_volume_claim_exist(namespace=namespace, + name=pv_claim_name) + return output + + def wait_job_finished(self, namespace, name, wait='15m'): + """Waits for job to accomplish + + Args: + namespace: openshift namespace name + name: job name + wait: stop waiting after "wait" time + Return: True/False + """ + def job_wait_accomplished(): + try: + job = self.v1_job.get(name=name, namespace=namespace) + # todo: replace with checking final statuses + return bool(job.status.succeeded) + except KeyError: + return False + return wait_for(job_wait_accomplished, num_sec=wait)[0] + + def run_job(self, namespace, body): + """Creates job from passed template, runs it and waits for the job to be accomplished + + Args: + namespace: openshift namespace name + body: yaml job template + Return: True/False + """ + body = self.rename_structure(body) + job_name = body['metadata']['name'] + self.v1_job.create(namespace=namespace, body=body) + + return self.wait_job_finished(namespace, job_name) + + def get_scc(self, name=None): + """Returns Security Context Constraint by name + + Args: + name: security context constraint name + Returns: security context constraint object + """ + return self.v1_scc.get(name=name) + + def create_scc(self, body): + """Creates Security Context Constraint from passed structure. + Main aim is to create scc from read and parsed yaml file. + + Args: + body: security context constraint structure + Returns: security context constraint object + """ + raw_scc = self.rename_structure(body) + # JUWATTS DO WE STILL NEED THIS? + # if raw_scc.get('api_version') == 'v1': + # # there is inconsistency between api and some scc files. v1 is not accepted by api now + # raw_scc.pop('api_version') + scc = self.ociclient.V1SecurityContextConstraints(**raw_scc) + return self.v1_scc.create(body=scc) + + def append_sa_to_scc(self, scc_name, namespace, sa): + """Appends Service Account to respective Security Constraint + + Args: + scc_name: security context constraint name + namespace: service account's namespace + sa: service account's name + Returns: updated security context constraint object + """ + user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, + usr=sa) + if self.get_scc(name=scc_name).users is None: + # ocp 3.6 has None for users if there is no sa in it + update_scc_cmd = [ + {"op": "add", + "path": "/users", + "value": [user]}] + else: + update_scc_cmd = [ + {"op": "add", + "path": "/users/-", + "value": user}] + self.logger.debug("adding user %r to scc %r", user, scc_name) + return self.v1_scc.patch(name=scc_name, body=update_scc_cmd, namespace=namespace) + + def remove_sa_from_scc(self, scc_name, namespace, sa): + """Removes Service Account from respective Security Constraint + + Args: + scc_name: security context constraint name + namespace: service account's namespace + sa: service account's name + Returns: updated security context constraint object + """ + user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, + usr=sa) + # json patch's remove works only with indexes. 
so we have to figure out index + try: + index = next(val[0] for val in enumerate(self.get_scc(scc_name).users) + if val[1] == user) + except StopIteration: + raise ValueError("No such sa {} in scc {}".format(user, scc_name)) + update_scc_cmd = [ + {"op": "remove", + "path": "/users/{}".format(index)}] + self.logger.debug("removing user %r from scc %s with index %s", user, scc_name, index) + return self.v1_scc.patch(name=scc_name, body=update_scc_cmd, namespace=namespace) + + def is_vm_running(self, vm_name, running_pods=()): + """Emulates check is vm(appliance) up and running + + Args: + vm_name: (str) project(namespace) name + running_pods: (list) checks only passed number of pods. otherwise, default set. + Return: True/False + """ + if not self.does_vm_exist(vm_name): + return False + self.logger.info("checking all pod statuses for vm name %s", vm_name) + + for pod_name in running_pods or self.get_required_pods(vm_name): + if self.is_pod_running(namespace=vm_name, name=pod_name): + continue + else: + return False + + # todo: check url is available + db is accessable + return True + + def is_deployment_config(self, namespace, name): + """Checks whether passed name belongs to deployment configs in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_deployment_config_names(namespace=namespace) + + def is_stateful_set(self, namespace, name): + """Checks whether passed name belongs to Stateful Sets in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_stateful_set_names(namespace=namespace) + + def does_project_exist(self, name): + """Checks whether Project exists. + + Args: + name: openshift namespace name + Return: True/False + """ + return self._does_exist(func=self.v1_project.get, name=name) + + def is_vm_stopped(self, vm_name): + """Check whether vm isn't running. + There is no such state stopped for vm in openshift therefore + it just checks that vm isn't running + + Args: + vm_name: project name + Return: True/False + """ + pods = self.list_pods(namespace=vm_name) + if pods: + self.logger.info(("some pods are still " + "running: {}").format([pod.name for pod in pods])) + return not bool(pods) + + def wait_vm_running(self, vm_name, num_sec=900): + """Checks whether all project pods are in ready state. + + Args: + vm_name: project name + num_sec: all pods should get ready for this time then - True, otherwise False + Return: True/False + """ + wait_for(self.is_vm_running, [vm_name], num_sec=num_sec) + return True + + def wait_vm_stopped(self, vm_name, num_sec=600): + """Checks whether all project pods are stopped. 
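Example: a sketch of the stop-and-wait flow these checks support, assuming a connected `system` and a hypothetical appliance project.

            system.stop_vm('cfme-project-abc123')        # scales the required pods down to 0 replicas
            system.wait_vm_stopped('cfme-project-abc123', num_sec=600)
            assert system.vm_status('cfme-project-abc123') == 'down'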
+ + Args: + vm_name: project name + num_sec: all pods should not be ready for this time then - True, otherwise False + Return: True/False + """ + wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec) + return True + + def is_vm_suspended(self, vm_name): + """There is no such state in openshift + + Args: + vm_name: project name + Return: False + """ + return False + + def in_steady_state(self, vm_name): + """Return whether the specified virtual machine is in steady state + + Args: + vm_name: VM name + Returns: True/False + """ + return (self.is_vm_running(vm_name) + or self.is_vm_stopped(vm_name) + or self.is_vm_suspended(vm_name)) + + @property + def can_rename(self): + return hasattr(self, "rename_vm") + + def get_appliance_version(self, vm_name): + """Returns appliance version if it is possible + + Args: + vm_name: the openshift project name of the podified appliance + Returns: version + """ + try: + proj = self.get_project(vm_name) + description = proj.raw.metadata.annotations['openshift.io/description'] + return Version(TemplateName.parse_template(description).version) + except (ApiException, KeyError, ValueError): + try: + return Version(TemplateName.parse_template(vm_name).version) + except ValueError: + return None + + + def get_meta_value(self, instance, key): + raise NotImplementedError( + 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + + def set_meta_value(self, instance, key): + raise NotImplementedError( + 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) + + def vm_status(self, vm_name): + """Returns current vm/appliance state + + Args: + vm_name: the openshift project name of the podified appliance + Returns: up/down or exception if vm doesn't exist + """ + if not self.does_vm_exist(vm_name): + raise ValueError("Vm {} doesn't exist".format(vm_name)) + return 'up' if self.is_vm_running(vm_name) else 'down' + + def vm_creation_time(self, vm_name): + """Returns time when vm/appliance was created + + Args: + vm_name: the openshift project name of the podified appliance + Return: datetime obj + """ + if not self.does_vm_exist(vm_name): + raise ValueError("Vm {} doesn't exist".format(vm_name)) + project = self.v1_project.get(vm_name) + return project.raw.metadata.creation_timestamp + + @staticmethod + def _progress_log_callback(logger, source, destination, progress): + logger.info("Provisioning progress {}->{}: {}".format( + source, destination, str(progress))) + + def vm_hardware_configuration(self, vm_name): + """Collects project's cpu and ram usage + + Args: + vm_name: openshift's data + Returns: collected data + """ + hw_config = {'ram': 0, + 'cpu': 0} + if not self.does_vm_exist(vm_name): + return hw_config + + proj_pods = self.list_pods(namespace=vm_name) + for pod in proj_pods: + for container in pod.raw.spec.containers: + cpu = container.resources.requests['cpu'] + hw_config['cpu'] += float(cpu[:-1]) / 1000 if cpu.endswith('m') else float(cpu) + + ram = container.resources.requests['memory'] + if ram.endswith('Mi'): + hw_config['ram'] += float(ram[:-2]) + elif ram.endswith('Gi'): + hw_config['ram'] += float(ram[:-2]) * 1024 + elif ram.endswith('Ki'): + hw_config['ram'] += float(ram[:-2]) / 1024 + else: + hw_config['ram'] += ram + return hw_config + + def usage_and_quota(self): + installed_ram = 0 + installed_cpu = 0 + used_ram = 0 + used_cpu = 0 + # todo: finish this method later + return { + # RAM + 'ram_used': used_ram, + 'ram_total': installed_ram, + 'ram_limit': None, + # CPU + 'cpu_used': used_cpu, + 
'cpu_total': installed_cpu, + 'cpu_limit': None, + } + + def get_required_pods(self, vm_name): + """Provides list of pods which should be present in appliance + + Args: + vm_name: openshift project name + Returns: list + """ + version = self.get_appliance_version(vm_name) + if version and version < '5.9': + return self.required_project_pods58 + else: + return self.required_project_pods + + def get_ip_address(self, vm_name, timeout=600): + """ Returns the IP address for the selected appliance. + + Args: + vm_name: The name of the vm to obtain the IP for. + timeout: The IP address wait timeout. + Returns: A string containing the first found IP that isn't the device. + """ + try: + ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), + fail_condition=None, + delay=5, + num_sec=timeout, + message="get_ip_address from openshift") + except TimedOutError: + ip_address = None + return ip_address + + def disconnect(self): + pass + + def get_appliance_tags(self, name): + """Returns appliance tags stored in appropriate config map if it exists. + + Args: + name: appliance project name + Returns: dict with tags and urls + """ + try: + read_data = self.get_config_maps(name='image-repo-data', namespace=name) + return json.loads(read_data.data['tags']) + except ApiException: + return {} + + def get_appliance_url(self, name): + """Returns appliance url assigned by Openshift + + Args: + name: appliance project name + Returns: url or None + """ + try: + route = self.list_routes(namespace=name) + return route.items[0].spec.host + except (ApiException, IndexError): + return None + + def get_appliance_uuid(self, name): + """Returns appliance uuid assigned by Openshift + + Args: + name: appliance project name + Returns: uuid + """ + return self.get_project(name=name).raw.metadata.uid + + def is_appliance(self, name): + """Checks whether passed vm/project is appliance + + Args: + name: appliance project name + Returns: True/False + """ + return bool(self.get_appliance_tags(name)) + + def find_job_pods(self, namespace, name): + """Finds and returns all remaining job pods + + Args: + namespace: project(namespace) name + name: job name + Returns: list of pods + """ + pods = [] + for pod in self.list_pods(namespace=namespace): + if pod.raw.metadata.labels.get('job-name', '') == name: + pods.append(pod) + return pods + + def read_pod_log(self, namespace, name): + """Reads and returns pod log + + Args: + namespace: project(namespace) name + name: pod name + Returns: list of pods + """ + return self.v1_pod.log.get(name=name, namespace=namespace) + + def wait_template_exist(self, namespace, name, wait=60): + """Checks whether Template exists within some time. + + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_template.get, + 'name': name, + 'namespace': namespace})[0] + + def deploy_template(self, template, tags=None, password='smartvm', **kwargs): + """Deploy a VM from a template + + Args: + template: (str) The name of the template to deploy + tags: (dict) dict with tags if some tag isn't passed it is set to 'latest' + vm_name: (str) is used as project name if passed. 
otherwise, name is generated (sprout) + progress_callback: (func) function to return current progress (sprout) + template_params: (dict) parameters to override during template deployment + running_pods: (list) checks that passed pods are running instead of default set + since input tags are image stream tags whereas template expects its own tags. + So, input tags should match stream2template_tags_mapping. + password: this password will be set as default everywhere + Returns: dict with parameters necessary for appliance setup or None if deployment failed + """ + self.logger.info("starting template %s deployment", template) + self.wait_template_exist(namespace=self.default_namespace, name=template) + + if not self.base_url: + raise ValueError("base url isn't provided") + + version = Version(TemplateName.parse_template(template).version) + + if version >= '5.9': + tags_mapping = self.stream2template_tags_mapping59 + else: + tags_mapping = self.stream2template_tags_mapping58 + + prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} + if tags: + not_found_tags = [t for t in tags.keys() if t not in tags_mapping.keys()] + if not_found_tags: + raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) + for tag, value in tags.items(): + prepared_tags[tags_mapping[tag]['url']] = value['url'] + prepared_tags[tags_mapping[tag]['tag']] = value['tag'] + + # create project + # assuming this is cfme installation and generating project name + proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) + + # for sprout + if 'vm_name' in kwargs: + proj_name = kwargs['vm_name'] + else: + proj_name = "{t}-project-{proj_id}".format(t=template, proj_id=proj_id) + + template_params = kwargs.pop('template_params', {}) + running_pods = kwargs.pop('running_pods', ()) + proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) + self.logger.info("unique id %s, project name %s", proj_id, proj_name) + + default_progress_callback = partial(self._progress_log_callback, self.logger, template, + proj_name) + progress_callback = kwargs.get('progress_callback', default_progress_callback) + + self.create_project(name=proj_name, description=template) + progress_callback("Created Project `{}`".format(proj_name)) + + # grant rights according to scc + self.logger.info("granting rights to project %s sa", proj_name) + scc_user_mapping = self.scc_user_mapping59 if version >= '5.9' else self.scc_user_mapping58 + + self.logger.info("granting required rights to project's service accounts") + for mapping in scc_user_mapping: + self.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, sa=mapping['user']) + progress_callback("Added service accounts to appropriate scc") + + # appliances prior 5.9 don't need such rights + # and those rights are embedded into templates since 5.9.2.2 + if version >= '5.9' and version < '5.9.2.2': + # grant roles to orchestrator + self.logger.info("assigning additional roles to cfme-orchestrator") + orchestrator_sa = self.kubeclient.V1ObjectReference(name='cfme-orchestrator', + kind='ServiceAccount', + namespace=proj_name) + + view_role = self.kubeclient.V1ObjectReference(name='view') + view_role_binding_name = self.kubeclient.V1ObjectMeta(name='view') + view_role_binding = self.ociclient.V1RoleBinding(role_ref=view_role, + subjects=[orchestrator_sa], + metadata=view_role_binding_name) + self.logger.debug("creating 'view' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + 
self.v1_role_binding.create(namespace=proj_name, body=view_role_binding) + + edit_role = self.kubeclient.V1ObjectReference(name='edit') + edit_role_binding_name = self.kubeclient.V1ObjectMeta(name='edit') + edit_role_binding = self.ociclient.V1RoleBinding(role_ref=edit_role, + subjects=[orchestrator_sa], + metadata=edit_role_binding_name) + self.logger.debug("creating 'edit' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + self.v1_role_binding.create(namespace=proj_name, body=edit_role_binding) + + self.logger.info("project sa created via api have no some mandatory roles. adding them") + self._restore_missing_project_role_bindings(namespace=proj_name) + progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) + + # creating common service with external ip + ext_ip = self.expose_db_ip(proj_name) + progress_callback("Common Service has been added") + + # adding config map with image stream urls and tags + image_repo_cm = image_repo_cm_template.format(tags=json.dumps(tags)) + self.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) + + # creating pods and etc + processing_params = {'DATABASE_PASSWORD': password, + 'APPLICATION_DOMAIN': proj_url} + processing_params.update(prepared_tags) + + # updating template parameters + processing_params.update(template_params) + self.logger.info(("processing template and passed params in order to " + "prepare list of required project entities")) + template_entities = self.process_template(name=template, namespace=self.default_namespace, + parameters=processing_params) + self.logger.debug("template entities:\n %r", template_entities) + progress_callback("Template has been processed") + self.create_template_entities(namespace=proj_name, entities=template_entities) + progress_callback("All template entities have been created") + + self.logger.info("verifying that all created entities are up and running") + progress_callback("Waiting for all pods to be ready and running") + try: + wait_for(self.is_vm_running, num_sec=600, + func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) + self.logger.info("all pods look up and running") + progress_callback("Everything has been deployed w/o errors") + return {'url': proj_url, + 'external_ip': ext_ip, + 'project': proj_name, + } + except TimedOutError: + self.logger.error("deployment failed. Please check failed pods details") + # todo: return and print all failed pod details + raise + + def create_template_entities(self, namespace, entities): + """Creates entities from openshift template. + + Since there is no methods in openshift/kubernetes rest api for app deployment from template, + it is necessary to create template entities one by one using respective entity api. + + Args: + namespace: (str) openshift namespace + entities: (list) openshift entities + + Returns: None + """ + self.logger.debug("passed template entities:\n %r", entities) + kinds = set([e['kind'] for e in entities]) + entity_names = {e: inflection.underscore(e) for e in kinds} + proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} + + for entity in entities: + if entity['kind'] in kinds: + procedure = getattr(self, proc_names[entity['kind']], None) + obtained_entity = procedure(namespace=namespace, **entity) + self.logger.debug(obtained_entity) + else: + self.logger.error("some entity %s isn't present in entity creation list", entity) + + def start_vm(self, vm_name): + """Starts a vm. 
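Example: a sketch of the start-and-wait flow, assuming a connected `system`; the project name is hypothetical.

            system.start_vm('cfme-project-abc123')       # scales each required pod back up to 1 replica
            system.wait_vm_running('cfme-project-abc123', num_sec=900)
            assert system.vm_status('cfme-project-abc123') == 'up'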
+ + Args: + vm_name: name of the vm to be started + Returns: whether vm action has been initiated properly + """ + self.logger.info("starting vm/project %s", vm_name) + if self.does_project_exist(vm_name): + for pod in self.get_required_pods(vm_name): + self.scale_entity(name=pod, namespace=vm_name, replicas=1) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + + def stop_vm(self, vm_name): + """Stops a vm. + + Args: + vm_name: name of the vm to be stopped + Returns: whether vm action has been initiated properly + """ + self.logger.info("stopping vm/project %s", vm_name) + if self.does_project_exist(vm_name): + for pod in self.get_required_pods(vm_name): + self.scale_entity(name=pod, namespace=vm_name, replicas=0) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) + + def delete_vm(self, vm_name): + """Deletes a vm. + + Args: + vm_name: name of the vm to be deleted + Returns: whether vm action has been initiated properly + """ + self.logger.info("removing vm/project %s", vm_name) + self.delete_project(name=vm_name) + return True + + def does_vm_exist(self, vm_name): + """Does VM exist? + + Args: + vm_name: The name of the VM + Returns: whether vm exists + """ + return self.does_project_exist(vm_name) + + @staticmethod + def _update_template_parameters(template, **params): + """Updates openshift template parameters. + Since Openshift REST API doesn't provide any api to change default parameter values as + it is implemented in `oc process`. This method implements such a parameter replacement. + + Args: + template: Openshift's template object + params: bunch of key=value parameters + Returns: updated template + """ + template = copy.deepcopy(template) + if template.parameters: + new_parameters = template.parameters + for new_param, new_value in params.items(): + for index, old_param in enumerate(new_parameters): + if old_param['name'] == new_param: + old_param = new_parameters.pop(index) + if 'generate' in old_param: + old_param['generate'] = None + old_param['_from'] = None + + old_param['value'] = new_value + new_parameters.append(old_param) + template.parameters = new_parameters + return template + + def process_template(self, name, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + + Args: + name: (str) template name + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 + raw_response = self.v1_template.get(name=name, namespace=namespace, _preload_content=False) + raw_data = json.loads(raw_response.data) + + return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) + + def process_raw_template(self, body, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + It does two functions + 1. parametrized templates have to be processed in order to replace parameters with values. + 2. templates consist of list of objects. Those objects have to be extracted + before creation accordingly. 
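Example: a sketch of processing a stored template with overridden parameters and then creating the resulting objects, assuming a connected `system`; the template and project names are hypothetical, and DATABASE_PASSWORD is one of the parameters used during appliance deployment.

            objects = system.process_template(name='cfme-template', namespace='openshift',
                                              parameters={'DATABASE_PASSWORD': 'smartvm'})
            system.create_template_entities(namespace='cfme-project-abc123', entities=objects)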
+ + Args: + body: (dict) template body + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + updated_data = self.rename_structure(body) + read_template = self.ociclient.V1Template(**updated_data) + if parameters: + updated_template = self._update_template_parameters(template=read_template, + **parameters) + else: + updated_template = read_template + raw_response = self.v1_template.create(namespace=namespace, body=updated_template, + _preload_content=False) + raw_data = json.loads(raw_response.data) + updated_data = self.rename_structure(raw_data) + processed_template = self.ociclient.V1Template(**updated_data) + return processed_template.objects + + def rename_structure(self, struct): + """Fixes inconsistency in input/output data of openshift python client methods + + Args: + struct: data to process and rename + Return: updated data + """ + if not isinstance(struct, six.string_types) and isinstance(struct, Iterable): + if isinstance(struct, dict): + for key in struct.keys(): + # we shouldn't rename something under data or spec + if key == 'stringData': + # this key has to be renamed but its contents should be left intact + struct[inflection.underscore(key)] = struct.pop(key) + elif key in ('spec', 'data', 'string_data', 'annotations'): + # these keys and data should be left intact + pass + else: + # all this data should be processed and updated + val = self.rename_structure(struct.pop(key)) + struct[inflection.underscore(key)] = val + return struct + else: + for index, item in enumerate(struct): + struct[index] = self.rename_structure(item) + return struct + else: + return struct + + def delete_template(self, template_name, namespace='openshift'): + """Deletes template + + Args: + template_name: stored openshift template name + namespace: project name + Returns: result of delete operation + """ + options = self.kclient.V1DeleteOptions() + return self.v1_template.delete(name=template_name, namespace=namespace, body=options) + + def run_command(self, namespace, name, cmd, **kwargs): + """Connects to pod and tries to run + + Args: + namespace: (str) project name + name: (str) pod name + cmd: (list) command to run + Return: command output + """ + # there are some limitations and this code isn't robust enough due to + # https://github.com/kubernetes-client/python/issues/58 + return self.v1_pod.exec.post(namespace=namespace, name=name, + command=cmd, + stdout=True, + stderr=True, + **kwargs) \ No newline at end of file From a77e024b11dac327ad0619f211469be6c9468afb Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Fri, 12 Jul 2019 09:27:44 -0400 Subject: [PATCH 6/9] more changes --- wrapanapi/systems/openshift.py | 812 ++++++++++++++++++--------------- 1 file changed, 440 insertions(+), 372 deletions(-) diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index 68db0336..b8698dc4 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -92,6 +92,11 @@ def wrap(*args, **kwargs): class Project(Project): + """ + We are assuming that a Project is a VM for purposes of simplicity for CFME-QE + + """ + def __init__(self, system, raw=None, **kwargs): """ Construct a VMWareVirtualMachine instance @@ -106,6 +111,24 @@ def __init__(self, system, raw=None, **kwargs): if not self._name: raise ValueError("missing required kwarg 'name'") + def _get_state(self): + pods = self.system.list_pods(namespace=self.name) + states = [] + for pod in pods: + 
states.append(pod.state) + + if len(set(states)) == 1: + return states[0] + else: + # TODO juwatts: what should be returned here + return VmState.FAILED + + def _does_project_exist(self): + if self.raw.status.phase == 'Active': + return True + else: + return False + @property def get_quota(self): return self.system.ocp_client.resources.get(api_version='v1', kind='ResourceQuota').get( @@ -131,10 +154,26 @@ def ip(self): raise NotImplementedError def start(self): - raise NotImplementedError + self.logger.info("starting vm/project %s", self.name) + if self._does_project_exist: + for pod in self.system.get_required_pods(self.name): + self.system.scale_entity(name=pod, namespace=self.name, replicas=1) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) def stop(self): - raise NotImplementedError + """Stops a vm. + + Args: + vm_name: name of the vm to be stopped + Returns: whether vm action has been initiated properly + """ + self.logger.info("stopping vm/project %s", self.name) + if self._does_project_exist: + for pod in self.system.get_required_pods(self.name): + self.system.scale_entity(name=pod, namespace=self.name, replicas=0) + else: + raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) def restart(self): raise NotImplementedError @@ -187,7 +226,7 @@ def name(self): def uuid(self): try: return str(self.raw.metadata.uid) - except AttributeError: + except (AttributeError, ApiException): return self.name @property @@ -197,7 +236,7 @@ def namespace(self): @property def ip(self): ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' - #self.refresh() + self.refresh() try: return self.raw.status.podIP except (AttributeError): @@ -208,114 +247,344 @@ def _get_state(self): self.refresh() return self._api_state_to_vmstate(str(self.raw.status.phase)) - def is_stateful_set(self, namespace, name): - """Checks whether passed name belongs to Stateful Sets in appropriate namespace + def start(self): + raise NotImplementedError - Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.system.list_stateful_set_names(namespace=namespace) + def stop(self): + raise NotImplementedError - def is_deployment_config(self, namespace, name): - """Checks whether passed name belongs to deployment configs in appropriate namespace + def restart(self): + raise NotImplementedError - Args: - namespace: project(namespace) name - name: entity name - Return: True/False + def delete(self): + self.v1_pod.delete(name=self.name, namespace=self.namespace) + + def refresh(self): + self.raw = self.system.get_pod(name=self.name, namespace=self.namespace).raw + return self.raw + + def cleanup(self): + return self.delete() + + @property + def creation_time(self): + """Detect the vm_creation_time either via uptime if non-zero, or by last boot time + + The API provides no sensible way to actually get this value. The only way in which + vcenter API MAY have this is by filtering through events + + Return tz-naive datetime object """ - return name in self.system.list_deployment_config_names(namespace=namespace) + raise NotImplementedError - # JUWATTS TODO - def scale_entity(self, namespace, name, replicas, wait=60): - """Allows to scale up/down entities. - One of cases when this is necessary is emulation of stopping/starting appliance +class OpenShiftTemplate(Template): + + def __init__(self, system, raw=None, **kwargs): + """ + Construct a VMWareVirtualMachine instance Args: - namespace: openshift namespace - name: entity name. 
it can be either stateless Pod from DeploymentConfig or StatefulSet - replicas: number of replicas 0..N - wait: time to wait for scale up/down - Return: None + system: instance of VMWareSystem + raw: pyVmomi.vim.VirtualMachine object + name: name of VM """ - # only dc and statefulsets can be scaled - #st_api = self.system.kubeclient.AppsV1beta1Api(api_client=self.kapi_client) + super(OpenShiftTemplate, self).__init__(system, raw, **kwargs) + self._name = raw.metadata.name if raw else kwargs.get('name') + self._namespace = raw.metadata.namespace if raw else kwargs.get('namespace') + if not self._name: + raise ValueError("missing required kwarg 'name'") - scale_val = self.system.kubeclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas)) - if self.is_deployment_config(name=name, namespace=namespace): - self.system.v1_deployment_config.scale.patch(name=name, namespace=namespace, - body=scale_val) + @property + def _identifying_attrs(self): + return {'name': self._name} - def check_scale_value(): - got_scale = \ - self.system.v1_deployment_config.scale.get(name=name, namespace=namespace) - return int(got_scale.spec.replicas or 0) + @property + def name(self): + return self._name - elif self.is_stateful_set(name=name, namespace=namespace): - # replace this code with stateful_set_scale when kubernetes shipped with openshift - # client gets upgraded - st_spec = self.systmem.kubeclient.V1beta1StatefulSetSpec - st = self.system.kubeclient.V1beta1StatefulSet(spec=st_spec(replicas=replicas)) - self.system.v1_stateful_sets.patch_(name=name, namespace=namespace,body=st) + @property + def namespace(self): + return self._namespace - def check_scale_value(): - got_scale = self.system.v1_stateful_sets.get(name=name, namespace=namespace) - return int(got_scale.spec.replicas or 0) - else: - raise ValueError("This name %s is not found among " - "deployment configs or stateful sets", name) - self.logger.info("scaling entity %s to %s replicas", name, replicas) - wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) + @property + def uuid(self): + try: + return str(self.raw.metadata.uid) + except AttributeError: + return self.name def start(self): - self.logger.info("starting vm/project %s", self.name) - if self.does_project_exist(self.name): - for pod in self.get_required_pods(self.name): - self.scale_entity(name=pod, namespace=self.name, replicas=1) - else: - raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) + raise NotImplementedError def stop(self): - """Stops a vm. 
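Example: with a Project treated as the VM entity, the same flow can be driven through the entity object. A sketch assuming a connected `system` and a hypothetical project name.

            proj = system.get_project('cfme-project-abc123')   # get_vm is an alias for get_project
            proj.stop()     # scales the required pods down to 0 replicas via scale_entity
            proj.start()    # scales them back up to 1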
- - Args: - vm_name: name of the vm to be stopped - Returns: whether vm action has been initiated properly - """ - self.logger.info("stopping vm/project %s", self.name) - if self.does_project_exist(self.name): - for pod in self.get_required_pods(self.name): - self.scale_entity(name=pod, namespace=self.name, replicas=0) - else: - raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) + raise NotImplementedError def restart(self): raise NotImplementedError def delete(self): - self.v1_pod.delete(name=self.name, namespace=self.namespace) + self.system.v1_template.delete(name=self.name, namespace=self.namespace) def refresh(self): - self.raw = self.system.get_pod(name=self.name, namespace=self.namespace).raw + self.raw = self.system.get_template(name=self.name, namespace=self.namespace).raw return self.raw def cleanup(self): return self.delete() - @property - def creation_time(self): - """Detect the vm_creation_time either via uptime if non-zero, or by last boot time + def wait_template_exist(self, namespace, name, wait=60): + """Checks whether Template exists within some time. - The API provides no sensible way to actually get this value. The only way in which - vcenter API MAY have this is by filtering through events + Args: + name: entity name + namespace: openshift namespace where entity should exist + wait: entity should appear for this time then - True, otherwise False + Return: True/False + """ + return wait_for(self._does_exist, num_sec=wait, + func_kwargs={'func': self.v1_template.get, + 'name': name, + 'namespace': namespace})[0] - Return tz-naive datetime object + @staticmethod + def _update_template_parameters(template, **params): + """Updates openshift template parameters. + Since Openshift REST API doesn't provide any api to change default parameter values as + it is implemented in `oc process`. This method implements such a parameter replacement. + + Args: + template: Openshift's template object + params: bunch of key=value parameters + Returns: updated template """ - raise NotImplementedError + template = copy.deepcopy(template) + if template.parameters: + new_parameters = template.parameters + for new_param, new_value in params.items(): + for index, old_param in enumerate(new_parameters): + if old_param['name'] == new_param: + old_param = new_parameters.pop(index) + if 'generate' in old_param: + old_param['generate'] = None + old_param['_from'] = None + old_param['value'] = new_value + new_parameters.append(old_param) + template.parameters = new_parameters + return template + + def process_template(self, name, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + + Args: + name: (str) template name + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 + raw_response = self.system.v1_template.get(name=name, namespace=namespace, + _preload_content=False) + raw_data = json.loads(raw_response.data) + + return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) + + def process_raw_template(self, body, namespace, parameters=None): + """Implements template processing mechanism similar to `oc process`. + It does two functions + 1. parametrized templates have to be processed in order to replace parameters with values. + 2. templates consist of list of objects. 
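# Illustrative sketch, not part of the patch: _update_template_parameters
# above emulates `oc process -p KEY=VALUE` by rewriting the template's
# parameter list. The same idea on a plain list of parameter dicts:
import copy

def override_parameters(parameters, **overrides):
    params = copy.deepcopy(parameters)
    for param in params:
        if param['name'] in overrides:
            # a generated parameter must stop being generated once a fixed
            # value is supplied
            param.pop('generate', None)
            param['value'] = overrides[param['name']]
    return params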
Those objects have to be extracted + before creation accordingly. + + Args: + body: (dict) template body + namespace: (str) openshift namespace + parameters: parameters and values to replace default ones + Return: list of objects stored in template + """ + updated_data = self.system.rename_structure(body) + read_template = self.system.ociclient.V1Template(**updated_data) + if parameters: + updated_template = self._update_template_parameters(template=read_template, + **parameters) + else: + updated_template = read_template + raw_response = self.system.v1_template.create(namespace=namespace, body=updated_template, + preload_content=False) + raw_data = json.loads(raw_response.data) + updated_data = self.system.rename_structure(raw_data) + processed_template = self.system.ociclient.V1Template(**updated_data) + return processed_template.objects + + def create_template_entities(self, namespace, entities): + """Creates entities from openshift template. + + Since there is no methods in openshift/kubernetes rest api for app deployment from template, + it is necessary to create template entities one by one using respective entity api. + + Args: + namespace: (str) openshift namespace + entities: (list) openshift entities + + Returns: None + """ + self.logger.debug("passed template entities:\n %r", entities) + kinds = set([e['kind'] for e in entities]) + entity_names = {e: inflection.underscore(e) for e in kinds} + proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} + + for entity in entities: + if entity['kind'] in kinds: + procedure = getattr(self, proc_names[entity['kind']], None) + obtained_entity = procedure(namespace=namespace, **entity) + self.logger.debug(obtained_entity) + else: + self.logger.error("some entity %s isn't present in entity creation list", entity) + + def deploy(self, tags=None, password='smartvm', **kwargs): + """Deploy a VM from a template + + Args: + tags: (dict) dict with tags if some tag isn't passed it is set to 'latest' + vm_name: (str) is used as project name if passed. otherwise, name is generated (sprout) + progress_callback: (func) function to return current progress (sprout) + template_params: (dict) parameters to override during template deployment + running_pods: (list) checks that passed pods are running instead of default set + since input tags are image stream tags whereas template expects its own tags. + So, input tags should match stream2template_tags_mapping. 
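# Illustrative sketch, not part of the patch: deploy() below converts the
# image-stream style `tags` argument into template parameters through the
# stream2template mapping, defaulting every tag to 'latest'. Standalone:
def prepare_template_tags(tags_mapping, tags=None):
    prepared = {entry['tag']: 'latest' for entry in tags_mapping.values()}
    for stream, value in (tags or {}).items():
        if stream not in tags_mapping:
            raise ValueError("Some passed tags {t} don't exist".format(t=stream))
        prepared[tags_mapping[stream]['url']] = value['url']
        prepared[tags_mapping[stream]['tag']] = value['tag']
    return prepared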
+ password: this password will be set as default everywhere + Returns: dict with parameters necessary for appliance setup or None if deployment failed + """ + self.logger.info("starting template %s deployment", self.name) + self.wait_template_exist(namespace=self.system.default_namespace, name=self.name) + + if not self.base_url: + raise ValueError("base url isn't provided") + + version = Version(TemplateName.parse_template(self.name).version) + + if version >= '5.9': + tags_mapping = self.system.stream2template_tags_mapping59 + else: + tags_mapping = self.system.stream2template_tags_mapping58 + + prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} + if tags: + not_found_tags = [t for t in tags.keys() if t not in tags_mapping.keys()] + if not_found_tags: + raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) + for tag, value in tags.items(): + prepared_tags[tags_mapping[tag]['url']] = value['url'] + prepared_tags[tags_mapping[tag]['tag']] = value['tag'] + + # create project + # assuming this is cfme installation and generating project name + proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) + + # for sprout + if 'vm_name' in kwargs: + proj_name = kwargs['vm_name'] + else: + proj_name = "{t}-project-{proj_id}".format(t=self.name, proj_id=proj_id) + + template_params = kwargs.pop('template_params', {}) + running_pods = kwargs.pop('running_pods', ()) + proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) + self.logger.info("unique id %s, project name %s", proj_id, proj_name) + + default_progress_callback = partial(self._progress_log_callback, self.logger, self.name, + proj_name) + progress_callback = kwargs.get('progress_callback', default_progress_callback) + + project = self.system.create_project(name=proj_name, description=self.name) + progress_callback("Created Project `{}`".format(proj_name)) + + # grant rights according to scc + self.logger.info("granting rights to project %s sa", proj_name) + if version >= '5.9': + scc_user_mapping = self.system.scc_user_mapping59 + else: + scc_user_mapping = self.system.scc_user_mapping58 + + self.logger.info("granting required rights to project's service accounts") + for mapping in scc_user_mapping: + self.system.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, + sa=mapping['user']) + progress_callback("Added service accounts to appropriate scc") + + # appliances prior 5.9 don't need such rights + # and those rights are embedded into templates since 5.9.2.2 + if version >= '5.9' and version < '5.9.2.2': + # grant roles to orchestrator + self.logger.info("assigning additional roles to cfme-orchestrator") + orchestrator_sa = self.system.kubeclient.V1ObjectReference(name='cfme-orchestrator', + kind='ServiceAccount', + namespace=proj_name) + + view_role = self.system.kubeclient.V1ObjectReference(name='view') + view_role_binding_name = self.system.kubeclient.V1ObjectMeta(name='view') + view_role_binding = self.system.ociclient.V1RoleBinding(role_ref=view_role, + subjects=[orchestrator_sa], + metadata=view_role_binding_name) + self.logger.debug("creating 'view' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + self.system.v1_role_binding.create(namespace=proj_name, body=view_role_binding) + + edit_role = self.system.kubeclient.V1ObjectReference(name='edit') + edit_role_binding_name = self.system.kubeclient.V1ObjectMeta(name='edit') + edit_role_binding = self.system.ociclient.V1RoleBinding(role_ref=edit_role, + 
subjects=[orchestrator_sa], + metadata=edit_role_binding_name) + self.logger.debug("creating 'edit' role binding " + "for cfme-orchestrator sa in project %s", proj_name) + self.system.v1_role_binding.create(namespace=proj_name, body=edit_role_binding) + + self.logger.info("project sa created via api have no some mandatory roles. adding them") + self.system._restore_missing_project_role_bindings(namespace=proj_name) + progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) + + # creating common service with external ip + ext_ip = self.system.expose_db_ip(proj_name) + progress_callback("Common Service has been added") + + # adding config map with image stream urls and tags + image_repo_cm = image_repo_cm_template.format(tags=json.dumps(tags)) + self.system.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) + + # creating pods and etc + processing_params = {'DATABASE_PASSWORD': password, + 'APPLICATION_DOMAIN': proj_url} + processing_params.update(prepared_tags) + + # updating template parameters + processing_params.update(template_params) + self.logger.info(("processing template and passed params in order to " + "prepare list of required project entities")) + template_entities = self.system.process_template(name=self.name, + namespace=self.default_namespace, + parameters=processing_params) + self.logger.debug("template entities:\n %r", template_entities) + progress_callback("Template has been processed") + self.create_template_entities(namespace=proj_name, entities=template_entities) + progress_callback("All template entities have been created") + + self.logger.info("verifying that all created entities are up and running") + progress_callback("Waiting for all pods to be ready and running") + # TODO Get PROJECT + try: + wait_for(project.is_running, num_sec=600, + func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) + self.logger.info("all pods look up and running") + progress_callback("Everything has been deployed w/o errors") + return {'url': proj_url, + 'external_ip': ext_ip, + 'project': proj_name, + } + except TimedOutError: + self.logger.error("deployment failed. 
Please check failed pods details") + # todo: return and print all failed pod details + raise @reconnect(unauthenticated_error_handler) class Openshift(System, VmMixin, ProjectMixin): @@ -463,7 +732,7 @@ def v1_stateful_sets(self): @cached_property def v1_template(self): return self.ocp_client.resources.get(api_version='template.openshift.io/v1', - kind='Template') + kind='Template', name='templates') @cached_property def v1_image_stream(self): @@ -538,7 +807,7 @@ def _does_exist(self, func, **kwargs): self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) return - def _restore_missing_project_role_bindings(self, namespace): + def restore_missing_project_role_bindings(self, namespace): """Fixes one of issues in Openshift REST API create project doesn't add necessary roles to default sa, probably bug, this is workaround @@ -737,12 +1006,33 @@ def list_image_stream_images(self, namespace=None): return self.v1_image_stream_image.get(namespace=namespace).items + def get_template(self, name, namespace): + template = self.v1_template.get(name=name, namespace=namespace) + + return OpenShiftTemplate(system=self, name=template.metadata.name, raw=template) + def list_templates(self, namespace=None): - return self.v1_template.get(namespace=namespace) + return self.v1_template.get(namespace=namespace).items def list_deployment_configs(self, namespace=None): return self.v1_deployment_config.get(namespace=namespace).items + def list_deployment_config_names(self , namespace=None): + + deployment_configs = self.v1_deployment_config.get(namespace=namespace) + + return [dc.metadata.name for dc in deployment_configs.items] + + def is_deployment_config(self, namespace, name): + """Checks whether passed name belongs to deployment configs in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_deployment_config_names(namespace=namespace) + def wait_service_exist(self, namespace, name, wait=60): """Checks whether Service exists within some time. 
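# Illustrative sketch, not part of the patch: the project-id generation in
# deploy() uses string.lowercase, which only exists on Python 2. A portable
# equivalent of the same six-character random suffix would be:
import string
from random import choice

def generate_project_suffix(length=6):
    alphabet = string.digits + string.ascii_lowercase
    return "".join(choice(alphabet) for _ in range(length))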
@@ -813,18 +1103,22 @@ def list_image_registries(self, namespace=None): def find_vms(self, *args, **kwargs): raise NotImplementedError - def list_deployment_config_names(self , namespace=None): - - deployment_configs = self.v1_deployment_config.get(namespace=namespace) - - return [dc.metadata.name for dc in deployment_configs.items] - def list_stateful_set_names(self, namespace=None): stateful_sets = self.v1_stateful_sets.get(namespace=namespace) return [ss.metadata.name for ss in stateful_sets.items] + def is_stateful_set(self, namespace, name): + """Checks whether passed name belongs to Stateful Sets in appropriate namespace + + Args: + namespace: project(namespace) name + name: entity name + Return: True/False + """ + return name in self.list_stateful_set_names(namespace=namespace) + def cluster_info(self): """Returns information about the cluster - number of CPUs and memory in GB""" aggregate_cpu, aggregate_mem = 0, 0 @@ -897,7 +1191,7 @@ def replace_config_map(self, namespace, **kwargs): def list_config_maps(self, namespace=None): return self.v1_config_map.get(namespace=namespace).items - def get_config_maps(self, name, namespacee): + def get_config_maps(self, name, namespace): return self.v1_config_map.get(name=name, namespace=namespace) def wait_stateful_set_exist(self, namespace, name, wait=900): @@ -1333,20 +1627,11 @@ def is_stateful_set(self, namespace, name): """Checks whether passed name belongs to Stateful Sets in appropriate namespace Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.list_stateful_set_names(namespace=namespace) - - def does_project_exist(self, name): - """Checks whether Project exists. - - Args: - name: openshift namespace name + namespace: project(namespace) name + name: entity name Return: True/False """ - return self._does_exist(func=self.v1_project.get, name=name) + return name in self.list_stateful_set_names(namespace=namespace) def is_vm_stopped(self, vm_name): """Check whether vm isn't running. @@ -1394,17 +1679,6 @@ def is_vm_suspended(self, vm_name): """ return False - def in_steady_state(self, vm_name): - """Return whether the specified virtual machine is in steady state - - Args: - vm_name: VM name - Returns: True/False - """ - return (self.is_vm_running(vm_name) - or self.is_vm_stopped(vm_name) - or self.is_vm_suspended(vm_name)) - @property def can_rename(self): return hasattr(self, "rename_vm") @@ -1426,7 +1700,6 @@ def get_appliance_version(self, vm_name): except ValueError: return None - def get_meta_value(self, instance, key): raise NotImplementedError( 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) @@ -1611,183 +1884,6 @@ def read_pod_log(self, namespace, name): """ return self.v1_pod.log.get(name=name, namespace=namespace) - def wait_template_exist(self, namespace, name, wait=60): - """Checks whether Template exists within some time. 
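# Illustrative sketch, not part of the patch: the wait_*_exist helpers all
# funnel into an existence probe of this shape, where any ApiException from
# the read call is treated as "object not there yet":
from kubernetes.client.rest import ApiException

def does_exist(read_func, **kwargs):
    try:
        read_func(**kwargs)
        return True
    except ApiException:
        return False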
- - Args: - name: entity name - namespace: openshift namespace where entity should exist - wait: entity should appear for this time then - True, otherwise False - Return: True/False - """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.v1_template.get, - 'name': name, - 'namespace': namespace})[0] - - def deploy_template(self, template, tags=None, password='smartvm', **kwargs): - """Deploy a VM from a template - - Args: - template: (str) The name of the template to deploy - tags: (dict) dict with tags if some tag isn't passed it is set to 'latest' - vm_name: (str) is used as project name if passed. otherwise, name is generated (sprout) - progress_callback: (func) function to return current progress (sprout) - template_params: (dict) parameters to override during template deployment - running_pods: (list) checks that passed pods are running instead of default set - since input tags are image stream tags whereas template expects its own tags. - So, input tags should match stream2template_tags_mapping. - password: this password will be set as default everywhere - Returns: dict with parameters necessary for appliance setup or None if deployment failed - """ - self.logger.info("starting template %s deployment", template) - self.wait_template_exist(namespace=self.default_namespace, name=template) - - if not self.base_url: - raise ValueError("base url isn't provided") - - version = Version(TemplateName.parse_template(template).version) - - if version >= '5.9': - tags_mapping = self.stream2template_tags_mapping59 - else: - tags_mapping = self.stream2template_tags_mapping58 - - prepared_tags = {tag['tag']: 'latest' for tag in tags_mapping.values()} - if tags: - not_found_tags = [t for t in tags.keys() if t not in tags_mapping.keys()] - if not_found_tags: - raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) - for tag, value in tags.items(): - prepared_tags[tags_mapping[tag]['url']] = value['url'] - prepared_tags[tags_mapping[tag]['tag']] = value['tag'] - - # create project - # assuming this is cfme installation and generating project name - proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) - - # for sprout - if 'vm_name' in kwargs: - proj_name = kwargs['vm_name'] - else: - proj_name = "{t}-project-{proj_id}".format(t=template, proj_id=proj_id) - - template_params = kwargs.pop('template_params', {}) - running_pods = kwargs.pop('running_pods', ()) - proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) - self.logger.info("unique id %s, project name %s", proj_id, proj_name) - - default_progress_callback = partial(self._progress_log_callback, self.logger, template, - proj_name) - progress_callback = kwargs.get('progress_callback', default_progress_callback) - - self.create_project(name=proj_name, description=template) - progress_callback("Created Project `{}`".format(proj_name)) - - # grant rights according to scc - self.logger.info("granting rights to project %s sa", proj_name) - scc_user_mapping = self.scc_user_mapping59 if version >= '5.9' else self.scc_user_mapping58 - - self.logger.info("granting required rights to project's service accounts") - for mapping in scc_user_mapping: - self.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, sa=mapping['user']) - progress_callback("Added service accounts to appropriate scc") - - # appliances prior 5.9 don't need such rights - # and those rights are embedded into templates since 5.9.2.2 - if version >= '5.9' and version < '5.9.2.2': - # 
grant roles to orchestrator - self.logger.info("assigning additional roles to cfme-orchestrator") - orchestrator_sa = self.kubeclient.V1ObjectReference(name='cfme-orchestrator', - kind='ServiceAccount', - namespace=proj_name) - - view_role = self.kubeclient.V1ObjectReference(name='view') - view_role_binding_name = self.kubeclient.V1ObjectMeta(name='view') - view_role_binding = self.ociclient.V1RoleBinding(role_ref=view_role, - subjects=[orchestrator_sa], - metadata=view_role_binding_name) - self.logger.debug("creating 'view' role binding " - "for cfme-orchestrator sa in project %s", proj_name) - self.v1_role_binding.create(namespace=proj_name, body=view_role_binding) - - edit_role = self.kubeclient.V1ObjectReference(name='edit') - edit_role_binding_name = self.kubeclient.V1ObjectMeta(name='edit') - edit_role_binding = self.ociclient.V1RoleBinding(role_ref=edit_role, - subjects=[orchestrator_sa], - metadata=edit_role_binding_name) - self.logger.debug("creating 'edit' role binding " - "for cfme-orchestrator sa in project %s", proj_name) - self.v1_role_binding.create(namespace=proj_name, body=edit_role_binding) - - self.logger.info("project sa created via api have no some mandatory roles. adding them") - self._restore_missing_project_role_bindings(namespace=proj_name) - progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) - - # creating common service with external ip - ext_ip = self.expose_db_ip(proj_name) - progress_callback("Common Service has been added") - - # adding config map with image stream urls and tags - image_repo_cm = image_repo_cm_template.format(tags=json.dumps(tags)) - self.create_config_map(namespace=proj_name, **yaml.safe_load(image_repo_cm)) - - # creating pods and etc - processing_params = {'DATABASE_PASSWORD': password, - 'APPLICATION_DOMAIN': proj_url} - processing_params.update(prepared_tags) - - # updating template parameters - processing_params.update(template_params) - self.logger.info(("processing template and passed params in order to " - "prepare list of required project entities")) - template_entities = self.process_template(name=template, namespace=self.default_namespace, - parameters=processing_params) - self.logger.debug("template entities:\n %r", template_entities) - progress_callback("Template has been processed") - self.create_template_entities(namespace=proj_name, entities=template_entities) - progress_callback("All template entities have been created") - - self.logger.info("verifying that all created entities are up and running") - progress_callback("Waiting for all pods to be ready and running") - try: - wait_for(self.is_vm_running, num_sec=600, - func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) - self.logger.info("all pods look up and running") - progress_callback("Everything has been deployed w/o errors") - return {'url': proj_url, - 'external_ip': ext_ip, - 'project': proj_name, - } - except TimedOutError: - self.logger.error("deployment failed. Please check failed pods details") - # todo: return and print all failed pod details - raise - - def create_template_entities(self, namespace, entities): - """Creates entities from openshift template. - - Since there is no methods in openshift/kubernetes rest api for app deployment from template, - it is necessary to create template entities one by one using respective entity api. 
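# Illustrative sketch, not part of the patch: during deployment the image
# tags are preserved for later appliance setup by rendering the module-level
# image_repo_cm_template and feeding the parsed result to create_config_map,
# roughly:
import json
import yaml

def build_image_repo_config_map(cm_template, tags):
    # the tags are stored as a JSON string inside the ConfigMap's data section
    return yaml.safe_load(cm_template.format(tags=json.dumps(tags)))

# usage sketch:
#   self.create_config_map(namespace=proj_name,
#                          **build_image_repo_config_map(image_repo_cm_template, tags))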
- - Args: - namespace: (str) openshift namespace - entities: (list) openshift entities - - Returns: None - """ - self.logger.debug("passed template entities:\n %r", entities) - kinds = set([e['kind'] for e in entities]) - entity_names = {e: inflection.underscore(e) for e in kinds} - proc_names = {k: 'create_{e}'.format(e=p) for k, p in entity_names.items()} - - for entity in entities: - if entity['kind'] in kinds: - procedure = getattr(self, proc_names[entity['kind']], None) - obtained_entity = procedure(namespace=namespace, **entity) - self.logger.debug(obtained_entity) - else: - self.logger.error("some entity %s isn't present in entity creation list", entity) def start_vm(self, vm_name): """Starts a vm. @@ -1837,75 +1933,6 @@ def does_vm_exist(self, vm_name): """ return self.does_project_exist(vm_name) - @staticmethod - def _update_template_parameters(template, **params): - """Updates openshift template parameters. - Since Openshift REST API doesn't provide any api to change default parameter values as - it is implemented in `oc process`. This method implements such a parameter replacement. - - Args: - template: Openshift's template object - params: bunch of key=value parameters - Returns: updated template - """ - template = copy.deepcopy(template) - if template.parameters: - new_parameters = template.parameters - for new_param, new_value in params.items(): - for index, old_param in enumerate(new_parameters): - if old_param['name'] == new_param: - old_param = new_parameters.pop(index) - if 'generate' in old_param: - old_param['generate'] = None - old_param['_from'] = None - - old_param['value'] = new_value - new_parameters.append(old_param) - template.parameters = new_parameters - return template - - def process_template(self, name, namespace, parameters=None): - """Implements template processing mechanism similar to `oc process`. - - Args: - name: (str) template name - namespace: (str) openshift namespace - parameters: parameters and values to replace default ones - Return: list of objects stored in template - """ - # workaround for bug https://github.com/openshift/openshift-restclient-python/issues/60 - raw_response = self.v1_template.get(name=name, namespace=namespace, _preload_content=False) - raw_data = json.loads(raw_response.data) - - return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) - - def process_raw_template(self, body, namespace, parameters=None): - """Implements template processing mechanism similar to `oc process`. - It does two functions - 1. parametrized templates have to be processed in order to replace parameters with values. - 2. templates consist of list of objects. Those objects have to be extracted - before creation accordingly. 
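# Illustrative sketch, not part of the patch: create_template_entities above
# dispatches each template object to a creator method derived from its kind,
# e.g. 'DeploymentConfig' -> create_deployment_config. A hypothetical
# standalone version of that dispatch:
import inflection

def dispatch_entity(system, namespace, entity):
    method_name = 'create_{}'.format(inflection.underscore(entity['kind']))
    creator = getattr(system, method_name, None)
    if creator is None:
        raise ValueError("no creator method for kind {}".format(entity['kind']))
    return creator(namespace=namespace, **entity)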
- - Args: - body: (dict) template body - namespace: (str) openshift namespace - parameters: parameters and values to replace default ones - Return: list of objects stored in template - """ - updated_data = self.rename_structure(body) - read_template = self.ociclient.V1Template(**updated_data) - if parameters: - updated_template = self._update_template_parameters(template=read_template, - **parameters) - else: - updated_template = read_template - raw_response = self.v1_template.create(namespace=namespace, body=updated_template, - _preload_content=False) - raw_data = json.loads(raw_response.data) - updated_data = self.rename_structure(raw_data) - processed_template = self.ociclient.V1Template(**updated_data) - return processed_template.objects - def rename_structure(self, struct): """Fixes inconsistency in input/output data of openshift python client methods @@ -1946,19 +1973,60 @@ def delete_template(self, template_name, namespace='openshift'): options = self.kclient.V1DeleteOptions() return self.v1_template.delete(name=template_name, namespace=namespace, body=options) - def run_command(self, namespace, name, cmd, **kwargs): - """Connects to pod and tries to run + + def scale_entity(self, namespace, name, replicas, wait=60): + """Allows to scale up/down entities. + One of cases when this is necessary is emulation of stopping/starting appliance Args: - namespace: (str) project name - name: (str) pod name - cmd: (list) command to run - Return: command output + namespace: openshift namespace + name: entity name. it can be either stateless Pod from DeploymentConfig or StatefulSet + replicas: number of replicas 0..N + wait: time to wait for scale up/down + Return: None """ - # there are some limitations and this code isn't robust enough due to - # https://github.com/kubernetes-client/python/issues/58 - return self.v1_pod.exec.post(namespace=namespace, name=name, - command=cmd, - stdout=True, - stderr=True, - **kwargs) \ No newline at end of file + # only dc and statefulsets can be scaled + # st_api = self.system.kubeclient.AppsV1beta1Api(api_client=self.kapi_client) + + scale_val = self.system.kubeclient.V1Scale(spec=self.kclient.V1ScaleSpec(replicas=replicas)) + if self.is_deployment_config(name=name, namespace=namespace): + self.system.v1_deployment_config.scale.patch(name=name, namespace=namespace, + body=scale_val) + + def check_scale_value(): + got_scale = \ + self.system.v1_deployment_config.scale.get(name=name, namespace=namespace) + return int(got_scale.spec.replicas or 0) + + elif self.is_stateful_set(name=name, namespace=namespace): + # replace this code with stateful_set_scale when kubernetes shipped with openshift + # client gets upgraded + st_spec = self.systmem.kubeclient.V1beta1StatefulSetSpec + st = self.system.kubeclient.V1beta1StatefulSet(spec=st_spec(replicas=replicas)) + self.system.v1_stateful_sets.patch_(name=name, namespace=namespace, body=st) + + def check_scale_value(): + got_scale = self.system.v1_stateful_sets.get(name=name, namespace=namespace) + return int(got_scale.spec.replicas or 0) + else: + raise ValueError("This name %s is not found among " + "deployment configs or stateful sets", name) + self.logger.info("scaling entity %s to %s replicas", name, replicas) + wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) + + # def run_command(self, namespace, name, cmd, **kwargs): + # """Connects to pod and tries to run + # + # Args: + # namespace: (str) project name + # name: (str) pod name + # cmd: (list) command to run + # Return: 
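# Illustrative sketch, not part of the patch: the body of rename_structure
# earlier in this hunk is elided by the diff context; the conversion its
# docstring describes (camelCase keys from raw REST responses into the
# snake_case names the client models expect) can be pictured as a recursive
# key rewrite along these lines:
import inflection

def snake_case_keys(struct):
    if isinstance(struct, dict):
        return {inflection.underscore(key): snake_case_keys(value)
                for key, value in struct.items()}
    if isinstance(struct, (list, tuple)):
        return [snake_case_keys(item) for item in struct]
    return struct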
command output + # """ + # # there are some limitations and this code isn't robust enough due to + # # https://github.com/kubernetes-client/python/issues/58 + # return self.v1_pod.exec.post(namespace=namespace, name=name, + # command=cmd, + # stdout=True, + # stderr=True, + # **kwargs) \ No newline at end of file From 183c2e12867d96985b9c255ec10a0e0bfcc622cb Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Tue, 16 Jul 2019 13:26:31 -0400 Subject: [PATCH 7/9] Lint corrections --- wrapanapi/__init__.py | 4 +- wrapanapi/systems/__init__.py | 4 +- wrapanapi/systems/container/rhopenshift.py | 99 +---------------- wrapanapi/systems/openshift.py | 117 +++++++-------------- 4 files changed, 43 insertions(+), 181 deletions(-) diff --git a/wrapanapi/__init__.py b/wrapanapi/__init__.py index 993c24e3..2632e9c5 100644 --- a/wrapanapi/__init__.py +++ b/wrapanapi/__init__.py @@ -14,7 +14,7 @@ from .systems.scvmm import SCVMMSystem from .systems.vcloud import VmwareCloudSystem from .systems.virtualcenter import VMWareSystem -from .systems.container.rhopenshift import Openshift +from .systems.openshift import OpenshiftSystem from .entities.vm import VmState @@ -22,5 +22,5 @@ 'EC2System', 'GoogleCloudSystem', 'HawkularSystem', 'LenovoSystem', 'AzureSystem', 'NuageSystem', 'OpenstackSystem', 'OpenstackInfraSystem', 'RedfishSystem', 'RHEVMSystem', 'SCVMMSystem', - 'VmwareCloudSystem', 'VMWareSystem', 'Openshift', 'VmState' + 'VmwareCloudSystem', 'VMWareSystem', 'OpenshiftSystem', 'VmState' ] diff --git a/wrapanapi/systems/__init__.py b/wrapanapi/systems/__init__.py index 6ba6bed3..301cef46 100644 --- a/wrapanapi/systems/__init__.py +++ b/wrapanapi/systems/__init__.py @@ -13,10 +13,10 @@ from .scvmm import SCVMMSystem from .vcloud import VmwareCloudSystem from .virtualcenter import VMWareSystem -from .openshift import Openshift +from .openshift import OpenshiftSystem __all__ = [ 'EC2System', 'GoogleCloudSystem', 'HawkularSystem', 'LenovoSystem', - 'AzureSystem', 'NuageSystem', 'OpenShift', 'OpenstackSystem', 'OpenstackInfraSystem', + 'AzureSystem', 'NuageSystem', 'OpenshiftSystem', 'OpenstackSystem', 'OpenstackInfraSystem', 'RedfishSystem', 'RHEVMSystem', 'SCVMMSystem', 'VmwareCloudSystem', 'VMWareSystem' ] diff --git a/wrapanapi/systems/container/rhopenshift.py b/wrapanapi/systems/container/rhopenshift.py index 1ca3b47a..8400f134 100644 --- a/wrapanapi/systems/container/rhopenshift.py +++ b/wrapanapi/systems/container/rhopenshift.py @@ -179,13 +179,11 @@ def _connect(self): self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api - # TODO DONE def info(self): url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, port=self.port) return "rhopenshift {}".format(url) - # TODO DONE def list_route(self, namespace=None): """Returns list of routes""" if namespace: @@ -194,7 +192,6 @@ def list_route(self, namespace=None): routes = self.o_api.list_route_for_all_namespaces().items return routes - # TODO DONE def list_image_streams(self, namespace=None): """Returns list of image streams""" if namespace: @@ -203,12 +200,10 @@ def list_image_streams(self, namespace=None): image_streams = self.o_api.list_image_stream_for_all_namespaces().items return image_streams - # TODO DONE def list_project(self): """Returns list of projects""" return self.o_api.list_project().items - # TODO DONE def list_template(self, namespace=None): """Returns list of templates""" if namespace: @@ -219,13 
+214,11 @@ def list_template(self, namespace=None): # fixme: get rid of this mapping list_templates = list_template - # TODO DONE def list_image_stream_images(self): """Returns list of images (Docker registry only)""" return [item for item in self.o_api.list_image().items if item.docker_image_reference is not None] - # TODO DONE def list_deployment_config(self, namespace=None): """Returns list of deployment configs""" if namespace: @@ -234,7 +227,6 @@ def list_deployment_config(self, namespace=None): dc = self.o_api.list_deployment_config_for_all_namespaces().items return dc - # TODO DONE def list_service(self, namespace=None): """Returns list of services.""" if namespace: @@ -243,7 +235,6 @@ def list_service(self, namespace=None): svc = self.k_api.list_service_for_all_namespaces().items return svc - # TODO DONE def list_replication_controller(self, namespace=None): """Returns list of replication controllers""" if namespace: @@ -252,13 +243,11 @@ def list_replication_controller(self, namespace=None): rc = self.k_api.list_replication_controller_for_all_namespaces().items return rc - # TODO DONE def list_node(self): """Returns list of nodes""" nodes = self.k_api.list_node().items return nodes - # TODO DONE def cluster_info(self): """Returns information about the cluster - number of CPUs and memory in GB""" aggregate_cpu, aggregate_mem = 0, 0 @@ -269,13 +258,11 @@ def cluster_info(self): return {'cpu': aggregate_cpu, 'memory': aggregate_mem} - # TODO DONE def list_persistent_volume(self): """Returns list of persistent volumes""" pv = self.k_api.list_persistent_volume().items return pv - # TODO DONE def list_pods(self, namespace=None): """Returns list of container groups (pods). If project_name is passed, only the pods under the selected project will be returned""" @@ -285,7 +272,6 @@ def list_pods(self, namespace=None): pods = self.k_api.list_pod_for_all_namespaces().items return pods - # TODO DONE def list_container(self, namespace=None): """Returns list of containers (derived from pods) If project_name is passed, only the containers under the selected project will be returned @@ -293,7 +279,6 @@ def list_container(self, namespace=None): pods = self.list_pods(namespace=namespace) return [pod.spec.containers for pod in pods] - # TODO DONE def list_image_id(self, namespace=None): """Returns list of unique image ids (derived from pods)""" pods = self.list_pods(namespace=namespace) @@ -303,7 +288,6 @@ def list_image_id(self, namespace=None): statuses.append(status) return sorted(set([status.image_id for status in statuses])) - # TODO DONE def list_image_registry(self, namespace=None): """Returns list of image registries (derived from pods)""" pods = self.list_pods(namespace=namespace) @@ -314,7 +298,6 @@ def list_image_registry(self, namespace=None): # returns only the image registry name, without the port number in case of local registry return sorted(set([status.image.split('/')[0].split(':')[0] for status in statuses])) - # TODO DONE def expose_db_ip(self, namespace): """Creates special service in appliance project (namespace) which makes internal appliance db be available outside. 
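# Illustrative sketch, not part of the patch: cluster_info above sums node
# capacities; the unit handling is elided by the diff, but node memory is
# reported as strings such as '16266892Ki', so a plausible aggregation is:
def cluster_capacity(nodes):
    aggregate_cpu, aggregate_mem = 0, 0
    for node in nodes:
        aggregate_cpu += int(node.status.capacity['cpu'])
        # strip the 'Ki' suffix and convert kibibytes to whole gigabytes
        aggregate_mem += int(round(int(node.status.capacity['memory'][:-2]) / 1048576.0))
    return {'cpu': aggregate_cpu, 'memory': aggregate_mem}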
@@ -330,7 +313,6 @@ def expose_db_ip(self, namespace): return self.get_ip_address(namespace) - # TODO DONE def deploy_template(self, template, tags=None, password='smartvm', **kwargs): """Deploy a VM from a template @@ -471,7 +453,6 @@ def deploy_template(self, template, tags=None, password='smartvm', **kwargs): # todo: return and print all failed pod details raise - # TODO DONE def create_template_entities(self, namespace, entities): """Creates entities from openshift template. @@ -497,7 +478,6 @@ def create_template_entities(self, namespace, entities): else: self.logger.error("some entity %s isn't present in entity creation list", entity) - # TODO DONE def start_vm(self, vm_name): """Starts a vm. @@ -512,7 +492,6 @@ def start_vm(self, vm_name): else: raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) - # TODO DONE def stop_vm(self, vm_name): """Stops a vm. @@ -527,7 +506,6 @@ def stop_vm(self, vm_name): else: raise ValueError("Project with name {n} doesn't exist".format(n=vm_name)) - # TODO DONE def delete_vm(self, vm_name): """Deletes a vm. @@ -539,7 +517,6 @@ def delete_vm(self, vm_name): self.delete_project(name=vm_name) return True - # TODO DONE def does_vm_exist(self, vm_name): """Does VM exist? @@ -549,7 +526,6 @@ def does_vm_exist(self, vm_name): """ return self.does_project_exist(vm_name) - # TODO DONE @staticmethod def _update_template_parameters(template, **params): """Updates openshift template parameters. @@ -577,7 +553,6 @@ def _update_template_parameters(template, **params): template.parameters = new_parameters return template - # TODO DONE def process_template(self, name, namespace, parameters=None): """Implements template processing mechanism similar to `oc process`. @@ -594,7 +569,6 @@ def process_template(self, name, namespace, parameters=None): return self.process_raw_template(body=raw_data, namespace=namespace, parameters=parameters) - # TODO DONE def process_raw_template(self, body, namespace, parameters=None): """Implements template processing mechanism similar to `oc process`. It does two functions @@ -623,7 +597,6 @@ def process_raw_template(self, body, namespace, parameters=None): processed_template = self.ociclient.V1Template(**updated_data) return processed_template.objects - # TODO DONE def rename_structure(self, struct): """Fixes inconsistency in input/output data of openshift python client methods @@ -653,7 +626,6 @@ def rename_structure(self, struct): else: return struct - # TODO DONE def create_config_map(self, namespace, **kwargs): """Creates ConfigMap entity using REST API. @@ -669,7 +641,6 @@ def create_config_map(self, namespace, **kwargs): self.wait_config_map_exist(namespace=namespace, name=conf_map_name) return output - # TODO DONE def replace_config_map(self, namespace, **kwargs): """Replace ConfigMap entity using REST API. @@ -686,7 +657,6 @@ def replace_config_map(self, namespace, **kwargs): body=conf_map) return output - # TODO DONE def create_stateful_set(self, namespace, **kwargs): """Creates StatefulSet entity using REST API. @@ -703,8 +673,6 @@ def create_stateful_set(self, namespace, **kwargs): self.wait_stateful_set_exist(namespace=namespace, name=st_name) return output - - # TODO DONE def create_service(self, namespace, **kwargs): """Creates Service entity using REST API. 
@@ -720,7 +688,6 @@ def create_service(self, namespace, **kwargs): self.wait_service_exist(namespace=namespace, name=service_name) return output - # TODO DONE def create_endpoints(self, namespace, **kwargs): """Creates Endpoints entity using REST API. @@ -736,7 +703,6 @@ def create_endpoints(self, namespace, **kwargs): self.wait_endpoints_exist(namespace=namespace, name=endpoints_name) return output - # TODO DONE def create_route(self, namespace, **kwargs): """Creates Route entity using REST API. @@ -752,7 +718,6 @@ def create_route(self, namespace, **kwargs): self.wait_route_exist(namespace=namespace, name=route_name) return output - # TODO DONE def create_service_account(self, namespace, **kwargs): """Creates Service Account entity using REST API. @@ -768,7 +733,6 @@ def create_service_account(self, namespace, **kwargs): self.wait_service_account_exist(namespace=namespace, name=sa_name) return output - # TODO DONE def create_role_binding(self, namespace, **kwargs): """Creates RoleBinding entity using REST API. @@ -794,7 +758,6 @@ def create_role_binding(self, namespace, **kwargs): self.wait_role_binding_exist(namespace=namespace, name=role_binding_name) return output - # TODO DONE def create_image_stream(self, namespace, **kwargs): """Creates Image Stream entity using REST API. @@ -810,7 +773,6 @@ def create_image_stream(self, namespace, **kwargs): self.wait_image_stream_exist(namespace=namespace, name=is_name) return output - # TODO DONE def create_secret(self, namespace, **kwargs): """Creates Secret entity using REST API. @@ -826,7 +788,6 @@ def create_secret(self, namespace, **kwargs): self.wait_secret_exist(namespace=namespace, name=secret_name) return output - # TODO DONE def create_deployment_config(self, namespace, **kwargs): """Creates Deployment Config entity using REST API. @@ -843,7 +804,6 @@ def create_deployment_config(self, namespace, **kwargs): name=dc_name) return output - # TODO DONE def create_persistent_volume_claim(self, namespace, **kwargs): """Creates Persistent Volume Claim entity using REST API. @@ -861,7 +821,6 @@ def create_persistent_volume_claim(self, namespace, **kwargs): name=pv_claim_name) return output - # TODO DONE def create_project(self, name, description=None): """Creates Project(namespace) using REST API. @@ -879,7 +838,6 @@ def create_project(self, name, description=None): self.wait_project_exist(name=name) return output - # TODO DONE def run_job(self, namespace, body): """Creates job from passed template, runs it and waits for the job to be accomplished @@ -894,7 +852,6 @@ def run_job(self, namespace, body): return self.wait_job_finished(namespace, job_name) - # TODO DONE def wait_job_finished(self, namespace, name, wait='15m'): """Waits for job to accomplish @@ -914,7 +871,6 @@ def job_wait_accomplished(): return False return wait_for(job_wait_accomplished, num_sec=wait)[0] - # TODO DONE def wait_persistent_volume_claim_status(self, namespace, name, status, wait='1m'): """Waits until pvc gets some particular status. For example: Bound. @@ -936,7 +892,6 @@ def pvc_wait_status(): return wait_for(pvc_wait_status, num_sec=wait)[0] - # TODO DONE def wait_project_exist(self, name, wait=60): """Checks whether Project exists within some time. @@ -948,7 +903,6 @@ def wait_project_exist(self, name, wait=60): return wait_for(self._does_exist, num_sec=wait, func_kwargs={'func': self.o_api.read_project, 'name': name})[0] - # TODO DONE def wait_config_map_exist(self, namespace, name, wait=60): """Checks whether Config Map exists within some time. 
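# Illustrative sketch, not part of the patch: wait_job_finished above keeps
# polling the job status until it reports success and treats a reported
# failure as fatal. The core of that loop looks roughly like this, where
# `read_job` is the namespaced job read call of the batch API:
from wait_for import wait_for

def wait_job_done(read_job, namespace, name, num_sec=900):
    def job_accomplished():
        status = read_job(name=name, namespace=namespace).status
        if status.failed:
            raise ValueError("job {} failed".format(name))
        return bool(status.succeeded)
    return wait_for(job_accomplished, num_sec=num_sec)[0]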
@@ -962,7 +916,7 @@ def wait_config_map_exist(self, namespace, name, wait=60): func_kwargs={'func': self.k_api.read_namespaced_config_map, 'name': name, 'namespace': namespace})[0] - # TODO DONE + def wait_stateful_set_exist(self, namespace, name, wait=900): """Checks whether StatefulSet exists within some time. @@ -979,7 +933,6 @@ def wait_stateful_set_exist(self, namespace, name, wait=900): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_service_exist(self, namespace, name, wait=60): """Checks whether Service exists within some time. @@ -994,7 +947,6 @@ def wait_service_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_endpoints_exist(self, namespace, name, wait=60): """Checks whether Endpoints exists within some time. @@ -1009,7 +961,6 @@ def wait_endpoints_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_route_exist(self, namespace, name, wait=60): """Checks whether Route exists within some time. @@ -1024,7 +975,6 @@ def wait_route_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_service_account_exist(self, namespace, name, wait=60): """Checks whether Service Account exists within some time. @@ -1039,7 +989,6 @@ def wait_service_account_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_image_stream_exist(self, namespace, name, wait=60): """Checks whether Image Stream exists within some time. @@ -1054,7 +1003,6 @@ def wait_image_stream_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_role_binding_exist(self, namespace, name, wait=60): """Checks whether RoleBinding exists within some time. @@ -1070,7 +1018,6 @@ def wait_role_binding_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_secret_exist(self, namespace, name, wait=90): """Checks whether Secret exists within some time. @@ -1085,7 +1032,6 @@ def wait_secret_exist(self, namespace, name, wait=90): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): """Checks whether Persistent Volume Claim exists within some time. @@ -1100,7 +1046,6 @@ def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_deployment_config_exist(self, namespace, name, wait=600): """Checks whether Deployment Config exists within some time. @@ -1116,7 +1061,6 @@ def wait_deployment_config_exist(self, namespace, name, wait=600): 'name': name, 'namespace': namespace})[0] - # TODO DONE def wait_template_exist(self, namespace, name, wait=60): """Checks whether Template exists within some time. 
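# Illustrative sketch, not part of the patch: every wait_*_exist helper in
# this module reduces to the same shape, polling the existence probe with the
# entity-specific read function until it succeeds or the timeout expires:
from wait_for import wait_for

def wait_entity_exist(does_exist, read_func, namespace, name, wait=60):
    return wait_for(does_exist, num_sec=wait,
                    func_kwargs={'func': read_func,
                                 'name': name,
                                 'namespace': namespace})[0]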
@@ -1131,7 +1075,6 @@ def wait_template_exist(self, namespace, name, wait=60): 'name': name, 'namespace': namespace})[0] - # TODO DONE def _does_exist(self, func, **kwargs): try: func(**kwargs) @@ -1140,7 +1083,6 @@ def _does_exist(self, func, **kwargs): self.logger.info("ApiException occurred %s, it looks like obj doesn't exist", e) return False - # TODO DONE def _restore_missing_project_role_bindings(self, namespace): """Fixes one of issues in Openshift REST API create project doesn't add necessary roles to default sa, probably bug, this is workaround @@ -1195,7 +1137,6 @@ def _restore_missing_project_role_bindings(self, namespace): metadata=role_binding_name) auth_api.create_namespaced_role_binding(namespace=namespace, body=puller_role_binding) - # TODO DONE def delete_project(self, name, wait=300): """Removes project(namespace) and all entities in it. @@ -1214,7 +1155,6 @@ def delete_project(self, name, wait=300): raise TimedOutError('project {n} was not removed within {w} sec'.format(n=name, w=wait)) - # TODO DONE def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. One of cases when this is necessary is emulation of stopping/starting appliance @@ -1255,12 +1195,10 @@ def check_scale_value(): self.logger.info("scaling entity %s to %s replicas", name, replicas) wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) - # TODO DONE def get_project_by_name(self, project_name): """Returns only the selected Project object""" return next(proj for proj in self.list_project() if proj.metadata.name == project_name) - # TODO DONE def get_scc(self, name): """Returns Security Context Constraint by name @@ -1270,7 +1208,6 @@ def get_scc(self, name): """ return self.security_api.read_security_context_constraints(name) - # TODO DONE def create_scc(self, body): """Creates Security Context Constraint from passed structure. Main aim is to create scc from read and parsed yaml file. 
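# Illustrative sketch, not part of the patch: project deletion is
# asynchronous, so delete_project above issues the delete and then waits for
# the existence check to start failing, converting a timeout into a clearer
# error message:
from wait_for import TimedOutError, wait_for

def delete_project_and_wait(delete_func, exists_func, name, wait=300):
    delete_func(name=name)
    try:
        wait_for(lambda: not exists_func(name=name), num_sec=wait)
    except TimedOutError:
        raise TimedOutError(
            'project {n} was not removed within {w} sec'.format(n=name, w=wait))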
@@ -1286,7 +1223,6 @@ def create_scc(self, body): scc = self.ociclient.V1SecurityContextConstraints(**raw_scc) return self.security_api.create_security_context_constraints(body=scc) - # TODO DONE def append_sa_to_scc(self, scc_name, namespace, sa): """Appends Service Account to respective Security Constraint @@ -1313,7 +1249,6 @@ def append_sa_to_scc(self, scc_name, namespace, sa): return self.security_api.patch_security_context_constraints(name=scc_name, body=update_scc_cmd) - # TODO DONE def remove_sa_from_scc(self, scc_name, namespace, sa): """Removes Service Account from respective Security Constraint @@ -1338,7 +1273,6 @@ def remove_sa_from_scc(self, scc_name, namespace, sa): return self.security_api.patch_security_context_constraints(name=scc_name, body=update_scc_cmd) - # TODO DONE def is_vm_running(self, vm_name, running_pods=()): """Emulates check is vm(appliance) up and running @@ -1360,7 +1294,6 @@ def is_vm_running(self, vm_name, running_pods=()): # todo: check url is available + db is accessable return True - # TODO DONE def list_deployment_config_names(self, namespace): """Extracts and returns list of Deployment Config names @@ -1371,7 +1304,6 @@ def list_deployment_config_names(self, namespace): dcs = self.o_api.list_namespaced_deployment_config(namespace=namespace) return [dc.metadata.name for dc in dcs.items] - # TODO DONE def list_stateful_set_names(self, namespace): """Returns list of Stateful Set names @@ -1383,7 +1315,6 @@ def list_stateful_set_names(self, namespace): sts = st_api.list_namespaced_stateful_set(namespace=namespace) return [st.metadata.name for st in sts.items] - # TODO DONE def is_deployment_config(self, namespace, name): """Checks whether passed name belongs to deployment configs in appropriate namespace @@ -1394,7 +1325,6 @@ def is_deployment_config(self, namespace, name): """ return name in self.list_deployment_config_names(namespace=namespace) - # TODO DONE def is_stateful_set(self, namespace, name): """Checks whether passed name belongs to Stateful Sets in appropriate namespace @@ -1405,7 +1335,6 @@ def is_stateful_set(self, namespace, name): """ return name in self.list_stateful_set_names(namespace=namespace) - # TODO DONE def does_project_exist(self, name): """Checks whether Project exists. @@ -1415,7 +1344,6 @@ def does_project_exist(self, name): """ return self._does_exist(func=self.o_api.read_project, name=name) - # TODO DONE def is_vm_stopped(self, vm_name): """Check whether vm isn't running. There is no such state stopped for vm in openshift therefore @@ -1431,7 +1359,6 @@ def is_vm_stopped(self, vm_name): "running: {}").format([pod.metadata.name for pod in pods])) return not bool(pods) - # TODO DONE def wait_vm_running(self, vm_name, num_sec=900): """Checks whether all project pods are in ready state. @@ -1443,7 +1370,6 @@ def wait_vm_running(self, vm_name, num_sec=900): wait_for(self.is_vm_running, [vm_name], num_sec=num_sec) return True - # TODO DONE def wait_vm_stopped(self, vm_name, num_sec=600): """Checks whether all project pods are stopped. 
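# Illustrative sketch, not part of the patch: append_sa_to_scc and
# remove_sa_from_scc (bodies elided by the diff) manipulate the `users` list
# of a Security Context Constraint, where service accounts appear under their
# fully qualified OpenShift user name, built like this:
def scc_user_name(namespace, service_account):
    return 'system:serviceaccount:{proj}:{sa}'.format(proj=namespace,
                                                      sa=service_account)

# e.g. scc_user_name('my-appliance', 'cfme-anyuid')
#      -> 'system:serviceaccount:my-appliance:cfme-anyuid'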
@@ -1455,7 +1381,6 @@ def wait_vm_stopped(self, vm_name, num_sec=600): wait_for(self.is_vm_stopped, [vm_name], num_sec=num_sec) return True - # TODO DONE def current_ip_address(self, vm_name): """Tries to retrieve project's external ip @@ -1470,7 +1395,6 @@ def current_ip_address(self, vm_name): except Exception: return None - # TODO DONE def is_vm_suspended(self, vm_name): """There is no such state in openshift @@ -1480,7 +1404,6 @@ def is_vm_suspended(self, vm_name): """ return False - # TODO DONE def in_steady_state(self, vm_name): """Return whether the specified virtual machine is in steady state @@ -1492,12 +1415,10 @@ def in_steady_state(self, vm_name): or self.is_vm_stopped(vm_name) or self.is_vm_suspended(vm_name)) - # TODO DONE @property def can_rename(self): return hasattr(self, "rename_vm") - # TODO DONE def list_project_names(self): """Obtains project names @@ -1508,7 +1429,6 @@ def list_project_names(self): list_vms = list_vm = list_project_names - # TODO DONE def get_appliance_version(self, vm_name): """Returns appliance version if it is possible @@ -1526,7 +1446,6 @@ def get_appliance_version(self, vm_name): except ValueError: return None - # TODO DONE def delete_template(self, template_name, namespace='openshift'): """Deletes template @@ -1539,17 +1458,14 @@ def delete_template(self, template_name, namespace='openshift'): return self.o_api.delete_namespaced_template(name=template_name, namespace=namespace, body=options) - # TODO DONE def get_meta_value(self, instance, key): raise NotImplementedError( 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) - # TODO DONE def set_meta_value(self, instance, key): raise NotImplementedError( 'Provider {} does not implement get_meta_value'.format(type(self).__name__)) - # TODO DONE def vm_status(self, vm_name): """Returns current vm/appliance state @@ -1561,7 +1477,6 @@ def vm_status(self, vm_name): raise ValueError("Vm {} doesn't exist".format(vm_name)) return 'up' if self.is_vm_running(vm_name) else 'down' - # TODO DONE def vm_creation_time(self, vm_name): """Returns time when vm/appliance was created @@ -1575,13 +1490,11 @@ def vm_creation_time(self, vm_name): project = next(proj for proj in projects if proj.metadata.name == vm_name) return project.metadata.creation_timestamp - # TODO DONE @staticmethod def _progress_log_callback(logger, source, destination, progress): logger.info("Provisioning progress {}->{}: {}".format( source, destination, str(progress))) - # TODO DONE def vm_hardware_configuration(self, vm_name): """Collects project's cpu and ram usage @@ -1611,7 +1524,6 @@ def vm_hardware_configuration(self, vm_name): hw_config['ram'] += ram return hw_config - # TODO DONE def usage_and_quota(self): installed_ram = 0 installed_cpu = 0 @@ -1629,7 +1541,6 @@ def usage_and_quota(self): 'cpu_limit': None, } - # TODO DONE def get_required_pods(self, vm_name): """Provides list of pods which should be present in appliance @@ -1643,7 +1554,6 @@ def get_required_pods(self, vm_name): else: return self.required_project_pods - # TODO DONE def get_ip_address(self, vm_name, timeout=600): """ Returns the IP address for the selected appliance. @@ -1662,11 +1572,9 @@ def get_ip_address(self, vm_name, timeout=600): ip_address = None return ip_address - # TODO DONE def disconnect(self): pass - # TODO DONE def get_appliance_tags(self, name): """Returns appliance tags stored in appropriate config map if it exists. 
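# Illustrative sketch, not part of the patch: get_appliance_version above
# recovers the version from the originating template name recorded with the
# project, using the same parsing that deploy()/deploy_template() rely on:
from miq_version import TemplateName, Version

def version_from_template_name(template_name):
    try:
        return Version(TemplateName.parse_template(template_name).version)
    except ValueError:
        # not a recognizable template name
        return None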
@@ -1681,7 +1589,6 @@ def get_appliance_tags(self, name): except ApiException: return {} - # TODO DONE def get_appliance_url(self, name): """Returns appliance url assigned by Openshift @@ -1695,7 +1602,6 @@ def get_appliance_url(self, name): except (ApiException, IndexError): return None - # TODO DONE def get_appliance_uuid(self, name): """Returns appliance uuid assigned by Openshift @@ -1705,7 +1611,6 @@ def get_appliance_uuid(self, name): """ return self.get_project_by_name(name).metadata.uid - # TODO DONE def is_appliance(self, name): """Checks whether passed vm/project is appliance @@ -1715,7 +1620,6 @@ def is_appliance(self, name): """ return bool(self.get_appliance_tags(name)) - # TODO DONE def find_job_pods(self, namespace, name): """Finds and returns all remaining job pods @@ -1730,7 +1634,6 @@ def find_job_pods(self, namespace, name): pods.append(pod) return pods - # TODO DONE def read_pod_log(self, namespace, name): """Reads and returns pod log diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index b8698dc4..de355385 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -1,6 +1,5 @@ from __future__ import absolute_import -import re import copy import json import string @@ -13,14 +12,13 @@ import six from cached_property import cached_property from kubernetes import client as kubeclient -from kubernetes import config as kubeclientconfig from openshift.dynamic import DynamicClient from kubernetes.client.rest import ApiException from miq_version import TemplateName, Version from openshift import client as ociclient from wait_for import TimedOutError, wait_for -from wrapanapi.entities import (Template, TemplateMixin, Vm, VmMixin, VmState, ProjectMixin, +from wrapanapi.entities import (Template, Vm, VmMixin, VmState, ProjectMixin, Project) from wrapanapi.systems.base import System @@ -115,7 +113,7 @@ def _get_state(self): pods = self.system.list_pods(namespace=self.name) states = [] for pod in pods: - states.append(pod.state) + states.append(pod.state) if len(set(states)) == 1: return states[0] @@ -235,10 +233,14 @@ def namespace(self): @property def ip(self): - ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + # TODO JUWATTS + # ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' self.refresh() try: return self.raw.status.podIP + if not re.match(ipv4_re, ip_address) or ip_address == '127.0.0.1': + ip_address = None + return ip_address except (AttributeError): # AttributeError: vm doesn't have an ip address yet return None @@ -586,8 +588,9 @@ def deploy(self, tags=None, password='smartvm', **kwargs): # todo: return and print all failed pod details raise + @reconnect(unauthenticated_error_handler) -class Openshift(System, VmMixin, ProjectMixin): +class OpenshiftSystem(System, VmMixin, ProjectMixin): _stats_available = { 'num_container': lambda self: len(self.list_containers()), @@ -643,7 +646,7 @@ class Openshift(System, VmMixin, ProjectMixin): def __init__(self, hostname, protocol="https", port=8443, debug=False, verify_ssl=False, **kwargs): - super(Openshift, self).__init__(kwargs) + super(OpenshiftSystem, self).__init__(kwargs) self.hostname = hostname self.protocol = protocol self.port = port @@ -670,18 +673,18 @@ def _k8s_client_connect(self): url = '{proto}://{host}:{port}'.format(proto=self.protocol, host=self.hostname, port=self.port) - aConfiguration = kubeclient.Configuration() + k8_configuration = kubeclient.Configuration() - aConfiguration.host = url + k8_configuration.host = url # Security part. 
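# Illustrative sketch, not part of the patch: get_appliance_tags above is the
# read side of the 'image-repo-data' ConfigMap written during deployment; the
# tags come back as the JSON string that was stored there, and a missing map
# simply means "no tags recorded":
import json
from kubernetes.client.rest import ApiException

def read_appliance_tags(get_config_map, namespace):
    try:
        config_map = get_config_map(name='image-repo-data', namespace=namespace)
        return json.loads(config_map.data['tags'])
    except ApiException:
        return {}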
- aConfiguration.verify_ssl = self.verify_ssl - aConfiguration.ssl_ca_cert = self.ssl_ca_cert + k8_configuration.verify_ssl = self.verify_ssl + k8_configuration.ssl_ca_cert = self.ssl_ca_cert - aConfiguration.api_key = {"authorization": "Bearer " + aToken} + k8_configuration.api_key = {"authorization": "Bearer " + aToken} # Create a ApiClient with our config - return kubeclient.ApiClient(aConfiguration) + return kubeclient.ApiClient(k8_configuration) # def _connect(self): # @@ -799,6 +802,15 @@ def can_suspend(self): def can_pause(self): return False + @staticmethod + def _progress_log_callback(logger, source, destination, progress): + logger.info("Provisioning progress {}->{}: {}".format( + source, destination, str(progress))) + + @property + def can_rename(self): + return hasattr(self, "rename_vm") + def _does_exist(self, func, **kwargs): try: func(**kwargs) @@ -923,7 +935,6 @@ def get_pod(self, name, namespace=None): return Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) - def create_vm(self, name, **kwargs): raise NotImplementedError('This function has not yet been implemented.') @@ -942,7 +953,6 @@ def list_pods(self, namespace=None): Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) for pod in self.v1_pod.get(namespace=namespace).items] - def wait_project_exist(self, name, wait=60): """Checks whether Project exists within some time. @@ -1613,26 +1623,6 @@ def is_vm_running(self, vm_name, running_pods=()): # todo: check url is available + db is accessable return True - def is_deployment_config(self, namespace, name): - """Checks whether passed name belongs to deployment configs in appropriate namespace - - Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.list_deployment_config_names(namespace=namespace) - - def is_stateful_set(self, namespace, name): - """Checks whether passed name belongs to Stateful Sets in appropriate namespace - - Args: - namespace: project(namespace) name - name: entity name - Return: True/False - """ - return name in self.list_stateful_set_names(namespace=namespace) - def is_vm_stopped(self, vm_name): """Check whether vm isn't running. There is no such state stopped for vm in openshift therefore @@ -1679,10 +1669,6 @@ def is_vm_suspended(self, vm_name): """ return False - @property - def can_rename(self): - return hasattr(self, "rename_vm") - def get_appliance_version(self, vm_name): """Returns appliance version if it is possible @@ -1731,11 +1717,6 @@ def vm_creation_time(self, vm_name): project = self.v1_project.get(vm_name) return project.raw.metadata.creation_timestamp - @staticmethod - def _progress_log_callback(logger, source, destination, progress): - logger.info("Provisioning progress {}->{}: {}".format( - source, destination, str(progress))) - def vm_hardware_configuration(self, vm_name): """Collects project's cpu and ram usage @@ -1795,24 +1776,6 @@ def get_required_pods(self, vm_name): else: return self.required_project_pods - def get_ip_address(self, vm_name, timeout=600): - """ Returns the IP address for the selected appliance. - - Args: - vm_name: The name of the vm to obtain the IP for. - timeout: The IP address wait timeout. - Returns: A string containing the first found IP that isn't the device. 
- """ - try: - ip_address, tc = wait_for(lambda: self.current_ip_address(vm_name), - fail_condition=None, - delay=5, - num_sec=timeout, - message="get_ip_address from openshift") - except TimedOutError: - ip_address = None - return ip_address - def disconnect(self): pass @@ -1973,7 +1936,6 @@ def delete_template(self, template_name, namespace='openshift'): options = self.kclient.V1DeleteOptions() return self.v1_template.delete(name=template_name, namespace=namespace, body=options) - def scale_entity(self, namespace, name, replicas, wait=60): """Allows to scale up/down entities. One of cases when this is necessary is emulation of stopping/starting appliance @@ -2014,19 +1976,16 @@ def check_scale_value(): self.logger.info("scaling entity %s to %s replicas", name, replicas) wait_for(check_scale_value, num_sec=wait, fail_condition=lambda val: val != replicas) - # def run_command(self, namespace, name, cmd, **kwargs): - # """Connects to pod and tries to run - # - # Args: - # namespace: (str) project name - # name: (str) pod name - # cmd: (list) command to run - # Return: command output - # """ - # # there are some limitations and this code isn't robust enough due to - # # https://github.com/kubernetes-client/python/issues/58 - # return self.v1_pod.exec.post(namespace=namespace, name=name, - # command=cmd, - # stdout=True, - # stderr=True, - # **kwargs) \ No newline at end of file + def run_command(self, namespace, name, cmd, **kwargs): + """Connects to pod and tries to run + + Args: + namespace: (str) project name + name: (str) pod name + cmd: (list) command to run + Return: command output + """ + # there are some limitations and this code isn't robust enough due to + # https://github.com/kubernetes-client/python/issues/58 + return self.v1_pod.exec.post(namespace=namespace, name=name, command=cmd, stdout=True, + stderr=True, **kwargs) From 080179483ec86a872d69dce88e975403b5d76366 Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Wed, 17 Jul 2019 16:20:59 -0400 Subject: [PATCH 8/9] Fixing lint issues --- requirements.txt | 2 - wrapanapi/entities/__init__.py | 3 +- wrapanapi/entities/project.py | 24 ++++---- wrapanapi/systems/openshift.py | 107 ++++++++++++++++----------------- 4 files changed, 67 insertions(+), 69 deletions(-) diff --git a/requirements.txt b/requirements.txt index 840a10b9..61f7fc66 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,6 @@ inflection miq-version>=0.1.6 oauth2client ovirt-engine-sdk-python~=4.3 -#openshift==0.3.4 openshift==0.8.8 packaging pyvmomi>=6.5.0.2017.5.post1 @@ -32,7 +31,6 @@ six tzlocal vspk==5.3.2 wait_for -#websocket_client websocket_client==0.56.0 # suds jurko supports python3, suds is only used on python2 diff --git a/wrapanapi/entities/__init__.py b/wrapanapi/entities/__init__.py index edae9d21..0466c6f9 100644 --- a/wrapanapi/entities/__init__.py +++ b/wrapanapi/entities/__init__.py @@ -13,5 +13,6 @@ __all__ = [ 'Template', 'TemplateMixin', 'Vm', 'VmState', 'VmMixin', 'Instance', - 'PhysicalContainer', 'Server', 'ServerState', 'Stack', 'StackMixin' + 'PhysicalContainer', 'Server', 'ServerState', 'Stack', 'StackMixin', + 'Project', 'ProjectMixin' ] diff --git a/wrapanapi/entities/project.py b/wrapanapi/entities/project.py index 4308f314..35955755 100644 --- a/wrapanapi/entities/project.py +++ b/wrapanapi/entities/project.py @@ -1,11 +1,11 @@ """ wrapanapi.entities.project -Methods/classes pertaining to performing actions on a template +Methods/classes pertaining to performing actions on a project """ import six -from abc import 
ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta, abstractmethod from wrapanapi.entities.base import Entity, EntityMixin from wrapanapi.exceptions import MultipleItemsError, NotFoundError @@ -15,7 +15,7 @@ class Project(six.with_metaclass(ABCMeta, Entity)): """ Represents a project on a system """ - @abstractproperty + @abstractmethod def get_quota(self): """ Deploy a VM/instance with name 'vm_name' using this template @@ -31,13 +31,13 @@ class ProjectMixin(six.with_metaclass(ABCMeta, EntityMixin)): @abstractmethod def get_project(self, name, **kwargs): """ - Get template from system with name 'name' + Get project from system with name 'name' This should return only ONE matching entity. If multiple entities match the criteria, a MultipleItemsError should be raised Returns: - wrapanapi.entities.Template if it exists + wrapanapi.entities.Project if it exists Raises: wrapanapi.exceptions.MultipleItemsError if multiple matches are found """ @@ -45,16 +45,16 @@ def get_project(self, name, **kwargs): @abstractmethod def create_project(self, name, **kwargs): """ - Create template on system with name 'name' + Create project on system with name 'name' Returns: - wrapanapi.entities.Template for newly created templated + wrapanapi.entities.Project for newly created project """ @abstractmethod def list_project(self, **kwargs): """ - List templates on system + List projects on system Returns: list of wrapanapi.entities.Template @@ -63,19 +63,19 @@ def list_project(self, **kwargs): @abstractmethod def find_projects(self, name, **kwargs): """ - Find templates on system based on name or other filters in kwargs + Find project on system based on name or other filters in kwargs Should return an empty list if no matches were found Returns: - list of wrapanapi.entities.Template for matches found + list of wrapanapi.entities.Project for matches found """ def does_project_exist(self, name): """ - Checks if a template with 'name' exists on the system + Checks if a project with 'name' exists on the system - If multiple templates with the same name exists, this still returns 'True' + If multiple projects with the same name exists, this still returns 'True' """ try: return bool(self.get_project(name)) diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index de355385..bc5cb0c7 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -2,6 +2,7 @@ import copy import json +import re import string import yaml from collections import Iterable @@ -18,8 +19,7 @@ from openshift import client as ociclient from wait_for import TimedOutError, wait_for -from wrapanapi.entities import (Template, Vm, VmMixin, VmState, ProjectMixin, - Project) +from wrapanapi.entities import (Template, Vm, VmMixin, VmState, ProjectMixin, Project) from wrapanapi.systems.base import System @@ -88,23 +88,31 @@ def wrap(*args, **kwargs): return wrap -class Project(Project): +class RHOpenShiftProject(Project, Vm): """ We are assuming that a Project is a VM for purposes of simplicity for CFME-QE """ + state_map = { + 'Pending': VmState.PENDING, + 'Running': VmState.RUNNING, + 'Succeeded': VmState.SUCCEEDED, + 'Failed': VmState.FAILED, + 'Unknown': VmState.UNKNOWN + } + def __init__(self, system, raw=None, **kwargs): """ - Construct a VMWareVirtualMachine instance + Construct a RHOpenShiftProject instance Args: - system: instance of VMWareSystem - raw: pyVmomi.vim.VirtualMachine object - name: name of VM + system: instance of OpenShiftSystem + raw: 
openshift.dynamic.client.ResourceField + name: name of Project """ - super(Project, self).__init__(system, raw, **kwargs) + super(RHOpenShiftProject, self).__init__(system, raw, **kwargs) self._name = raw.metadata.name if raw else kwargs.get('name') if not self._name: raise ValueError("missing required kwarg 'name'") @@ -127,7 +135,6 @@ def _does_project_exist(self): else: return False - @property def get_quota(self): return self.system.ocp_client.resources.get(api_version='v1', kind='ResourceQuota').get( namespace=self.name) @@ -151,7 +158,18 @@ def uuid(self): def ip(self): raise NotImplementedError + @property + def creation_time(self): + """ + Detect the project creation time + + """ + raise NotImplementedError + def start(self): + """ + Start the CFME pods + """ self.logger.info("starting vm/project %s", self.name) if self._does_project_exist: for pod in self.system.get_required_pods(self.name): @@ -160,11 +178,8 @@ def start(self): raise ValueError("Project with name {n} doesn't exist".format(n=self.name)) def stop(self): - """Stops a vm. - - Args: - vm_name: name of the vm to be stopped - Returns: whether vm action has been initiated properly + """ + Stop the CFME pods """ self.logger.info("stopping vm/project %s", self.name) if self._does_project_exist: @@ -187,7 +202,7 @@ def cleanup(self): return self.delete() -class Pod(Vm): +class RHOpenShiftPod(Vm): state_map = { 'Pending': VmState.PENDING, 'Running': VmState.RUNNING, @@ -198,14 +213,14 @@ class Pod(Vm): def __init__(self, system, raw=None, **kwargs): """ - Construct a VMWareVirtualMachine instance + Construct a RHOpenShiftPod instance Args: - system: instance of VMWareSystem - raw: pyVmomi.vim.VirtualMachine object - name: name of VM + system: instance of OpenShiftSystem + raw: openshift.dynamic.client.ResourceField + name: name of Pod """ - super(Pod, self).__init__(system, raw, **kwargs) + super(RHOpenShiftPod, self).__init__(system, raw, **kwargs) self._name = raw.metadata.name if raw else kwargs.get('name') self._namespace = raw.metadata.namespace if raw else kwargs.get('namespace') if not self._name: @@ -233,16 +248,15 @@ def namespace(self): @property def ip(self): - # TODO JUWATTS - # ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + ipv4_re = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' self.refresh() try: - return self.raw.status.podIP + ip_address=self.raw.status.podIP if not re.match(ipv4_re, ip_address) or ip_address == '127.0.0.1': ip_address = None return ip_address except (AttributeError): - # AttributeError: vm doesn't have an ip address yet + # AttributeError: pod doesn't have an ip address yet return None def _get_state(self): @@ -270,12 +284,7 @@ def cleanup(self): @property def creation_time(self): - """Detect the vm_creation_time either via uptime if non-zero, or by last boot time - - The API provides no sensible way to actually get this value. 
The only way in which - vcenter API MAY have this is by filtering through events - - Return tz-naive datetime object + """Detect the pods creation time """ raise NotImplementedError @@ -284,12 +293,12 @@ class OpenShiftTemplate(Template): def __init__(self, system, raw=None, **kwargs): """ - Construct a VMWareVirtualMachine instance + Construct a OpenShiftTemplate instance Args: - system: instance of VMWareSystem - raw: pyVmomi.vim.VirtualMachine object - name: name of VM + system: instance of OpenShiftSystem + raw: openshift.dynamic.client.ResourceField + name: name of Template """ super(OpenShiftTemplate, self).__init__(system, raw, **kwargs) self._name = raw.metadata.name if raw else kwargs.get('name') @@ -686,19 +695,6 @@ def _k8s_client_connect(self): # Create a ApiClient with our config return kubeclient.ApiClient(k8_configuration) - # def _connect(self): - # - # self.dyn_client = DynamicClient(self.k8s_client) - - # self.ociclient = ociclient - # self.kclient = kubeclient - # self.oapi_client = ociclient.ApiClient(config=config) - # self.kapi_client = kubeclient.ApiClient(config=config) - # self.o_api = ociclient.OapiApi(api_client=self.oapi_client) - # self.k_api = kubeclient.CoreV1Api(api_client=self.kapi_client) - # self.security_api = self.ociclient.SecurityOpenshiftIoV1Api(api_client=self.oapi_client) - # self.batch_api = self.kclient.BatchV1Api(api_client=self.kapi_client) # for job api - @property def _identifying_attrs(self): """ @@ -933,7 +929,10 @@ def get_pod(self, name, namespace=None): else: pod = self.get_ocp_obj(resource_type=self.v1_pod, name=name) - return Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) + return RHOpenShiftPod(system=self, + name=pod.metadata.name, + namespace=pod.metadata.namespace, + raw=pod) def create_vm(self, name, **kwargs): raise NotImplementedError('This function has not yet been implemented.') @@ -950,7 +949,8 @@ def list_pods(self, namespace=None): list of wrapanapi.entities.Vm """ return [ - Pod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, raw=pod) + RHOpenShiftPod(system=self, name=pod.metadata.name, namespace=pod.metadata.namespace, + raw=pod) for pod in self.v1_pod.get(namespace=namespace).items] def wait_project_exist(self, name, wait=60): @@ -976,7 +976,7 @@ def create_project(self, name, description=None, **kwargs): project = self.v1_project.create(body=proj) self.wait_project_exist(name=name) - return Project(system=self, name=project.metadata.name, raw=project) + return RHOpenShiftProject(system=self, name=project.metadata.name, raw=project) def find_projects(self, *args, **kwargs): raise NotImplementedError @@ -984,14 +984,14 @@ def find_projects(self, *args, **kwargs): def get_project(self, name): project = self.v1_project.get(name=name) - return Project(system=self, name=project.metadata.name, raw=project) + return RHOpenShiftProject(system=self, name=project.metadata.name, raw=project) get_vm = get_project def list_project(self, namespace=None): return [ - Project(system=self, name=project.metadata.name, raw=project) + RHOpenShiftProject(system=self, name=project.metadata.name, raw=project) for project in self.v1_project.get(namespace=namespace).items] list_vms = list_project @@ -1847,7 +1847,6 @@ def read_pod_log(self, namespace, name): """ return self.v1_pod.log.get(name=name, namespace=namespace) - def start_vm(self, vm_name): """Starts a vm. 
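The run_command wrapper adjusted in the next hunk still carries the upstream exec limitation it references (kubernetes-client/python issue 58). Purely as a hedged illustration, and independent of this wrapper, the stock kubernetes client's stream helper is a common way to exec into a pod; the pod name, project and command below are assumptions:

# Illustrative sketch using the upstream kubernetes client directly; nothing here
# is part of this module, and the pod/project/command values are made up.
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()  # or build an ApiClient with a bearer token, as this module does
core_v1 = client.CoreV1Api()
output = stream(core_v1.connect_get_namespaced_pod_exec,
                'postgresql-1-abcde', 'cfme-abc123',
                command=['/bin/sh', '-c', 'psql --version'],
                stderr=True, stdin=False, stdout=True, tty=False)
print(output)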
@@ -1987,5 +1986,5 @@ def run_command(self, namespace, name, cmd, **kwargs): """ # there are some limitations and this code isn't robust enough due to # https://github.com/kubernetes-client/python/issues/58 - return self.v1_pod.exec.post(namespace=namespace, name=name, command=cmd, stdout=True, + return self.v1_pod.exec(namespace=namespace, name=name, command=cmd, stdout=True, stderr=True, **kwargs) From 251c66d0d401fd99e2eaba42ac40cb8a4748efbb Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Tue, 30 Jul 2019 11:27:52 -0400 Subject: [PATCH 9/9] Locally testing changes --- wrapanapi/systems/openshift.py | 82 ++++++++++++++++------------------ 1 file changed, 39 insertions(+), 43 deletions(-) diff --git a/wrapanapi/systems/openshift.py b/wrapanapi/systems/openshift.py index bc5cb0c7..b2da9aee 100644 --- a/wrapanapi/systems/openshift.py +++ b/wrapanapi/systems/openshift.py @@ -88,6 +88,11 @@ def wrap(*args, **kwargs): return wrap +def progress_log_callback(logger, source, destination, progress): + logger.info("Provisioning progress {}->{}: {}".format( + source, destination, str(progress))) + + class RHOpenShiftProject(Project, Vm): """ @@ -353,8 +358,8 @@ def wait_template_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.v1_template.get, + return wait_for(self.system.does_exist, num_sec=wait, + func_kwargs={'func': self.system.get_template, 'name': name, 'namespace': namespace})[0] @@ -470,7 +475,7 @@ def deploy(self, tags=None, password='smartvm', **kwargs): self.logger.info("starting template %s deployment", self.name) self.wait_template_exist(namespace=self.system.default_namespace, name=self.name) - if not self.base_url: + if not self.system.base_url: raise ValueError("base url isn't provided") version = Version(TemplateName.parse_template(self.name).version) @@ -487,11 +492,12 @@ def deploy(self, tags=None, password='smartvm', **kwargs): raise ValueError("Some passed tags {t} don't exist".format(t=not_found_tags)) for tag, value in tags.items(): prepared_tags[tags_mapping[tag]['url']] = value['url'] - prepared_tags[tags_mapping[tag]['tag']] = value['tag'] + if 'tag' in value.keys(): + prepared_tags[tags_mapping[tag]['tag']] = value['tag'] # create project # assuming this is cfme installation and generating project name - proj_id = "".join(choice(string.digits + string.lowercase) for _ in range(6)) + proj_id = "".join(choice(string.digits + string.ascii_lowercase) for _ in range(6)) # for sprout if 'vm_name' in kwargs: @@ -501,11 +507,11 @@ def deploy(self, tags=None, password='smartvm', **kwargs): template_params = kwargs.pop('template_params', {}) running_pods = kwargs.pop('running_pods', ()) - proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.base_url) + proj_url = "{proj}.{base_url}".format(proj=proj_id, base_url=self.system.base_url) self.logger.info("unique id %s, project name %s", proj_id, proj_name) - default_progress_callback = partial(self._progress_log_callback, self.logger, self.name, - proj_name) + default_progress_callback = partial(progress_log_callback, self.logger, + self.name, proj_name) progress_callback = kwargs.get('progress_callback', default_progress_callback) project = self.system.create_project(name=proj_name, description=self.name) @@ -521,7 +527,7 @@ def deploy(self, tags=None, password='smartvm', **kwargs): self.logger.info("granting required rights to project's service accounts") for
mapping in scc_user_mapping: self.system.append_sa_to_scc(scc_name=mapping['scc'], namespace=proj_name, - sa=mapping['user']) + sa=mapping['user']) progress_callback("Added service accounts to appropriate scc") # appliances prior 5.9 don't need such rights @@ -552,7 +558,7 @@ def deploy(self, tags=None, password='smartvm', **kwargs): self.system.v1_role_binding.create(namespace=proj_name, body=edit_role_binding) self.logger.info("project sa created via api have no some mandatory roles. adding them") - self.system._restore_missing_project_role_bindings(namespace=proj_name) + self.system.restore_missing_project_role_bindings(namespace=proj_name) progress_callback("Added all necessary role bindings to project `{}`".format(proj_name)) # creating common service with external ip @@ -582,7 +588,6 @@ def deploy(self, tags=None, password='smartvm', **kwargs): self.logger.info("verifying that all created entities are up and running") progress_callback("Waiting for all pods to be ready and running") - # TODO Get PROJECT try: wait_for(project.is_running, num_sec=600, func_kwargs={'vm_name': proj_name, 'running_pods': running_pods}) @@ -798,16 +803,11 @@ def can_suspend(self): def can_pause(self): return False - @staticmethod - def _progress_log_callback(logger, source, destination, progress): - logger.info("Provisioning progress {}->{}: {}".format( - source, destination, str(progress))) - @property def can_rename(self): return hasattr(self, "rename_vm") - def _does_exist(self, func, **kwargs): + def does_exist(self, func, **kwargs): try: func(**kwargs) return True @@ -961,8 +961,8 @@ def wait_project_exist(self, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, - func_kwargs={'func': self.get_project(), + return wait_for(self.does_exist, num_sec=wait, + func_kwargs={'func': self.get_project, 'name': name})[0] def create_project(self, name, description=None, **kwargs): @@ -1022,7 +1022,10 @@ def get_template(self, name, namespace): return OpenShiftTemplate(system=self, name=template.metadata.name, raw=template) def list_templates(self, namespace=None): - return self.v1_template.get(namespace=namespace).items + return [ + OpenShiftTemplate(system=self, name=template.metadata.name, raw=template) + for template in self.v1_template.get(namespace=namespace).items + ] def list_deployment_configs(self, namespace=None): return self.v1_deployment_config.get(namespace=namespace).items @@ -1052,7 +1055,7 @@ def wait_service_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_service.get, 'name': name, 'namespace': namespace})[0] @@ -1163,7 +1166,7 @@ def wait_config_map_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_config_map.get, 'name': name, 'namespace': namespace})[0] @@ -1214,7 +1217,7 @@ def wait_stateful_set_exist(self, namespace, name, wait=900): Return: True/False """ read_st = self.v1_stateful_sets.get - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': read_st, 'name': name, 'namespace': namespace})[0] @@ -1243,7 
+1246,7 @@ def wait_endpoints_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_endpoint.get, 'name': name, 'namespace': namespace})[0] @@ -1272,7 +1275,7 @@ def wait_route_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_route.get, 'name': name, 'namespace': namespace})[0] @@ -1301,7 +1304,7 @@ def wait_service_account_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_service_account.get, 'name': name, 'namespace': namespace})[0] @@ -1330,7 +1333,7 @@ def wait_role_binding_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_role_binding.get, 'name': name, 'namespace': namespace})[0] @@ -1368,7 +1371,7 @@ def wait_image_stream_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_image_stream.get, 'name': name, 'namespace': namespace})[0] @@ -1397,7 +1400,7 @@ def wait_secret_exist(self, namespace, name, wait=90): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_secret.get, 'name': name, 'namespace': namespace})[0] @@ -1426,7 +1429,7 @@ def wait_deployment_config_exist(self, namespace, name, wait=600): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_deployment_config.get, 'name': name, 'namespace': namespace})[0] @@ -1476,7 +1479,7 @@ def wait_persistent_volume_claim_exist(self, namespace, name, wait=60): wait: entity should appear for this time then - True, otherwise False Return: True/False """ - return wait_for(self._does_exist, num_sec=wait, + return wait_for(self.does_exist, num_sec=wait, func_kwargs={'func': self.v1_persistent_volume.get, 'name': name, 'namespace': namespace})[0] @@ -1565,19 +1568,12 @@ def append_sa_to_scc(self, scc_name, namespace, sa): """ user = 'system:serviceaccount:{proj}:{usr}'.format(proj=namespace, usr=sa) - if self.get_scc(name=scc_name).users is None: - # ocp 3.6 has None for users if there is no sa in it - update_scc_cmd = [ - {"op": "add", - "path": "/users", - "value": [user]}] - else: - update_scc_cmd = [ - {"op": "add", - "path": "/users/-", - "value": user}] + body = self.get_scc(name=scc_name) + body.users.append(user) self.logger.debug("adding user %r to scc %r", user, scc_name) - return self.v1_scc.patch(name=scc_name, body=update_scc_cmd, namespace=namespace) + #return 
self.v1_scc.patch(name=scc_name, body=update_scc_cmd, namespace=namespace) + + return self.v1_scc.patch(name=scc_name, body=body) def remove_sa_from_scc(self, scc_name, namespace, sa): """Removes Service Account from respective Security Constraint