diff --git a/lib/api/fusor_api.py b/lib/api/fusor_api.py index f45aaf2..db49e73 100644 --- a/lib/api/fusor_api.py +++ b/lib/api/fusor_api.py @@ -353,22 +353,87 @@ def add_deployment_subscription( return True -class FusorDeploymentApi(FusorApi): +class QCIDeploymentApi(FusorApi): """ - This is supposed to be an abstract base class RHEV/OSP deployment APIs - You should never instantiate this class + This class handles the deployment of all products supported by QCI using + the fusor API """ def __init__(self, fusor_ip, user, pw): - super(FusorDeploymentApi, self).__init__(fusor_ip, user, pw) + super(QCIDeploymentApi, self).__init__(fusor_ip, user, pw) self.fusor_data = None self.deployment_id = None - self.product_install_location = None + self.install_location_cfme = None + self.install_location_ocp = None + self.openstack_api_url = "https://{}/fusor/api/openstack/deployments/".format(self.fusor_ip) + # Id for deployment objects specific to the orchestration of an openstack deployment + # This will also be stored in fusor deployment object 'openstack_deployment_id' + self.openstack_deployment_id = None + + ################################################################################################ + # Private Helper Methods + ################################################################################################ + def _openstack_get_resource(self, resource): + self.last_response = requests.get( + "{}{}".format(self.openstack_api_url, resource), + auth=(self.username, self.password), verify=False) + return self.last_response + + def _openstack_put_resource(self, resource, data): + self.last_response = requests.put( + "{}{}".format(self.openstack_api_url, resource), json=data, + auth=(self.username, self.password), verify=False) + return self.last_response + + def _openstack_post_resource(self, resource, data): + self.last_response = requests.post( + "{}{}".format(self.openstack_api_url, resource), json=data, + auth=(self.username, self.password), 
verify=False) + return self.last_response + + def _openstack_delete_resource(self, resource, data): + self.last_response = requests.delete( + "{}{}".format(self.openstack_api_url, resource), json=data, + auth=(self.username, self.password), verify=False) + return self.last_response + + ################################################################################################ + # Public Helper Methods + ################################################################################################ def create_deployment( self, name, description=None, + deploy_rhv=False, deploy_osp=False, deploy_cfme=False, deploy_ose=False, organization_id='1', lifecycle_environment_id=None, access_insights=False): - raise NotImplementedError("Implement this method") + """ + Create a new deployment with the products specified and store the + deployment data returned + NOTE: RHCI currently only supports the Default organization + """ + data = {'deployment': { + 'name': name, + 'description': description, + 'deploy_rhev': deploy_rhv, + 'deploy_cfme': deploy_cfme, + 'deploy_openshift': deploy_ose, + 'deploy_openstack': deploy_osp, + 'organization_id': organization_id, + 'lifecycle_environment_id': lifecycle_environment_id, + 'enable_access_insights': access_insights, }, } + response = self._fusor_post_resource('deployments', data) + + if response.status_code != 200: + return False + + self.fusor_data = {} + response_data = response.json() + for key in response_data: + self.fusor_data[key] = response_data[key] + + self.deployment_id = self.fusor_data['deployment']['id'] + self.openstack_deployment_id = self.fusor_data['deployment']['openstack_deployment_id'] + + return True def deploy(self): """ @@ -473,6 +538,9 @@ def _remove_duplicate_keys(self): pass def delete_deployment(self): + """ + Delete the currently loaded deployment from Satellite + """ if not self.deployment_id: raise Exception("Unable to delete deployment because there is no deployment id") @@ -483,9 +551,7 @@ def 
delete_deployment(self): if response.status_code != 200: return False - response_data = response.json() - for key in response_data: - self.fusor_data[key] = response_data[key] + self.fusor_data = None return True @@ -568,7 +634,7 @@ def add_deployment_subscription( if not self.deployment_id: raise Exception("Unable to update deployment because there is no deployment id") - return super(FusorDeploymentApi, self).add_deployment_subscription( + return super(QCIDeploymentApi, self).add_deployment_subscription( self.deployment_id, contract_number, product_name, quantity_attached, start_date, end_date, total_quantity, source, quantity_to_add) @@ -619,7 +685,7 @@ def get_deployment_progress(self): if not self.deployment_id: raise Exception("Unable to update deployment because there is no deployment id") - return super(FusorDeploymentApi, self).get_deployment_progress(self.deployment_id) + return super(QCIDeploymentApi, self).get_deployment_progress(self.deployment_id) def get_deployment_log(self): """ @@ -665,6 +731,9 @@ def set_deployment_property(self, property_name, property_value): return True + ################################################################################################ + # OpenShift Methods + ################################################################################################ def ose_set_storage_size(self, disk_size): """ Set the disk size of OpenShift docker storage. This will be the 2nd hard drive for the @@ -727,7 +796,6 @@ def _ose_set_node_specs( Helper function since master nodes set the same number and type of objects """ data = {'deployment': { - 'openshift_install_loc': self.product_install_location, node_count_name: node_count, node_vcpu_name: node_vcpu, node_ram_name: node_ram, @@ -745,6 +813,38 @@ def _ose_set_node_specs( return True + def set_install_location_ocp(self, location=None): + """ + Set the location where OpenShift will be deployed. 
+ + location - can be either 'rhv' or 'osp' + """ + + location_dict = { + 'rhv': 'RHEV', + 'osp': 'OpenStack', } + + if not location or location.lower() not in ['rhv', 'osp']: + raise Exception('Location for OpenShift ({}) is invalid'.format(location)) + + self.install_location_ocp = location_dict[location.lower()] + + data = { + "deployment": { + 'openshift_install_loc': self.install_location_ocp, }} + + resource = 'deployments/{}'.format(self.deployment_id) + response = self._fusor_put_resource(resource, data) + + if response.status_code != 200: + return False + + response_data = response.json() + for key in response_data: + self.fusor_data[key] = response_data[key] + + return True + def set_ose_nfs_storage(self, storage_name, storage_host, storage_path): """ Set the nfs storage options for OpenShift @@ -822,6 +922,9 @@ def set_ose_subdomain(self, subdomain_name): return True + ################################################################################################ + # CloudForms Methods + ################################################################################################ def set_creds_cfme(self, pw): """ Set the CFME credentials @@ -831,7 +934,6 @@ def set_creds_cfme(self, pw): data = { "deployment": { - 'cfme_install_loc': self.product_install_location, 'cfme_root_password': pw, 'cfme_admin_password': pw, 'cfme_db_password': pw, }} @@ -848,81 +950,84 @@ def set_creds_cfme(self, pw): return True + def set_install_location_cfme(self, location=None): + """ + Set the location where CloudForms will be deployed. 
-class RHEVFusorApi(FusorDeploymentApi): - def __init__(self, fusor_ip, user, pw): - super(RHEVFusorApi, self).__init__(fusor_ip, user, pw) - self.product_install_location = 'RHEV' + location - can be either 'rhv' or 'osp' + """ - def is_self_hosted(self): - if not self.fusor_data.get('deployment'): - return False + location_dict = { + 'rhv': 'RHEV', + 'osp': 'OpenStack', } - return self.fusor_data['deployment']['rhev_is_self_hosted'] + if not location or location.lower() not in ['rhv', 'osp']: + raise Exception('Location for CloudForms ({}) is invalid'.format(location)) - def create_deployment( - self, name, description=None, - deploy_cfme=False, deploy_ose=False, - organization_id='1', lifecycle_environment_id=None, access_insights=False): - """ - Create a new RHEV deployment with CFME and store the deployment data returned - NOTE: RHCI currently only supports the Default organization - """ - data = {'deployment': { - 'name': name, - 'description': description, - 'deploy_rhev': True, - 'deploy_cfme': deploy_cfme, - 'deploy_openshift': deploy_ose, - 'deploy_openstack': False, - 'organization_id': organization_id, - 'lifecycle_environment_id': lifecycle_environment_id, - 'enable_access_insights': access_insights, }, } - response = self._fusor_post_resource('deployments', data) + self.install_location_cfme = location_dict[location.lower()] + + data = { + "deployment": { + 'cfme_install_loc': self.install_location_cfme, }} + + resource = 'deployments/{}'.format(self.deployment_id) + response = self._fusor_put_resource(resource, data) if response.status_code != 200: return False - self.fusor_data = {} response_data = response.json() for key in response_data: self.fusor_data[key] = response_data[key] - self.deployment_id = self.fusor_data['deployment']['id'] - return True - def delete_deployment(self): + ################################################################################################ + # RHV Methods + 
################################################################################################ + def rhv_is_self_hosted(self): + if not self.fusor_data.get('deployment'): + raise Exception("There is no fusor deployment data") + + return self.fusor_data['deployment']['rhev_is_self_hosted'] + + def set_rhv_cpu_type(self, cpu_type): """ - Delete the currently loaded deployment from Satellite + Set the RHV cpu type """ + if not self.deployment_id: - raise Exception("Unable to delete deployment because there is no deployment id") + raise Exception("Unable to update deployment because there is no deployment id") + + data = { + "deployment": { + 'rhev_cpu_type': cpu_type, }} resource = 'deployments/{}'.format(self.deployment_id) - data = {} - response = self._fusor_delete_resource(resource, data) + response = self._fusor_put_resource(resource, data) if response.status_code != 200: return False - self.fusor_data = None + response_data = response.json() + for key in response_data: + self.fusor_data[key] = response_data[key] return True - def set_discovered_hosts(self, rhevh_macs, rhevm_mac=None, naming_scheme='Freeform'): + def set_rhv_hosts(self, rhvh_macs, rhvm_mac=None, naming_scheme='Freeform'): """ Set the hypervisor hosts (and RHEV engine). 
If rhevm mac is None then deploy self hosted - rhevh_macs - (List of Strings) hypervisor macs - rhevm_mac - (String) engine mac OPTIONAL + rhvh_macs - (List of Strings) hypervisor macs + rhvm_mac - (String) engine mac OPTIONAL """ if not self.deployment_id: raise Exception("Unable to update deployment because there is no deployment id") # Wrap string in a list - if type(rhevh_macs) is str: - rhevh_macs = [rhevh_macs] + if type(rhvh_macs) is str: + rhvh_macs = [rhvh_macs] # Grab a list of discovered hosts disco_hosts = self.get_discovered_hosts().get( @@ -932,12 +1037,12 @@ def set_discovered_hosts(self, rhevh_macs, rhevm_mac=None, naming_scheme='Freefo hypervisor_ids = [] for host in disco_hosts: - if host['mac'] == rhevm_mac: + if host['mac'] == rhvm_mac: engine_id = host['id'] - elif host['mac'] in rhevh_macs: + elif host['mac'] in rhvh_macs: hypervisor_ids.append(host['id']) - if (rhevm_mac and (not engine_id)) and not hypervisor_ids: + if (rhvm_mac and (not engine_id)) and not hypervisor_ids: return False data = { @@ -959,9 +1064,9 @@ def set_discovered_hosts(self, rhevh_macs, rhevm_mac=None, naming_scheme='Freefo return True - def set_creds_rhev(self, pw): + def set_creds_rhv(self, pw): """ - Set the RHEV admin/root password + Set the RHV admin/root password """ if not self.deployment_id: raise Exception("Unable to update deployment because there is no deployment id") @@ -983,14 +1088,14 @@ def set_creds_rhev(self, pw): return True - def set_nfs_storage(self, - data_name, data_address, data_path, - export_name, export_address, export_path, - hosted_storage_name=None, hosted_storage_address=None, hosted_storage_path=None, - rhev_data_center_name='Default', rhev_cluster_name='Default'): + def set_nfs_storage_rhv( + self, + data_name, data_address, data_path, + export_name, export_address, export_path, + hosted_storage_name=None, hosted_storage_address=None, hosted_storage_path=None, + rhev_data_center_name='Default', rhev_cluster_name='Default'): """ - Set the 
nfs storage options. If rhev_self_hosted deployment then the hosted storage values - will be set + Set the nfs storage options for RHV. """ if not self.deployment_id: raise Exception( @@ -1023,80 +1128,9 @@ def set_nfs_storage(self, return True - -class OSPFusorApi(FusorDeploymentApi): - def __init__(self, fusor_ip, user, pw): - super(OSPFusorApi, self).__init__(fusor_ip, user, pw) - self.openstack_api_url = "https://{}/fusor/api/openstack/deployments/".format(self.fusor_ip) - self.product_install_location = 'OpenStack' - - # Id for deployment objects specific to the orchestration of an openstack deployment - # This will also be stored in fusor deployment object 'openstack_deployment_id' - self.openstack_deployment_id = None - ################################################################################################ - # Private Helper Methods + # OpenStack Methods ################################################################################################ - def _openstack_get_resource(self, resource): - self.last_response = requests.get( - "{}{}".format(self.openstack_api_url, resource), - auth=(self.username, self.password), verify=False) - return self.last_response - - def _openstack_put_resource(self, resource, data): - self.last_response = requests.put( - "{}{}".format(self.openstack_api_url, resource), json=data, - auth=(self.username, self.password), verify=False) - return self.last_response - - def _openstack_post_resource(self, resource, data): - self.last_response = requests.post( - "{}{}".format(self.openstack_api_url, resource), json=data, - auth=(self.username, self.password), verify=False) - return self.last_response - - def _openstack_delete_resource(self, resource, data): - self.last_response = requests.delete( - "{}{}".format(self.openstack_api_url, resource), json=data, - auth=(self.username, self.password), verify=False) - return self.last_response - - 
################################################################################################ - # Public Methods - ################################################################################################ - - def create_deployment( - self, name, description=None, - deploy_cfme=False, deploy_ose=False, - organization_id='1', lifecycle_environment_id=None, access_insights=False): - """ - Create a new RHEV deployment with CFME and store the deployment data returned - """ - data = {'deployment': { - 'name': name, - 'description': description, - 'deploy_rhev': False, - 'deploy_cfme': deploy_cfme, - 'deploy_openshift': deploy_ose, - 'deploy_openstack': True, - 'organization_id': organization_id, - 'lifecycle_environment_id': lifecycle_environment_id, - 'enable_access_insights': access_insights, }, } - response = self._fusor_post_resource('deployments', data) - - if response.status_code not in [200, 202]: - return False - - self.fusor_data = {} - response_data = response.json() - for key in response_data: - self.fusor_data[key] = response_data[key] - - self.deployment_id = self.fusor_data['deployment']['id'] - self.openstack_deployment_id = self.fusor_data['deployment']['openstack_deployment_id'] - - return True - def add_undercloud(self, ip, ssh_user, ssh_pass): if not self.deployment_id: raise Exception('Unable to add undercloud because there is no deployment id') @@ -1133,7 +1167,10 @@ def get_undercloud_status(self): return undercloud_status['deployed'] and (undercloud_status['failed'] is False) - def introspection_tasks(self): + def get_introspection_tasks(self): + """ + Retrieves the OpenStack introspection task currently running + """ if not self.deployment_id: raise Exception('Unable to get deployment id because there is no deployment id') @@ -1151,7 +1188,7 @@ def introspection_tasks(self): def get_openstack_images(self): """ - Get a list of the OSP images + Get a list of the OpenStack images """ if not self.deployment_id: raise Exception('Unable to get 
deployment id because there is no deployment id') @@ -1168,9 +1205,9 @@ def get_openstack_images(self): return response.json() - def get_nodes(self): + def get_osp_nodes(self): """ - Get a list of the registered OSP nodes for this deployment + Get a list of the registered OpenStack nodes for this deployment """ if not self.deployment_id: raise Exception('Unable to get deployment id because there is no deployment id') @@ -1187,7 +1224,7 @@ def get_nodes(self): return len(self.fusor_data['nodes']) > 0 - def register_nodes( + def register_osp_nodes( self, ipmi_driver, ipmi_ip, ipmi_user, ipmi_pass, node_mac, deploy_kernel_id, deploy_ramdisk_id, virt_type="virsh", capabilities="boot_option:local"): """ @@ -1228,7 +1265,7 @@ def register_nodes( return True - def wait_for_node_registration(self, delay=10, maxtime=30, introspection_attempts_max=1): + def wait_for_osp_node_registration(self, delay=10, maxtime=30, introspection_attempts_max=1): """ Wait for node registration to finish processing either by success/error If fusor_data['introspection_tasks'] is empty, it will attempt to refresh the @@ -1304,7 +1341,7 @@ def set_overcloud_node_count(self, node_count): return True - def get_deployment_plan(self): + def get_overcloud_deployment_plan(self): """ Get the Fusor deployment plan and save it in self.fusor_data Return True if sucessful @@ -1324,52 +1361,53 @@ def get_deployment_plan(self): return True - def update_role_compute(self, flavor, count): + def update_osp_role_compute(self, flavor, count): """ Assign compute role the specified flavor and count """ - return self.update_role( + return self.update_osp_role( 'overcloud_compute_flavor', flavor, 'overcloud_compute_count', count) - def update_role_controller(self, flavor, count): + def update_osp_role_controller(self, flavor, count): """ Assign controller role the specified flavor and count """ - return self.update_role( + return self.update_osp_role( 'overcloud_controller_flavor', flavor, 
'overcloud_controller_count', count) - def update_role_ceph(self, flavor, count): + def update_osp_role_ceph(self, flavor, count): """ Assign ceph role the specified flavor and count """ + raise Exception("Local ceph storage is not supported in QCi") - return self.update_role( + return self.update_osp_role( 'overcloud_ceph_storage_flavor', flavor, 'overcloud_ceph_count', count) - def update_role_cinder(self, flavor, count): + def update_osp_role_cinder(self, flavor, count): """ Assign cinder role the specified flavor and count """ - return self.update_role( + return self.update_osp_role( 'overcloud_block_storage_flavor', flavor, 'overcloud_block_count', count) - def update_role_swift(self, flavor, count): + def update_osp_role_swift(self, flavor, count): """ Assign swift role the specified flavor and count """ - return self.update_role( + return self.update_osp_role( 'overcloud_object_storage_flavor', flavor, 'overcloud_object_count', count) - def update_role(self, flavor_role_name, flavor, count_role_name, count): + def update_osp_role(self, flavor_role_name, flavor, count_role_name, count): """ Assign a role count and flavor with one api call """ @@ -1394,7 +1432,7 @@ def update_role(self, flavor_role_name, flavor, count_role_name, count): return True - def update_role_flavor(self, role_name, role_flavor): + def update_osp_role_flavor(self, role_name, role_flavor): """ Assign the OSP flavor to the specified role """ @@ -1417,7 +1455,7 @@ def update_role_flavor(self, role_name, role_flavor): return True - def update_role_count(self, role_name, role_count): + def update_osp_role_count(self, role_name, role_count): """ Assign 'role_count' number of nodes to the specified OSP role """ @@ -1440,7 +1478,37 @@ def update_role_count(self, role_name, role_count): return True - def node_flavors(self): + def set_external_ceph_storage( + self, + ceph_host, ceph_fsid, ceph_username, ceph_key, + nova_pool_name, cinder_pool_name, glance_pool_name): + """ + Set the info for the 
external ceph storage + """ + data = { + 'openstack_deployment': { + 'ceph_ext_mon_host': ceph_host, + 'ceph_cluster_fsid': ceph_fsid, + 'ceph_client_username': ceph_username, + 'ceph_client_key': ceph_key, + 'nova_rbd_pool_name': nova_pool_name, + 'cinder_rbd_pool_name': cinder_pool_name, + 'glance_rbd_pool_name': glance_pool_name, + }, } + + resource = 'openstack_deployments/{}'.format(self.openstack_deployment_id) + response = self._fusor_put_resource(resource, data) + + if response.status_code not in [200, 202]: + return False + + response_data = response.json() + for key in response_data: + self.fusor_data[key] = response_data[key] + + return True + + def get_osp_node_flavors(self): """ Retrieve the list of OSP node flavors """ diff --git a/tests/api/test_api_osp_deployment.py b/tests/api/test_api_osp_deployment.py index c7588a2..d2ff87b 100644 --- a/tests/api/test_api_osp_deployment.py +++ b/tests/api/test_api_osp_deployment.py @@ -59,7 +59,7 @@ def osp_api(fusor_admin_username, fusor_admin_password, base_url): OSPFusorApi object with methods for accessing/editing OSP deployment objects """ fusor_ip = parse_ip_from_url(base_url) - return fusor_api.OSPFusorApi(fusor_ip, fusor_admin_username, fusor_admin_password) + return fusor_api.QCIDeploymentApi(fusor_ip, fusor_admin_username, fusor_admin_password) def deployment_attach_sub( @@ -119,6 +119,7 @@ def test_osp_api(osp_api, variables, deployment_name): masktocidr = {"255.255.255.0": "/24", "255.255.0.0": "/16", "255.0.0.0": "/8", } deploy_cfme = 'cfme' in dep['install'] + cfme_install_loc = 'osp' deploy_ose = 'ocp' in dep['install'] if not deployment_name: deployment_name = 'pytest-osp-api-{}{}'.format( @@ -154,6 +155,7 @@ def test_osp_api(osp_api, variables, deployment_name): kernel_image = None assert osp_api.create_deployment(deployment_name, deployment_desc, + deploy_osp=True, deploy_cfme=deploy_cfme, deploy_ose=deploy_ose) osp_api.refresh_deployment_info() @@ -197,7 +199,7 @@ def test_osp_api(osp_api, 
variables, deployment_name): sleep(image_query_wait) for node in overcloud_nodes: - assert osp_api.register_nodes( + assert osp_api.register_osp_nodes( node['driver_type'], node['host_ip'], node['host_username'], @@ -208,7 +210,7 @@ def test_osp_api(osp_api, variables, deployment_name): # "Failed to register OSP node: {}".format(node['mac_address']) osp_api.refresh_deployment_info() - assert osp_api.wait_for_node_registration() + assert osp_api.wait_for_osp_node_registration() # 'Overcloud nodes failed to finish registration successfully' # TODO: Verify that the finished tasks didn't fail @@ -217,26 +219,26 @@ def test_osp_api(osp_api, variables, deployment_name): assert osp_api.set_overcloud_node_count(overcloud_node_count) # 'Unable to set the overcloud node count to {}'.format(overcloud_node_count) - osp_api.node_flavors() + osp_api.get_osp_node_flavors() # Since nodes have the same HW specs we only have 1 flavor flavor_name = osp_api.fusor_data['osp_flavors'][0]['name'] - assert osp_api.update_role_controller(flavor_name, overcloud_controller_count) + assert osp_api.update_osp_role_controller(flavor_name, overcloud_controller_count) # "Unable to update controller role and count" - assert osp_api.update_role_compute(flavor_name, overcloud_compute_count) + assert osp_api.update_osp_role_compute(flavor_name, overcloud_compute_count) # "Unable to update compute role and count" # "Assigning cinder role flavor ({}) and count ({})".format( flavor_name, storage_role_count)) - assert osp_api.update_role_cinder(flavor_name, cinder_role_count) + assert osp_api.update_osp_role_cinder(flavor_name, cinder_role_count) # "Unable to update cinder role and count" # "Assigning swift role flavor ({}) and count ({})".format(flavor_name, swift_role_count)) - assert osp_api.update_role_swift(flavor_name, swift_role_count) + assert osp_api.update_osp_role_swift(flavor_name, swift_role_count) # "Unable to update swift role and count" # Local ceph storage not supported by QCI - assert 
osp_api.update_role_ceph(flavor_name, ceph_role_count) + assert osp_api.update_osp_role_ceph(flavor_name, ceph_role_count) # "Unable to update ceph role and count" # This should only be run for nested deployments. Baremetal doesn't need this @@ -253,6 +255,8 @@ def test_osp_api(osp_api, variables, deployment_name): # "Unable to set the overcloud network info" if deploy_cfme: + assert osp_api.set_install_location_cfme(cfme_install_loc), \ + "Unable to set the CFME install location" # "Setting info for a Cloudforms Deployment" # "Setting CFME passwords" assert osp_api.set_creds_cfme(cfme_root_password) diff --git a/tests/api/test_api_qci_deployment.py b/tests/api/test_api_qci_deployment.py new file mode 100644 index 0000000..aa6fd27 --- /dev/null +++ b/tests/api/test_api_qci_deployment.py @@ -0,0 +1,433 @@ +import pytest +import string +import random +from urlparse import urlsplit +from time import sleep + +from lib.api import fusor_api + + +def parse_ip_from_url(url): + """ + This will return the IP address from a valid url scheme. 
+ Since urlparse won't separate the port, if present, from the IP we have to do it manually + """ + up = urlsplit(url) + + ip = up.netloc + if up.port: + ip, port = ip.split(':') + + return ip + + +# TODO: Make this global for all QCI tests +@pytest.fixture(scope="module") +def deployment_name(request): + dep_name = request.config.getoption("--deployment-name") + print "Deployment name to test: {}".format(dep_name) + return dep_name + + +@pytest.fixture(scope="module") +def fusor_admin_username(variables): + return variables['credentials']['fusor']['username'] + + +@pytest.fixture(scope="module") +def fusor_admin_password(variables): + return variables['credentials']['fusor']['password'] + + +@pytest.fixture(scope="module") +def deployment_id(variables): + """ + Retrieve deployment id used throughout this test + Currently just generates a random string + """ + return ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(8)]) + + +@pytest.fixture(scope="module") +def dep_api(fusor_admin_username, fusor_admin_password, base_url): + """ + RHEVFusorApi object with methods for accessing/editing RHEV deployment objects + """ + fusor_ip = parse_ip_from_url(base_url) + return fusor_api.QCIDeploymentApi(fusor_ip, fusor_admin_username, fusor_admin_password) + + +def deployment_attach_subscriptions( + dep_api, rhn_username, rhn_password, rhn_sma_uuid, sub_name, sub_quantity, **kwargs): + """ + Attach the specified subscriptions to the deployment loaded in the dep_api object + """ + + dep_api.rhn_login(rhn_username, rhn_password) + consumer = dep_api.rhn_get_consumer(rhn_sma_uuid) + + assert consumer, "No RHN consumer found for uuid: {}".format(rhn_sma_uuid) + + dep_api.rhn_set_upstream_consumer(consumer['name'], consumer['uuid']) + subscriptions = dep_api.rhn_get_consumer_subscriptions(rhn_sma_uuid) + + for sub in subscriptions: + pool = sub['pool'] + qty_attached = sub['quantity'] + pool_id = pool['id'] + pool_name = pool['productName'] + if pool_name 
== sub_name: + if qty_attached < sub_quantity: + qty_additional = sub_quantity - qty_attached + dep_api.rhn_attach_subscription( + consumer['uuid'], pool_id, qty_additional) + + +def test_api_deployment(dep_api, variables, deployment_name): + dep = variables['deployment'] + dep_rhv = dep['products']['rhv'] + dep_osp = dep['products']['osp'] + dep_cfme = dep['products']['cfme'] + dep_ose = dep['products']['ose'] + dep_sat = dep['products']['sat'] + + masktocidr = {"255.255.255.0": "/24", "255.255.0.0": "/16", "255.0.0.0": "/8", } + + enable_access_insights = dep_sat['enable_access_insights'] + rhv_is_self_hosted = ( + 'selfhost' in dep_rhv['rhv_setup_type'] or 'rhvsh' in dep['install']) + rhevm_mac = dep_rhv['rhvm_mac'] if not rhv_is_self_hosted else None + rhevh_macs = dep_rhv['rhvh_macs'] + data_address = dep_rhv['data_domain_address'] + data_name = dep_rhv['data_domain_name'] + data_path = dep_rhv['data_domain_share_path'] + export_address = dep_rhv['export_domain_address'] + export_name = dep_rhv['export_domain_name'] + export_path = dep_rhv['export_domain_share_path'] + selfhosted_name = dep_rhv['selfhosted_domain_name'] + selfhosted_address = dep_rhv['selfhosted_domain_address'] + selfhosted_path = dep_rhv['selfhosted_domain_share_path'] + selfhosted_engine_hostname = ( + dep_rhv['self_hosted_engine_hostname'] or 'rhv-selfhosted-engine') + deploy_rhv = ('rhv' in dep['install'] or 'rhvsh' in dep['install']) + deploy_osp = 'osp' in dep['install'] + deploy_cfme = 'cfme' in dep['install'] + deploy_ose = 'ocp' in dep['install'] + if not deployment_name: + deployment_name = 'pytest-api-{}{}{}{}{}'.format( + dep['deployment_id'], + '-rhv' if deploy_rhv else '', + '-osp' if deploy_osp else '', + '-cfme' if deploy_cfme else '', + '-ocp' if deploy_ose else '') + deployment_desc = 'API deployment using pytest' + + rhev_admin_password = dep_rhv['rhvm_adminpass'] + rhv_cpu_type = dep_rhv['cpu_type'] + cfme_root_password = dep_cfme['cfme_admin_password'] + cfme_install_loc = 
dep_cfme['cfme_install_loc'] + cfme_root_password = dep_cfme['cfme_root_password'] + + ose_install_loc = dep_ose['install_loc'] + ose_number_master_nodes = dep_ose['number_master_nodes'] + ose_master_vcpu = dep_ose['master_vcpu'] + ose_master_ram = dep_ose['master_ram'] + ose_master_disk = dep_ose['master_disk'] + ose_number_worker_nodes = dep_ose['number_worker_nodes'] + ose_node_vcpu = dep_ose['node_vcpu'] + ose_node_ram = dep_ose['node_ram'] + ose_node_disk = dep_ose['node_disk'] + ose_storage_size = dep_ose['storage_size'] + ose_storage_name = dep_ose['storage_name'] + ose_storage_host = dep_ose['storage_host'] + ose_export_path = dep_ose['export_path'] + ose_username = dep_ose['username'] + ose_user_password = dep_ose['user_password'] + ose_subdomain_name = dep_ose['subdomain_name'] if dep_ose['subdomain_name'] else dep['deployment_id'] + ose_sample_app_name = 'openshift_sample_helloworld' + ose_sample_apps = dep_ose['sample_apps'] # List of sample apps to include in the deployment + ose_sub_pool_name = dep_ose['subscription']['name'] + ose_sub_quantity = dep_ose['subscription']['quantity'] + + undercloud_ip = dep_osp['undercloud_address'] + undercloud_user = dep_osp['undercloud_user'] + undercloud_pass = dep_osp['undercloud_pass'] + overcloud_nodes = dep_osp['overcloud_nodes'] + osp_deploy_ramdisk_name = 'bm-deploy-ramdisk' + osp_deploy_kernel_name = 'bm-deploy-kernel' + osp_images = None + ramdisk_image = None + kernel_image = None + overcloud_controller_count = dep_osp['controller_count'] + overcloud_compute_count = dep_osp['compute_count'] + cinder_role_count = dep_osp.get('cinder_count', 0) + swift_role_count = dep_osp.get('swift_count', 0) + ceph_role_count = dep_osp.get('ceph_count', 0) # Ceph local storage not supported in QCI + deploy_external_ceph = dep_osp['external_ceph_storage'] + ceph_ext_mon_host = dep_osp['ceph']['ceph_ext_mon_host'] + ceph_cluster_fsid = dep_osp['ceph']['ceph_cluster_fsid'] + ceph_client_username = 
dep_osp['ceph']['ceph_client_username'] + ceph_client_key = dep_osp['ceph']['ceph_client_key'] + nova_rbd_pool_name = dep_osp['ceph']['nova_rbd_pool_name'] + cinder_rbd_pool_name = dep_osp['ceph']['cinder_rbd_pool_name'] + glance_rbd_pool_name = dep_osp['ceph']['glance_rbd_pool_name'] + + overcloud_admin_pass = dep_osp['undercloud_pass'] # Sync overcloud pw with undercloud + overcloud_prov_network = '{}{}'.format( + dep_osp['network']['provision_network']['network'], + masktocidr[dep_osp['network']['provision_network']['subnet']]) + overcloud_pub_network = '{}{}'.format( + dep_osp['network']['public_network']['network'], + masktocidr[dep_osp['network']['public_network']['subnet']]) + overcloud_pub_gateway = dep_osp['network']['public_network']['gateway'] + overcloud_libvirt_type = 'qemu' + rhn_username = variables['credentials']['cdn']['username'] + rhn_password = variables['credentials']['cdn']['password'] + rhn_sma_uuid = dep_sat['rhsm_satellite']['uuid'] + sat_sub_pool_name = dep_sat['subscription']['name'] + sat_sub_quantity = dep_sat['subscription']['quantity'] + + assert dep_api.create_deployment( + deployment_name, deployment_desc, + deploy_rhv=deploy_rhv, + deploy_osp=deploy_osp, + deploy_cfme=deploy_cfme, deploy_ose=deploy_ose, + access_insights=enable_access_insights), \ + "Unable to create QCI deployment ({})".format(deployment_name) + + if deploy_rhv: + assert dep_api.set_rhv_hosts(rhevh_macs, rhevm_mac), "Unable to set the RHEV Hosts" + + if rhv_is_self_hosted: + dep_api.set_deployment_property('rhev_self_hosted_engine_hostname', selfhosted_engine_hostname) + + assert dep_api.set_creds_rhv(rhev_admin_password), "Unable to set RHEV credentials" + + assert dep_api.set_rhv_cpu_type(rhv_cpu_type), "Unable to set RHV cpu type: {}".format( + rhv_cpu_type) + + assert dep_api.set_nfs_storage_rhv( + data_name, data_address, data_path, + export_name, export_address, export_path, + selfhosted_name, selfhosted_address, selfhosted_path), \ + "Unable to set the NFS 
storage for the deployment" + + if deploy_osp: + dep_api.refresh_deployment_info() + assert dep_api.add_undercloud(undercloud_ip, undercloud_user, undercloud_pass) + dep_api.refresh_deployment_info() + + image_query_wait = 10 + image_query_retries = 0 + image_query_retries_max = 3 + # Retry undercloud image query when automation moves faster than fusor + while ((not osp_images or not kernel_image or not ramdisk_image) and + image_query_retries < image_query_retries_max): + osp_images = dep_api.get_openstack_images() + image_query_retries += 1 + + try: + assert osp_images # "Unable to get the openstack image info" + + ramdisk_iterator = ( + image for image in osp_images['images'] if ( + image['name'] == osp_deploy_ramdisk_name)) + ramdisk_image = next(ramdisk_iterator, None) + + kernel_iterator = ( + image for image in osp_images['images'] if ( + image['name'] == osp_deploy_kernel_name)) + kernel_image = next(kernel_iterator, None) + + assert kernel_image # "Unable to get the openstack kernel image info" + assert ramdisk_image # "Unable to get the openstack ramdisk image info" + except AssertionError: + if image_query_retries >= image_query_retries_max: + print 'Maximum retries ({}) for querying osp images has been reached'.format( + image_query_retries) + print "Image Info:\n", osp_images + raise + + print 'Retrying openstack image query - ({})'.format(image_query_retries) + osp_images = None + ramdisk_image = None + kernel_image = None + sleep(image_query_wait) + + for node in overcloud_nodes: + assert dep_api.register_osp_nodes( + node['driver_type'], + node['host_ip'], + node['host_username'], + node['host_password'], + node['mac_address'], + kernel_image['id'], + ramdisk_image['id']), \ + "Failed to register node for introspection: {}".format(node['mac_address']) + + dep_api.refresh_deployment_info() + assert dep_api.wait_for_osp_node_registration(), \ + 'Overcloud nodes failed to finish registration successfully' + + # TODO: Verify that the finished tasks 
didn't fail + overcloud_node_count = len(overcloud_nodes) + + assert dep_api.set_overcloud_node_count(overcloud_node_count), \ + 'Unable to set the overcloud node count to {}'.format(overcloud_node_count) + + dep_api.get_osp_node_flavors() + # Since nodes have the same HW specs we only have 1 flavor + flavor_name = dep_api.fusor_data['osp_flavors'][0]['name'] + + assert dep_api.update_osp_role_controller(flavor_name, overcloud_controller_count), \ + "Unable to update controller role and count({})".format( + flavor_name, overcloud_controller_count) + + assert dep_api.update_osp_role_compute(flavor_name, overcloud_compute_count), \ + "Unable to update compute role and count({})".format( + flavor_name, overcloud_compute_count) + + assert dep_api.update_osp_role_cinder(flavor_name, cinder_role_count), \ + "Unable to update block storage role and count({})".format( + flavor_name, cinder_role_count) + + assert dep_api.update_osp_role_swift(flavor_name, swift_role_count), \ + "Unable to update object role and count({})".format( + flavor_name, swift_role_count) + + if ceph_role_count: + assert dep_api.update_osp_role_ceph(flavor_name, ceph_role_count) + + if deploy_external_ceph: + dep_api.set_external_ceph_storage( + ceph_ext_mon_host, ceph_cluster_fsid, ceph_client_username, ceph_client_key, + nova_rbd_pool_name, cinder_rbd_pool_name, glance_rbd_pool_name) + + # This should only be run for nested deployments. 
Baremetal doesn't need this + assert dep_api.set_nova_libvirt_type(overcloud_libvirt_type), \ + "Unable to set the overcloud libvirt type to {}".format(overcloud_libvirt_type) + + # "Setting overcloud credentials") + dep_api.set_creds_overcloud(overcloud_admin_pass) + + assert dep_api.set_overcloud_network( + overcloud_prov_network, + overcloud_pub_network, + overcloud_pub_gateway), \ + "Unable to set the overcloud network info" + + if deploy_cfme: + assert dep_api.set_install_location_cfme(cfme_install_loc), \ + "Unable to set the CFME install location" + assert dep_api.set_creds_cfme(cfme_root_password), \ + "Unable to set the CFME root/admin passwords" + + if deploy_ose: + assert dep_api.set_install_location_ocp(ose_install_loc), \ + "Unable to set the OpenShift install location" + + assert dep_api.ose_set_master_node_specs( + ose_number_master_nodes, + ose_master_vcpu, + ose_master_ram, + ose_master_disk), 'Unable to set the OpenShift master node specs' + + assert dep_api.ose_set_worker_node_specs( + ose_number_worker_nodes, + ose_node_vcpu, + ose_node_ram, + ose_node_disk), 'Unable to set the OpenShift worker node specs' + + assert dep_api.ose_set_storage_size(ose_storage_size), \ + 'Unable to set the OpenShift storage size for docker to {}'.format( + ose_storage_size) + + assert dep_api.set_ose_nfs_storage( + ose_storage_name, + ose_storage_host, + ose_export_path), 'Unable to set the OpenShift NFS storage' + + assert dep_api.set_ose_creds(ose_username, ose_user_password), \ + 'Unable to set the OpenShift credentials' + + assert dep_api.set_ose_subdomain(ose_subdomain_name), \ + 'Unable to set the OpenShift subdomain name' + + if ose_sample_apps: + for app in ose_sample_apps: + # Do some translation since the yaml app name value defaults to the element id + if 'hello_world' in app: + assert dep_api.set_deployment_property(ose_sample_app_name, True), \ + 'Unable to enable OpenShift sample application hello_world' + + # Add subscriptions + 
deployment_attach_subscriptions( + dep_api, rhn_username, rhn_password, rhn_sma_uuid, sat_sub_pool_name, sat_sub_quantity) + + if deploy_ose: + deployment_attach_subscriptions( + dep_api, rhn_username, rhn_password, rhn_sma_uuid, ose_sub_pool_name, ose_sub_quantity) + + dep_validation = dep_api.get_deployment_validation()['validation'] + + if deploy_osp: + # Sync with overcloud so we don't clobber any values that are set by fusor (overcloud admin pw) + # "Syncing OpenStack data with fusor" + dep_api.sync_openstack() + + assert not dep_validation['errors'], ("Validation contains errors:\n{}".format( + '\n'.join(dep_validation['errors']))) + + assert dep_api.deploy(), "API deployment failed: {}".format(dep_api.last_response.text) + + +# TODO: This should be generic for any type of deployment +def test_api_deployment_success(dep_api, variables, deployment_name): + """ + Query the fusor deployment object for the status of the Deploy task + """ + if deployment_name: + dep_api.load_deployment(deployment_name) + + dep = variables['deployment'] + + deployment_time = 0 + deployment_time_wait = 1 # Time (minutes) to wait between polling for progress + deployment_time_max = dep.get('deployment_timeout', 240) + deployment_success = False + fail_message = "Deployment timed out after {} hours".format(deployment_time_max / 60) + # Wait a while for the deployment to complete (or fail), + + while not deployment_success and deployment_time < deployment_time_max: + deployment_time += deployment_time_wait + sleep(deployment_time_wait * 60) + progress = dep_api.get_deployment_progress() + dep_api.refresh_deployment_info() + + if(progress['result'] == 'success' and + progress['state'] == 'stopped' and + progress['progress'] == 1.0): + deployment_success = True + print 'API Deployment Succeeded!' 
+ elif progress['result'] == 'error' and progress['state'] == 'paused': + deployment_success = False + deployment_task_uuid = dep_api.fusor_data['deployment']['foreman_task_uuid'] + foreman_task = next( + task for task in dep_api.fusor_data['foreman_tasks'] if( + task['id'] == deployment_task_uuid)) + + # Loop through all sub tasks until we find one paused w/ error + for sub_task in foreman_task['sub_tasks']: + if sub_task['result'] == 'error': + sub_task_info = dep_api.foreman_task(sub_task['id'])['foreman_task'] + fail_message = 'Deployment Failed: {} -> {}'.format( + sub_task_info['label'], sub_task_info['humanized_errors']) + assert deployment_success, fail_message + + # If we got here then the logic for finding the failed task needs to be fixed + fail_message = "Unable to find the failed subtask for task: {}".format( + '\n'.join([step['action_class'] for step in foreman_task['failed_steps']])) + + assert deployment_success, fail_message + + assert deployment_success, "DEFAULT: {}".format(fail_message) diff --git a/tests/api/test_api_rhev_deployment.py b/tests/api/test_api_rhev_deployment.py index 63616f9..4dac1b0 100644 --- a/tests/api/test_api_rhev_deployment.py +++ b/tests/api/test_api_rhev_deployment.py @@ -54,7 +54,7 @@ def rhv_api(fusor_admin_username, fusor_admin_password, base_url): RHEVFusorApi object with methods for accessing/editing RHEV deployment objects """ fusor_ip = parse_ip_from_url(base_url) - return fusor_api.RHEVFusorApi(fusor_ip, fusor_admin_username, fusor_admin_password) + return fusor_api.QCIDeploymentApi(fusor_ip, fusor_admin_username, fusor_admin_password) def deployment_attach_subscriptions( @@ -125,15 +125,18 @@ def test_rhv_api(rhv_api, variables, deployment_name): selfhosted_address = dep_rhv['selfhosted_domain_address'] selfhosted_path = dep_rhv['selfhosted_domain_share_path'] selfhosted_engine_hostname = dep_rhv['self_hosted_engine_hostname'] or 'rhv-selfhosted-engine' + deploy_rhv = 'rhv' in dep['install'] deploy_cfme = 'cfme' 
in dep['install'] deploy_ose = 'ocp' in dep['install'] rhev_admin_password = dep_rhv['rhvm_adminpass'] cfme_root_password = dep_cfme['cfme_admin_password'] + cfme_install_loc = dep_cfme['cfme_install_loc'] if not deployment_name: deployment_name = 'pytest-rhv-api-{}{}{}'.format( dep['deployment_id'], '-cfme' if deploy_cfme else '', '-ocp' if deploy_ose else '') deployment_desc = 'Pytest of the fusor api for deploying RHEV' + ose_install_loc = dep_ose['install_loc'] ose_number_master_nodes = dep_ose['number_master_nodes'] ose_master_vcpu = dep_ose['master_vcpu'] ose_master_ram = dep_ose['master_ram'] @@ -160,30 +163,33 @@ def test_rhv_api(rhv_api, variables, deployment_name): # "Creating RHEV deployment: {}".format(deployment_name) assert rhv_api.create_deployment( deployment_name, deployment_desc, + deploy_rhv=deploy_rhv, deploy_cfme=deploy_cfme, deploy_ose=deploy_ose), "Unable to create RHEV deployment ({})".format(deployment_name) # log.info("Assigning RHEV Hypervisors: {}".format(rhevh_macs)) - assert rhv_api.set_discovered_hosts(rhevh_macs, rhevm_mac), "Unable to set the RHEV Hosts" + assert rhv_api.set_rhv_hosts(rhevh_macs, rhevm_mac), "Unable to set the RHEV Hosts" if rhv_is_self_hosted: rhv_api.set_deployment_property('rhev_self_hosted_engine_hostname', selfhosted_engine_hostname) # log.info("Setting the RHEV credentials") - assert rhv_api.set_creds_rhev(rhev_admin_password), "Unable to set RHEV credentials" + assert rhv_api.set_creds_rhv(rhev_admin_password), "Unable to set RHEV credentials" - # Set NFS for CloudForms or OpenShift # log.info("Setting NFS storage values") - assert rhv_api.set_nfs_storage( + assert rhv_api.set_nfs_storage_rhv( data_name, data_address, data_path, export_name, export_address, export_path, selfhosted_name, selfhosted_address, selfhosted_path), "Unable to set the NFS storage for the deployment" if deploy_cfme: - # log.info("Setting info for a Cloudforms Deployment") - # log.info("Setting CFME root/admin password") - assert 
rhv_api.set_creds_cfme(cfme_root_password), "Unable to set the CFME root/admin passwords" + assert rhv_api.set_install_location_cfme(cfme_install_loc), \ + "Unable to set the CFME install location" + assert rhv_api.set_creds_cfme(cfme_root_password), \ + "Unable to set the CFME root/admin passwords" if deploy_ose: + assert rhv_api.set_install_location_ocp(ose_install_loc), \ + "Unable to set the OpenShift install location" # log.info("Setting info for a OpenShift Deployment") # log.info("Setting Master({}) node specs: vcpu({}), ram({}), disk size({})".format( # ose_number_master_nodes, @@ -272,7 +278,7 @@ def test_rhv_api_deployment_success(rhv_api, variables, deployment_name): progress['state'] == 'stopped' and progress['progress'] == 1.0): deployment_success = True - print 'OpenStack Deployment Succeeded!' + print 'RHV Deployment Succeeded!' elif progress['result'] == 'error' and progress['state'] == 'paused': deployment_success = False deployment_task_uuid = rhv_api.fusor_data['deployment']['foreman_task_uuid'] diff --git a/variables.yaml.example b/variables.yaml.example index 9aba468..9ecd7f2 100644 --- a/variables.yaml.example +++ b/variables.yaml.example @@ -25,6 +25,7 @@ deployment: director_address: null director_ui_url: http://example.com director_vm_name: null + external_ceph_storage: false overcloud_nodes: - driver_type: pxe_ssh host_ip: 10.8.0.136 @@ -45,6 +46,14 @@ deployment: subnet: 255.255.255.0 network: 192.168.156.0 gateway: 192.168.156.1 + ceph: + ceph_ext_mon_host: null + ceph_cluster_fsid: null + ceph_client_username: openstack + ceph_client_key: null + nova_rbd_pool_name: vms + cinder_rbd_pool_name: volumes + glance_rbd_pool_name: images rhv: cluster_name: Default cpu_type: Intel Nehalem Family @@ -79,10 +88,11 @@ deployment: rhsm_satellite: name: sma_name # requires a valid SMA name. 
QCI may mangle the sma name by changing '-' to '_' uuid: sma_uuid # requires a valid SMA uuid - rhsm_subs: - - Red Hat Cloud Infrastructure with Smart Management sat_desc: description sat_name: BasicDeployment + subscription: + name: null + quantity: 0 update_lifecycle_immediately: true create_new_env: true # comes into play when update_lifecycle_immediately is set to false new_env: