From e174af257099be4a48f4933f5524963e7c9bfab9 Mon Sep 17 00:00:00 2001 From: Luigi Molinaro Date: Wed, 22 May 2024 08:55:51 +0000 Subject: [PATCH 01/14] PR 717 --- scripts/cp4d/cp4d-delete-instance.sh | 116 ++++++++++++++------------- 1 file changed, 62 insertions(+), 54 deletions(-) diff --git a/scripts/cp4d/cp4d-delete-instance.sh b/scripts/cp4d/cp4d-delete-instance.sh index 0412a1eb6..1e61c3fe7 100755 --- a/scripts/cp4d/cp4d-delete-instance.sh +++ b/scripts/cp4d/cp4d-delete-instance.sh @@ -18,14 +18,6 @@ wait_ns_deleted() { done } -wait_ns_deleted() { - NS=$1 - log "Waiting for deletion of namespace ${NS} ..." - while $(oc get ns ${NS} > /dev/null 2>&1);do - sleep 1 - done -} - delete_operator_ns() { CP4D_OPERATORS=$1 oc get project ${CP4D_OPERATORS} > /dev/null 2>&1 @@ -56,6 +48,22 @@ delete_operator_ns() { fi } +#When running cp4d-delete-instance.sh, the script always deletes the Certificate Manager and the License Manager. +#This should not be done if these shared services are still in use, for example in another CP4D instance. +check_shared_resources() { + SHARED_RESOURCE_TYPE=$1 + SHARED_RESOURCE_NAMESPACE=$2 + STATE_VAR=$3 + + if [ "$(oc get ${SHARED_RESOURCE_TYPE} --all-namespaces --no-headers 2>/dev/null)" != "" ]; then + echo "Found instances of ${SHARED_RESOURCE_TYPE}, keeping ${SHARED_RESOURCE_NAMESPACE} namespace" + eval "${STATE_VAR}=0" + else + echo "No instances of ${SHARED_RESOURCE_TYPE} found" + eval "${STATE_VAR}=1" + fi +} + CP4D_PROJECT=$1 if [ -z "${CP4D_PROJECT}" ];then echo "Usage: $0 " @@ -65,7 +73,7 @@ fi # Ask for final confirmation to delete the CP4D instance if [ -z "${CPD_CONFIRM_DELETE}" ];then read -p "Are you sure you want to delete CP4D instance ${CP4D_PROJECT} and Cloud Pak Foundational Services (y/N)? " -r - case "${REPLY}" in + case "${REPLY}" in y|Y) ;; * ) @@ -81,9 +89,9 @@ oc get project ${CP4D_PROJECT} > /dev/null 2>&1 if [ $? -eq 0 ];then log "Getting Custom Resources in OpenShift project ${CP4D_PROJECT}..." - oc get --no-headers -n $CP4D_PROJECT $(oc api-resources --namespaced=true --verbs=list -o name | grep -E 'ibm|caikitruntimestacks' | awk '{printf "%s%s",sep,$0;sep=","}') --ignore-not-found -o=custom-columns=KIND:.kind,NAME:.metadata.name --sort-by='kind' > ${temp_dir}/cp4d-resources.out + oc get --no-headers -n $CP4D_PROJECT $(oc api-resources --namespaced=true --verbs=list -o name | grep ibm | awk '{printf "%s%s",sep,$0;sep=","}') --ignore-not-found -o=custom-columns=KIND:.kind,NAME:.metadata.name --sort-by='kind' > ${temp_dir}/cp4d-resources.out - # + # # First the script deletes all CP4D custom resources in the specified project # Some of these may take too long or they may fail to delete, hence --wait=false it specified so that the command doesn't wait # Then the finalizer is removed using oc patch, which will delete the custom resource and all OpenShift resources it owns @@ -187,18 +195,6 @@ else echo "Project ${KNATIVE_SERVING} does not exist, skipping" fi -APP_CONNECT=ibm-app-connect -oc get project ${APP_CONNECT} > /dev/null 2>&1 -if [ $? -eq 0 ];then - log "Deleting everything in the ${APP_CONNECT} project" - - log "Deleting ${APP_CONNECT} project" - oc delete ns ${APP_CONNECT} --ignore-not-found --wait=false - wait_ns_deleted ${APP_CONNECT} -else - echo "Project ${APP_CONNECT} does not exist, skipping" -fi - IBM_SCHEDULING=ibm-scheduling oc get project ${IBM_SCHEDULING} > /dev/null 2>&1 if [ $? 
-eq 0 ];then @@ -216,41 +212,53 @@ else echo "Project ${IBM_SCHEDULING} does not exist, skipping" fi -IBM_LICENSING=ibm-licensing -oc get project ${IBM_LICENSING} > /dev/null 2>&1 -if [ $? -eq 0 ];then - log "Deleting everything in the ${IBM_LICENSING} project" - oc delete ibmlicensing --all --ignore-not-found - oc delete sub -n ${IBM_LICENSING} --all --ignore-not-found - oc delete csv -n ${IBM_LICENSING} --all --ignore-not-found - - log "Deleting ${IBM_LICENSING} project" - oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_LICENSING} - oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_LICENSING} + +check_shared_resources certificates.cert-manager.io ibm-cert-manager DELETE_CERT_MANAGER +check_shared_resources ibmlicensingdefinitions.operator.ibm.com ibm-licensing DELETE_LICENSING + +if [ "${DELETE_CERT_MANAGER}" -eq 1 ]; then + IBM_CERT_MANAGER=ibm-cert-manager + oc get project ${IBM_CERT_MANAGER} > /dev/null 2>&1 + if [ $? -eq 0 ]; then + log "Deleting everything in the ${IBM_CERT_MANAGER} project" + oc delete lease -n ${IBM_CERT_MANAGER} --all --ignore-not-found + oc delete endpointslice -n ${IBM_CERT_MANAGER} --all --ignore-not-found + oc delete endpoints -n ${IBM_CERT_MANAGER} --all --ignore-not-found + + oc delete sub -n ${IBM_CERT_MANAGER} --all --ignore-not-found + oc delete csv -n ${IBM_CERT_MANAGER} --all --ignore-not-found + + log "Deleting ${IBM_CERT_MANAGER} project" + oc delete ns ${IBM_CERT_MANAGER} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_CERT_MANAGER} + oc delete ns ${IBM_CERT_MANAGER} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_CERT_MANAGER} + else + echo "Project ${IBM_CERT_MANAGER} does not exist, skipping" + fi else - echo "Project ${IBM_LICENSING} does not exist, skipping" + echo "Keeping ${IBM_CERT_MANAGER} namespace due to shared resources" fi -IBM_CERT_MANAGER=ibm-cert-manager -oc get project ${IBM_CERT_MANAGER} > /dev/null 2>&1 -if [ $? -eq 0 ];then - log "Deleting everything in the ${IBM_CERT_MANAGER} project" - oc delete lease -n ${IBM_CERT_MANAGER} --all --ignore-not-found - oc delete endpointslice -n ${IBM_CERT_MANAGER} --all --ignore-not-found - oc delete endpoints -n ${IBM_CERT_MANAGER} --all --ignore-not-found - - oc delete sub -n ${IBM_CERT_MANAGER} --all --ignore-not-found - oc delete csv -n ${IBM_CERT_MANAGER} --all --ignore-not-found - - log "Deleting ${IBM_CERT_MANAGER} project" - oc delete ns ${IBM_CERT_MANAGER} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_CERT_MANAGER} - oc delete ns ${IBM_CERT_MANAGER} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_CERT_MANAGER} +if [ "${DELETE_LICENSING}" -eq 1 ]; then + IBM_LICENSING=ibm-licensing + oc get project ${IBM_LICENSING} > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + log "Deleting everything in the ${IBM_LICENSING} project" + oc delete ibmlicensing --all --ignore-not-found + oc delete sub -n ${IBM_LICENSING} --all --ignore-not-found + oc delete csv -n ${IBM_LICENSING} --all --ignore-not-found + + log "Deleting ${IBM_LICENSING} project" + oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_LICENSING} + oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_LICENSING} + else + echo "Project ${IBM_LICENSING} does not exist, skipping" + fi else - echo "Project ${IBM_CERT_MANAGER} does not exist, skipping" + echo "Keeping ${IBM_LICENSING} namespace due to shared resources" fi # Delete other elements belonging to CP4D install From bdd1e37c36cd0d07d5719d72587d5111e3b6bd59 Mon Sep 17 00:00:00 2001 From: Luigi Molinaro Date: Wed, 22 May 2024 09:05:26 +0000 Subject: [PATCH 02/14] Last review of 717 issue --- scripts/cp4d/cp4d-delete-instance.sh | 60 ++++++++++++++++------------ 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/scripts/cp4d/cp4d-delete-instance.sh b/scripts/cp4d/cp4d-delete-instance.sh index 1e61c3fe7..b0ff8b567 100755 --- a/scripts/cp4d/cp4d-delete-instance.sh +++ b/scripts/cp4d/cp4d-delete-instance.sh @@ -73,7 +73,7 @@ fi # Ask for final confirmation to delete the CP4D instance if [ -z "${CPD_CONFIRM_DELETE}" ];then read -p "Are you sure you want to delete CP4D instance ${CP4D_PROJECT} and Cloud Pak Foundational Services (y/N)? " -r - case "${REPLY}" in + case "${REPLY}" in y|Y) ;; * ) @@ -89,9 +89,9 @@ oc get project ${CP4D_PROJECT} > /dev/null 2>&1 if [ $? -eq 0 ];then log "Getting Custom Resources in OpenShift project ${CP4D_PROJECT}..." - oc get --no-headers -n $CP4D_PROJECT $(oc api-resources --namespaced=true --verbs=list -o name | grep ibm | awk '{printf "%s%s",sep,$0;sep=","}') --ignore-not-found -o=custom-columns=KIND:.kind,NAME:.metadata.name --sort-by='kind' > ${temp_dir}/cp4d-resources.out + oc get --no-headers -n $CP4D_PROJECT $(oc api-resources --namespaced=true --verbs=list -o name | grep -E 'ibm|caikitruntimestacks' | awk '{printf "%s%s",sep,$0;sep=","}') --ignore-not-found -o=custom-columns=KIND:.kind,NAME:.metadata.name --sort-by='kind' > ${temp_dir}/cp4d-resources.out - # + # # First the script deletes all CP4D custom resources in the specified project # Some of these may take too long or they may fail to delete, hence --wait=false it specified so that the command doesn't wait # Then the finalizer is removed using oc patch, which will delete the custom resource and all OpenShift resources it owns @@ -195,6 +195,18 @@ else echo "Project ${KNATIVE_SERVING} does not exist, skipping" fi +APP_CONNECT=ibm-app-connect +oc get project ${APP_CONNECT} > /dev/null 2>&1 +if [ $? -eq 0 ];then + log "Deleting everything in the ${APP_CONNECT} project" + + log "Deleting ${APP_CONNECT} project" + oc delete ns ${APP_CONNECT} --ignore-not-found --wait=false + wait_ns_deleted ${APP_CONNECT} +else + echo "Project ${APP_CONNECT} does not exist, skipping" +fi + IBM_SCHEDULING=ibm-scheduling oc get project ${IBM_SCHEDULING} > /dev/null 2>&1 if [ $? -eq 0 ];then @@ -212,9 +224,26 @@ else echo "Project ${IBM_SCHEDULING} does not exist, skipping" fi +if [ "${DELETE_LICENSING}" -eq 1 ]; then + IBM_LICENSING=ibm-licensing + oc get project ${IBM_LICENSING} > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + log "Deleting everything in the ${IBM_LICENSING} project" + oc delete ibmlicensing --all --ignore-not-found + oc delete sub -n ${IBM_LICENSING} --all --ignore-not-found + oc delete csv -n ${IBM_LICENSING} --all --ignore-not-found -check_shared_resources certificates.cert-manager.io ibm-cert-manager DELETE_CERT_MANAGER -check_shared_resources ibmlicensingdefinitions.operator.ibm.com ibm-licensing DELETE_LICENSING + log "Deleting ${IBM_LICENSING} project" + oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_LICENSING} + oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false + wait_ns_deleted ${IBM_LICENSING} + else + echo "Project ${IBM_LICENSING} does not exist, skipping" + fi +else + echo "Keeping ${IBM_LICENSING} namespace due to shared resources" +fi if [ "${DELETE_CERT_MANAGER}" -eq 1 ]; then IBM_CERT_MANAGER=ibm-cert-manager @@ -240,27 +269,6 @@ else echo "Keeping ${IBM_CERT_MANAGER} namespace due to shared resources" fi -if [ "${DELETE_LICENSING}" -eq 1 ]; then - IBM_LICENSING=ibm-licensing - oc get project ${IBM_LICENSING} > /dev/null 2>&1 - if [ $? -eq 0 ]; then - log "Deleting everything in the ${IBM_LICENSING} project" - oc delete ibmlicensing --all --ignore-not-found - oc delete sub -n ${IBM_LICENSING} --all --ignore-not-found - oc delete csv -n ${IBM_LICENSING} --all --ignore-not-found - - log "Deleting ${IBM_LICENSING} project" - oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_LICENSING} - oc delete ns ${IBM_LICENSING} --ignore-not-found --wait=false - wait_ns_deleted ${IBM_LICENSING} - else - echo "Project ${IBM_LICENSING} does not exist, skipping" - fi -else - echo "Keeping ${IBM_LICENSING} namespace due to shared resources" -fi - # Delete other elements belonging to CP4D install echo "Deleting MutatingWebhookConfigurations" oc delete MutatingWebhookConfiguration ibm-common-service-webhook-configuration --ignore-not-found From 6ea58d045964758314c3a445a99f19c2092a4c8d Mon Sep 17 00:00:00 2001 From: Luigi Molinaro Date: Wed, 22 May 2024 09:06:34 +0000 Subject: [PATCH 03/14] Review of 717 --- scripts/cp4d/cp4d-delete-instance.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/cp4d/cp4d-delete-instance.sh b/scripts/cp4d/cp4d-delete-instance.sh index b0ff8b567..9a91a5e89 100755 --- a/scripts/cp4d/cp4d-delete-instance.sh +++ b/scripts/cp4d/cp4d-delete-instance.sh @@ -315,4 +315,5 @@ for crd in $(oc get crd --no-headers | awk '{print $1}' | grep -E '\.ibm|mantafl fi done + exit 0 \ No newline at end of file From d5972cbff4731551bc72dd58632cb860aff5206b Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Sun, 9 Jun 2024 19:38:11 +0000 Subject: [PATCH 04/14] #727 Allow CP4D 5.0.0 install --- Dockerfile | 12 +++++++++++- Dockerfile.ppc64le | 9 ++++++++- .../cp4d/cp4d-cluster/tasks/main.yml | 6 ++++++ .../cp4d/cp4d-switch-olm-utils/tasks/main.yml | 14 ++++++++------ cp-deploy.sh | 13 +++++++++++++ 5 files changed, 46 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5e3f21350..67fba6ade 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,12 @@ # Container image including olm-utils ARG CPD_OLM_UTILS_V2_IMAGE +ARG CPD_OLM_UTILS_V3_IMAGE -FROM ${CPD_OLM_UTILS_V2_IMAGE} +FROM ${CPD_OLM_UTILS_V2_IMAGE} as olm-utils-v2 +RUN cd /opt/ansible && \ + tar czf /tmp/opt-ansible-v2.tar.gz * + +FROM ${CPD_OLM_UTILS_V3_IMAGE} as olmn-utils-v3 LABEL authors="Arthur Laimbock, \ Markus Wiegleb, \ @@ -35,6 +40,11 @@ RUN mkdir -p /cloud-pak-deployer && \ COPY . 
/cloud-pak-deployer/
COPY ./deployer-web/nginx.conf /etc/nginx/
+COPY --from=olm-utils-v2 /tmp/opt-ansible-v2.tar.gz /olm-utils/
+
+RUN cd /opt/ansible && \
+    tar czf /olm-utils/opt-ansible-v3.tar.gz *
+
 # BUG with building wheel
 #RUN pip3 install -r /cloud-pak-deployer/deployer-web/requirements.txt > /tmp/deployer-web-pip-install.out 2>&1
 RUN pip3 install "cython<3.0.0" wheel && pip3 install PyYAML==6.0 --no-build-isolation && pip3 install -r /cloud-pak-deployer/deployer-web/requirements.txt > /tmp/deployer-web-pip-install.out 2>&1

diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 18c672856..54d8b6391 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -1,7 +1,12 @@
 # Container image including olm-utils
 ARG CPD_OLM_UTILS_V2_IMAGE
+ARG CPD_OLM_UTILS_V3_IMAGE

-FROM ${CPD_OLM_UTILS_V2_IMAGE}
+FROM ${CPD_OLM_UTILS_V2_IMAGE} as olm-utils-v2
+RUN cd /opt/ansible && \
+    tar czf /tmp/opt-ansible-v2.tar.gz *
+
+FROM ${CPD_OLM_UTILS_V3_IMAGE}

 LABEL authors="Arthur Laimbock, \
             Markus Wiegleb, \
@@ -33,8 +38,10 @@ RUN mkdir -p /cloud-pak-deployer && \
 COPY . /cloud-pak-deployer/
 COPY ./deployer-web/nginx.conf /etc/nginx/

+COPY --from=olm-utils-v2 /tmp/opt-ansible-v2.tar.gz /olm-utils/
+
 RUN cd /opt/ansible && \
-    tar czf /olm-utils/opt-ansible-v2.tar.gz *
+    tar czf /olm-utils/opt-ansible-v3.tar.gz *

 # BUG with building wheel
 #RUN pip3 install -r /cloud-pak-deployer/deployer-web/requirements.txt > /tmp/deployer-web-pip-install.out 2>&1

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/main.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/main.yml
index 3ee886b8b..4fcfe1c95 100644
--- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/main.yml
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/main.yml
@@ -3,6 +3,12 @@
   debug:
     var: current_cp4d_cluster

+- name: Switch to the correct olm-utils version
+  include_role:
+    name: cp4d-switch-olm-utils
+  vars:
+    _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}"
+
 - include_role:
     name: cp4d-variables
   vars:

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-switch-olm-utils/tasks/main.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-switch-olm-utils/tasks/main.yml
index 39c9d6e7b..23872d1b0 100644
--- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-switch-olm-utils/tasks/main.yml
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-switch-olm-utils/tasks/main.yml
@@ -1,14 +1,16 @@
 ---
-- name: Use olm-utils-v1 for older versions of Cloud Pak for Data
+- name: Use olm-utils-v2
   unarchive:
-    src: /olm-utils/opt-ansible-v1.tar.gz
+    src: /olm-utils/opt-ansible-v2.tar.gz
     dest: /opt/ansible
     remote_src: True
-  when: _p_current_cp4d_cluster.cp4d_version < "4.7.0"
+  when:
+  - _p_current_cp4d_cluster.cp4d_version >= "4.7.0"
+  - _p_current_cp4d_cluster.cp4d_version < "5.0.0"

-- name: Use olm-utils-v2
+- name: Use olm-utils-v3
   unarchive:
-    src: /olm-utils/opt-ansible-v2.tar.gz
+    src: /olm-utils/opt-ansible-v3.tar.gz
     dest: /opt/ansible
     remote_src: True
-  when: _p_current_cp4d_cluster.cp4d_version >= "4.7.0"
\ No newline at end of file
+  when: _p_current_cp4d_cluster.cp4d_version >= "5.0.0"
\ No newline at end of file

diff --git a/cp-deploy.sh b/cp-deploy.sh
index 9dafe270d..bb31012fe 100755
--- a/cp-deploy.sh
+++ b/cp-deploy.sh
@@ -632,6 +632,18 @@ else
   echo "Custom olm-utils-v2 image ${CPD_OLM_UTILS_V2_IMAGE} will be used."
fi
+# If images have not been overridden, set the variables here
+if [ -z $CPD_OLM_UTILS_V3_IMAGE ];then
+  if [ "${ARCH}" == "x86_64" ]; then
+    export CPD_OLM_UTILS_V3_IMAGE=icr.io/cpopen/cpd/olm-utils-v3:latest
+  else
+    export CPD_OLM_UTILS_V3_IMAGE=icr.io/cpopen/cpd/olm-utils-v3:latest.$ARCH
+  fi
+else
+  echo "Custom olm-utils-v3 image ${CPD_OLM_UTILS_V3_IMAGE} will be used."
+fi
+
+
 if ! $INSIDE_CONTAINER;then
   # Check if podman or docker command was found
   if [ -z $CPD_CONTAINER_ENGINE ];then
@@ -673,6 +685,7 @@ if ! $INSIDE_CONTAINER;then
       --pull \
       -f ${SCRIPT_DIR}/${DOCKERFILE} \
       --build-arg CPD_OLM_UTILS_V2_IMAGE=${CPD_OLM_UTILS_V2_IMAGE} \
+      --build-arg CPD_OLM_UTILS_V3_IMAGE=${CPD_OLM_UTILS_V3_IMAGE} \
       ${SCRIPT_DIR}
     exit $?
 fi

From c66fe9d31978c2e146d17e04f88d5f1c69ac1b6a Mon Sep 17 00:00:00 2001
From: Frank Ketelaars
Date: Mon, 10 Jun 2024 04:07:11 +0000
Subject: [PATCH 05/14] #727 Document CPD_OLM_UTILS_V3 variable

---
 docs/src/50-advanced/alternative-repo-reg.md     | 7 ++++++-
 .../build-image-and-run-deployer-on-openshift.md | 4 ++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/docs/src/50-advanced/alternative-repo-reg.md b/docs/src/50-advanced/alternative-repo-reg.md
index 8064dbe1d..9cec468a4 100644
--- a/docs/src/50-advanced/alternative-repo-reg.md
+++ b/docs/src/50-advanced/alternative-repo-reg.md
@@ -12,6 +12,11 @@ By default the Cloud Pak Deployer image is built on top of the `olm-utils` image
 export CPD_OLM_UTILS_V2_IMAGE=cp.staging.acme.com:4.8.0
 ```

+Or, for Cloud Pak for Data 5.0:
+```
+export CPD_OLM_UTILS_V3_IMAGE=cp.staging.acme.com:5.0.0
+```
+
 Subsequently, run the install commmand:
 ```
 ./cp-deploy.sh build
@@ -86,7 +91,7 @@ You can also set these tokens on the `cp-deploy.sh env apply` command line.
 ./cp-deploy.sh env apply -f -vs github-internal-repo=abc123def456 -vs cp-staging="cp-staging-user:cp-staging-password
 ```

-## Running the deploy
+## Running the deployer
 To run the deployer you can now use the standard process:
 ```
 ./cp-deploy.sh env apply -v
 ```

diff --git a/docs/src/50-advanced/run-on-openshift/build-image-and-run-deployer-on-openshift.md b/docs/src/50-advanced/run-on-openshift/build-image-and-run-deployer-on-openshift.md
index 04b4819f5..981145bd3 100644
--- a/docs/src/50-advanced/run-on-openshift/build-image-and-run-deployer-on-openshift.md
+++ b/docs/src/50-advanced/run-on-openshift/build-image-and-run-deployer-on-openshift.md
@@ -221,10 +221,10 @@ spec:
     type: Docker
     dockerStrategy:
       buildArgs:
-      - name: CPD_OLM_UTILS_V1_IMAGE
-        value: icr.io/cpopen/cpd/olm-utils:latest
       - name: CPD_OLM_UTILS_V2_IMAGE
         value: icr.io/cpopen/cpd/olm-utils-v2:latest
+      - name: CPD_OLM_UTILS_V3_IMAGE
+        value: icr.io/cpopen/cpd/olm-utils-v3:latest
   output:
     to:
       kind: ImageStreamTag

From 8a8978156ce9f7f49c8c17aac8937bc622f3e73e Mon Sep 17 00:00:00 2001
From: Frank Ketelaars
Date: Tue, 11 Jun 2024 13:22:49 +0000
Subject: [PATCH 06/14] #727 Allow ignoring cp_alt_repo for GA releases

---
 automation-generators/generic/cp4d/preprocessor.py        | 5 +++++
 .../templates/apply-olm-cartridge-wxo.j2                  | 2 +-
 .../templates/apply-olm-create-catsrc.j2                  | 2 +-
 .../cp4d/cp4d-cluster/tasks/openshift-prepare-project.yml | 8 ++++----
 .../cp4d/cp4d-cluster/templates/authorize-instance.j2     | 2 +-
 .../cp4d/cp4d-cluster/templates/db2u-product-cm.j2        | 2 +-
 .../cp4d-cluster/templates/setup-instance-topology.j2     | 2 +-
 .../templates/apply-olm-cartridge-sub.j2                  | 2 +-
 .../cp4d/cp4d-variables/tasks/main.yml                    | 2 +-
 .../cpfs/cp-alternative-repo/tasks/main.yml               | 4 +++-
 docs/src/30-reference/configuration/cloud-pak.md          | 1 +
 11 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/automation-generators/generic/cp4d/preprocessor.py b/automation-generators/generic/cp4d/preprocessor.py
index 6b5a4f2cc..7e93e9be3 100644
--- a/automation-generators/generic/cp4d/preprocessor.py
+++ b/automation-generators/generic/cp4d/preprocessor.py
@@ -225,6 +225,7 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
     g = GeneratorPreProcessor(attributes,fullConfig,moduleVariables)

     g('project').isRequired()
+    g('operators_project').isOptional()
     g('openshift_cluster_name').expandWith('openshift[*]',remoteIdentifier='name')
     g('cp4d_version').isRequired()
     g('cartridges').isRequired()
@@ -273,6 +274,10 @@
     if 'olm_utils' in ge and not 'sequential_install' in ge:
         g('sequential_install').set(ge['olm_utils'])

+    # Set operators project to <project>-operators if not explicitly configured
+    if not 'operators_project' in ge:
+        g('operators_project').set('{}-operators'.format(ge['project']))
+
     # Check reference
     # - Retrieve the openshift element with name=openshift_cluster_name
     # - Within the openshift element retrieve, there must be an openshift_storage element with the name cp4d.openshift_storage_name

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/templates/apply-olm-cartridge-wxo.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/templates/apply-olm-cartridge-wxo.j2
index 87079fe06..1c64add7a 100644
--- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/templates/apply-olm-cartridge-wxo.j2
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/templates/apply-olm-cartridge-wxo.j2
@@ -2,6 +2,6 @@
   --release={{ _p_current_cp4d_cluster.cp4d_version }} \
   --case_download=false \
   --catsrc=false --sub=true \
-  --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project | default('cpd-operators') }} \
+  --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project }} \
   --upgrade={%- if _upgrade_cp4d -%}true{%- else -%}false{%- endif %} \
   --components=watsonx_orchestrate
\ No newline at end of file

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2
index de16cb3c0..24c2cf3d1 100644
--- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2
@@ -2,7 +2,7 @@
   --release={{ _p_current_cp4d_cluster.cp4d_version }} \
   --case_download={%- if cpd_airgap | default(False) | bool -%}false{%- else -%}true{%- endif %} \
   --catsrc=true --sub=false \
-  --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project | default('cpd-operators') }} \
+  --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project }} \
   --preview={%- if _p_preview_script -%}true{%- else -%}false{%- endif %} \
   --upgrade={%- if _upgrade_cp4d -%}true{%- else -%}false{%- endif %} \
   --components={{ _cartridges_to_install_list }}
\ No newline at end of file

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/openshift-prepare-project.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/openshift-prepare-project.yml
index c362dcabc..db73c79e0 100644
--- 
a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/openshift-prepare-project.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/openshift-prepare-project.yml @@ -9,14 +9,14 @@ when: "cloud_platform not in implemented_cloud_platform_types" # Create project that will run operators of Cloud Pak for Data -- name: Validate if OpenShift project {{ current_cp4d_cluster.operators_project | default('cpd-operators') }} exists - shell: oc project {{ current_cp4d_cluster.operators_project | default('cpd-operators') }} +- name: Validate if OpenShift project {{ current_cp4d_cluster.operators_project }} exists + shell: oc project {{ current_cp4d_cluster.operators_project }} failed_when: False register: _cp4d_operators_project_exists when: current_cp4d_cluster.cp4d_version >= '4.7.0' -- name: Create OpenShift Project {{ current_cp4d_cluster.operators_project | default('cpd-operators') }} if it does not exist - command: oc new-project {{current_cp4d_cluster.operators_project | default('cpd-operators') }} +- name: Create OpenShift Project {{ current_cp4d_cluster.operators_project }} if it does not exist + command: oc new-project {{current_cp4d_cluster.operators_project }} when: - current_cp4d_cluster.cp4d_version >= '4.7.0' - _cp4d_operators_project_exists.rc != 0 diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/authorize-instance.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/authorize-instance.j2 index 8736af212..71926a0e2 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/authorize-instance.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/authorize-instance.j2 @@ -1,4 +1,4 @@ set -o pipefail authorize-instance-topology \ - --cpd_operator_ns={{ current_cp4d_cluster.operators_project | default('cpd-operators') }} \ + --cpd_operator_ns={{ current_cp4d_cluster.operators_project }} \ --cpd_instance_ns={{ current_cp4d_cluster.project }} 2>&1 | tee {{ status_dir }}/log/{{ current_cp4d_cluster.project }}-authorize-instance.log \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/db2u-product-cm.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/db2u-product-cm.j2 index 0a967f069..1c19e8c0f 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/db2u-product-cm.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/db2u-product-cm.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: ConfigMap metadata: name: db2u-product-cm - namespace: {{ current_cp4d_cluster.operators_project | default('cpd-operators') }} + namespace: {{ current_cp4d_cluster.operators_project }} data: DB2U_RUN_WITH_LIMITED_PRIVS: "{{ current_cp4d_cluster.db2u_limited_privileges | default(False) | string | lower }}" \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/setup-instance-topology.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/setup-instance-topology.j2 index 6139c25c1..df0248bd1 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/setup-instance-topology.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/templates/setup-instance-topology.j2 @@ -1,6 +1,6 @@ set -o pipefail setup-instance-topology \ - --cpd_operator_ns={{ current_cp4d_cluster.operators_project | default('cpd-operators') }} \ + --cpd_operator_ns={{ current_cp4d_cluster.operators_project }} \ --cpd_instance_ns={{ 
current_cp4d_cluster.project }} \ --release={{ current_cp4d_cluster.cp4d_version }} \ --block_storage_class={{ ocp_storage_class_block }} \ diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 index 89cf9e210..f6179550f 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 @@ -2,7 +2,7 @@ --release={{ _p_current_cp4d_cluster.cp4d_version }} \ --case_download=false \ --catsrc=false --sub=true \ - --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project | default('cpd-operators') }} \ + --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project }} \ --upgrade={%- if _upgrade_cp4d -%}true{%- else -%}false{%- endif %} \ --preview={%- if _p_preview_script -%}true{%- else -%}false{%- endif %} \ --components={{ _apply_olm_cartridges_list }} \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/tasks/main.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/tasks/main.yml index 90c5755fe..027c1bad2 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/tasks/main.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/tasks/main.yml @@ -13,7 +13,7 @@ include_vars: vars-cp4d-installation.yml - set_fact: - foundational_services_project: "{{ _p_current_cp4d_cluster.operators_project | default('cpd-operators') }}" + foundational_services_project: "{{ _p_current_cp4d_cluster.operators_project }}" when: _p_current_cp4d_cluster.cp4d_version >= "4.7.0" # Set the license server project to the correct value, dependent if the license service is already installed in cs-control diff --git a/automation-roles/50-install-cloud-pak/cpfs/cp-alternative-repo/tasks/main.yml b/automation-roles/50-install-cloud-pak/cpfs/cp-alternative-repo/tasks/main.yml index f33f12b33..99a754df4 100644 --- a/automation-roles/50-install-cloud-pak/cpfs/cp-alternative-repo/tasks/main.yml +++ b/automation-roles/50-install-cloud-pak/cpfs/cp-alternative-repo/tasks/main.yml @@ -2,4 +2,6 @@ - include_tasks: delete-case-resolvers.yml - include_tasks: generate-case-resolvers.yml - when: (all_config.cp_alt_repo | default({})) != {} \ No newline at end of file + when: + - (all_config.cp_alt_repo | default({})) != {} + - (_p_current_cp4d_cluster.use_cp_alt_repo | default(True) | bool) \ No newline at end of file diff --git a/docs/src/30-reference/configuration/cloud-pak.md b/docs/src/30-reference/configuration/cloud-pak.md index 50d15aef9..c3590279d 100644 --- a/docs/src/30-reference/configuration/cloud-pak.md +++ b/docs/src/30-reference/configuration/cloud-pak.md @@ -41,6 +41,7 @@ cp4d: | cp4d_version | Cloud Pak for Data version to install, this will determine the version for all cartridges that do not specify a version | Yes | 4.x.x | | sequential_install | If set to `True` the deployer will run the **OLM utils** playbooks to install catalog sources, subscriptions and CRs. 
If set to `False`, deployer will use OLM utils to generate the scripts and then run them, which will cause the catalog sources, subscriptions and CRs to be created immediately and install in parallel | No | True (default), False |
 | use_fs_iam | If set to `True` the deployer will enable Foundational Services IAM for authentication | No | False (default), True |
+| use_cp_alt_repo | When set to `False`, the deployer will not use the alternative repository specified in the `cp_alt_repo` resource | No | True (default), False |
 | change_node_settings | Controls whether the node settings using the machine configs will be applied onto the OpenShift cluster. | No | True, False |
 | db2u_limited_privileges | Depicts whether Db2U containers run with limited privileges. If they do (`True`), Deployer will create KubeletConfig and Tuned OpenShift resources as per the documentation. | No | False (default), True |
 | accept_licenses | Set to 'True' to accept Cloud Pak licenses. Alternatively the `--accept-all-licenses` can be used for the `cp-deploy.sh` command | No | True, False (default) |

From e506d0fa7ef1316f251d0bff7c86a8a55914def9 Mon Sep 17 00:00:00 2001
From: Frank Ketelaars
Date: Wed, 12 Jun 2024 17:59:20 +0000
Subject: [PATCH 07/14] #727 Allow installation of OpenShift AI

---
 .../configure-openshift/tasks/main.yml       |  8 ++++-
 .../tasks/install-opernshift-ai.yml          | 30 +++++++++++++++++++
 .../openshift-ai/tasks/main.yml              |  3 ++
 .../templates/openshift-ai-operator.j2       | 22 ++++++++++++++
 .../tasks/cp4d-prep-watsonx-ai.yml           |  8 +++++
 .../vars/vars-cp4d-installation.yml          |  1 +
 6 files changed, 71 insertions(+), 1 deletion(-)
 create mode 100644 automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml
 create mode 100644 automation-roles/40-configure-infra/openshift-ai/tasks/main.yml
 create mode 100644 automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2
 create mode 100644 automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/tasks/cp4d-prep-watsonx-ai.yml

diff --git a/automation-roles/40-configure-infra/configure-openshift/tasks/main.yml b/automation-roles/40-configure-infra/configure-openshift/tasks/main.yml
index 8ce9038b8..9e52c424f 100644
--- a/automation-roles/40-configure-infra/configure-openshift/tasks/main.yml
+++ b/automation-roles/40-configure-infra/configure-openshift/tasks/main.yml
@@ -94,4 +94,10 @@
   include_role:
     name: openshift-gpu
   vars:
-    _p_openshift_cluster: "{{ current_openshift_cluster }}"
\ No newline at end of file
+    _p_openshift_cluster: "{{ current_openshift_cluster }}"
+
+- name: Configure OpenShift AI
+  include_role:
+    name: openshift-ai
+  vars:
+    _p_openshift_cluster: "{{ current_openshift_cluster }}"

diff --git a/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml b/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml
new file mode 100644
index 000000000..fe729f0e4
--- /dev/null
+++ b/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml
@@ -0,0 +1,30 @@
+---
+- name: Create OpenShift AI operator namespace
+  shell: |
+    oc create ns redhat-ods-operator || true
+
+- name: Generate OpenShift AI operator {{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-operator.yaml
+  template:
+    src: openshift-ai-operator.j2
+    dest: "{{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-operator.yaml"
+
+- name: Apply yaml for OpenShift AI operator
+  shell: |
+    oc apply -f {{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-operator.yaml
+
+- name: Wait until OpenShift AI operator CSV has status Succeeded
+  shell: |
+    oc get csv -n redhat-ods-operator \
+      -l operators.coreos.com/rhods-operator.redhat-ods-operator \
+      --no-headers \
+      -o custom-columns='name:metadata.name,phase:status.phase' | \
+      grep -i succeeded | wc -l
+  register: _openshift_ai_csv_status
+  retries: 30
+  delay: 30
+  until: _openshift_ai_csv_status.stdout == "1"
+  vars:
+    ansible_callback_diy_runner_retry_msg: >-
+      {%- set result = ansible_callback_diy.result.output -%}
+      {%- set retries_left = result.retries - result.attempts -%}
+      Retrying: {{ ansible_callback_diy.task.name }} ({{ retries_left }} Retries left) ...
\ No newline at end of file

diff --git a/automation-roles/40-configure-infra/openshift-ai/tasks/main.yml b/automation-roles/40-configure-infra/openshift-ai/tasks/main.yml
new file mode 100644
index 000000000..4bcad6701
--- /dev/null
+++ b/automation-roles/40-configure-infra/openshift-ai/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include_tasks: install-opernshift-ai.yml
+  when: _p_openshift_cluster.openshift_ai.install | default(False) | bool
\ No newline at end of file

diff --git a/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2 b/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2
new file mode 100644
index 000000000..4b021c2eb
--- /dev/null
+++ b/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: redhat-ods-operator
+  namespace: redhat-ods-operator
+spec:
+  upgradeStrategy: Default
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  labels:
+    operators.coreos.com/ocs-operator.openshift-storage: ""
+  name: rhods-operator
+  namespace: redhat-ods-operator
+spec:
+  channel: {{ _openshift_ai.channel | default('stable') }}
+  installPlanApproval: Automatic
+  name: rhods-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
\ No newline at end of file

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/tasks/cp4d-prep-watsonx-ai.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/tasks/cp4d-prep-watsonx-ai.yml
new file mode 100644
index 000000000..7f164829e
--- /dev/null
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cartridge-install/tasks/cp4d-prep-watsonx-ai.yml
@@ -0,0 +1,8 @@
+---
+- name: Insert watsonx_ai options into {{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-install-options.yml
+  blockinfile:
+    path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-install-options.yml"
+    marker: "# {mark} watsonx_ai options #"
+    block: |2
+      watsonx_ai:
+        tuning_disabled: {{ _current_cp4d_cartridge.installation_options.tuning_disabled | default(false) }}
\ No newline at end of file

diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml
index ca9af2d74..f7eb213a2 100644
--- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml
+++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml
@@ -353,6 +353,7 @@ cartridge_cr:
     force_sequential_install: True
 - name: watsonx-ai
   olm_utils_name: watsonx_ai
+  cr_preprocessing_script: cp4d-prep-watsonx-ai.yml
cr_cr: Watsonxai cr_name: watsonxai-cr cr_status_attribute: watsonxaiStatus From c5b0badf83013c6b8ba234ea498a66f11aa0b45e Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Wed, 12 Jun 2024 18:27:03 +0000 Subject: [PATCH 08/14] #717 Correctly handle cert manager and license server --- scripts/cp4d/cp4d-delete-instance.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/scripts/cp4d/cp4d-delete-instance.sh b/scripts/cp4d/cp4d-delete-instance.sh index 9a91a5e89..54eed76d7 100755 --- a/scripts/cp4d/cp4d-delete-instance.sh +++ b/scripts/cp4d/cp4d-delete-instance.sh @@ -224,6 +224,7 @@ else echo "Project ${IBM_SCHEDULING} does not exist, skipping" fi +check_shared_resources ibmlicensingdefinition.operator.ibm.com ibm-licensing DELETE_LICENSING if [ "${DELETE_LICENSING}" -eq 1 ]; then IBM_LICENSING=ibm-licensing oc get project ${IBM_LICENSING} > /dev/null 2>&1 @@ -245,6 +246,7 @@ else echo "Keeping ${IBM_LICENSING} namespace due to shared resources" fi +check_shared_resources certificaterequests.cert-manager.io ibm-cert-manager DELETE_CERT_MANAGER if [ "${DELETE_CERT_MANAGER}" -eq 1 ]; then IBM_CERT_MANAGER=ibm-cert-manager oc get project ${IBM_CERT_MANAGER} > /dev/null 2>&1 @@ -310,7 +312,15 @@ oc delete catsrc -n openshift-marketplace \ # log "Deleting IBM CRDs that don't have an instance anymore" for crd in $(oc get crd --no-headers | awk '{print $1}' | grep -E '\.ibm|mantaflows\.adl');do - if [[ "$(oc get ${crd} --no-headers -A 2>/dev/null)" == "" ]] && [[ "${crd}" != *ocscluster* ]];then + if [[ "$(oc get ${crd} --no-headers -A 2>/dev/null)" != "" ]] ;then + log "Not deleting CRD ${crd}, still has some instances" + elif [[ "${crd}" == *ocscluster* ]];then + log "Not deleting OpenShift Data Foundation CRD ${crd}, still needed" + elif [[ "${crd}" == *ibmlicensing* ]] && [ "${DELETE_LICENSING}" -ne 1 ];then + log "Not deleting license server CRD ${crd}, still needed" + elif [[ "${crd}" == *cert* ]] && [ "${DELETE_CERT_MANAGER}" -ne 1 ];then + log "Not deleting certificate manager CRD ${crd}, still needed" + else oc delete crd $crd fi done From 9b9481030400223c605e2a7f5200af49e225acdc Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Thu, 13 Jun 2024 06:43:58 +0000 Subject: [PATCH 09/14] #727 Add OpenShift AI --- .../tasks/install-opernshift-ai.yml | 29 ++++++++++++--- .../templates/datasciencecluster.j2 | 35 +++++++++++++++++++ .../templates/openshift-ai-operator.j2 | 24 +++++++++++-- 3 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 automation-roles/40-configure-infra/openshift-ai/templates/datasciencecluster.j2 diff --git a/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml b/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml index fe729f0e4..fe84b419b 100644 --- a/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml +++ b/automation-roles/40-configure-infra/openshift-ai/tasks/install-opernshift-ai.yml @@ -1,8 +1,4 @@ --- -- name: Create OpenShift AI operator namespace - shell: | - oc create ns redhat-ods-operator || true - - name: Generate OpenShift AI operator {{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-operator.yaml template: src: openshift-ai-operator.j2 @@ -23,6 +19,31 @@ retries: 30 delay: 30 until: _openshift_ai_csv_status.stdout == "1" + vars: + ansible_callback_diy_runner_retry_msg: >- + {%- set result = ansible_callback_diy.result.output -%} + {%- set retries_left = result.retries - result.attempts -%} + 
Retrying: {{ ansible_callback_diy.task.name }} ({{ retries_left }} Retries left) ... + +- name: Generate OpenShift AI DataScienceCluster {{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-dsc.yaml + template: + src: datasciencecluster.j2 + dest: "{{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-dsc.yaml" + +- name: Apply yaml for OpenShift AI DataScienceCluster + shell: | + oc apply -f {{ status_dir }}/openshift/openshift-{{ _p_openshift_cluster.name }}-openshift-ai-dsc.yaml + +- name: Wait until OpenShift AI DataScienceCluster default-dsc is ready + shell: | + oc get DataScienceCluster default-dsc \ + --no-headers \ + -o custom-columns='name:metadata.name,phase:status.phase' | \ + grep -i ready | wc -l + register: _openshift_ai_dsc_status + retries: 30 + delay: 30 + until: _openshift_ai_dsc_status.stdout == "1" vars: ansible_callback_diy_runner_retry_msg: >- {%- set result = ansible_callback_diy.result.output -%} diff --git a/automation-roles/40-configure-infra/openshift-ai/templates/datasciencecluster.j2 b/automation-roles/40-configure-infra/openshift-ai/templates/datasciencecluster.j2 new file mode 100644 index 000000000..3a88277fa --- /dev/null +++ b/automation-roles/40-configure-infra/openshift-ai/templates/datasciencecluster.j2 @@ -0,0 +1,35 @@ +--- +apiVersion: datasciencecluster.opendatahub.io/v1 +kind: DataScienceCluster +metadata: + name: default-dsc + labels: + app.kubernetes.io/name: datasciencecluster + app.kubernetes.io/instance: default-dsc + app.kubernetes.io/part-of: rhods-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: rhods-operator +spec: + components: + codeflare: + managementState: Managed + kserve: + serving: + ingressGateway: + certificate: + type: SelfSigned + managementState: Managed + name: knative-serving + managementState: Managed + ray: + managementState: Managed + kueue: + managementState: Managed + workbenches: + managementState: Managed + dashboard: + managementState: Managed + modelmeshserving: + managementState: Managed + datasciencepipelines: + managementState: Managed diff --git a/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2 b/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2 index 4b021c2eb..6c86c0140 100644 --- a/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2 +++ b/automation-roles/40-configure-infra/openshift-ai/templates/openshift-ai-operator.j2 @@ -1,4 +1,24 @@ --- +apiVersion: v1 +kind: Namespace +metadata: + name: redhat-ods-operator +--- +apiVersion: v1 +kind: Namespace +metadata: + name: redhat-ods-monitoring +--- +apiVersion: v1 +kind: Namespace +metadata: + name: redhat-ods-applications +--- +apiVersion: v1 +kind: Namespace +metadata: + name: rhods-notebooks +--- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: @@ -11,11 +31,11 @@ apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: labels: - operators.coreos.com/ocs-operator.openshift-storage: "" + operators.coreos.com/rhods-operator.redhat-ods-operator: "" name: rhods-operator namespace: redhat-ods-operator spec: - channel: {{ _openshift_ai.channel | default('stable') }} + channel: {{ _p_openshift_cluster.openshift_ai.channel | default('stable') }} installPlanApproval: Automatic name: rhods-operator source: redhat-operators From a2ce0266250b896449068cf0a1ec6ad0b6fc53cd Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Thu, 13 Jun 2024 06:44:14 
+0000
Subject: [PATCH 10/14] #727 Doc and sample configs

---
 .../30-reference/configuration/openshift.md      | 255 ++++++++-----
 .../{cp4d-483.yaml => cp4d-485.yaml}             |  12 +-
 .../config-samples/cp4d-500.yaml                 | 359 ++++++++++++++++++
 .../config-samples/ocp-aws-rosa-elastic.yaml     |   3 +
 .../ocp-aws-rosa-existing-vpc.yaml               |   3 +
 .../config-samples/ocp-aws-rosa-ocs.yaml         |   3 +
 .../ocp-aws-self-managed-elastic-manual.yaml     |   3 +
 .../ocp-aws-self-managed-elastic-sno.yaml        |   3 +
 .../ocp-aws-self-managed-elastic.yaml            |   3 +
 .../ocp-aws-self-managed-existing-vpc.yaml       |   3 +
 .../ocp-aws-self-managed-ocs.yaml                |   3 +
 .../config-samples/ocp-azure-aro.yaml            |   3 +
 .../ocp-azure-self-managed.yaml                  |   3 +
 .../config-samples/ocp-existing-ocp-auto.yaml    |   3 +
 .../ocp-existing-ocp-satellite-ocs.yaml          |   3 +
 .../config-samples/ocp-existing-ocp.yaml         |   3 +
 .../ocp-existing-roks-classic.yaml               |   3 +
 .../ocp-ibm-cloud-roks-cp4waiops.yaml            |   3 +
 .../ocp-ibm-cloud-roks-nfs.yaml                  |   3 +
 .../ocp-ibm-cloud-roks-ocs.yaml                  |   3 +
 .../ocp-ibm-cloud-roks-portworx.yaml             |   3 +
 .../config-samples/ocp-vsphere-ocs-nfs.yaml      |   3 +
 22 files changed, 570 insertions(+), 113 deletions(-)
 rename sample-configurations/sample-dynamic/config-samples/{cp4d-483.yaml => cp4d-485.yaml} (96%)
 create mode 100644 sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml

diff --git a/docs/src/30-reference/configuration/openshift.md b/docs/src/30-reference/configuration/openshift.md
index 67e769991..46ee996a4 100644
--- a/docs/src/30-reference/configuration/openshift.md
+++ b/docs/src/30-reference/configuration/openshift.md
@@ -21,6 +21,115 @@ Additionally, one can configure [Upstream DNS Servers](./dns.md) and [OpenShift

 The Multicloud Object Gateway (MCG) supports access to s3-compatible object storage via an underpinning block/file storage class, through the Noobaa operator. Some Cloud Pak for Data services such as Watson Assistant need object storage to run. MCG does not need to be installed if OpenShift Data Foundation (fka OCS) is also installed as the operator includes Noobaa.

+### Existing OpenShift
+
+When using the Cloud Pak Deployer on an existing OpenShift cluster, the scripts assume that the cluster is already operational and that any storage classes have been pre-created. The deployer accesses the cluster through a vault secret with the kubeconfig information; the name of the secret is `<cluster name>-kubeconfig`.
+
+```
+openshift:
+- name: sample
+  ocp_version: 4.8
+  cluster_name: sample
+  domain_name: example.com
+  cloud_native_toolkit: False
+  oadp: False
+  infrastructure:
+    type: standard
+    processor_architecture: amd64
+  upstream_dns:
+  - name: sample-dns
+    zones:
+    - example.com
+    dns_servers:
+    - 172.31.2.73:53
+  gpu:
+    install: False
+  openshift_ai:
+    install: False
+    channel: fast
+  mcg:
+    install: True
+    storage_type: storage-class
+    storage_class: managed-nfs-storage
+  openshift_storage:
+  - storage_name: nfs-storage
+    storage_type: nfs
+  # ocp_storage_class_file: managed-nfs-storage
+  # ocp_storage_class_block: managed-nfs-storage
+```
+
+#### Property explanation for existing OpenShift clusters
+
+| Property | Description | Mandatory | Allowed values |
+|----------------------|-----------------------------------------------------------------------------------------------------------------|-----------|-----------------------|
+| name | Name of the OpenShift cluster | Yes | |
+| ocp_version | OpenShift version of the cluster, used to download the client. If you want to install `4.10`, specify `"4.10"` | Yes | >= 4.6 |
+| cluster_name | Name of the cluster (part of the FQDN) | Yes | |
+| domain_name | Domain name of the cluster (part of the FQDN) | Yes | |
+| cloud_native_toolkit | Must the Cloud Native Toolkit (OpenShift GitOps) be installed? | No | True, False (default) |
+| oadp | Must the OpenShift Advanced Data Protection operator be installed | No | True, False (default) |
+| infrastructure.type | Infrastructure OpenShift is deployed on. See below for additional explanation | No | detect (default) |
+| infrastructure.processor_architecture | Architecture of the processor that the OpenShift cluster is deployed on | No | amd64 (default), ppc64le, s390x |
+| openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | |
+| upstream_dns[] | Upstream DNS server(s), see [Upstream DNS Servers](./dns.md) | No | |
+| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | |
+| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai | Control installation of OpenShift AI | No | |
+| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... |
+| mcg | Multicloud Object Gateway properties | No | |
+| mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False |
+| mcg.storage_type | Type of storage supporting the Noobaa object storage | Yes | storage-class |
+| mcg.storage_class | Storage class supporting the Noobaa object storage | Yes | Existing storage class |
+| openshift_storage[] | List of storage definitions to be defined on OpenShift, see below for further explanation | Yes | |
+
+##### infrastructure.type - Type of infrastructure
+When deploying on existing OpenShift, the underlying infrastructure can pose some restrictions on the capabilities available. For example, Red Hat OpenShift on IBM Cloud (aka ROKS) does not include the Machine Config Operator, and ROSA on AWS does not allow setting labels for Machine Config Pools. This means that node settings required for Cloud Pak for Data must be applied in a non-standard manner.
+
+The following values are allowed for `infrastructure.type`:
+
+* `detect` (default): The deployer will attempt to detect the underlying cloud infrastructure. This is done by retrieving the existing storage classes and then inferring the cloud type.
+* `standard`: The deployer will assume a standard OpenShift cluster with no further restrictions. This is the fallback value for `detect` if the underlying infra cannot be detected.
+* `aws-self-managed`: A self-managed OpenShift cluster on AWS. No restrictions.
+* `aws-rosa`: Managed Red Hat OpenShift on AWS. Some restrictions with regards to Machine Config Pools apply.
+* `azure-aro`: Managed Red Hat OpenShift on Azure. No known restrictions.
+* `vsphere`: OpenShift on vSphere. No known restrictions.
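+
+For example, a minimal fragment that pins the infrastructure type for a managed ROSA cluster instead of relying on `detect` could look as follows (an illustrative sketch; all property names are taken from the sample above):
+
+```
+openshift:
+- name: sample
+  ocp_version: 4.8
+  cluster_name: sample
+  domain_name: example.com
+  infrastructure:
+    type: aws-rosa
+    processor_architecture: amd64
+```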
+ +##### openshift_storage[] - OpenShift storage definitions + +| Property | Description | Mandatory | Allowed values | +|-------------------------|---------------------------------------------------------------------------------------------|-----------------------------------|-------------------------------------| +| storage_name | Name of the storage definition, to be referenced by the Cloud Pak | Yes | | +| storage_type | Type of storage class to use in the OpenShift cluster | Yes | nfs, ocs, aws-elastic, auto, custom | +| ocp_storage_class_file | OpenShift storage class to use for file storage if different from default for storage_type | Yes if `storage_type` is `custom` | | +| ocp_storage_class_block | OpenShift storage class to use for block storage if different from default for storage_type | Yes if `storage_type` is `custom` | | + +!!! info + The custom storage_type can be used in case you want to use a non-standard storage class(es). In this case the storage class(es) must be already configured on the OCP cluster and set in the respective ocp_storage_class_file and ocp_storage_class_block variables + +!!! info + The auto storage_type will let the deployer automatically detect the storage type based on the existing storage classes in the OpenShift cluster. + +## Supported storage types +An `openshift` definition always includes the type(s) of storage that it will provide. When the OpenShift cluster is provisioned by the deployer, the necessary infrastructure and storage class(es) are also configured. In case an existing OpenShift cluster is referenced by the configuration, the storage classes are expected to exist already. + +The table below indicates which storage classes are supported by the Cloud Pak Deployer per cloud infrastructure. + +!!! warning + The ability to provision or use certain storage types does not imply support by the Cloud Paks or by OpenShift itself. There are several restrictions for production use OpenShift Data Foundation, for example when on ROSA. + +| Cloud Provider | NFS Storage | OCS/ODF Storage | Portworx | Elastic | Custom (2) | +|----------------|-------------|-----------------|----------|---------|------------| +| ibm-cloud | Yes | Yes | Yes | No | Yes | +| vsphere | Yes (1) | Yes | No | No | Yes | +| aws | No | Yes | No | Yes (3) | Yes | +| azure | No | Yes | No | No | Yes | +| existing-ocp | Yes | Yes | No | Yes | Yes | + +* (1) An existing NFS server can be specified so that the deployer configures the `managed-nfs-storage` storage class. The deployer will not provision or change the NFS server itself. +* (2) If you specify a `custom` storage type, you must specify the storage class to be used for block (RWO) and file (RWX) storage. +* (3) Specifying this storage type means that Elastic File Storage (EFS) and Elastic Block Storage (EBS) storage classes will be used. For EFS, an `nfs_server` object is required to define the "file server" storage on AWS. + ### OpenShift on IBM Cloud (ROKS) VPC-based OpenShift cluster on IBM Cloud, using the Red Hat OpenShift Kubernetes Services (ROKS). 
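+A configuration like the one below is applied with the deployer's standard entry point, for example:
+```
+./cp-deploy.sh env apply -v
+```
+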
```
@@ -52,6 +161,9 @@ openshift:
     install: True
     storage_type: storage-class
     storage_class: managed-nfs-storage
+  openshift_ai:
+    install: False
+    channel: fast
   openshift_storage:
   - storage_name: nfs-storage
     storage_type: nfs
@@ -92,6 +204,11 @@ openshift:
 | infrastructure.secondary_storage | Reference to the storage flavour to be used as secondary storage, for example `"900gb.5iops-tier"` | No | Valid secondary storage flavour |
 | openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | |
 | upstream_dns[] | Upstream DNS servers(s), see [Upstream DNS Servers](./dns.md) | No | |
+| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | |
+| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai | Control installation of OpenShift AI | No | |
+| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... |
 | mcg | Multicloud Object Gateway properties | No | |
 | mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False |
 | mcg.storage_type | Type of storage supporting the object Noobaa object storage | Yes | storage-class |
@@ -148,6 +265,11 @@ openshift:
     - example.com
     dns_servers:
     - 172.31.2.73:53
+  gpu:
+    install: False
+  openshift_ai:
+    install: False
+    channel: fast
   mcg:
     install: True
     storage_type: storage-class
@@ -183,6 +305,11 @@ openshift:
 | infrastructure.openshift_cluster_network_cidr | Network CIDR used by the OpenShift pods. Normally you would not have to change this, unless other systems in the network are in the 10.128.0.0/14 subnet. | No | CIDR |
 | openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | |
 | upstream_dns[] | Upstream DNS servers(s), see [Upstream DNS Servers](./dns.md) | No | |
+| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | |
+| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai | Control installation of OpenShift AI | No | |
+| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... |
 | mcg | Multicloud Object Gateway properties | No | |
 | mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False |
 | mcg.storage_type | Type of storage supporting the object Noobaa object storage | Yes | storage-class |
@@ -273,6 +400,11 @@ openshift:
 | infrastructure.compute_iam_role | If not standard, specify the IAM role that the OpenShift installer must use for the compute nodes during cluster creation | No | |
 | infrastructure.ami_id | ID of the AWS AMI to boot all images | No | |
 | openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | |
+| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | |
+| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai | Control installation of OpenShift AI | No | |
+| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... |
 | mcg | Multicloud Object Gateway properties | No | |
 | mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False |
 | mcg.storage_type | Type of storage supporting the object Noobaa object storage | Yes | storage-class |
@@ -332,6 +464,11 @@ openshift:
     - example.com
     dns_servers:
     - 172.31.2.73:53
+  gpu:
+    install: False
+  openshift_ai:
+    install: False
+    channel: fast
   mcg:
     install: True
     storage_type: storage-class
@@ -366,6 +503,11 @@ openshift:
 | upstream_dns[] | Upstream DNS servers(s), see [Upstream DNS Servers](./dns.md) | No | |
 | openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | |
 | upstream_dns[] | Upstream DNS servers(s), see [Upstream DNS Servers](#upstream-dns-servers) | No | |
+| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | |
+| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai | Control installation of OpenShift AI | No | |
+| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False |
+| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... |
 | mcg | Multicloud Object Gateway properties | No | |
 | mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False |
 | mcg.storage_type | Type of storage supporting the object Noobaa object storage | Yes | storage-class |
@@ -410,6 +552,11 @@ openshift:
   network:
     pod_cidr: "10.128.0.0/14"
    service_cidr: "172.30.0.0/16"
+  gpu:
+    install: False
+  openshift_ai:
+    install: False
+    channel: fast
   openshift_storage:
   - storage_name: ocs-storage
     storage_type: ocs
@@ -436,6 +583,11 @@ openshift:
 | network.service_cidr | CIDR of service network | Yes | Must be a minimum of /18 or larger. |
| | openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | | | upstream_dns[] | Upstream DNS server(s), see [Upstream DNS Servers](./dns.md) | No | | +| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | | +| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False | +| openshift_ai | Control installation of OpenShift AI | No | | +| openshift_ai.install | Must OpenShift AI be installed (Once installed, False does not uninstall) | Yes | True, False | +| openshift_ai.channel | Which operator channel must be installed | No | fast (default), stable, ... | | mcg | Multicloud Object Gateway properties | No | | | mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False | | mcg.storage_type | Type of storage supporting the Noobaa object storage | Yes | storage-class | @@ -453,106 +605,3 @@ openshift: | ocs_storage_label | Label (or rather a name) to be used for the dedicated OCS nodes in the cluster - together with the combination of Azure location and zone id | Yes if `storage_type` is `ocs` | | | ocs_storage_size_gb | Size of the OCS storage in Gibibytes (Gi) | Yes if `storage_type` is `ocs` | | | ocs_dynamic_storage_class | Storage class that will be used for provisioning OCS. In Azure, you must select `managed-premium` | Yes if `storage_type` is `ocs` | `managed-premium` | - -### Existing OpenShift - -When using the Cloud Pak Deployer on an existing OpenShift cluster, the scripts assume that the cluster is already operational and that any storage classes have been pre-created. The deployer accesses the cluster through a vault secret with the kubeconfig information; the name of the secret is `-kubeconfig`. - -``` -openshift: -- name: sample - ocp_version: 4.8 - cluster_name: sample - domain_name: example.com - cloud_native_toolkit: False - oadp: False - infrastructure: - type: standard - processor_architecture: amd64 - upstream_dns: - - name: sample-dns - zones: - - example.com - dns_servers: - - 172.31.2.73:53 - gpu: - install: False - mcg: - install: True - storage_type: storage-class - storage_class: managed-nfs-storage - openshift_storage: - - storage_name: nfs-storage - storage_type: nfs - # ocp_storage_class_file: managed-nfs-storage - # ocp_storage_class_block: managed-nfs-storage -``` - -#### Property explanation for existing OpenShift clusters - -| Property | Description | Mandatory | Allowed values | -|----------------------|-----------------------------------------------------------------------------------------------------------------|-----------|-----------------------| -| name | Name of the OpenShift cluster | Yes | | -| ocp_version | OpenShift version of the cluster, used to download the client. If you want to install `4.10`, specify `"4.10"` | Yes | >= 4.6 | -| cluster_name | Name of the cluster (part of the FQDN) | Yes | | -| domain_name | Domain name of the cluster (part of the FQDN) | Yes | | -| cloud_native_toolkit | Must the Cloud Native Toolkit (OpenShift GitOps) be installed? | No | True, False (default) | -| oadp | Must the OpenShift Advanced Data Protection operator be installed | No | True, False (default) | -| infrastructure.type | Infrastructure OpenShift is deployed on.
See below for additional explanation | No | detect (default) | -| infrastructure.processor_architecture | Architecture of the processor that the OpenShift cluster is deployed on | No | amd64 (default), ppc64le, s390x | -| openshift_logging[] | Logging attributes for OpenShift cluster, see [OpenShift logging](logging-auditing.md) | No | | -| upstream_dns[] | Upstream DNS server(s), see [Upstream DNS Servers](./dns.md) | No | | -| gpu | Control Node Feature Discovery and NVIDIA GPU operators | No | | -| gpu.install | Must Node Feature Discovery and NVIDIA GPU operators be installed (Once installed, False does not uninstall) | Yes | True, False | -| mcg | Multicloud Object Gateway properties | No | | -| mcg.install | Must Multicloud Object Gateway be installed (Once installed, False does not uninstall) | Yes | True, False | -| mcg.storage_type | Type of storage supporting the Noobaa object storage | Yes | storage-class | -| mcg.storage_class | Storage class supporting the Noobaa object storage | Yes | Existing storage class | -| openshift_storage[] | List of storage definitions to be defined on OpenShift, see below for further explanation | Yes | | - -##### infrastructure.type - Type of infrastructure -When deploying on existing OpenShift, the underlying infrastructure can pose some restrictions on capabilities available. For example, Red Hat OpenShift on IBM Cloud (aka ROKS) does not include the Machine Config Operator and ROSA on AWS does not allow setting labels for Machine Config Pools. This means that node settings required for Cloud Pak for Data must be applied in a non-standard manner. - -The following values are allowed for `infrastructure.type`: - -* `detect` (default): The deployer will attempt to detect the underlying cloud infrastructure. This is done by retrieving the existing storage classes and then inferring the cloud type. -* `standard`: The deployer will assume a standard OpenShift cluster with no further restrictions. This is the fallback value for `detect` if the underlying infra cannot be detected. -* `aws-self-managed`: A self-managed OpenShift cluster on AWS. No restrictions. -* `aws-rosa`: Managed Red Hat OpenShift on AWS. Some restrictions with regard to Machine Config Pools apply. -* `azure-aro`: Managed Red Hat OpenShift on Azure. No known restrictions. -* `vsphere`: OpenShift on vSphere. No known restrictions. - -##### openshift_storage[] - OpenShift storage definitions - -| Property | Description | Mandatory | Allowed values | |-------------------------|---------------------------------------------------------------------------------------------|-----------------------------------|-------------------------------------| -| storage_name | Name of the storage definition, to be referenced by the Cloud Pak | Yes | | -| storage_type | Type of storage class to use in the OpenShift cluster | Yes | nfs, ocs, aws-elastic, auto, custom | -| ocp_storage_class_file | OpenShift storage class to use for file storage if different from default for storage_type | Yes if `storage_type` is `custom` | | -| ocp_storage_class_block | OpenShift storage class to use for block storage if different from default for storage_type | Yes if `storage_type` is `custom` | | - -!!! info - The custom storage_type can be used in case you want to use a non-standard storage class(es). In this case the storage class(es) must already be configured on the OCP cluster and set in the respective ocp_storage_class_file and ocp_storage_class_block variables - -!!!
info - The auto storage_type will let the deployer automatically detect the storage type based on the existing storage classes in the OpenShift cluster. - -## Supported storage types -An `openshift` definition always includes the type(s) of storage that it will provide. When the OpenShift cluster is provisioned by the deployer, the necessary infrastructure and storage class(es) are also configured. In case an existing OpenShift cluster is referenced by the configuration, the storage classes are expected to exist already. - -The table below indicates which storage classes are supported by the Cloud Pak Deployer per cloud infrastructure. - -!!! warning - The ability to provision or use certain storage types does not imply support by the Cloud Paks or by OpenShift itself. There are several restrictions for production use of OpenShift Data Foundation, for example when on ROSA. - -| Cloud Provider | NFS Storage | OCS/ODF Storage | Portworx | Elastic | Custom (2) | |----------------|-------------|-----------------|----------|---------|------------| | ibm-cloud | Yes | Yes | Yes | No | Yes | | vsphere | Yes (1) | Yes | No | No | Yes | | aws | No | Yes | No | Yes (3) | Yes | | azure | No | Yes | No | No | Yes | | existing-ocp | Yes | Yes | No | Yes | Yes | - -* (1) An existing NFS server can be specified so that the deployer configures the `managed-nfs-storage` storage class. The deployer will not provision or change the NFS server itself. -* (2) If you specify a `custom` storage type, you must specify the storage class to be used for block (RWO) and file (RWX) storage. -* (3) Specifying this storage type means that Elastic File Storage (EFS) and Elastic Block Storage (EBS) storage classes will be used. For EFS, an `nfs_server` object is required to define the "file server" storage on AWS.
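As an illustration of footnote (2) above, a minimal sketch of an `openshift_storage` entry that uses the `custom` storage type; the class names `my-file-sc` and `my-block-sc` are hypothetical placeholders for storage classes that must already exist on the cluster:

```
openshift_storage:
- storage_name: custom-storage
  storage_type: custom
  ocp_storage_class_file: my-file-sc     # hypothetical pre-created file (RWX) storage class
  ocp_storage_class_block: my-block-sc   # hypothetical pre-created block (RWO) storage class
```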
\ No newline at end of file diff --git a/sample-configurations/sample-dynamic/config-samples/cp4d-483.yaml b/sample-configurations/sample-dynamic/config-samples/cp4d-485.yaml similarity index 96% rename from sample-configurations/sample-dynamic/config-samples/cp4d-483.yaml rename to sample-configurations/sample-dynamic/config-samples/cp4d-485.yaml index 454b80c85..1bd28a816 100644 --- a/sample-configurations/sample-dynamic/config-samples/cp4d-483.yaml +++ b/sample-configurations/sample-dynamic/config-samples/cp4d-485.yaml @@ -2,7 +2,7 @@ cp4d: - project: cpd openshift_cluster_name: "{{ env_id }}" - cp4d_version: 4.8.3 + cp4d_version: 4.8.5 cp4d_entitlement: cpd-enterprise # or: cpd-standard, watsonx-ai, watsonx-data, watsonx-gov-model-management, watsonx-gov-risk-compliance cp4d_production_license: True accept_licenses: False @@ -216,14 +216,6 @@ cp4d: - name: wd-instance description: "Watson Discovery instance" - - - name: watson-ks - description: Watson Knowledge Studio - size: small - # noobaa_account_secret: noobaa-admin - # noobaa_cert_secret: noobaa-s3-serving-cert - state: removed - - name: watson-openscale description: Watson OpenScale size: small @@ -280,7 +272,7 @@ cp4d: enableOpenscale: true - name: wkc - description: Watson Knowledge Catalog + description: IBM Knowledge Catalog size: small state: removed installation_options: diff --git a/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml b/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml new file mode 100644 index 000000000..abe1dcd26 --- /dev/null +++ b/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml @@ -0,0 +1,359 @@ +--- +cp4d: +- project: cpd + openshift_cluster_name: "{{ env_id }}" + cp4d_version: 5.0.0 + cp4d_entitlement: cpd-enterprise # or: cpd-standard, watsonx-ai, watsonx-data, watsonx-gov-model-management, watsonx-gov-risk-compliance + cp4d_production_license: True + accept_licenses: False + sequential_install: False + db2u_limited_privileges: False + use_fs_iam: True + operators_project: cpd-operators + cartridges: + - name: cp-foundation + license_service: + threads_per_core: 2 + + - name: lite + + - name: scheduler + state: removed + +# +# All tested cartridges. To install, change the "state" property to "installed". To uninstall, change the state +# to "removed" or comment out the entire cartridge. Make sure that the "-" and properties are aligned with the lite +# cartridge; the "-" is at position 3 and the property starts at position 5. +# +# If a cartridge has dependencies and you want to install it, you must ensure that the dependent cartridge is also +# installed. 
+# + + - name: analyticsengine + description: Analytics Engine Powered by Apache Spark + size: small + state: removed + + - name: bigsql + description: Db2 Big SQL + state: removed + + - name: ca + description: Cognos Analytics + size: small + instances: + - name: ca-instance + metastore_ref: ca-metastore + state: removed + + - name: dashboard + description: Cognos Dashboards + state: removed + + - name: datagate + description: Db2 Data Gate + state: removed + + - name: dataproduct + description: Data Product Hub + state: removed + + - name: datastage-ent + description: DataStage Enterprise + state: removed + + - name: datastage-ent-plus + description: DataStage Enterprise Plus + state: removed + # instances: + # - name: ds-instance + # # Optional settings + # description: "datastage ds-instance" + # size: medium + # storage_class: efs-nfs-client + # storage_size_gb: 60 + # # Custom Scale options + # scale_px_runtime: + # replicas: 2 + # cpu_request: 500m + # cpu_limit: 2 + # memory_request: 2Gi + # memory_limit: 4Gi + # scale_px_compute: + # replicas: 2 + # cpu_request: 1 + # cpu_limit: 3 + # memory_request: 4Gi + # memory_limit: 12Gi + + - name: db2 + description: Db2 OLTP + size: small + instances: + - name: ca-metastore + metadata_size_gb: 20 + data_size_gb: 20 + backup_size_gb: 20 + transactionlog_size_gb: 20 + state: removed + + - name: db2wh + description: Db2 Warehouse + state: removed + + - name: dmc + description: Db2 Data Management Console + state: removed + instances: + - name: data-management-console + description: Data Management Console + size: medium + storage_size_gb: 50 + + - name: dods + description: Decision Optimization + size: small + state: removed + + - name: dp + description: Data Privacy + size: small + state: removed + + - name: dpra + description: Data Privacy Risk Assessment + state: removed + + - name: dv + description: Data Virtualization + size: small + instances: + - name: data-virtualization + state: removed + + # Please note that for EDB Postgres, a secret edb-postgres-license-key must be created in the vault + # before deploying + - name: edb_cp4d + description: EDB Postgres + state: removed + instances: + - name: instance1 + version: "15.4" + #type: Standard + #members: 1 + #size_gb: 50 + #resource_request_cpu: 1 + #resource_request_memory: 4Gi + #resource_limit_cpu: 1 + #resource_limit_memory: 4Gi + + - name: factsheet + description: AI Factsheets + size: small + state: removed + + - name: hadoop + description: Execution Engine for Apache Hadoop + size: small + state: removed + + - name: mantaflow + description: MANTA Automated Lineage + size: small + state: removed + + - name: match360 + description: IBM Match 360 + size: small + wkc_enabled: true + state: removed + + - name: openpages + description: OpenPages + state: removed + + # For Planning Analytics, the case version is needed due to a defect in olm-utils + - name: planning-analytics + description: Planning Analytics + state: removed + + - name: replication + description: Data Replication + license: IDRC + size: small + state: removed + + - name: rstudio + description: RStudio Server with R 3.6 + size: small + state: removed + + - name: spss + description: SPSS Modeler + state: removed + + - name: syntheticdata + description: Synthetic Data Generator + state: removed + + - name: voice-gateway + description: Voice Gateway + replicas: 1 + state: removed + + - name: watson-assistant + description: Watson Assistant + size: small + # noobaa_account_secret: noobaa-admin + # noobaa_cert_secret: noobaa-s3-serving-cert + state: removed + instances: + - name: wa-instance +
description: "Watson Assistant instance" + + - name: watson-discovery + description: Watson Discovery + # noobaa_account_secret: noobaa-admin + # noobaa_cert_secret: noobaa-s3-serving-cert + state: removed + instances: + - name: wd-instance + description: "Watson Discovery instance" + + - name: watson-openscale + description: Watson OpenScale + size: small + state: removed + + - name: watson-speech + description: Watson Speech (STT and TTS) + stt_size: xsmall + tts_size: xsmall + # noobaa_account_secret: noobaa-admin + # noobaa_cert_secret: noobaa-s3-serving-cert + state: removed + + # Please note that for watsonx.ai, the following pre-requisites exist: + # If you want to use foundation models, you neeed to install the Node Feature Discovery and NVIDIA GPU operators. + # You can do so by setting the openshift.gpu.install property to True + # OpenShift AI is a requirement for watsonx.ai. You can install this by setting the openshift.openshift_ai.install property to True + - name: watsonx_ai + description: watsonx.ai + state: removed + installation_options: + tuning_disabled: true + models: + - model_id: allam-1-13b-instruct + state: removed + - model_id: codellama-codellama-34b-instruct-hf + state: removed + - model_id: elyza-japanese-llama-2-7b-instruct + state: removed + - model_id: google-flan-ul2 + state: removed + - model_id: google-flan-t5-xxl + state: removed + - model_id: eleutherai-gpt-neox-20b + state: removed + - model_id: ibm-granite-8b-japanese + state: removed + - model_id: ibm-granite-13b-chat-v1 + state: removed + - model_id: ibm-granite-13b-chat-v2 + state: removed + - model_id: ibm-granite-13b-instruct-v1 + state: removed + - model_id: ibm-granite-13b-instruct-v2 + state: removed + - model_id: ibm-granite-20b-multilingual + state: removed + - model_id: core42-jais-13b-chat + state: removed + - model_id: meta-llama-llama-2-13b-chat + state: removed + - model_id: meta-llama-llama3-8b-instruct + state: removed + - model_id: meta-llama-llama-2-70b-chat + state: removed + - model_id: mncai-llama-2-13b-dpo-v7 + state: removed + - model_id: ibm-mistralai-merlinite-7b + state: removed + - model_id: ibm-mpt-7b-instruct2 + state: removed + - model_id: mistralai-mixtral-8x7b-instruct-v01 + state: removed + - model_id: ibm-mistralai-mixtral-8x7b-instruct-v01-q + state: removed + - model_id: bigscience-mt0-xxl + state: removed + - model_id: bigcode-starcoder + state: removed + + - name: watsonx_data + description: watsonx.data + state: removed + + - name: watsonx_governance + description: watsonx.governance + state: removed + installation_options: + installType: all + enableFactsheet: true + enableOpenpages: true + enableOpenscale: true + + - name: watsonx_orchestrate + description: watsonx.orchestrate + app_connect: + app_connect_project: ibm-app-connect + app_connect_case_version: 11.3.0 + app_connect_channel_version: v11.3 + state: removed + + - name: wkc + description: Watson Knowledge Catalog + size: small + state: removed + installation_options: + install_wkc_core_only: False + enableKnowledgeGraph: False + enableDataQuality: False + enableFactSheet: False + + - name: wml + description: Watson Machine Learning + size: small + state: installed + + - name: wml-accelerator + description: Watson Machine Learning Accelerator + replicas: 1 + size: small + state: removed + + - name: ws + description: Watson Studio + state: installed + + - name: ws-pipelines + description: Watson Studio Pipelines + state: removed + + - name: ws-runtimes + description: Watson Studio Runtimes + runtimes: + 
- ibm-cpd-ws-runtime-py39 + - ibm-cpd-ws-runtime-222-py + - ibm-cpd-ws-runtime-py39gpu + - ibm-cpd-ws-runtime-222-pygpu + - ibm-cpd-ws-runtime-231-pygpu + - ibm-cpd-ws-runtime-r36 + - ibm-cpd-ws-runtime-222-r + - ibm-cpd-ws-runtime-231-r + state: removed + +# +# Cartridges where extra work is needed (will not install automatically) +# + # Product Master requires setup of the Db2 instance secret before install + - name: productmaster + description: Product Master + size: small + state: removed \ No newline at end of file diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-elastic.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-elastic.yaml index ec13fd958..f9e3561e0 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-elastic.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-elastic.yaml @@ -23,6 +23,9 @@ openshift: use_sts: False gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-existing-vpc.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-existing-vpc.yaml index 34152891d..fd0792fbe 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-existing-vpc.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-existing-vpc.yaml @@ -27,6 +27,9 @@ openshift: - subnet-0ea5ac344c0fbadf5 gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-ocs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-ocs.yaml index 85b6d2a24..177da7967 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-ocs.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-rosa-ocs.yaml @@ -18,6 +18,9 @@ openshift: use_sts: False gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-manual.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-manual.yaml index a39340e99..ec4dba11e 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-manual.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-manual.yaml @@ -31,6 +31,9 @@ openshift: hosted_zone_id: Z03102383I1KQIAGVSU66 gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-sno.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-sno.yaml index 376d47c4d..ef305d4a9 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-sno.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic-sno.yaml @@ -25,6 +25,9 @@ openshift: multi_zone: False gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic.yaml index
00cd4ab0b..eb29636a1 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-elastic.yaml @@ -25,6 +25,9 @@ openshift: multi_zone: True gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-existing-vpc.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-existing-vpc.yaml index d4e433476..2a798c2c0 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-existing-vpc.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-existing-vpc.yaml @@ -35,6 +35,9 @@ openshift: ami_id: ami-09249dd86b1933dd5 gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-ocs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-ocs.yaml index 8b55b18e9..530fc91e0 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-ocs.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-aws-self-managed-ocs.yaml @@ -20,6 +20,9 @@ openshift: multi_zone: True gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-azure-aro.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-azure-aro.yaml index 570169805..2cf0b6043 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-azure-aro.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-azure-aro.yaml @@ -40,6 +40,9 @@ openshift: service_cidr: "172.30.0.0/16" gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-azure-self-managed.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-azure-self-managed.yaml index 7cfc6717f..137920d93 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-azure-self-managed.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-azure-self-managed.yaml @@ -41,6 +41,9 @@ openshift: service_cidr: "172.30.0.0/16" gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-auto.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-auto.yaml index 0ce26f55a..c5bda0cec 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-auto.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-auto.yaml @@ -16,6 +16,9 @@ openshift: storage_class: managed-nfs-storage gpu: install: False + openshift_ai: + install: False + channel: fast openshift_storage: - storage_name: auto-storage storage_type: auto diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-satellite-ocs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-satellite-ocs.yaml index f793a9311..8812c21ba 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-satellite-ocs.yaml +++ 
b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp-satellite-ocs.yaml @@ -20,6 +20,9 @@ openshift: type: ibm-roks gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp.yaml index 866972e53..507474b95 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-existing-ocp.yaml @@ -12,6 +12,9 @@ openshift: domain_name: example.com gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-existing-roks-classic.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-existing-roks-classic.yaml index 80111a2ea..c3eb43892 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-existing-roks-classic.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-existing-roks-classic.yaml @@ -12,6 +12,9 @@ openshift: domain_name: example.com gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-cp4waiops.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-cp4waiops.yaml index bd8a6a49a..151a59e2f 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-cp4waiops.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-cp4waiops.yaml @@ -44,6 +44,9 @@ openshift: domain_name: example.com gpu: install: False + openshift_ai: + install: False + channel: fast openshift_storage: - storage_name: auto-storage storage_type: auto diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-nfs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-nfs.yaml index cc56acb84..f99c166a9 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-nfs.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-nfs.yaml @@ -84,6 +84,9 @@ openshift: cos_name: "{{ env_id }}-cos" gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-ocs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-ocs.yaml index 754114a05..2cd372f6e 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-ocs.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-ocs.yaml @@ -66,6 +66,9 @@ openshift: cos_name: "{{ env_id }}-cos" gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: False storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-portworx.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-portworx.yaml index 31c273ca5..dfd1f1c41 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-portworx.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-ibm-cloud-roks-portworx.yaml @@ -66,6 +66,9 @@ openshift: cos_name: "{{ env_id }}-cos" gpu: install: False + openshift_ai: + install: 
False + channel: fast mcg: install: True storage_type: storage-class diff --git a/sample-configurations/sample-dynamic/config-samples/ocp-vsphere-ocs-nfs.yaml b/sample-configurations/sample-dynamic/config-samples/ocp-vsphere-ocs-nfs.yaml index 769d40435..69fb67da5 100644 --- a/sample-configurations/sample-dynamic/config-samples/ocp-vsphere-ocs-nfs.yaml +++ b/sample-configurations/sample-dynamic/config-samples/ocp-vsphere-ocs-nfs.yaml @@ -51,6 +51,9 @@ openshift: ingress_vip: 10.99.92.52 gpu: install: False + openshift_ai: + install: False + channel: fast mcg: install: True storage_type: storage-class From 5fafecf3a8982d1a8f35726414017f415e12ce08 Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Thu, 13 Jun 2024 20:14:11 +0000 Subject: [PATCH 11/14] #727 Fix message of custom v3 image --- cp-deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cp-deploy.sh b/cp-deploy.sh index bb31012fe..1f59cbb89 100755 --- a/cp-deploy.sh +++ b/cp-deploy.sh @@ -640,7 +640,7 @@ if [ -z $CPD_OLM_UTILS_V3_IMAGE ];then export CPD_OLM_UTILS_V3_IMAGE=icr.io/cpopen/cpd/olm-utils-v3:latest.$ARCH fi else - echo "Custom olm-utils-v2 image ${CPD_OLM_UTILS_V2_IMAGE} will be used." + echo "Custom olm-utils-v3 image ${CPD_OLM_UTILS_V3_IMAGE} will be used." fi From e5d109c2c12004bdcc3c301db039e4fdbff8cb0d Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Thu, 13 Jun 2024 21:47:04 +0000 Subject: [PATCH 12/14] #727 Add Data Product Hub --- .../cp4d/cp4d-variables/vars/vars-cp4d-installation.yml | 7 +++++++ .../sample-dynamic/config-samples/cp4d-500.yaml | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml index f7eb213a2..4628c6f90 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-variables/vars/vars-cp4d-installation.yml @@ -78,6 +78,13 @@ cartridge_cr: olm_utils_name: data_governor cr_internal: true force_sequential_install: False + - name: dataproduct + olm_utils_name: dataproduct + cr_cr: DataProduct + cr_name: dataproduct-cr + cr_status_attribute: dataProductStatus + cr_status_completed: Completed + force_sequential_install: False - name: datarefinery olm_utils_name: datarefinery cr_internal: true diff --git a/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml b/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml index abe1dcd26..4e95022ba 100644 --- a/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml +++ b/sample-configurations/sample-dynamic/config-samples/cp4d-500.yaml @@ -54,6 +54,10 @@ cp4d: description: Db2 Data Gate state: removed + - name: dataproduct + description: Data Product Hub + state: removed + - name: datastage-ent description: DataStage Enterprise state: removed From b548e293797e46209383db29f32bb3810039c63e Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Tue, 18 Jun 2024 11:29:05 +0000 Subject: [PATCH 13/14] #730 Install control plane separately from services --- .../tasks/create-catalog-source-olm-utils.yml | 36 +++++++++---------- .../cp4d/cp4d-catalog-source/tasks/main.yml | 10 +++--- .../templates/apply-olm-create-catsrc.j2 | 2 +- .../cp4d/cp4d-cluster/tasks/install-cp4d.yml | 20 +++++++++-- .../cp4d-create-subscriptions-olm-utils.yml | 36 +++++++++++-------- .../cp4d/cp4d-subscriptions/tasks/main.yml | 3 -- 
.../templates/apply-olm-cartridge-sub.j2 | 2 +- 7 files changed, 64 insertions(+), 45 deletions(-) diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/create-catalog-source-olm-utils.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/create-catalog-source-olm-utils.yml index 89d3246e7..937710344 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/create-catalog-source-olm-utils.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/create-catalog-source-olm-utils.yml @@ -1,8 +1,4 @@ --- -- name: Create catalog sources from case files in {{ status_dir }}/cp4d/case using OLM utils - debug: - msg: "" - - name: Ensure that OLM utils work directory exists file: path: /tmp/work @@ -21,7 +17,7 @@ when: (cpd_airgap | default(False) | bool) # Always generate the preview script to log what will be done -- name: Generate command preview script to create catalog sources +- name: Generate command preview script to create catalog sources for Cloud Pak for Data {{ _p_catsrc_scope }} set_fact: _apply_olm_command_script: "{{ lookup('template', 'apply-olm-create-catsrc.j2') }}" vars: @@ -31,67 +27,67 @@ debug: var: _apply_olm_command_script -- name: Write script to "{{ status_dir }}/cp4d/apply-olm-catsrc.sh" +- name: Write script to "{{ status_dir }}/cp4d/apply-olm-catsrc-{{ _p_catsrc_scope }}.sh" copy: content: "{{ _apply_olm_command_script }}" - dest: "{{ status_dir }}/cp4d/apply-olm-catsrc.sh" + dest: "{{ status_dir }}/cp4d/apply-olm-catsrc-{{ _p_catsrc_scope }}.sh" -- name: Generate preview script to create catalog sources, logs are in {{ status_dir }}/log/apply-olm-create-catsrc.log +- name: Generate preview script to create catalog sources, logs are in {{ status_dir }}/log/apply-olm-create-catsrc-{{ _p_catsrc_scope }}.log shell: | - {{ _apply_olm_command_script }} > {{ status_dir }}/log/apply-olm-create-catsrc.log 2>&1 + {{ _apply_olm_command_script }} > {{ status_dir }}/log/apply-olm-create-catsrc-{{ _p_catsrc_scope }}.log 2>&1 -- name: Copy script to {{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc.sh +- name: Copy script to {{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc-{{ _p_catsrc_scope }}.sh copy: src: "/tmp/work/preview.sh" - dest: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc.sh" + dest: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc-{{ _p_catsrc_scope }}.sh" remote_src: True mode: u+rwx # TODO: Remove step once problem in preview.sh is fixed - name: Update script to fix invalid oc apply -f commands replace: - path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc.sh" + path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc-{{ _p_catsrc_scope }}.sh" regexp: '^(.*)oc apply -f << EOF(.*)' replace: 'oc apply -f - << EOF' # TODO: Remove step once problem in olm-utils is fixed - name: Update script to fix invalid cpd-platform catalog source image replace: - path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc.sh" + path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc-{{ _p_catsrc_scope }}.sh" regexp: '^ image: icr.io/cpopen/ibm-cpd-platform-operator-catalog@sha256:953403f1d7193fedb81186ec454fae3ea0852ef4c1929c3c56f12352189b1766' replace: ' image: icr.io/cpopen/ibm-cpd-platform-operator-catalog@sha256:54d3d7aff34444eb1991335831c18272ad217a6445f898e22f0b30f539b8c7cf' # TODO: Remove 
step once problem in olm-utils is fixed - name: Update script to fix invalid DataGate create catalog source command replace: - path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc.sh" + path: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-create-catsrc-{{ _p_catsrc_scope }}.sh" regexp: '(ibm-datagate-prod-\d.\d.\d)(.tgz)' replace: '\1*\2' -- name: Run apply-olm command to create catalog sources +- name: Run apply-olm command to create catalog sources for Cloud Pak for Data {{ _p_catsrc_scope }} block: - - name: Generate OLM command to create catalog sources + - name: Generate OLM command to create catalog sources for Cloud Pak for Data {{ _p_catsrc_scope }} set_fact: _apply_olm_command: "{{ lookup('template', 'apply-olm-create-catsrc.j2') }}" vars: _p_preview_script: False - - name: Run apply-olm command to create catalog sources, logs are in {{ status_dir }}/log/apply-olm-create-catsrc.log + - name: Run apply-olm command to create catalog sources, logs are in {{ status_dir }}/log/apply-olm-create-catsrc-{{ _p_catsrc_scope }}.log shell: | - {{ _apply_olm_command }} > {{ status_dir }}/log/apply-olm-create-catsrc.log 2>&1 + {{ _apply_olm_command }} > {{ status_dir }}/log/apply-olm-create-catsrc-{{ _p_catsrc_scope }}.log 2>&1 - name: If not air-gapped, copy case files from /tmp/work/offline to {{ status_dir }}/cp4d/offline copy: diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/main.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/main.yml index 61b828933..1b375d6ae 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/main.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/tasks/main.yml @@ -1,9 +1,11 @@ --- -- include_role: - name: cp4d-variables +- set_fact: + _catsrc_cartridges_to_install_list: "{{ _cartridges_to_install_list }}" + +- set_fact: + _catsrc_cartridges_to_install_list: "cpfs,cpd_platform" + when: _p_catsrc_scope == 'platform' - include_tasks: create-catalog-source-olm-utils.yml - when: - - _p_current_cp4d_cluster.cp4d_version >= "4.5.0" - include_tasks: wait-catalog-sources-ready.yml \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2 index 24c2cf3d1..2f6957fc5 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-catalog-source/templates/apply-olm-create-catsrc.j2 @@ -5,4 +5,4 @@ --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project }} \
--preview={%- if _p_preview_script -%}true{%- else -%}false{%- endif %} \ --upgrade={%- if _upgrade_cp4d -%}true{%- else -%}false{%- endif %} \ - --components={{ _cartridges_to_install_list }} \ No newline at end of file + --components={{ _catsrc_cartridges_to_install_list }} \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/install-cp4d.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/install-cp4d.yml index e6971c032..536c52e58 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/install-cp4d.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-cluster/tasks/install-cp4d.yml @@ -50,17 +50,19 @@ - name: Prepare OpenShift project {{ current_cp4d_cluster.project }} for Cloud Pak for Data include_tasks: openshift-prepare-project.yml -- name: Create catalog sources for Cloud Pak for Data +- name: Create catalog sources for Cloud Pak for Data platform in project {{ current_cp4d_cluster.project }} include_role: name: cp4d-catalog-source vars: _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}" + _p_catsrc_scope: platform -- name: Create subscriptions for Cloud Pak for Data +- name: Create subscriptions for Cloud Pak for Data platform in project {{ current_cp4d_cluster.project }} include_role: name: cp4d-subscriptions vars: _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}" + _p_subscription_scope: platform - name: Install or uninstall scheduling service include_role: @@ -99,6 +101,20 @@ include_tasks: cp4d-apply-license.yml when: current_cp4d_cluster.cp4d_entitlement | default('') != '' +- name: Create catalog sources for Cloud Pak for Data cartridges in project {{ current_cp4d_cluster.project }} + include_role: + name: cp4d-catalog-source + vars: + _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}" + _p_catsrc_scope: cartridges + +- name: Create subscriptions for Cloud Pak for Data cartridges in project {{ current_cp4d_cluster.project }} + include_role: + name: cp4d-subscriptions + vars: + _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}" + _p_subscription_scope: cartridges + # Install selected cartridges - include_tasks: cp4d-cartridges.yml when: not (cpd_test_cartridges | default(False) | bool) diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml index 81434e73c..e82eaf96a 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml @@ -8,50 +8,58 @@ map(attribute='olm_utils_name') | join(',')}} -- name: Generate preview script to create cartridge subscriptions +- set_fact: + _subscription_cartridges_to_install_list: "{{ _apply_olm_cartridges_list }}" + +- set_fact: + _subscription_cartridges_to_install_list: "cpfs,cpd_platform" + when: _p_subscription_scope == 'platform' + +- name: Generate preview script to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} set_fact: _apply_olm_command_script: "{{ lookup('template', 'apply-olm-cartridge-sub.j2') }}" vars: _p_preview_script: True -- name: Show apply-olm command to create cartridge subscriptions +- name: Show apply-olm command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} debug: var: _apply_olm_command_script -- name: 
Generate preview script to install cartridge subscriptions, logs are in {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.log +- name: Generate preview script to install cartridge subscriptions, logs are in {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.log shell: | - {{ _apply_olm_command_script }} > {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.log 2>&1 + {{ _apply_olm_command_script }} > {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.log 2>&1 -- name: Copy script to {{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.sh +- name: Copy script to {{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.sh copy: src: "/tmp/work/preview.sh" - dest: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.sh" + dest: "{{ status_dir }}/cp4d/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.sh" remote_src: True mode: u+rwx -- name: Generate OLM utils command to create cartridge subscriptions +- name: Generate OLM utils command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} set_fact: _apply_olm_command: "{{ lookup('template', 'apply-olm-cartridge-sub.j2') }}" vars: _p_preview_script: False -- name: Show apply-olm command to create cartridge subscriptions +- name: Show apply-olm command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} debug: var: _apply_olm_command -- name: Starting background task to patch OLM artifacts in project {{ foundational_services_project }}. Logs are in {{ status_dir }}/log/{{ foundational_services_project }}-patch-olm.log +- name: Starting background task to patch OLM artifacts in operators project {{ _p_current_cp4d_cluster.operators_project }}. 
Logs are in {{ status_dir }}/log/{{ _p_current_cp4d_cluster.operators_project }}-patch-olm.log shell: | {{ role_path }}/files/temp-patch-olm.sh \ {{ status_dir }} \ - {{ foundational_services_project }} \ + {{ _p_current_cp4d_cluster.operators_project }} \ {{ _p_current_cp4d_cluster.project }} async: 86400 poll: 0 register: _patch_olm + when: _p_subscription_scope == 'platform' -- name: Run apply-olm command to install cartridge subscriptions, logs are in {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.log +- name: Run apply-olm command to install cartridge subscriptions, logs are in {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.log shell: | - {{ _apply_olm_command }} > {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-olm-cartridge-sub.log 2>&1 + {{ _apply_olm_command }} > {{ status_dir }}/log/{{ _p_current_cp4d_cluster.project }}-apply-subscriptions-{{ _p_subscription_scope }}.log 2>&1 failed_when: False retries: 2 delay: 10 @@ -60,9 +68,9 @@ - name: Show subscriptions state if apply-olm failed block: - - name: Retrieving state of all operators in project {{ foundational_services_project }} + - name: Retrieving state of all operators in project {{ _p_current_cp4d_cluster.operators_project }} shell: | - oc get subscriptions.operators.coreos.com -n {{ foundational_services_project }} \ + oc get subscriptions.operators.coreos.com -n {{ _p_current_cp4d_cluster.operators_project }} \ --no-headers \ --sort-by=.metadata.creationTimestamp \ -o jsonpath='{range .items[*]}{.metadata.name}{","}{.metadata.creationTimestamp}{","}{.status.installedCSV}{","}{.status.state}{"\n"}{end}' diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/main.yml b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/main.yml index dda6bdd86..6dbab08d2 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/main.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/main.yml @@ -1,5 +1,2 @@ --- -- include_role: - name: cp4d-variables - - include_tasks: cp4d-create-subscriptions-olm-utils.yml \ No newline at end of file diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 index f6179550f..f2e0ed0af 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/templates/apply-olm-cartridge-sub.j2 @@ -5,4 +5,4 @@ --cpd_operator_ns={{ _p_current_cp4d_cluster.operators_project }} \ --upgrade={%- if _upgrade_cp4d -%}true{%- else -%}false{%- endif %} \ --preview={%- if _p_preview_script -%}true{%- else -%}false{%- endif %} \ - --components={{ _apply_olm_cartridges_list }} \ No newline at end of file + --components={{ _subscription_cartridges_to_install_list }} \ No newline at end of file From 6f8c78fc5a2a89b0ebecd6358da403283239fb7f Mon Sep 17 00:00:00 2001 From: Frank Ketelaars Date: Tue, 18 Jun 2024 18:52:21 +0000 Subject: [PATCH 14/14] #730 Clean up messages --- .../tasks/cp4d-create-subscriptions-olm-utils.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml 
b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml index e82eaf96a..dec553fd8 100644 --- a/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml +++ b/automation-roles/50-install-cloud-pak/cp4d/cp4d-subscriptions/tasks/cp4d-create-subscriptions-olm-utils.yml @@ -15,13 +15,13 @@ _subscription_cartridges_to_install_list: "cpfs,cpd_platform" when: _p_subscription_scope == 'platform' -- name: Generate preview script to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} +- name: Generate preview script to create subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} set_fact: _apply_olm_command_script: "{{ lookup('template', 'apply-olm-cartridge-sub.j2') }}" vars: _p_preview_script: True -- name: Show apply-olm command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} +- name: Show apply-olm command to create subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} debug: var: _apply_olm_command_script @@ -36,13 +36,13 @@ remote_src: True mode: u+rwx -- name: Generate OLM utils command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} +- name: Generate OLM utils command to create subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} set_fact: _apply_olm_command: "{{ lookup('template', 'apply-olm-cartridge-sub.j2') }}" vars: _p_preview_script: False -- name: Show apply-olm command to create cartridge subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} +- name: Show apply-olm command to create subscriptions for Cloud Pak for Data {{ _p_subscription_scope }} debug: var: _apply_olm_command
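To make the reworked two-phase flow concrete, below is a sketch of how the roles are invoked with the scope parameters introduced in this patch series; the tasks mirror the changes to install-cp4d.yml and are illustrative only:

```
# Phase 1: catalog sources and subscriptions for the platform only.
# A scope of 'platform' narrows the component list to cpfs,cpd_platform
# via the set_fact tasks added in this patch series.
- name: Create catalog sources for Cloud Pak for Data platform
  include_role:
    name: cp4d-catalog-source
  vars:
    _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}"
    _p_catsrc_scope: platform

# Phase 2: repeat for the cartridges, which uses the full component
# list derived from _cartridges_to_install_list.
- name: Create subscriptions for Cloud Pak for Data cartridges
  include_role:
    name: cp4d-subscriptions
  vars:
    _p_current_cp4d_cluster: "{{ current_cp4d_cluster }}"
    _p_subscription_scope: cartridges
```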