Skip to content

Commit

Permalink
Merge pull request #431 from IBM/fk-misc
Browse files Browse the repository at this point in the history
Miscellaneous changes
  • Loading branch information
fketelaars authored May 21, 2023
2 parents 9dbeb46 + 36a421c commit 3ee9217
Show file tree
Hide file tree
Showing 20 changed files with 422 additions and 159 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
# Destroy every OpenShift cluster defined in the configuration by invoking
# the destroy-aws role once per cluster entry. The default([]) filter makes
# this file a safe no-op when all_config.openshift is undefined.
- name: Destroy AWS-hosted OpenShift clusters
  include_role:
    name: destroy-aws
  loop: "{{ all_config.openshift | default([]) }}"
  loop_control:
    # NOTE(review): the parallel ARO task file uses loop_var
    # current_openshift_cluster (no leading underscore) — confirm the
    # destroy-aws role really references _current_openshift_cluster.
    loop_var: _current_openshift_cluster
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
# Destroy every OpenShift cluster defined in the configuration by invoking
# the destroy-aro role once per cluster entry. The default([]) filter makes
# this file a safe no-op when all_config.openshift is undefined.
- name: Destroy Azure Red Hat OpenShift (ARO) clusters
  include_role:
    name: destroy-aro
  loop: "{{ all_config.openshift | default([]) }}"
  loop_control:
    # NOTE(review): the parallel AWS task file uses loop_var
    # _current_openshift_cluster (leading underscore) — confirm the
    # destroy-aro role really references current_openshift_cluster.
    loop_var: current_openshift_cluster
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
---
# Tear down Terraform-managed infrastructure via the destroy-terraform role.
- name: Destroy Terraform-managed infrastructure
  include_role:
    name: destroy-terraform
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
# Dispatch infrastructure destruction to the platform-specific task file,
# selected by the cloud_platform variable. At most one include runs; the
# when conditions are mutually exclusive.
- name: Destroy IBM Cloud infrastructure
  include_tasks: destroy-infra-ibm-cloud.yml
  when: cloud_platform == 'ibm-cloud'

- name: Destroy AWS infrastructure
  include_tasks: destroy-infra-aws.yml
  when: cloud_platform == 'aws'

- name: Destroy Azure infrastructure
  include_tasks: destroy-infra-azure.yml
  when: cloud_platform == 'azure'
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
_p_openshift_cluster_name: "{{ current_openshift_cluster.name }}"

- name: Check if OcsCluster custom resource exists
command: |
shell: |
oc get OcsCluster
failed_when: False
register: _ocs_cluster_exists
Expand All @@ -32,24 +32,50 @@
debug:
var: _ocs_cluster_exists

- name: Delete namespaces which use OCS storage classes
shell: |
for ns in $(oc get pvc -A \
-o jsonpath='{range .items[*]}{@.metadata.namespace}{" "}{@.spec.storageClassName}{"\n"}' | \
grep -v -E '^openshift' | \
grep 'ocs-storagecluster' | \
awk '{print $1}' | uniq);do
oc delete namespace $ns --grace-period=5 --timeout=10s
done
failed_when: False
register: _delete_ns_result
- block:
- name: Delete OcsCluster custom resource
shell: |
oc delete OcsCluster --all --wait=false --ignore-not-found
oc delete ns openshift-storage --wait=false --ignore-not-found
- name: Show output of namespace deletion
debug:
var: _delete_ns_result
- name: Force deletion of OcsCluster
shell: |
oc -n openshift-storage patch OcsCluster ocs -p '{"metadata":{"finalizers":[]}}' --type=merge
failed_when: false

- name: Wait for OcsCluster to be deleted
pause:
seconds: 20

- name: Delete CRs in openshift-storage project
shell: |
oc -n openshift-storage patch persistentvolumeclaim/db-noobaa-db-0 -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephblockpool.ceph.rook.io/ocs-storagecluster-cephblockpool -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephcluster.ceph.rook.io/ocs-storagecluster-cephcluster -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephfilesystem.ceph.rook.io/ocs-storagecluster-cephfilesystem -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephobjectstore.ceph.rook.io/ocs-storagecluster-cephobjectstore -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephobjectstoreuser.ceph.rook.io/noobaa-ceph-objectstore-user -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch cephobjectstoreuser.ceph.rook.io/ocs-storagecluster-cephobjectstoreuser -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch noobaa/noobaa -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch backingstores.noobaa.io/noobaa-default-backing-store -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch bucketclasses.noobaa.io/noobaa-default-bucket-class -p '{"metadata":{"finalizers":[]}}' --type=merge
oc -n openshift-storage patch storagecluster.ocs.openshift.io/ocs-storagecluster -p '{"metadata":{"finalizers":[]}}' --type=merge
failed_when: false

- name: Wait for CRs to be deleted
pause:
seconds: 20

- name: Delete pods in openshift-storage project
shell: |
oc delete po -n openshift-storage --all --force --grace-period=0
- name: Wait for pods to be deleted
pause:
seconds: 20

- name: Delete all remaining PVs
shell:
oc delete pv --all --wait=false

- name: Delete all OcsCluster custom resources
command: |
oc delete OcsCluster --all --timeout=600s
when: _ocs_cluster_exists.rc == 0
when: _ibmcloud_oc_cluster_result_json.rc == 0
when: _ocs_cluster_exists.rc == 0
Original file line number Diff line number Diff line change
Expand Up @@ -4,37 +4,40 @@
var: _previous_cp4d_cluster

- set_fact:
_previous_openshift_cluster: "{{ _previous_all_config.openshift | json_query(query) | first | default({}) }}"
_previous_cp4d_openshift_cluster: "{{ all_config.openshift | json_query(query) | first | default({}) }}"
vars:
query: >-
[?name=='{{ _previous_cp4d_cluster.openshift_cluster_name }}']
- name: Found OpenShift in previous config
- name: Found OpenShift in current config
debug:
var: _previous_openshift_cluster
var: _previous_cp4d_openshift_cluster

- name: Login to the OpenShift cluster "{{ _previous_openshift_cluster.name }}"
include_role:
name: openshift-login
vars:
_p_openshift_cluster_name: "{{ _previous_openshift_cluster.name }}"
# Remove Cloud Pak for Data only if the OpenShift cluster is still referenced
- block:
- name: Login to OpenShift cluster {{ _previous_cp4d_openshift_cluster.name }}
include_role:
name: openshift-login
vars:
_p_openshift_cluster_name: "{{ _previous_cp4d_openshift_cluster.name }}"

- name: Delete all cartridge custom resources from Cloud Pak for Data project {{ _previous_cp4d_cluster.project }}
script: |
cp4d-delete-cartridges.sh \
{{ _previous_cp4d_cluster.project }}
register: _cp4d_delete_cartridges
- name: Delete all cartridge custom resources from Cloud Pak for Data project {{ _previous_cp4d_cluster.project }}
script: |
cp4d-delete-cartridges.sh \
{{ _previous_cp4d_cluster.project }}
register: _cp4d_delete_cartridges

- name: Result of deleting cartridges
debug:
var: _cp4d_delete_cartridges
- name: Result of deleting cartridges
debug:
var: _cp4d_delete_cartridges

- name: Delete Cloud Pak for Data control plane and project {{ _previous_cp4d_cluster.project }}
script: |
cp4d-delete-control-plane.sh \
{{ _previous_cp4d_cluster.project }}
register: _cp4d_delete_control_plane
- name: Delete Cloud Pak for Data control plane and project {{ _previous_cp4d_cluster.project }}
script: |
cp4d-delete-control-plane.sh \
{{ _previous_cp4d_cluster.project }}
register: _cp4d_delete_control_plane

- name: Result of deleting control plane
debug:
var: _cp4d_delete_control_plane
- name: Result of deleting control plane
debug:
var: _cp4d_delete_control_plane
when: _previous_cp4d_openshift_cluster != {}
Original file line number Diff line number Diff line change
Expand Up @@ -35,18 +35,11 @@
- name: Unpack cloudctl from {{ status_dir }}/downloads/cloudctl-linux-amd64.tar.gz
unarchive:
src: "{{ status_dir }}/downloads/cloudctl-linux-amd64.tar.gz"
dest: "{{ status_dir }}/downloads"
dest: "/usr/local/bin"

- name: Rename to cloudctl
shell: |
mv -f {{ status_dir }}/downloads/cloudctl-linux-amd64 {{ status_dir }}/downloads/cloudctl
- name: Make sure cloudctl can be run within path
copy:
src: "{{ status_dir }}/downloads/cloudctl"
dest: /usr/local/bin/cloudctl
mode: preserve
remote_src: True
mv -f /usr/local/bin/cloudctl-linux-amd64 /usr/local/bin/cloudctl
- name: Get cloudctl version
shell: |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@
- name: Validate if the OpenShift cluster {{ _p_openshift_cluster_name }} exists
command: "ibmcloud oc cluster get -c {{ _p_openshift_cluster_name }} --output json"
register: ibmcloud_oc_cluster_result_json
retries: 20
delay: 10
until: ibmcloud_oc_cluster_result_json.rc==0

- set_fact:
ibmcloud_oc_cluster: "{{ ibmcloud_oc_cluster_result_json.stdout | from_json }}"
Expand Down
Loading

0 comments on commit 3ee9217

Please sign in to comment.