#!/bin/bash
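
# Wipes Portworx from an IBM Cloud Kubernetes Service cluster: runs the
# Portworx talisman job to delete Portworx components (and optionally the
# data on the volumes), removes the Portworx Helm release, and deletes the
# Portworx service instance from the IBM Cloud catalog.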

TALISMAN_IMAGE=portworx/talisman
TALISMAN_TAG=latest
WIPE_CLUSTER="--wipecluster"
MAX_RETRIES=60
TIME_BEFORE_RETRY=5 # seconds
JOB_NAME=talisman
KUBECTL_EXTRA_OPTS=""
WIPER_IMAGE=portworx/px-node-wiper
WIPER_TAG=latest
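
# logmessage prints a blank line followed by its arguments, on stderr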
logmessage() {
    echo "" >&2
    echo "$@" >&2
}

## Check whether jq is installed
if ! which jq &>/dev/null
then
    echo "jq is not installed... exiting"
    exit 1
fi

## Check whether the ibmcloud CLI is installed
if ! which ibmcloud &>/dev/null
then
    echo "The ibmcloud CLI is not installed. Please install ibmcloud... exiting"
    exit 1
fi
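
# ask prompts the user with a yes/no question and returns 0 for yes, 1 for
# no; the answer defaults to No when the user just presses enter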
ask() {
    # https://djm.me/ask
    local prompt default reply
    prompt="y/N"
    default=${2:-N}

    # Ask the question (not using "read -p" as it uses stderr not stdout)
    echo -n "$1 [$prompt]: "

    # Read the answer (use /dev/tty in case stdin is redirected from somewhere else)
    read reply </dev/tty
    if [ $? -ne 0 ]; then
        logmessage "ERROR: Could not ask for user input - please run via interactive shell"
        exit 1
    fi

    # Default? (e.g. user presses enter)
    if [ -z "$reply" ]; then
        reply=$default
    fi

    # Check if the reply is valid
    case "$reply" in
        Y*|y*) return 0 ;;
        N*|n*) return 1 ;;
        * ) echo "invalid reply: $reply"; return 1 ;;
    esac
}
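
# Read the cluster name from the IBM Cloud cluster-info ConfigMap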
CLUSTER_NAME=$(kubectl -n kube-system get cm cluster-info -o jsonpath='{.data.cluster-config\.json}' | jq -r '.name')

if ! ask "The operation will delete Portworx components and metadata from the cluster. Do you want to continue?" N; then
    logmessage "Aborting Portworx wipe from the cluster..."
    exit 1
else
    if ! ask "Do you also want to wipe the data from the volumes?" N; then
        logmessage "The operation will delete Portworx components and metadata from the cluster. The data will not be wiped from the volumes..."
        WIPE_CLUSTER=""
    else
        if ! ask "The operation will delete Portworx components and metadata from the cluster. The operation is irreversible and will lead to DATA LOSS. Do you want to continue?" N; then
            logmessage "The operation will delete Portworx components and metadata from the cluster. The data will not be wiped from the volumes..."
            WIPE_CLUSTER=""
        else
            logmessage "The operation will delete Portworx components and metadata and the data on the volumes..."
            WIPE_CLUSTER="--wipecluster"
        fi
    fi
fi
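
# NOTE: CLUSTER_VERSION is not set anywhere else in this script, so the
# OpenShift branch below would never run as written. As a minimal sketch (an
# assumption, not part of the original), detect OpenShift from the API group
# list: OpenShift clusters expose *.openshift.io API groups.
CLUSTER_VERSION=$(kubectl api-versions 2>/dev/null | grep -m1 openshift || true)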

##########################
# If this is an OpenShift cluster, the talisman service account needs to be
# added to the privileged scc
##########################
if [[ $CLUSTER_VERSION == *"openshift"* ]]; then
    if command -v oc &>/dev/null; then
        _out=$(command -v oc 2>&1 && oc version 2>&1)
        if [ $? -eq 0 ]; then
            echo "Detected OpenShift system. Adding talisman-account user to privileged scc"
            oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:talisman-account
            _rc=$?
            if [ $_rc -ne 0 ]; then
                logmessage "failed to add talisman-account to privileged scc. exit code: $_rc"
            fi
        else
            echo "oc binary found but oc version command failed. Not using OpenShift mode."
        fi
    fi
fi
##########################

VER=$(kubectl version --short | awk -Fv '/Server Version: /{print $3}')
if [ -z "$VER" ]; then
    logmessage "failed to get kubernetes server version. Make sure you have kubectl setup on current machine."
    exit 1
fi
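
# Split "major.minor.patch" into an array, e.g. 1.14.1 -> (1 14 1)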
VER=( ${VER//./ } )
echo "Parsed version is ${VER[0]}.${VER[1]}"

if [ "${VER[0]}.${VER[1]}" == "1.7" ] || [ "${VER[0]}.${VER[1]}" == "1.6" ]; then
    logmessage "This script doesn't support wiping Portworx from Kubernetes ${VER[0]}.${VER[1]} clusters."
    exit 1
fi

CLIENT_VER=$(kubectl version --short | awk -Fv '/Client Version: /{print $2}')
if [ -z "$CLIENT_VER" ]; then
    logmessage "failed to get kubernetes client version. Make sure you have kubectl setup on current machine."
    exit 1
fi
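
# Encode the client version as an integer (major*100 + minor); kubectl older
# than 1.14 needs --show-all to list completed pods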
CLIENT_VERI=$(echo $CLIENT_VER | awk -F. '{print $1*100+$2}')
echo "Parsed client version (int) is ${CLIENT_VERI}"
if [ $CLIENT_VERI -lt 114 ]; then
    KUBECTL_EXTRA_OPTS="--show-all"
else
    KUBECTL_EXTRA_OPTS=""
fi
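
# Remove any talisman job left over from a previous run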
kubectl delete -n kube-system job talisman 2>/dev/null || true
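
# Wait for the old talisman pods to terminate before creating the new job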
RETRY_CNT=0
while true; do
    PODS=$(kubectl get pods -n kube-system -l name=$JOB_NAME $KUBECTL_EXTRA_OPTS 2>/dev/null)
    if [ $? -eq 0 ]; then
        NUM_PODS=$(echo -n "$PODS" | grep -c -v NAME)
        if [ $NUM_PODS -eq 0 ]; then
            break
        fi
    fi

    RETRY_CNT=$((RETRY_CNT+1))
    if [ $RETRY_CNT -ge $MAX_RETRIES ]; then
        logmessage "failed to delete old talisman pods"
        exit 1
    fi

    sleep $TIME_BEFORE_RETRY
done
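
# Create the talisman service account, cluster role binding, and the job that
# performs the wipe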
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: talisman-account
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: talisman-role-binding
subjects:
- kind: ServiceAccount
  name: talisman-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
  name: $JOB_NAME
  namespace: kube-system
spec:
  backoffLimit: 1
  template:
    metadata:
      labels:
        name: $JOB_NAME
    spec:
      serviceAccount: talisman-account
      containers:
      - name: $JOB_NAME
        image: $TALISMAN_IMAGE:$TALISMAN_TAG
        args: ["-operation",
               "delete",
               "$WIPE_CLUSTER",
               "-wiperimage",
               "$WIPER_IMAGE",
               "-wipertag",
               "$WIPER_TAG"]
        imagePullPolicy: Always
        volumeMounts:
        - name: etcpwx
          mountPath: /etc/pwx
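      # /etc/pwx on the host holds the Portworx configuration that the node
      # wiper cleans up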
      volumes:
      - name: etcpwx
        hostPath:
          path: /etc/pwx
      restartPolicy: Never
EOF

echo "Talisman job for wiping Portworx started. Monitor logs using: 'kubectl logs -n kube-system -l job-name=talisman'"
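
# Wait for the talisman job to finish, streaming its logs while it runs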
NUM_DESIRED=1
RETRY_CNT=0
while true; do
    NUM_SUCCEEDED=0
    NUM_FAILED=0
    PODS=$(kubectl get pods -n kube-system -l name=$JOB_NAME 2>/dev/null)
    if [ $? -eq 0 ]; then
        CREATING=$(echo "$PODS" | grep ContainerCreating)
        if [ ! -z "$CREATING" ]; then
            echo "Pod that will perform wipe of Portworx is still in container creating phase"
        else
            NUM_FAILED=$(kubectl get job -n kube-system talisman $KUBECTL_EXTRA_OPTS -o jsonpath='{.status.failed}' 2>/dev/null)
            if [ $? -eq 0 ]; then
                if [ ! -z "$NUM_FAILED" ] && [ $NUM_FAILED -ge 1 ]; then
                    kubectl logs -n kube-system -l name=$JOB_NAME
                    logmessage "Job to wipe px cluster failed."
                    exit 1
                fi
            fi

            NUM_SUCCEEDED=$(kubectl get job -n kube-system talisman $KUBECTL_EXTRA_OPTS -o jsonpath='{.status.succeeded}' 2>/dev/null)
            if [ ! -z "$NUM_SUCCEEDED" ] && [ $NUM_SUCCEEDED -eq $NUM_DESIRED ]; then
                break
            fi

            echo "waiting on $JOB_NAME to complete..."
            RUNNING_POD=$(echo "$PODS" | grep Running | awk '/^talisman/{print $1}')
            if [ ! -z "$RUNNING_POD" ]; then
                echo "Monitoring logs of pod: $RUNNING_POD"
                kubectl logs -n kube-system -f $RUNNING_POD
            fi
        fi
    fi

    RETRY_CNT=$((RETRY_CNT+1))
    if [ $RETRY_CNT -ge $MAX_RETRIES ]; then
        kubectl logs -n kube-system -l name=$JOB_NAME
        logmessage "Timed out trying to wipe Portworx cluster."
        exit 1
    fi

    sleep $TIME_BEFORE_RETRY
done
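
# The wipe finished; clean up the talisman artifacts created above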
echo "Portworx cluster wipe successfully completed."
_rc=0
kubectl delete job -n kube-system talisman || _rc=$?
kubectl delete serviceaccount -n kube-system talisman-account || _rc=$?
kubectl delete clusterrolebinding talisman-role-binding || _rc=$?
kubectl delete crd volumeplacementstrategies.portworx.io 2>/dev/null || true
if [ $_rc -ne 0 ]; then
    logmessage "error cleaning up pods"
fi

echo "removing the portworx helm release from the cluster"
_rc=0
helm_release=$(helm ls -A --output json | jq -r '.[]|select(.name=="portworx") | .name')
[[ -z "$helm_release" ]] && { echo "Unable to find helm release for portworx. Ensure your helm client is at version 3 and has access to the cluster."; exit; }
# helm ls -A searches all namespaces, so uninstall in the namespace where the
# release was actually found
helm_namespace=$(helm ls -A --output json | jq -r '.[]|select(.name=="portworx") | .namespace')
helm uninstall portworx -n "$helm_namespace" || _rc=$?
if [ $_rc -ne 0 ]; then
    logmessage "error removing the helm release"
    exit 1
fi

echo "Removing the Portworx service instance from the IBM Cloud catalog"
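# Find the Portworx service instance whose "clusters" parameter matches this
# cluster's name, then delete it from the account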
Bx_PX_svc_name=$(ibmcloud resource service-instances --service-name portworx --output json | jq -r --arg CLUSTERNAME "$CLUSTER_NAME" '.[]|select((.parameters.clusters==$CLUSTERNAME)) | .name')
ibmcloud resource service-instance-delete "${Bx_PX_svc_name}" -f