diff --git a/backup.jps b/backup.jps index 1032e3f..8c2b9c0 100644 --- a/backup.jps +++ b/backup.jps @@ -5,8 +5,7 @@ id: db-backup targetEditions: any logo: /images/backup-logo.png description: Backup Add-On for the database. It can be used to create scheduled backups according to any required timezone and restore corrupted databases, even if the content has been completely deleted. - -baseUrl: https://raw.githubusercontent.com/jelastic-jps/database-backup-addon/master +baseUrl: https://raw.githubusercontent.com/sych74/database-backup-addon/pitr targetNodes: nodeType: @@ -145,6 +144,25 @@ settings: tooltip: "Always unmount backup storage when backup/restore is finished." value: false hidden: false + - type: toggle + name: isPitr + caption: PITR + tooltip: "Enable Point-In-Time Recovery." + value: false + hidden: false + showIf: + true: + - type: displayfield + cls: warning + height: 30, + hideLabel: true, + markup: "Database is not configured for PITR. Click Apply to configure automatically, or close and configure manually following documentation." + - type: displayfield + name: displayPitr + markup: "" + hidden: true + hideLabel: true + cls: warning - type: displayfield name: displayfield markup: Please specify the database user that has enough privileges to access and modify all the databases stored on server. Username and password are required for all the DB servers except Redis. @@ -168,6 +186,10 @@ settings: restore: fields: [] onBeforeInit: scripts/restoreOnBeforeInit.js + + pitr: + fields: [] + onBeforeInit: scripts/pitrOnBeforeInit.js onBeforeInit: scripts/backupOnBeforeInit.js @@ -283,6 +305,7 @@ actions: backupExecNode: ${targetNodes.master.id} storageEnv: ${response.storageEnvShortName} isAlwaysUmount: ${this.isAlwaysUmount} + isPitr: ${this.isPitr} nodeGroup: ${this.nodeGroup} dbuser: ${this.dbuser} dbpass: ${this.dbpass} @@ -311,9 +334,20 @@ actions: restore: - cmd[${targetNodes.nodeGroup}]: |- + rm -f /root/.backupedenv /root/.backuptime /root/.backupid || exit 0; echo "${settings.backupedEnvName}" > /root/.backupedenv - echo "${settings.backupDir}" > /root/.backupid user: root + - if ("${settings.isPitr}" == "true"): + - script: | + var dateTimeInput = '${settings.restoreTime}'; + var [date, time] = dateTimeInput.split('T'); + var formattedDateTime = date + " " + time.slice(0, 5) + ":00"; + return {result: 0, formattedDateTime: formattedDateTime}; + - cmd[${targetNodes.nodeGroup}]: echo "${response.formattedDateTime}" > /root/.backuptime; + user: root + - else: + - cmd[${targetNodes.nodeGroup}]: echo "${settings.backupDir}" > /root/.backupid; + user: root - callScript: restore - deleteDBdump @@ -367,6 +401,7 @@ actions: - setGlobals: storageEnv: ${settings.storageName} isAlwaysUmount: ${settings.isAlwaysUmount} + isPitr: ${settings.isPitr} - if ("${settings.scheduleType}" == 2): - convert - else: @@ -377,14 +412,21 @@ actions: - else: - removePermanentMount - addPermanentMount + - if ("${settings.isPitr}" == "true"): setupPitr - installScript: cronTime: ${globals.cron} backupCount: ${settings.backupCount} isAlwaysUmount: ${globals.isAlwaysUmount} + isPitr: ${globals.isPitr} nodeGroup: ${targetNodes.nodeGroup} dbuser: ${settings.dbuser} dbpass: ${settings.dbpass} - + + setupPitr: + cmd[${nodes.sqldb.master.id}]: |- + wget --tries=10 -O /tmp/pitr.sh ${baseUrl}/scripts/pitr.sh && \ + chmod +x /tmp/pitr.sh && /tmp/pitr.sh setupPitr ${settings.dbuser} ${settings.dbpass}; + addPermanentMount: - getStorageCtid - script: | diff --git a/scripts/backup-logic.sh 
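A rough illustration of the restore-time handling added above: the inline script normalizes the ISO value of the restoreTime field into the "YYYY-MM-DD HH:MM:00" form that is written to /root/.backuptime and later read by restore-logic.sh. A minimal shell sketch of the same conversion, assuming GNU date and a placeholder timestamp (the manifest itself does this in JavaScript):

restore_time="2024-05-01T13:45"                        # placeholder for ${settings.restoreTime}
formatted=$(date -d "${restore_time/T/ }" "+%Y-%m-%d %H:%M:00")
echo "$formatted" > /root/.backuptime                  # -> 2024-05-01 13:45:00
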
b/scripts/backup-logic.sh index 8a3638a..b6fa286 100644 --- a/scripts/backup-logic.sh +++ b/scripts/backup-logic.sh @@ -1,20 +1,58 @@ #!/bin/bash -BASE_URL=$2 -BACKUP_TYPE=$3 -NODE_ID=$4 -BACKUP_LOG_FILE=$5 -ENV_NAME=$6 -BACKUP_COUNT=$7 -DBUSER=$8 -DBPASSWD=$9 -USER_SESSION=${10} -USER_EMAIL=${11} - -BACKUP_ADDON_REPO=$(echo ${BASE_URL}|sed 's|https:\/\/raw.githubusercontent.com\/||'|awk -F / '{print $1"/"$2}') -BACKUP_ADDON_BRANCH=$(echo ${BASE_URL}|sed 's|https:\/\/raw.githubusercontent.com\/||'|awk -F / '{print $3}') +BASE_URL=$1 +BACKUP_TYPE=$2 +NODE_ID=$3 +BACKUP_LOG_FILE=$4 +ENV_NAME=$5 +BACKUP_COUNT=$6 +DBUSER=$7 +DBPASSWD=$8 +USER_SESSION=$9 +USER_EMAIL=${10} +PITR=${11} + +# Define PID file location after ENV_NAME is available +readonly LOCK_FILE="/var/run/${ENV_NAME}_backup.pid" + +# Add cleanup trap before any potential exit points +trap 'rm -f "${LOCK_FILE}"' EXIT + +# Check if another backup process is running +if [ -f "${LOCK_FILE}" ]; then + pid=$(cat "${LOCK_FILE}") + if kill -0 "${pid}" 2>/dev/null; then + echo "Another backup process (PID: ${pid}) is already running" | tee -a "${BACKUP_LOG_FILE}" + exit 1 + else + echo "Removing stale lock file" | tee -a "${BACKUP_LOG_FILE}" + rm -f "${LOCK_FILE}" + fi +fi + +# Create PID file +echo $$ > "${LOCK_FILE}" + +# Extract repository and branch information +BACKUP_ADDON_REPO=$(echo ${BASE_URL} | sed 's|https:\/\/raw.githubusercontent.com\/||' | awk -F / '{print $1"/"$2}') +BACKUP_ADDON_BRANCH=$(echo ${BASE_URL} | sed 's|https:\/\/raw.githubusercontent.com\/||' | awk -F / '{print $3}') BACKUP_ADDON_COMMIT_ID=$(git ls-remote https://github.com/${BACKUP_ADDON_REPO}.git | grep "/${BACKUP_ADDON_BRANCH}$" | awk '{print $1}') +# Define backup directories +DUMP_BACKUP_DIR=/root/backup/dump +BINLOGS_BACKUP_DIR=/root/backup/binlogs +SQL_DUMP_NAME=db_backup.sql + +# Prepare backup directories +rm -rf $DUMP_BACKUP_DIR && mkdir -p $DUMP_BACKUP_DIR +rm -rf $BINLOGS_BACKUP_DIR && mkdir -p $BINLOGS_BACKUP_DIR + +# Default PITR to false if not set +if [ -z "$PITR" ]; then + PITR="false" +fi + +# Determine MongoDB type if [ "$COMPUTE_TYPE" == "mongodb" ]; then if grep -q '^replication' /etc/mongod.conf; then MONGO_TYPE="-replica-set" @@ -23,8 +61,10 @@ if [ "$COMPUTE_TYPE" == "mongodb" ]; then fi fi -source /etc/jelastic/metainf.conf; +# Source external configuration +source /etc/jelastic/metainf.conf +# Determine Redis type if [ "$COMPUTE_TYPE" == "redis" ]; then REDIS_CONF_PATH=$(realpath /etc/redis.conf) if grep -q '^cluster-enabled yes' ${REDIS_CONF_PATH}; then @@ -34,37 +74,64 @@ if [ "$COMPUTE_TYPE" == "redis" ]; then fi fi -function forceInstallUpdateRestic(){ - wget --tries=10 -O /tmp/installUpdateRestic ${BASE_URL}/scripts/installUpdateRestic && \ - mv -f /tmp/installUpdateRestic /usr/sbin/installUpdateRestic && \ - chmod +x /usr/sbin/installUpdateRestic && /usr/sbin/installUpdateRestic +# Determine server IP address +SERVER_IP_ADDR=$(ip a | grep -A1 venet0 | grep inet | awk '{print $2}' | sed 's/\/[0-9]*//g' | tail -n 1) +[ -n "${SERVER_IP_ADDR}" ] || SERVER_IP_ADDR="localhost" + +# Determine MySQL/MariaDB client applications +if which mariadb 2>/dev/null; then + CLIENT_APP="mariadb" +else + CLIENT_APP="mysql" +fi + +if which mariadb-dump 2>/dev/null; then + DUMP_APP="mariadb-dump" +else + DUMP_APP="mysqldump" +fi + +# Forces installation or update of Restic backup tool +# Downloads and installs the latest version of Restic +# Ensures proper permissions and execution rights +function forceInstallUpdateRestic() { + wget --tries=10 -O 
/tmp/installUpdateRestic ${BASE_URL}/scripts/installUpdateRestic && \ + mv -f /tmp/installUpdateRestic /usr/sbin/installUpdateRestic && \ + chmod +x /usr/sbin/installUpdateRestic && /usr/sbin/installUpdateRestic } -function sendEmailNotification(){ +# Sends email notifications for backup events +# Handles platform version checking and email delivery +# @param: none - uses global environment variables +# Sends notifications for stale locks and backup issues +function sendEmailNotification() { if [ -e "/usr/lib/jelastic/modules/api.module" ]; then - [ -e "/var/run/jem.pid" ] && return 0; - CURRENT_PLATFORM_MAJOR_VERSION=$(jem api apicall -s --connect-timeout 3 --max-time 15 [API_DOMAIN]/1.0/statistic/system/rest/getversion 2>/dev/null |jq .version|grep -o [0-9.]*|awk -F . '{print $1}') + [ -e "/var/run/jem.pid" ] && return 0 + CURRENT_PLATFORM_MAJOR_VERSION=$(jem api apicall -s --connect-timeout 3 --max-time 15 [API_DOMAIN]/1.0/statistic/system/rest/getversion 2>/dev/null | jq .version | grep -o [0-9.]* | awk -F . '{print $1}') if [ "${CURRENT_PLATFORM_MAJOR_VERSION}" -ge "7" ]; then - echo $(date) ${ENV_NAME} "Sending e-mail notification about removing the stale lock" | tee -a $BACKUP_LOG_FILE; + echo $(date) ${ENV_NAME} "Sending e-mail notification about removing the stale lock" | tee -a $BACKUP_LOG_FILE SUBJECT="Stale lock is removed on /opt/backup/${ENV_NAME} backup repo" BODY="Please pay attention to /opt/backup/${ENV_NAME} backup repo because the stale lock left from previous operation is removed during the integrity check and backup rotation. Manual check of backup repo integrity and consistency is highly desired." jem api apicall -s --connect-timeout 3 --max-time 15 [API_DOMAIN]/1.0/message/email/rest/send --data-urlencode "session=$USER_SESSION" --data-urlencode "to=$USER_EMAIL" --data-urlencode "subject=$SUBJECT" --data-urlencode "body=$BODY" if [[ $? != 0 ]]; then - echo $(date) ${ENV_NAME} "Sending of e-mail notification failed" | tee -a $BACKUP_LOG_FILE; + echo $(date) ${ENV_NAME} "Sending of e-mail notification failed" | tee -a $BACKUP_LOG_FILE else - echo $(date) ${ENV_NAME} "E-mail notification is sent successfully" | tee -a $BACKUP_LOG_FILE; + echo $(date) ${ENV_NAME} "E-mail notification is sent successfully" | tee -a $BACKUP_LOG_FILE fi - elif [ -z "${CURRENT_PLATFORM_MAJOR_VERSION}" ]; then #this elif covers the case if the version is not received - echo $(date) ${ENV_NAME} "Error when checking the platform version" | tee -a $BACKUP_LOG_FILE; + elif [ -z "${CURRENT_PLATFORM_MAJOR_VERSION}" ]; then + echo $(date) ${ENV_NAME} "Error when checking the platform version" | tee -a $BACKUP_LOG_FILE else - echo $(date) ${ENV_NAME} "Email notification is not sent because this functionality is unavailable for current platform version." | tee -a $BACKUP_LOG_FILE; + echo $(date) ${ENV_NAME} "Email notification is not sent because this functionality is unavailable for current platform version." | tee -a $BACKUP_LOG_FILE fi else - echo $(date) ${ENV_NAME} "Email notification is not sent because this functionality is unavailable for current platform version." | tee -a $BACKUP_LOG_FILE; + echo $(date) ${ENV_NAME} "Email notification is not sent because this functionality is unavailable for current platform version." 
| tee -a $BACKUP_LOG_FILE fi } -function update_restic(){ +# Updates Restic to latest version or installs if missing +# Attempts self-update first, falls back to force install +# @return: none, exits on critical failure +function update_restic() { if which restic; then restic self-update || forceInstallUpdateRestic else @@ -72,15 +139,19 @@ function update_restic(){ fi } -function check_backup_repo(){ +# Checks and prepares backup repository +# Initializes new repo if needed +# Handles stale locks and integrity verification +# @return: exits with code 1 on failure +function check_backup_repo() { [ -d /opt/backup/${ENV_NAME} ] || mkdir -p /opt/backup/${ENV_NAME} - export FILES_COUNT=$(ls -n /opt/backup/${ENV_NAME}|awk '{print $2}'); - if [ "${FILES_COUNT}" != "0" ]; then - echo $(date) ${ENV_NAME} "Checking the backup repository integrity and consistency" | tee -a $BACKUP_LOG_FILE; - if [[ $(ls -A /opt/backup/${ENV_NAME}/locks) ]] ; then - echo $(date) ${ENV_NAME} "Backup repository has a slate lock, removing" | tee -a $BACKUP_LOG_FILE; + export FILES_COUNT=$(ls -n /opt/backup/${ENV_NAME} | awk '{print $2}') + if [ "${FILES_COUNT}" != "0" ]; then + echo $(date) ${ENV_NAME} "Checking the backup repository integrity and consistency" | tee -a $BACKUP_LOG_FILE + if [[ $(ls -A /opt/backup/${ENV_NAME}/locks) ]]; then + echo $(date) ${ENV_NAME} "Backup repository has a stale lock, removing" | tee -a $BACKUP_LOG_FILE GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} unlock - sendEmailNotification + sendEmailNotification fi GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -q -r /opt/backup/${ENV_NAME} check --read-data-subset=5% || { echo "Backup repository integrity check failed."; exit 1; } else @@ -88,109 +159,291 @@ function check_backup_repo(){ fi } -function rotate_snapshots(){ +# Manages backup rotation according to retention policy +# Removes old backups keeping specified count +# Handles stale locks during rotation +# @return: exits with code 1 on failure +function rotate_snapshots() { echo $(date) ${ENV_NAME} "Rotating snapshots by keeping the last ${BACKUP_COUNT}" | tee -a ${BACKUP_LOG_FILE} - if [[ $(ls -A /opt/backup/${ENV_NAME}/locks) ]] ; then - echo $(date) ${ENV_NAME} "Backup repository has a slate lock, removing" | tee -a $BACKUP_LOG_FILE; + if [[ $(ls -A /opt/backup/${ENV_NAME}/locks) ]]; then + echo $(date) ${ENV_NAME} "Backup repository has a stale lock, removing" | tee -a $BACKUP_LOG_FILE GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} unlock - sendEmailNotification + sendEmailNotification fi { GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic forget -q -r /opt/backup/${ENV_NAME} --keep-last ${BACKUP_COUNT} --prune | tee -a $BACKUP_LOG_FILE; } || { echo "Backup rotation failed."; exit 1; } } -function create_snapshot(){ - source /etc/jelastic/metainf.conf - echo $(date) ${ENV_NAME} "Saving the DB dump to ${DUMP_NAME} snapshot" | tee -a ${BACKUP_LOG_FILE} +# Retrieves the current binlog file name from MySQL +# @return: binlog file name +function get_binlog_file() { + local binlog_file=$(${CLIENT_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW MASTER STATUS" | awk 'NR==2 {print $1}') + echo $(date) ${ENV_NAME} "Getting the binlog_file: ${binlog_file}" >> ${BACKUP_LOG_FILE} + echo $binlog_file +} + +# Retrieves the current binlog position from MySQL +# @return: binlog position +function get_binlog_position() { + local binlog_pos=$(${CLIENT_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW MASTER 
STATUS" | awk 'NR==2 {print $2}') + echo $(date) ${ENV_NAME} "Getting the binlog_position: ${binlog_pos}" >> ${BACKUP_LOG_FILE} + echo $binlog_pos +} + +# Creates a snapshot of the current database state +# Handles different database types and PITR +# @return: none, exits on critical failure +function create_snapshot() { + source /etc/jelastic/metainf.conf DUMP_NAME=$(date "+%F_%H%M%S_%Z"-${BACKUP_TYPE}\($COMPUTE_TYPE-$COMPUTE_TYPE_FULL_VERSION$REDIS_TYPE$MONGO_TYPE\)) + echo $(date) ${ENV_NAME} "Saving the DB dump to ${DUMP_NAME} snapshot" | tee -a ${BACKUP_LOG_FILE} if [ "$COMPUTE_TYPE" == "redis" ]; then - RDB_TO_BACKUP=$(ls -d /tmp/* |grep redis-dump.*); - GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ${RDB_TO_BACKUP} | tee -a ${BACKUP_LOG_FILE}; + RDB_TO_BACKUP=$(ls -d /tmp/* | grep redis-dump.*) + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ${RDB_TO_BACKUP} | tee -a ${BACKUP_LOG_FILE} elif [ "$COMPUTE_TYPE" == "mongodb" ]; then - echo $(date) ${ENV_NAME} "Saving the DB dump to ${DUMP_NAME} snapshot" | tee -a ${BACKUP_LOG_FILE} - GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ~/dump | tee -a ${BACKUP_LOG_FILE} + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ${DUMP_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} + elif [ "$COMPUTE_TYPE" == "postgresql" ] && [ "$PITR" == "true" ]; then + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} \ + --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" \ + --tag "PITR" \ + --tag "$(cat ${DUMP_BACKUP_DIR}/wal_location)" \ + ${DUMP_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} else - if [ -f "/root/db_backup.sql.gz" ]; then - DB_BACKUP_NAME="db_backup.sql.gz" - else - DB_BACKUP_NAME="db_backup.sql" + if [ "$PITR" == "true" ]; then + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" --tag "PITR" --tag "$(get_binlog_file)" --tag "$(get_binlog_position)" ${DUMP_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} + else + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ${DUMP_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} fi - GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${DUMP_NAME} ${BACKUP_ADDON_COMMIT_ID} ${BACKUP_TYPE}" ~/${DB_BACKUP_NAME} | tee -a ${BACKUP_LOG_FILE} fi } -function backup(){ - echo $$ > /var/run/${ENV_NAME}_backup.pid - echo $(date) ${ENV_NAME} "Creating the ${BACKUP_TYPE} backup (using the backup addon with commit id ${BACKUP_ADDON_COMMIT_ID}) on storage node ${NODE_ID}" | tee -a ${BACKUP_LOG_FILE} - source /etc/jelastic/metainf.conf; - echo $(date) ${ENV_NAME} "Creating the DB dump" | tee -a ${BACKUP_LOG_FILE} - if [ "$COMPUTE_TYPE" == "redis" ]; then - RDB_TO_REMOVE=$(ls -d /tmp/* |grep 
redis-dump.*) - rm -f ${RDB_TO_REMOVE} - export REDISCLI_AUTH=$(cat ${REDIS_CONF_PATH} |grep '^requirepass'|awk '{print $2}'); - if [ "$REDIS_TYPE" == "-standalone" ]; then - redis-cli --rdb /tmp/redis-dump-standalone.rdb - else - export MASTERS_LIST=$(redis-cli cluster nodes|grep master|grep -v fail|awk '{print $2}'|awk -F : '{print $1}'); - for i in $MASTERS_LIST - do - redis-cli -h $i --rdb /tmp/redis-dump-cluster-$i.rdb || { echo "DB backup process failed."; exit 1; } - done - fi - elif [ "$COMPUTE_TYPE" == "postgres" ]; then - PGPASSWORD="${DBPASSWD}" psql -U ${DBUSER} -d postgres -c "SELECT current_user" || { echo "DB credentials specified in add-on settings are incorrect!"; exit 1; } - PGPASSWORD="${DBPASSWD}" pg_dumpall -U ${DBUSER} --clean --if-exist | gzip > db_backup.sql.gz || { echo "DB backup process failed."; exit 1; } - elif [ "$COMPUTE_TYPE" == "mongodb" ]; then - if grep -q ^[[:space:]]*replSetName /etc/mongod.conf; then - RS_NAME=$(grep ^[[:space:]]*replSetName /etc/mongod.conf|awk '{print $2}'); - RS_SUFFIX="/?replicaSet=${RS_NAME}&readPreference=nearest"; - else - RS_SUFFIX=""; - fi - TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) - if [ "$TLS_MODE" == "requireTLS" ]; then - SSL_TLS_OPTIONS="--ssl --sslPEMKeyFile=/var/lib/jelastic/keys/SSL-TLS/client/client.pem --sslCAFile=/var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsInsecure" - else - SSL_TLS_OPTIONS="" - fi - mongodump ${SSL_TLS_OPTIONS} --uri="mongodb://${DBUSER}:${DBPASSWD}@localhost${RS_SUFFIX}" +# Retrieves the latest PITR snapshot ID +# @return: latest PITR snapshot ID +function get_latest_pitr_snapshot_id() { + local latest_pitr_snapshot_id=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --tag "PITR" --latest 1 --json | jq -r '.[0].short_id') + echo $(date) ${ENV_NAME} "Getting the latest PITR snapshot: ${latest_pitr_snapshot_id}" >> ${BACKUP_LOG_FILE} + echo ${latest_pitr_snapshot_id} +} + +# Retrieves the dump name by snapshot ID +# @param: snapshot_id - the snapshot ID to query +# @return: dump name +function get_dump_name_by_snapshot_id() { + local snapshot_id="$1" + local dump_name=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --json | jq -r --arg id "$snapshot_id" '.[] | select(.short_id == $id) | .tags[0]') + echo $(date) ${ENV_NAME} "Getting the dump name: ${dump_name}" >> ${BACKUP_LOG_FILE} + echo ${dump_name} +} + +# Retrieves the binlog file by snapshot ID +# @param: snapshot_id - the snapshot ID to query +# @return: binlog file name +function get_binlog_file_by_snapshot_id() { + local snapshot_id="$1" + local binlog_file=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --json | jq -r --arg id "$snapshot_id" '.[] | select(.short_id == $id) | .tags[2]') + echo $(date) ${ENV_NAME} "Getting the start binlog file name: ${binlog_file}" >> ${BACKUP_LOG_FILE} + echo ${binlog_file} +} + +# Retrieves the current PostgreSQL WAL location +# @return: WAL location +function get_pg_wal_location() { + local wal_location=$(PGPASSWORD="${DBPASSWD}" psql -U ${DBUSER} -d postgres -t -c "SELECT pg_current_wal_lsn();" | tr -d ' ') + echo $(date) ${ENV_NAME} "Getting the WAL location: ${wal_location}" >> ${BACKUP_LOG_FILE} + echo $wal_location +} + +# Backs up PostgreSQL WAL files +# Copies WAL files from archive directory to backup directory +# @return: none, exits on critical failure +function backup_postgres_wal() { + local wal_dir="/var/lib/postgresql/wal_archive" + echo $(date) ${ENV_NAME} "Backing up 
PostgreSQL WAL files..." | tee -a $BACKUP_LOG_FILE + rm -rf ${BINLOGS_BACKUP_DIR} && mkdir -p ${BINLOGS_BACKUP_DIR} + + # Copy WAL files from archive directory + if [ -d "$wal_dir" ]; then + cp -r $wal_dir/* ${BINLOGS_BACKUP_DIR}/ || { echo "WAL files backup failed."; exit 1; } else - SERVER_IP_ADDR=$(ip a | grep -A1 venet0 | grep inet | awk '{print $2}'| sed 's/\/[0-9]*//g' | tail -n 1) - [ -n "${SERVER_IP_ADDR}" ] || SERVER_IP_ADDR="localhost" - if which mariadb 2>/dev/null; then - CLIENT_APP="mariadb" - else - CLIENT_APP="mysql" + echo "Warning: WAL archive directory does not exist" | tee -a $BACKUP_LOG_FILE + fi + echo "PostgreSQL WAL files backup completed." | tee -a $BACKUP_LOG_FILE +} + +# Creates a snapshot of PostgreSQL WAL files +# @param: snapshot_name - the name of the snapshot +# @return: none, exits on critical failure +function create_wal_snapshot() { + local snapshot_name="$1" + echo $(date) ${ENV_NAME} "Saving the WAL files to ${snapshot_name} snapshot" | tee -a ${BACKUP_LOG_FILE} + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${snapshot_name}" --tag "PGWAL" ${BINLOGS_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} +} + +# Backs up Redis database +# Handles both standalone and cluster modes +# @return: none, exits on critical failure +function backup_redis() { + source /etc/jelastic/metainf.conf + RDB_TO_REMOVE=$(ls -d /tmp/* | grep redis-dump.*) + rm -f ${RDB_TO_REMOVE} + export REDISCLI_AUTH=$(cat ${REDIS_CONF_PATH} | grep '^requirepass' | awk '{print $2}') + if [ "$REDIS_TYPE" == "-standalone" ]; then + redis-cli --rdb /tmp/redis-dump-standalone.rdb + else + export MASTERS_LIST=$(redis-cli cluster nodes | grep master | grep -v fail | awk '{print $2}' | awk -F : '{print $1}') + for i in $MASTERS_LIST; do + redis-cli -h $i --rdb /tmp/redis-dump-cluster-$i.rdb || { echo "DB backup process failed."; exit 1; } + done + fi +} + +# Backs up PostgreSQL database +# Handles both regular and PITR backups +# @return: none, exits on critical failure +function backup_postgres() { + PGPASSWORD="${DBPASSWD}" psql -U ${DBUSER} -d postgres -c "SELECT current_user" || { + echo "DB credentials specified in add-on settings are incorrect!" | tee -a $BACKUP_LOG_FILE + exit 1 + } + + if [ "$PITR" == "true" ]; then + # Get current WAL location before backup + local wal_location=$(get_pg_wal_location) + + # Perform backup with WAL position + PGPASSWORD="${DBPASSWD}" pg_dumpall -U webadmin --clean --if-exist > ${DUMP_BACKUP_DIR}/db_backup.sql || { + echo "DB backup process failed." | tee -a $BACKUP_LOG_FILE + exit 1 + } + echo $wal_location > ${DUMP_BACKUP_DIR}/wal_location + + # Get latest PITR snapshot and backup WAL files if exists + local latest_pitr_snapshot_id=$(get_latest_pitr_snapshot_id) + if [ "x$latest_pitr_snapshot_id" != "xnull" ]; then + local dump_name=$(get_dump_name_by_snapshot_id "$latest_pitr_snapshot_id") + backup_postgres_wal + create_wal_snapshot "${dump_name}" fi - if which mariadb-dump 2>/dev/null; then - DUMP_APP="mariadb-dump" - else - DUMP_APP="mysqldump" + else + # Regular backup without PITR + PGPASSWORD="${DBPASSWD}" pg_dumpall -U webadmin --clean --if-exist > ${DUMP_BACKUP_DIR}/db_backup.sql || { + echo "DB backup process failed." 
| tee -a $BACKUP_LOG_FILE + exit 1 + } + fi +} + +# Backs up MongoDB database +# Handles both standalone and replica set modes +# @return: none, exits on critical failure +function backup_mongodb() { + if grep -q ^[[:space:]]*replSetName /etc/mongod.conf; then + RS_NAME=$(grep ^[[:space:]]*replSetName /etc/mongod.conf | awk '{print $2}') + RS_SUFFIX="/?replicaSet=${RS_NAME}&readPreference=nearest" + else + RS_SUFFIX="" + fi + TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) + if [ "$TLS_MODE" == "requireTLS" ]; then + SSL_TLS_OPTIONS="--ssl --sslPEMKeyFile=/var/lib/jelastic/keys/SSL-TLS/client/client.pem --sslCAFile=/var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsInsecure" + else + SSL_TLS_OPTIONS="" + fi + mongodump ${SSL_TLS_OPTIONS} --uri="mongodb://${DBUSER}:${DBPASSWD}@localhost${RS_SUFFIX}" --out="${DUMP_BACKUP_DIR}" +} + +# Backs up MySQL database dump +# Handles both regular and PITR backups +# @return: none, exits on critical failure +function backup_mysql_dump() { + ${CLIENT_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW COLUMNS FROM user" || { echo "DB credentials specified in add-on settings are incorrect!"; exit 1; } + if [ "$PITR" == "true" ]; then + ${DUMP_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} --master-data=2 --flush-logs --force --single-transaction --quote-names --opt --all-databases > ${DUMP_BACKUP_DIR}/${SQL_DUMP_NAME} || { echo "DB backup process failed."; exit 1; } + else + ${DUMP_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} --force --single-transaction --quote-names --opt --all-databases > ${DUMP_BACKUP_DIR}/${SQL_DUMP_NAME} || { echo "DB backup process failed."; exit 1; } + fi +} + +# Backs up MySQL binary logs +# Copies binary logs from specified start file +# @param: start_binlog_file - the starting binlog file +# @return: none, exits on critical failure +function backup_mysql_binlogs() { + local start_binlog_file="$1" + echo $(date) ${ENV_NAME} "Backing up MySQL binary logs from $start_binlog_file..." | tee -a $BACKUP_LOG_FILE + rm -rf ${BINLOGS_BACKUP_DIR} && mkdir -p ${BINLOGS_BACKUP_DIR} + find /var/lib/mysql -type f -name "mysql-bin.*" -newer /var/lib/mysql/${start_binlog_file} -o -name "${start_binlog_file}" -exec cp {} ${BINLOGS_BACKUP_DIR} \; + echo "MySQL binary logs backup completed." | tee -a $BACKUP_LOG_FILE +} + +# Performs Point-In-Time Recovery (PITR) backup for MySQL +# Handles both dump and binary logs backup +# @return: none, exits on critical failure +function backup_mysql_pitr() { + echo $(date) ${ENV_NAME} "Starting Point-In-Time Recovery (PITR) backup..." | tee -a $BACKUP_LOG_FILE + backup_mysql_dump + backup_mysql_binlogs + echo $(date) ${ENV_NAME} "PITR backup completed." | tee -a $BACKUP_LOG_FILE +} + +# Creates a snapshot of MySQL binary logs +# @param: snapshot_name - the name of the snapshot +# @return: none, exits on critical failure +function create_binlog_snapshot() { + local snapshot_name="$1" + echo $(date) ${ENV_NAME} "Saving the BINLOGS to ${snapshot_name} snapshot" | tee -a ${BACKUP_LOG_FILE} + GOGC=20 RESTIC_COMPRESSION=off RESTIC_PACK_SIZE=8 RESTIC_PASSWORD=${ENV_NAME} restic backup -q -r /opt/backup/${ENV_NAME} --tag "${snapshot_name}" --tag "BINLOGS" ${BINLOGS_BACKUP_DIR} | tee -a ${BACKUP_LOG_FILE} +} + +# Backs up MySQL database +# Handles both regular and PITR backups +# @return: none, exits on critical failure +function backup_mysql() { + local exit_code=0 + backup_mysql_dump || exit_code=$? 
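The PITR bookkeeping in the functions above lives entirely in restic tags: tags[0] carries the dump name (plus commit id and backup type), tags[1] is the literal PITR marker, and for MySQL-family dumps tags[2]/tags[3] hold the binlog file and position captured at dump time (for PostgreSQL the third tag stores the WAL location instead). A minimal inspection sketch, assuming a placeholder environment name and the repository layout used by the add-on:

ENV_NAME="env-1234567"                                 # placeholder
export RESTIC_PASSWORD="$ENV_NAME"
# Print short id, dump name tag and the recorded coordinates for every PITR base dump.
restic -r "/opt/backup/${ENV_NAME}" snapshots --tag PITR --json \
  | jq -r '.[] | [.short_id, .tags[0], (.tags[2] // "-"), (.tags[3] // "-")] | @tsv'
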
+ + if [ $exit_code -ne 0 ]; then + echo "Error: MySQL dump failed" | tee -a "$BACKUP_LOG_FILE" + return $exit_code + fi + + if [ "$PITR" == "true" ]; then + local latest_pitr_snapshot_id + latest_pitr_snapshot_id=$(get_latest_pitr_snapshot_id) + + if [ "x$latest_pitr_snapshot_id" != "xnull" ]; then + local dump_name start_binlog_file + dump_name=$(get_dump_name_by_snapshot_id "$latest_pitr_snapshot_id") + start_binlog_file=$(get_binlog_file_by_snapshot_id "$latest_pitr_snapshot_id") + + backup_mysql_binlogs "$start_binlog_file" + create_binlog_snapshot "${dump_name}" fi - ${CLIENT_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW COLUMNS FROM user" || { echo "DB credentials specified in add-on settings are incorrect!"; exit 1; } - ${DUMP_APP} -h ${SERVER_IP_ADDR} -u ${DBUSER} -p${DBPASSWD} --force --single-transaction --quote-names --opt --all-databases | gzip > db_backup.sql.gz || { echo "DB backup process failed."; exit 1; } - fi - rm -f /var/run/${ENV_NAME}_backup.pid -} - -case "$1" in - backup) - $1 - ;; - check_backup_repo) - $1 - ;; - rotate_snapshots) - $1 - ;; - create_snapshot) - $1 - ;; - update_restic) - $1 - ;; - *) - echo "Usage: $0 {backup|check_backup_repo|rotate_snapshots|create_snapshot|update_restic}" - exit 2 -esac - -exit $? + fi +} + +# Main function to orchestrate the backup process +# Handles repository checks, snapshot creation, and rotation +# @return: none, exits on critical failure +main() { + echo "$(date) ${ENV_NAME} Starting backup process..." | tee -a "${BACKUP_LOG_FILE}" + + check_backup_repo + rotate_snapshots + source /etc/jelastic/metainf.conf + + echo "$(date) ${ENV_NAME} Creating DB dump..." | tee -a "${BACKUP_LOG_FILE}" + + case "$COMPUTE_TYPE" in + redis) backup_redis ;; + mongodb) backup_mongodb ;; + postgres) backup_postgres ;; + *) backup_mysql ;; + esac + + create_snapshot + rotate_snapshots + check_backup_repo +} + +# Execute main function +main "$@" diff --git a/scripts/backup-main.js b/scripts/backup-main.js index a9c17f2..16b4e57 100644 --- a/scripts/backup-main.js +++ b/scripts/backup-main.js @@ -17,6 +17,7 @@ function run() { backupCount : "${backupCount}", storageEnv : "${storageEnv}", isAlwaysUmount : "${isAlwaysUmount}", + isPitr : "${isPitr}", nodeGroup : "${nodeGroup}", dbuser : "${dbuser}", dbpass : "${dbpass}" diff --git a/scripts/backup-manager.js b/scripts/backup-manager.js index 61c42af..6671e7a 100644 --- a/scripts/backup-manager.js +++ b/scripts/backup-manager.js @@ -11,6 +11,7 @@ function BackupManager(config) { * envAppid : {String} * storageNodeId : {String} * isAlwaysUmount : {Boolean} + * isPitr : {Boolean} * backupExecNode : {String} * [nodeGroup] : {String} * [storageEnv] : {String} @@ -124,6 +125,7 @@ function BackupManager(config) { baseUrl : config.baseUrl, backupType : backupType, isAlwaysUmount : config.isAlwaysUmount, + isPitr : config.isPitr, dbuser: config.dbuser, dbpass: config.dbpass, session : session, @@ -139,32 +141,19 @@ function BackupManager(config) { [ me.addMountForBackup, config.isAlwaysUmount ], [ me.cmd, [ '[ -f /root/%(envName)_backup-logic.sh ] && rm -f /root/%(envName)_backup-logic.sh || true', - 'wget -O /root/%(envName)_backup-logic.sh %(baseUrl)/scripts/backup-logic.sh' + 'wget -O /root/%(envName)_backup-logic.sh %(baseUrl)/scripts/backup-logic.sh', + '[ -f /root/installUpdateRestic ] && rm -f /root/installUpdateRestic || true', + 'wget -O /root/installUpdateRestic %(baseUrl)/scripts/installUpdateRestic' ], { nodeId : config.backupExecNode, envName : config.envName, 
baseUrl : config.baseUrl }], [me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh update_restic %(baseUrl)' + 'bash /root/installUpdateRestic' ], backupCallParams ], [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh check_backup_repo %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email)' - ], backupCallParams ], - [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh rotate_snapshots %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email)' - ], backupCallParams ], - [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh backup %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass)' - ], backupCallParams ], - [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh create_snapshot %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email)' - ], backupCallParams ], - [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh rotate_snapshots %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email)' - ], backupCallParams ], - [ me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh check_backup_repo %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email)' + 'bash /root/%(envName)_backup-logic.sh %(baseUrl) %(backupType) %(nodeId) %(backupLogFile) %(envName) %(backupCount) %(dbuser) %(dbpass) %(session) %(email) %(isPitr)' ], backupCallParams ], [ me.removeMounts, config.isAlwaysUmount ] ]); @@ -179,30 +168,24 @@ function BackupManager(config) { [ me.removeMounts, config.isAlwaysUmount], [ me.addMountForRestore, config.isAlwaysUmount ], [ me.cmd, [ - '[ -f /root/%(envName)_backup-logic.sh ] && rm -f /root/%(envName)_backup-logic.sh || true', - 'wget -O /root/%(envName)_backup-logic.sh %(baseUrl)/scripts/backup-logic.sh' + '[ -f /root/%(envName)_restore-logic.sh ] && rm -f /root/%(envName)_restore-logic.sh || true', + 'wget -O /root/%(envName)_restore-logic.sh %(baseUrl)/scripts/restore-logic.sh', + '[ -f /root/installUpdateRestic ] && rm -f /root/installUpdateRestic || true', + 'wget -O /root/installUpdateRestic %(baseUrl)/scripts/installUpdateRestic' ], { nodeId : config.backupExecNode, envName : config.envName, baseUrl : config.baseUrl }], [me.cmd, [ - 'bash /root/%(envName)_backup-logic.sh update_restic %(baseUrl)' + 'bash /root/installUpdateRestic' ], { nodeId : config.backupExecNode, envName : config.envName, baseUrl : config.baseUrl }], [ me.cmd, [ - 'SNAPSHOT_ID=$(RESTIC_PASSWORD=$(cat /root/.backupedenv) restic -r /opt/backup/$(cat /root/.backupedenv) snapshots|grep $(cat /root/.backupid)|awk \'{print $1}\')', - '[ -n "${SNAPSHOT_ID}" ] || false', - 'source /etc/jelastic/metainf.conf', - 'RESTIC_PASSWORD=$(cat /root/.backupedenv) GOGC=20 restic -r /opt/backup/$(cat /root/.backupedenv) restore ${SNAPSHOT_ID} --target /', - 'if [ "$COMPUTE_TYPE" == "redis" ]; then rm -f /root/redis-restore.sh; wget -O /root/redis-restore.sh %(baseUrl)/scripts/redis-restore.sh; chmod +x /root/redis-restore.sh; bash /root/redis-restore.sh 2> >(tee -a %(restoreLogFile) >&2); else true; fi', - 'if [ "$COMPUTE_TYPE" == "postgres" ]; then rm -f /root/postgres-restore.sh; wget -O /root/postgres-restore.sh %(baseUrl)/scripts/postgres-restore.sh; chmod +x /root/postgres-restore.sh; bash /root/postgres-restore.sh %(dbuser) %(dbpass) 2> >(tee -a %(restoreLogFile) >&2); else true; fi', 
- 'if [ "$COMPUTE_TYPE" == "mariadb" ]; then rm -f /root/mariadb-restore.sh; wget -O /root/mariadb-restore.sh %(baseUrl)/scripts/mariadb-restore.sh; chmod +x /root/mariadb-restore.sh; bash /root/mariadb-restore.sh %(dbuser) %(dbpass) 2> >(tee -a %(restoreLogFile) >&2); else true; fi', - 'if [ "$COMPUTE_TYPE" == "mysql" ] || [ "$COMPUTE_TYPE" == "percona" ]; then mysql --silent -h localhost -u %(dbuser) -p%(dbpass) --force < /root/db_backup.sql 2> >(tee -a %(restoreLogFile) >&2); else true; fi', - 'if [ "$COMPUTE_TYPE" == "mongodb" ]; then rm -f /root/mongo-restore.sh; wget -O /root/mongo-restore.sh %(baseUrl)/scripts/mongo-restore.sh; chmod +x /root/mongo-restore.sh; bash /root/mongo-restore.sh %(dbuser) %(dbpass) 2> >(tee -a %(restoreLogFile) >&2); else true; fi', + 'bash /root/%(envName)_restore-logic.sh %(dbuser) %(dbpass) %(restoreLogFile) %(isPitr)', 'jem service restart', 'if [ -n "$REPLICA_PSWD" ] && [ -n "$REPLICA_USER" ] ; then wget %(baseUrl)/scripts/setupUser.sh -O /root/setupUser.sh &>> /var/log/run.log; bash /root/setupUser.sh ${REPLICA_USER} ${REPLICA_PSWD} %(userEmail) %(envName) %(userSession); fi', 'echo $(date) %(envName) snapshot $(cat /root/.backupid) restored successfully| tee -a %(restoreLogFile)' @@ -210,6 +193,7 @@ function BackupManager(config) { nodeId : config.backupExecNode, envName : config.envName, isAlwaysUmount : config.isAlwaysUmount, + isPitr : config.isPitr, baseUrl : config.baseUrl, dbuser: config.dbuser, dbpass: config.dbpass, diff --git a/scripts/checkCredentials.sh b/scripts/checkCredentials.sh index ab58258..8d0133f 100644 --- a/scripts/checkCredentials.sh +++ b/scripts/checkCredentials.sh @@ -3,27 +3,29 @@ DBUSER=$2 DBPASSWD=$3 -function checkCredentials(){ +# Function to check database credentials +function checkCredentials() { source /etc/jelastic/metainf.conf jem service start >> /dev/null if [ "$COMPUTE_TYPE" == "postgres" ]; then - PGPASSWORD="${DBPASSWD}" psql -U ${DBUSER} -d postgres -c "SELECT current_user" || exit 1; + PGPASSWORD="${DBPASSWD}" psql -U ${DBUSER} -d postgres -c "SELECT current_user" || exit 1 elif [ "$COMPUTE_TYPE" == "mariadb" ] || [ "$COMPUTE_TYPE" == "mysql" ] || [ "$COMPUTE_TYPE" == "percona" ]; then - mysql -h localhost -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW COLUMNS FROM user" 1>/dev/null || exit 1; + mysql -h localhost -u ${DBUSER} -p${DBPASSWD} mysql --execute="SHOW COLUMNS FROM user" 1>/dev/null || exit 1 elif [ "$COMPUTE_TYPE" == "mongodb" ]; then - which mongo && CLIENT="mongo" || CLIENT="mongosh" - TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) + which mongo && CLIENT="mongo" || CLIENT="mongosh" + TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) if [ "$TLS_MODE" == "requireTLS" ]; then - SSL_TLS_OPTIONS="--tls --tlsCertificateKeyFile /var/lib/jelastic/keys/SSL-TLS/client/client.pem --tlsCAFile /var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsAllowInvalidHostnames" + SSL_TLS_OPTIONS="--tls --tlsCertificateKeyFile /var/lib/jelastic/keys/SSL-TLS/client/client.pem --tlsCAFile /var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsAllowInvalidHostnames" else - SSL_TLS_OPTIONS="" + SSL_TLS_OPTIONS="" fi echo "show dbs" | ${CLIENT} --shell ${SSL_TLS_OPTIONS} --username ${DBUSER} --password ${DBPASSWD} --authenticationDatabase admin "mongodb://localhost:27017" else - true; + true fi } +# Main script logic if [ "x$1" == "xcheckCredentials" ]; then checkCredentials fi diff --git a/scripts/configOnBeforeInit.js b/scripts/configOnBeforeInit.js index a049037..a7e26a6 100644 --- 
a/scripts/configOnBeforeInit.js +++ b/scripts/configOnBeforeInit.js @@ -127,6 +127,7 @@ if (scheduleType == '1') { jps.settings.main.fields[2].default = '${settings.backupCount}'; jps.settings.main.fields[3].value = ${settings.isAlwaysUmount}; +jps.settings.main.fields[4].value = ${settings.isPitr}; jps.settings.main.fields[jps.settings.main.fields.length - 2].default = '${settings.dbuser}'; jps.settings.main.fields[jps.settings.main.fields.length - 1].default = '${settings.dbpass}'; diff --git a/scripts/create-backup-main-script.js b/scripts/create-backup-main-script.js index 58766ba..426861c 100644 --- a/scripts/create-backup-main-script.js +++ b/scripts/create-backup-main-script.js @@ -10,6 +10,7 @@ var scriptName = getParam("scriptName", "${env.envName}-wp-backup"), backupExecNode = getParam("backupExecNode"), storageEnv = getParam("storageEnv"), isAlwaysUmount = getParam("isAlwaysUmount"), + isPitr = getParam("isPitr"), nodeGroup = getParam("nodeGroup"), dbuser = getParam("dbuser"), dbpass = getParam("dbpass"); @@ -28,6 +29,7 @@ function run() { backupExecNode : backupExecNode, storageEnv : storageEnv, isAlwaysUmount : isAlwaysUmount, + isPitr : isPitr, nodeGroup : nodeGroup, dbuser : dbuser, dbpass : dbpass diff --git a/scripts/getBackupsAllEnvs.sh b/scripts/getBackupsAllEnvs.sh new file mode 100644 index 0000000..1ca5499 --- /dev/null +++ b/scripts/getBackupsAllEnvs.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +restic self-update &>/dev/null || true + +OUTPUT_JSON="{\"result\": 0, \"envs\": {" + +for i in $(ls /data); do + SNAPSHOTS_JSON=$(RESTIC_PASSWORD="$i" restic -r /data/$i snapshots --json) + DIRECTORY_LIST=$(echo "$SNAPSHOTS_JSON" | jq -r '[.[] | .tags[0] | split(" ")[0]] | map("\"" + . + "\"") | join(",")') + + SERVER_VERSION=$(echo "$SNAPSHOTS_JSON" | jq -r '[.[] | .tags[0] | capture("\\((?[^)]+)").server_version] | unique | .[0]') + + SERVER=$(echo "$SERVER_VERSION" | cut -d'-' -f1) + VERSION=$(echo "$SERVER_VERSION" | cut -d'-' -f2-) + + if [ -n "$DIRECTORY_LIST" ]; then + OUTPUT_JSON="${OUTPUT_JSON}\"${i}\": { \"server\": \"${SERVER}\", \"version\": \"${VERSION}\", \"backups\": [${DIRECTORY_LIST}] }," + else + OUTPUT_JSON="${OUTPUT_JSON}\"${i}\": { \"server\": \"${SERVER}\", \"version\": \"${VERSION}\", \"backups\": [] }," + fi +done + +OUTPUT_JSON=${OUTPUT_JSON::-1} +OUTPUT_JSON="${OUTPUT_JSON}}}" + +echo "$OUTPUT_JSON" \ No newline at end of file diff --git a/scripts/getBackupsAllEnvsJSON.sh b/scripts/getBackupsAllEnvsJSON.sh new file mode 100644 index 0000000..032e2eb --- /dev/null +++ b/scripts/getBackupsAllEnvsJSON.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +restic self-update &>/dev/null || true + +OUTPUT_JSON="{\"result\": 0, \"envs\": {" + +for i in $(ls /data); do + + PITR=false + PITR_START_TIME=null + + SNAPSHOTS_JSON=$(RESTIC_PASSWORD="$i" restic -r /data/$i snapshots --json | jq 'sort_by(.time) | reverse') + + DIRECTORY_LIST=$(echo "$SNAPSHOTS_JSON" | jq -r '[.[] | select(.tags | index("BINLOGS") | not) | .tags[0] | split(" ")[0]] | map("\"" + . 
+ "\"") | join(",")') + + SERVER_VERSION=$(echo "$SNAPSHOTS_JSON" | jq -r '[.[] | .tags[0] | capture("\\((?[^)]+)").server_version] | unique | .[0]') + + SERVER=$(echo "$SERVER_VERSION" | cut -d'-' -f1) + VERSION=$(echo "$SERVER_VERSION" | cut -d'-' -f2-) + + if [[ "$SERVER" == "mariadb" || "$SERVER" == "postgres" || "$SERVER" == "mysql" ]]; then + FIRST_SNAPSHOT=$(echo "$SNAPSHOTS_JSON" | jq '.[0]') + + if echo "$FIRST_SNAPSHOT" | jq -e '.tags | index("PITR")' &>/dev/null; then + PITR=true + FIRST_TAG=$(echo "$FIRST_SNAPSHOT" | jq -r '.tags[0]') + PITR_START_TIME=$(echo "$FIRST_TAG" | grep -oE '^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{6}_UTC') + + SNAPSHOT_COUNT=$(echo "$SNAPSHOTS_JSON" | jq 'length') + for (( j=1; j < SNAPSHOT_COUNT-2; j+=2 )); do + SECOND_TAG=$(echo "$SNAPSHOTS_JSON" | jq -r ".[$j].tags[0]") + THIRD_TAG=$(echo "$SNAPSHOTS_JSON" | jq -r ".[$((j+1))].tags[0]") + + if [[ "$SECOND_TAG" == "$THIRD_TAG" ]]; then + PITR_START_TIME=$(echo "$SECOND_TAG" | grep -oE '^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{6}_UTC') + else + break + fi + done + fi + fi + + PITR_START_TIME=$(echo "$PITR_START_TIME" | jq -R) + + if [ -n "$DIRECTORY_LIST" ]; then + OUTPUT_JSON="${OUTPUT_JSON}\"${i}\": { \"server\": \"${SERVER}\", \"version\": \"${VERSION}\", \"pitr\": ${PITR}, \"pitrStartTime\": ${PITR_START_TIME}, \"backups\": [${DIRECTORY_LIST}] }," + else + OUTPUT_JSON="${OUTPUT_JSON}\"${i}\": { \"server\": \"${SERVER}\", \"version\": \"${VERSION}\", \"pitr\": ${PITR}, \"pitrStartTime\": ${PITR_START_TIME}, \"backups\": [] }," + fi +done + +OUTPUT_JSON=${OUTPUT_JSON::-1} +OUTPUT_JSON="${OUTPUT_JSON}}}" + +echo "$OUTPUT_JSON" diff --git a/scripts/mariadb-restore.sh b/scripts/mariadb-restore.sh deleted file mode 100644 index 38511f4..0000000 --- a/scripts/mariadb-restore.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -source /.jelenv - -SERVER_IP_ADDR=$(ip a | grep -A1 venet0 | grep inet | awk '{print $2}'| sed 's/\/[0-9]*//g' | tail -n 1) -[ -n "${SERVER_IP_ADDR}" ] || SERVER_IP_ADDR="localhost" -if which mariadb 2>/dev/null; then - CLIENT_APP="mariadb" -else - CLIENT_APP="mysql" -fi - -# Check if db_backup.sql is compressed and decompress it -if [ -f "/root/db_backup.sql.gz" ]; then - gunzip -c /root/db_backup.sql.gz > /root/db_backup.sql -fi - -${CLIENT_APP} --silent -h ${SERVER_IP_ADDR} -u ${1} -p${2} --force < /root/db_backup.sql - -if [ -n "${SCHEME}" ] && [ x"${SCHEME}" == x"galera" ]; then - curl --silent https://raw.githubusercontent.com/jelastic-jps/mysql-cluster/refs/heads/master/addons/recovery/scripts/db-recovery.sh > /tmp/db-recovery.sh - bash /tmp/db-recovery.sh --scenario restore_galera --donor-ip ${SERVER_IP_ADDR} -fi diff --git a/scripts/mongo-restore.sh b/scripts/mongo-restore.sh deleted file mode 100644 index 7f2a69d..0000000 --- a/scripts/mongo-restore.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -if grep -q ^[[:space:]]*replSetName /etc/mongod.conf; then - export RS_NAME=$(grep ^[[:space:]]*replSetName /etc/mongod.conf|awk '{print $2}'); - export RS_SUFFIX="/?replicaSet=${RS_NAME}&readPreference=nearest"; -else - export RS_SUFFIX=""; -fi -TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) -if [ "$TLS_MODE" == "requireTLS" ]; then - SSL_TLS_OPTIONS="--ssl --sslPEMKeyFile=/var/lib/jelastic/keys/SSL-TLS/client/client.pem --sslCAFile=/var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsInsecure" -else - SSL_TLS_OPTIONS="" -fi -mongorestore ${SSL_TLS_OPTIONS} --uri="mongodb://${1}:${2}@localhost${RS_SUFFIX}" ~/dump 1>/dev/null diff --git a/scripts/pitr.sh b/scripts/pitr.sh new file mode 
100644 index 0000000..f971fba --- /dev/null +++ b/scripts/pitr.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +ACTION=$1 +DBUSER=$2 +DBPASSWD=$3 + +# Configuration file paths +PITR_CONF_MYSQL='/etc/mysql/conf.d/pitr.cnf' +PITR_CONF_PG='/etc/postgresql/12/main/postgresql.conf' +ARCHIVE_DIR_PG='/var/lib/postgresql/wal_archive' +BACKUP_DIR_PG='/var/lib/postgresql/backups' + +# Source external configuration +source /etc/jelastic/metainf.conf + +# Format compute type version +COMPUTE_TYPE_FULL_VERSION_FORMATTED=$(echo "$COMPUTE_TYPE_FULL_VERSION" | sed -E 's/^([0-9]+)\.([0-9]+)\..*$/\1.\2/' | sed 's/\.//') + +# Determine binlog expire settings based on compute type +if [[ ("$COMPUTE_TYPE" == "mysql" || "$COMPUTE_TYPE" == "percona") && "$COMPUTE_TYPE_FULL_VERSION_FORMATTED" -ge "81" ]]; then + BINLOG_EXPIRE_SETTING="binlog_expire_logs_seconds" + EXPIRY_SETTING="604800" +elif [[ "$COMPUTE_TYPE" == "mariadb" ]]; then + BINLOG_EXPIRE_SETTING="expire_logs_days" + EXPIRY_SETTING="7" +else + BINLOG_EXPIRE_SETTING="" + EXPIRY_SETTING="" +fi + +# PostgreSQL WAL archive settings +WAL_ARCHIVE_SETTING="archive_mode" +WAL_ARCHIVE_COMMAND="archive_command" +WAL_TIMEOUT_SETTING="archive_timeout" +WAL_TIMEOUT_VALUE="60" +WAL_ARCHIVE_ON="on" + +# Function to check PITR configuration for MySQL +check_pitr_mysql() { + LOG_BIN=$(mysql -u"$DBUSER" -p"$DBPASSWD" -se "SHOW VARIABLES LIKE 'log_bin';" | grep "ON") + EXPIRE_LOGS=$(mysql -u"$DBUSER" -p"$DBPASSWD" -se "SHOW VARIABLES LIKE '$BINLOG_EXPIRE_SETTING';" | awk '{ print $2 }') + if [[ -n "$LOG_BIN" && "$EXPIRE_LOGS" == "$EXPIRY_SETTING" ]]; then + echo '{"result":0}'; + return 0; + else + echo '{"result":702}'; + fi +} + +# Function to set up PITR configuration for MySQL +setup_pitr_mysql() { + check_pitr_mysql | grep -q '"result":0' + if [[ $? -eq 0 ]]; then + exit 0; + fi + + CONFIG=" +[mysqld] +log-bin=mysql-bin +$BINLOG_EXPIRE_SETTING=$EXPIRY_SETTING +" + echo "$CONFIG" > "$PITR_CONF_MYSQL" + jem service restart; +} + +# Function to check PITR configuration for PostgreSQL +check_pitr_pg() { + ARCHIVE_MODE=$(sudo -u postgres psql -U "$DBUSER" -c "SHOW $WAL_ARCHIVE_SETTING;" | grep "on") + ARCHIVE_COMMAND=$(sudo -u postgres psql -U "$DBUSER" -c "SHOW $WAL_ARCHIVE_COMMAND;" | grep "$ARCHIVE_DIR_PG") + + if [[ -n "$ARCHIVE_MODE" && -n "$ARCHIVE_COMMAND" ]]; then + echo '{"result":0}'; + return 0; + else + echo '{"result":702}'; + fi +} + +# Function to set up PITR configuration for PostgreSQL +setup_pitr_pg() { + check_pitr_pg | grep -q '"result":0' + if [[ $? -eq 0 ]]; then + exit 0; + fi + + CONFIG=" +# PITR Configuration +archive_mode = on +archive_command = 'test ! -f $ARCHIVE_DIR_PG/%f && cp %p $ARCHIVE_DIR_PG/%f' +archive_timeout = $WAL_TIMEOUT_VALUE +" + + echo "$CONFIG" >> "$PITR_CONF_PG" + + if [ ! 
-d "$ARCHIVE_DIR_PG" ]; then + sudo mkdir -p "$ARCHIVE_DIR_PG" + sudo chown -R postgres:postgres "$ARCHIVE_DIR_PG" + fi + + jem service restart; + echo '{"result":0}'; +} + +# Main script logic +case $ACTION in + checkPitr) + if [[ "$COMPUTE_TYPE" == "mysql" || "$COMPUTE_TYPE" == "percona" || "$COMPUTE_TYPE" == "mariadb" ]]; then + check_pitr_mysql + elif [[ "$COMPUTE_TYPE" == "postgresql" ]]; then + check_pitr_pg + else + echo '{"result":99}'; + fi + ;; + setupPitr) + if [[ "$COMPUTE_TYPE" == "mysql" || "$COMPUTE_TYPE" == "percona" || "$COMPUTE_TYPE" == "mariadb" ]]; then + setup_pitr_mysql + elif [[ "$COMPUTE_TYPE" == "postgresql" ]]; then + setup_pitr_pg + else + echo '{"result":99}'; + fi + ;; + *) + echo "Usage: $0 {checkPitr|setupPitr} DBUSER DBPASSWD" + ;; +esac diff --git a/scripts/pitrOnBeforeInit.js b/scripts/pitrOnBeforeInit.js new file mode 100644 index 0000000..b3b2478 --- /dev/null +++ b/scripts/pitrOnBeforeInit.js @@ -0,0 +1,45 @@ +var respOut; +var pitr_conf_error_markup = "Database doesnt configured for PITR support. Please push apply for automatic configuring or close and manually configure acording to instruction and reinstall addon"; +var pitr_conf_success_markup = "Database configured for PITR support"; +var recovery_addon_markup = "Please use Database Corruption Diagnostic add-on for check after restore, and Database Recovery Add-on for fix if it is needed."; + +var checkPitrCmd = "wget " + '${baseUrl}' + "/scripts/pitr.sh -O /root/pitr.sh &>> /var/log/run.log; bash /root/pitr.sh checkPitr " + '${settings.dbuser}' + " " + '${settings.dbpass}'; +resp = jelastic.env.control.ExecCmdById('${env.envName}', session, '${nodes.sqldb.master.id}', toJSON([{ command: checkPitrCmd }]), true, "root"); +if (resp.result != 0) return resp; +respOut = resp.responses[0].out; +respOut = JSON.parse(respOut); +if (respOut.result == 702) { + settings.fields.push({ + caption: "PITR", + type: "toggle", + name: "isPitr", + tooltip: "Point in time recovery", + values: false, + hidden: false, + disabled: true + }, { + type: "displayfield", + cls: "warning", + height: 30, + hideLabel: true, + markup: pitr_conf_error_markup + }); +} else { + settings.fields.push({ + caption: "PITR", + type: "toggle", + name: "isPitr", + tooltip: "Point in time recovery", + values: false, + hidden: false, + disabled: false + }, { + type: "displayfield", + cls: "success", + height: 30, + hideLabel: true, + markup: pitr_conf_success_markup + }); +} +return settings; + diff --git a/scripts/postgres-restore.sh b/scripts/postgres-restore.sh deleted file mode 100644 index 9b451b6..0000000 --- a/scripts/postgres-restore.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -CLIENT_APP="psql" -ORIG_BACKUP="/root/db_backup.sql" -TEMP_BACKUP="/tmp/db_backup.sql" - -# Check if db_backup.sql is compressed and decompress it -if [ -f "/root/db_backup.sql.gz" ]; then - gunzip -c /root/db_backup.sql.gz > /root/db_backup.sql -fi - -[ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP" -cp "$ORIG_BACKUP" "$TEMP_BACKUP" - -sed -i -e "/^CREATE ROLE webadmin/d" \ - -e "/^CREATE ROLE postgres/d" \ - -e "/^CREATE ROLE ${1}/d" \ - -e "/^DROP ROLE IF EXISTS postgres/d" \ - -e "/^DROP ROLE IF EXISTS webadmin/d" \ - -e "/^DROP ROLE IF EXISTS ${1}/d" \ - -e "/^ALTER ROLE postgres WITH SUPERUSER/d" \ - -e "/^ALTER ROLE webadmin WITH SUPERUSER/d" \ - -e "/^ALTER ROLE ${1} WITH SUPERUSER/d" "$TEMP_BACKUP" - -PGPASSWORD=${2} ${CLIENT_APP} --no-readline -q -U ${1} -d postgres < "$TEMP_BACKUP" > /dev/null; - -[ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP" diff 
--git a/scripts/redis-restore.sh b/scripts/redis-restore.sh deleted file mode 100644 index dda7c22..0000000 --- a/scripts/redis-restore.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -REDIS_CONF_PATH=$(realpath /etc/redis.conf) -RDB_TO_RESTORE=$(ls -d /tmp/* |grep redis-dump.*); - -cd tmp; wget https://github.com/tair-opensource/RedisShake/releases/download/v3.1.11/redis-shake-linux-amd64.tar.gz; -tar -xf redis-shake-linux-amd64.tar.gz; -grep -q '^cluster-enabled yes' ${REDIS_CONF_PATH} && REDIS_TYPE="cluster" || REDIS_TYPE="standalone"; -sed -ci -e "s/^type =.*/type = '${REDIS_TYPE}'/" restore.toml; -sed -ci -e "1s/^type =.*/type = 'restore'/" restore.toml; -export REDISCLI_AUTH=$(cat ${REDIS_CONF_PATH} |grep '^requirepass'|awk '{print $2}'); -sed -ci -e "s/^password =.*/password = '${REDISCLI_AUTH}'/" restore.toml; -RESTORE_MASTER_ID=$(redis-cli cluster nodes|grep master|grep -v fail|head -n 1|awk '{print $2}'|awk -F : '{print $1}') -sed -ci -e "s/^address =.*/address = '${RESTORE_MASTER_ID}:6379'/" restore.toml; -for i in ${RDB_TO_RESTORE} -do - sed -ci -e "s|^rdb_file_path =.*|rdb_file_path = '${i}'|" restore.toml; - ./redis-shake restore.toml 1>/dev/null -done -rm -f ${RDB_TO_RESTORE} -rm -f redis-shake* sync.toml restore.toml diff --git a/scripts/restore-logic.sh b/scripts/restore-logic.sh new file mode 100644 index 0000000..7dcf21c --- /dev/null +++ b/scripts/restore-logic.sh @@ -0,0 +1,354 @@ +#!/bin/bash + +DBUSER=$1 +DBPASSWD=$2 +RESTORE_LOG_FILE=$3 + +DUMP_BACKUP_DIR=/root/backup/dump +BINLOGS_BACKUP_DIR=/root/backup/binlogs +SQL_DUMP_NAME=db_backup.sql + +#rm -rf $DUMP_BACKUP_DIR $BINLOGS_BACKUP_DIR + +if [ -f /root/.backupedenv ]; then + ENV_NAME=$(cat /root/.backupedenv) +else + echo "The /root/.backupedenv file with ENV_NAME doesn't exist." + exit 1 +fi + +if [ -f /root/.backuptime ]; then + PITR="true" + PITR_TIME=$(cat /root/.backuptime) +else + PITR="false" +fi + +if [ "$PITR" == "false" ]; then + if [ -f /root/.backupid ]; then + BACKUP_NAME=$(cat /root/.backupid) + else + echo "The /root/.backupid file with BACKUP_NAME doesn't exist." 
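The restore entry point above is driven by three marker files that the manifest's restore action writes on the database nodes; a quick sketch of the two modes with placeholder values (the add-on creates these files itself, this only shows what restore-logic.sh expects to find):

# Named-snapshot restore: source environment plus the dump name.
echo "env-1234567"                                    > /root/.backupedenv
echo "2024-05-01_134500_UTC-manual(mariadb-10.6.14)"  > /root/.backupid

# Point-in-time restore: the target timestamp replaces the dump name.
echo "env-1234567"          > /root/.backupedenv
echo "2024-05-01 13:45:00"  > /root/.backuptime
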
+ exit 1 + fi +fi + +# Finds snapshot ID before specified timestamp +function get_snapshot_id_before_time() { + local target_datetime="$1" + + while read snapshot_time snapshot_id snapshot_tag; do + snapshot_tag_date=$(echo "$snapshot_tag" | grep -oP '\d{4}-\d{2}-\d{2}_\d{6}') + snapshot_datetime=$(echo "$snapshot_tag_date" | sed 's/_/ /' | sed 's/\(....\)-\(..\)-\(..\) \(..\)\(..\)\(..\)/\1-\2-\3 \4:\5:\6/') + + snapshot_datetime_epoch=$(date -d "$snapshot_datetime" +%s) + target_epoch=$(date -d "$target_datetime" +%s) + + if [ "$snapshot_datetime_epoch" -le "$target_epoch" ]; then + result_snapshot_id="$snapshot_id" + break + fi + done < <(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --tag "PITR" --json | jq -r '.[] | "\(.time) \(.short_id) \(.tags[0])"' | sort -r) + + if [[ -z "$result_snapshot_id" ]]; then + echo "$(date) ${ENV_NAME} Error: Failed to get DB dump snapshot ID before time $target_datetime" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + echo "$(date) ${ENV_NAME} Getting DB dump snapshot ID before time $target_datetime: $result_snapshot_id" >> ${RESTORE_LOG_FILE} + echo "$result_snapshot_id"; +} + +# Retrieves snapshot ID for given backup name +function get_dump_snapshot_id_by_name(){ + local backup_name="$1" + local snapshot_id=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --json | \ + jq -r '.[] | select(.tags[0] | contains("'"$backup_name"'")) | + select((.tags | index("BINLOGS") | not)) | + .short_id' | head -n1) + + if [[ $? -ne 0 || -z "$snapshot_id" ]]; then + echo "$(date) ${ENV_NAME} Error: Failed to get DB dump snapshot ID" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + + echo "$(date) ${ENV_NAME} Getting DB dump snapshot ID: $snapshot_id" >> ${RESTORE_LOG_FILE} + echo "$snapshot_id" +} + +# Retrieves binlog snapshot ID for backup name +function get_binlog_snapshot_id_by_name(){ + local backup_name="$1" + local snapshot_id=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --tag "BINLOGS" --json | jq -r --arg backup_name "$backup_name" '.[] | select(.tags[0] | contains($backup_name)) | .short_id') + + if [[ $? -ne 0 || -z "$snapshot_id" ]]; then + echo "$(date) ${ENV_NAME} Error: Failed to get DB binlogs snapshot ID" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + + echo "$(date) ${ENV_NAME} Getting DB binlogs snapshot ID: $snapshot_id" >> ${RESTORE_LOG_FILE} + echo "$snapshot_id" +} + +function get_snapshot_name_by_id(){ + local snapshot_id="$1" + local snapshot_name=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --json | jq -r --arg id "$snapshot_id" '.[] | select(.short_id == $id) | .tags[0]') + if [[ $? -ne 0 || -z "${snapshot_name}" ]]; then + echo $(date) ${ENV_NAME} "Error: Failed to get snapshot name for $snapshot_id" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + echo $(date) ${ENV_NAME} "Getting the snapshot name: ${snapshot_name}" >> ${RESTORE_LOG_FILE} + echo ${snapshot_name} +} + +function restore_snapshot_by_id(){ + local snapshot_id="$1" + RESTIC_PASSWORD=${ENV_NAME} GOGC=20 restic -r /opt/backup/${ENV_NAME} restore ${snapshot_id} --target / + if [[ $? 
-ne 0 ]]; then + echo $(date) ${ENV_NAME} "Error: Failed to restore snapshot ID $snapshot_id" | tee -a ${RESTORE_LOG_FILE}; + exit 1 + fi + echo $(date) ${ENV_NAME} "Snapshot ID: $snapshot_id restored successfully" >> ${RESTORE_LOG_FILE} +} + +function restore_mysql_dump(){ + if which mariadb 2>/dev/null; then + CLIENT_APP="mariadb" + else + CLIENT_APP="mysql" + fi + ${CLIENT_APP} -u "${DBUSER}" -p"${DBPASSWD}" < "${DUMP_BACKUP_DIR}/${SQL_DUMP_NAME}" + if [[ $? -ne 0 ]]; then + echo "$(date) ${ENV_NAME} Error: Failed to restore MySQL dump" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + echo "$(date) ${ENV_NAME} MySQL dump restored successfully" | tee -a ${RESTORE_LOG_FILE} +} + +function apply_binlogs_until_time(){ + local stop_time="$1" + local binlog_files=($BINLOGS_BACKUP_DIR/mysql-bin.*) + + if which mariadb 2>/dev/null; then + BINLOG_APP="mariadb-binlog" + else + BINLOG_APP="mysqlbinlog" + fi + + for binlog_file in "${binlog_files[@]}"; do + ${BINLOG_APP} --stop-datetime="${stop_time}" "$binlog_file" | mysql -u "${DBUSER}" -p"${DBPASSWD}" + if [[ $? -ne 0 ]]; then + echo "$(date) ${ENV_NAME} Error: Failed to apply binlogs from $binlog_file" | tee -a ${RESTORE_LOG_FILE} + exit 1 + fi + echo "$(date) ${ENV_NAME} Applied binlogs from $binlog_file until $stop_time" >> ${RESTORE_LOG_FILE} + done +} + +function restore_mongodb(){ + dump_snapshot_id=$(get_dump_snapshot_id_by_name "${BACKUP_NAME}") + restore_snapshot_by_id "${dump_snapshot_id}" + if grep -q ^[[:space:]]*replSetName /etc/mongod.conf; then + export RS_NAME=$(grep ^[[:space:]]*replSetName /etc/mongod.conf|awk '{print $2}'); + export RS_SUFFIX="/?replicaSet=${RS_NAME}&readPreference=nearest"; + else + export RS_SUFFIX=""; + fi + TLS_MODE=$(yq eval '.net.tls.mode' /etc/mongod.conf) + if [ "$TLS_MODE" == "requireTLS" ]; then + SSL_TLS_OPTIONS="--ssl --sslPEMKeyFile=/var/lib/jelastic/keys/SSL-TLS/client/client.pem --sslCAFile=/var/lib/jelastic/keys/SSL-TLS/client/root.pem --tlsInsecure" + else + SSL_TLS_OPTIONS="" + fi + mongorestore ${SSL_TLS_OPTIONS} --uri="mongodb://${DBUSER}:${DBPASSWD}@localhost${RS_SUFFIX}" ${DUMP_BACKUP_DIR} 1>/dev/null + +} + +function restore_redis(){ + REDIS_CONF_PATH=$(realpath /etc/redis.conf) + RDB_TO_RESTORE=$(ls -d /tmp/* |grep redis-dump.*); + + dump_snapshot_id=$(get_dump_snapshot_id_by_name "${BACKUP_NAME}") + restore_snapshot_by_id "${dump_snapshot_id}" + + cd tmp; wget https://github.com/tair-opensource/RedisShake/releases/download/v3.1.11/redis-shake-linux-amd64.tar.gz; + tar -xf redis-shake-linux-amd64.tar.gz; + grep -q '^cluster-enabled yes' ${REDIS_CONF_PATH} && REDIS_TYPE="cluster" || REDIS_TYPE="standalone"; + sed -ci -e "s/^type =.*/type = '${REDIS_TYPE}'/" restore.toml; + sed -ci -e "1s/^type =.*/type = 'restore'/" restore.toml; + export REDISCLI_AUTH=$(cat ${REDIS_CONF_PATH} |grep '^requirepass'|awk '{print $2}'); + sed -ci -e "s/^password =.*/password = '${REDISCLI_AUTH}'/" restore.toml; + RESTORE_MASTER_ID=$(redis-cli cluster nodes|grep master|grep -v fail|head -n 1|awk '{print $2}'|awk -F : '{print $1}') + sed -ci -e "s/^address =.*/address = '${RESTORE_MASTER_ID}:6379'/" restore.toml; + for i in ${RDB_TO_RESTORE} + do + sed -ci -e "s|^rdb_file_path =.*|rdb_file_path = '${i}'|" restore.toml; + ./redis-shake restore.toml 1>/dev/null + done + rm -f ${RDB_TO_RESTORE} + rm -f redis-shake* sync.toml restore.toml +} + +# Function to get WAL location by snapshot ID +function get_wal_location_by_snapshot_id() { + local snapshot_id="$1" + local wal_location=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} 
+
+# Function to get WAL snapshot ID by name
+function get_wal_snapshot_id_by_name() {
+    local backup_name="$1"
+    local snapshot_id=$(GOGC=20 RESTIC_PASSWORD=${ENV_NAME} restic -r /opt/backup/${ENV_NAME} snapshots --tag "PGWAL" --json | jq -r --arg backup_name "$backup_name" '.[] | select(.tags[0] | contains($backup_name)) | .short_id')
+
+    if [[ $? -ne 0 || -z "$snapshot_id" ]]; then
+        echo "$(date) ${ENV_NAME} Error: Failed to get WAL snapshot ID" | tee -a ${RESTORE_LOG_FILE}
+        exit 1
+    fi
+
+    echo "$(date) ${ENV_NAME} Getting WAL snapshot ID: $snapshot_id" >> ${RESTORE_LOG_FILE}
+    echo "$snapshot_id"
+}
+
+# Function to restore PostgreSQL WAL files
+function restore_postgres_wal() {
+    local target_time="$1"
+    local wal_dir="/var/lib/postgresql/wal_archive"
+
+    echo "$(date) ${ENV_NAME} Restoring WAL files until $target_time..." | tee -a ${RESTORE_LOG_FILE}
+
+    # Ensure WAL archive directory exists
+    if [ ! -d "$wal_dir" ]; then
+        sudo mkdir -p "$wal_dir"
+        sudo chown postgres:postgres "$wal_dir"
+    fi
+
+    # Copy WAL files to archive directory
+    sudo cp -r ${BINLOGS_BACKUP_DIR}/* "$wal_dir/"
+    sudo chown -R postgres:postgres "$wal_dir"
+
+    # Create recovery configuration. On PostgreSQL 12+ the recovery parameters are read from
+    # postgresql.auto.conf, while recovery.signal only has to exist (its content is ignored).
+    local data_dir="/var/lib/postgresql/data"
+    sudo -u postgres bash -c "cat >> ${data_dir}/postgresql.auto.conf << EOF
+restore_command = 'cp /var/lib/postgresql/wal_archive/%f %p'
+recovery_target_time = '$target_time'
+EOF"
+    sudo -u postgres touch "${data_dir}/recovery.signal"
+
+    echo "$(date) ${ENV_NAME} WAL files restored successfully" >> ${RESTORE_LOG_FILE}
+}
+
+# Enhanced PostgreSQL restore function with PITR support
+function restore_postgres() {
+    if [ "$PITR" == "true" ]; then
+        # Get snapshot before specified time
+        dump_snapshot_id=$(get_snapshot_id_before_time "${PITR_TIME}")
+        dump_snapshot_name=$(get_snapshot_name_by_id "${dump_snapshot_id}")
+
+        # Get associated WAL snapshot
+        wal_snapshot_id=$(get_wal_snapshot_id_by_name "${dump_snapshot_name}")
+
+        # Restore main backup
+        restore_snapshot_by_id "${dump_snapshot_id}"
+
+        # Process dump file
+        local ORIG_BACKUP="${DUMP_BACKUP_DIR}/db_backup.sql"
+        local TEMP_BACKUP="/tmp/db_backup.sql"
+
+        [ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP"
+        cp "$ORIG_BACKUP" "$TEMP_BACKUP"
+
+        # Remove problematic role commands
+        sed -i -e '/^CREATE ROLE webadmin/d' \
+            -e '/^CREATE ROLE postgres/d' \
+            -e "/^CREATE ROLE ${DBUSER}/d" \
+            -e '/^DROP ROLE IF EXISTS postgres/d' \
+            -e '/^DROP ROLE IF EXISTS webadmin/d' \
+            -e "/^DROP ROLE IF EXISTS ${DBUSER}/d" \
+            -e '/^ALTER ROLE postgres WITH SUPERUSER/d' \
+            -e '/^ALTER ROLE webadmin WITH SUPERUSER/d' \
+            -e "/^ALTER ROLE ${DBUSER} WITH SUPERUSER/d" "$TEMP_BACKUP"
+
+        # Restore the database
+        PGPASSWORD="${DBPASSWD}" psql --no-readline -q -U "${DBUSER}" -d postgres < "$TEMP_BACKUP"
+
+        # Restore WAL files
+        restore_snapshot_by_id "${wal_snapshot_id}"
+        restore_postgres_wal "${PITR_TIME}"
+
+        # Restart PostgreSQL to apply recovery
+        jem service restart postgresql
+
+        [ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP"
+
+    else
+        # Regular restore without PITR
+        local ORIG_BACKUP="${DUMP_BACKUP_DIR}/db_backup.sql"
+        local TEMP_BACKUP="/tmp/db_backup.sql"
+
+        dump_snapshot_id=$(get_dump_snapshot_id_by_name "${BACKUP_NAME}")
+        restore_snapshot_by_id "${dump_snapshot_id}"
+
+        [ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP"
+        cp "$ORIG_BACKUP" "$TEMP_BACKUP"
+
+        # Remove problematic role commands
+        sed -i -e '/^CREATE ROLE webadmin/d' \
+            -e '/^CREATE ROLE postgres/d' \
+            -e "/^CREATE ROLE ${DBUSER}/d" \
+            -e '/^DROP ROLE IF EXISTS postgres/d' \
+            -e '/^DROP ROLE IF EXISTS webadmin/d' \
+            -e "/^DROP ROLE IF EXISTS ${DBUSER}/d" \
+            -e '/^ALTER ROLE postgres WITH SUPERUSER/d' \
+            -e '/^ALTER ROLE webadmin WITH SUPERUSER/d' \
+            -e "/^ALTER ROLE ${DBUSER} WITH SUPERUSER/d" "$TEMP_BACKUP"
+
+        PGPASSWORD="${DBPASSWD}" psql --no-readline -q -U "${DBUSER}" -d postgres < "$TEMP_BACKUP" > /dev/null
+        if [[ $? -ne 0 ]]; then
+            echo "$(date) ${ENV_NAME} Error: Failed to restore PostgreSQL dump" | tee -a ${RESTORE_LOG_FILE}
+            [ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP"
+            exit 1
+        fi
+
+        echo "$(date) ${ENV_NAME} PostgreSQL dump restored successfully" | tee -a ${RESTORE_LOG_FILE}
+        [ -f "$TEMP_BACKUP" ] && rm -f "$TEMP_BACKUP"
+    fi
+}
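# After `jem service restart postgresql` above, the server replays the archived WAL up to
# recovery_target_time. With the default recovery_target_action ('pause') it then waits in
# recovery mode; an illustrative way to check progress and finish the recovery on the master:
#
#   sudo -u postgres psql -c "SELECT pg_is_in_recovery();"
#   sudo -u postgres psql -c "SELECT pg_wal_replay_resume();"   # ends recovery once the target is reached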
+
+function restore_mysql(){
+    if [ "$PITR" == "true" ]; then
+        dump_snapshot_id=$(get_snapshot_id_before_time "${PITR_TIME}")
+        dump_snapshot_name=$(get_snapshot_name_by_id "${dump_snapshot_id}")
+
+        binlog_snapshot_id=$(get_binlog_snapshot_id_by_name "${dump_snapshot_name}")
+
+        restore_snapshot_by_id "${dump_snapshot_id}"
+        restore_mysql_dump
+
+        restore_snapshot_by_id "${binlog_snapshot_id}"
+        apply_binlogs_until_time "${PITR_TIME}"
+    else
+        dump_snapshot_id=$(get_dump_snapshot_id_by_name "${BACKUP_NAME}")
+        restore_snapshot_by_id "${dump_snapshot_id}"
+        restore_mysql_dump
+    fi
+}
+
+### Main block
+
+echo $$ > /var/run/${ENV_NAME}_restore.pid
+source /etc/jelastic/metainf.conf;
+echo $(date) ${ENV_NAME} "Restoring the DB dump" | tee -a ${RESTORE_LOG_FILE}
+if [ "$COMPUTE_TYPE" == "redis" ]; then
+    restore_redis;
+
+elif [ "$COMPUTE_TYPE" == "mongodb" ]; then
+    restore_mongodb;
+
+elif [ "$COMPUTE_TYPE" == "postgres" ]; then
+    restore_postgres;
+
+else
+    restore_mysql;
+
+fi
+rm -f /var/run/${ENV_NAME}_restore.pid
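# The MySQL/MariaDB PITR path above can only work if the server keeps binary logs between
# scheduled dumps. A purely illustrative pre-flight check with the same credentials the
# scripts already use (the client may be either mysql or mariadb):
#
#   ${CLIENT_APP:-mysql} -u "${DBUSER}" -p"${DBPASSWD}" -e "SHOW VARIABLES LIKE 'log_bin'; SHOW BINARY LOGS;"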
diff --git a/scripts/restoreOnBeforeInit.js b/scripts/restoreOnBeforeInit.js
index b4156f4..3b39826 100644
--- a/scripts/restoreOnBeforeInit.js
+++ b/scripts/restoreOnBeforeInit.js
@@ -4,9 +4,16 @@
 var storageInfo = getStorageNodeid();
 var storageEnvDomain = storageInfo.storageEnvShortName;
 var storageEnvMasterId = storageInfo.storageNodeId;
 var checkSchemaCommand = "if grep -q '^SCHEME=' /.jelenv; then echo true; else echo false; fi";
+var computeTypeCommand = "grep 'COMPUTE_TYPE=' /etc/jelastic/metainf.conf | cut -d'=' -f2";
 var mysql_cluster_markup = "Be careful when restoring the dump from another DB environment (or environment with another replication schema) to the replicated MySQL/MariaDB/Percona solution.";
 var recovery_addon_markup = "Please use Database Corruption Diagnostic add-on for check after restore, and Database Recovery Add-on for fix if it is needed.";
+var checkSchema = api.env.control.ExecCmdById("${env.name}", session, ${targetNodes.master.id}, toJSON([{"command": checkSchemaCommand, "params": ""}]), false, "root");
+if (checkSchema.result != 0) return checkSchema;
+var computeTypeResp = api.env.control.ExecCmdById("${env.name}", session, ${targetNodes.master.id}, toJSON([{"command": computeTypeCommand, "params": ""}]), false, "root");
+if (computeTypeResp.result != 0) return computeTypeResp;
+var computeType = computeTypeResp.responses[0].out.trim();
+
 resp = jelastic.env.control.GetEnvInfo(storageEnvDomain, session);
 if (resp.result != 0 && resp.result != 11) return resp;
 if (resp.result == 11) {
@@ -16,17 +23,37 @@ if (resp.result == 11) {
     var updateResticOnStorageCommand = "wget --tries=10 -O /tmp/installUpdateRestic " + baseUrl + "/scripts/installUpdateRestic && mv -f /tmp/installUpdateRestic /usr/sbin/installUpdateRestic && chmod +x /usr/sbin/installUpdateRestic && /usr/sbin/installUpdateRestic";
     var respUpdate = api.env.control.ExecCmdById(storageEnvDomain, session, storageEnvMasterId, toJSON([{"command": updateResticOnStorageCommand, "params": ""}]), false, "root");
     if (respUpdate.result != 0) return respUpdate;
-    var backups = jelastic.env.control.ExecCmdById(storageEnvDomain, session, storageEnvMasterId, toJSON([{"command": "/root/getBackupsAllEnvs.sh", "params": ""}]), false, "root").responses[0].out;
+    var getBackupsAllEnvs = "wget --tries=10 -O /root/getBackupsAllEnvsJSON.sh " + baseUrl + "/scripts/getBackupsAllEnvsJSON.sh && chmod +x /root/getBackupsAllEnvsJSON.sh && /root/getBackupsAllEnvsJSON.sh";
+    var backups = jelastic.env.control.ExecCmdById(storageEnvDomain, session, storageEnvMasterId, toJSON([{"command": getBackupsAllEnvs, "params": ""}]), false, "root").responses[0].out;
     var backupList = toNative(new JSONObject(String(backups)));
-    var envs = prepareEnvs(backupList.envs);
-    var backups = prepareBackups(backupList.backups);
+
+    var filteredEnvs = [];
+    var filteredBackups = {};
+    var filteredPitrEnvs = [];
+    var filteredPitrStartTime = {};
+
+    for (var env in backupList.envs) {
+        if (backupList.envs.hasOwnProperty(env)) {
+            var backupInfo = backupList.envs[env];
+
+            if (backupInfo.server == computeType) {
+                filteredEnvs.push({ caption: env, value: env });
+
+                filteredBackups[env] = backupInfo.backups.map(function(backup) {
+                    return { caption: backup, value: backup };
+                });
+
+                if (backupInfo.pitr === true) {
+                    filteredPitrEnvs.push({ caption: env, value: env });
+                    filteredPitrStartTime[env] = [{ caption: backupInfo.pitrStartTime, value: backupInfo.pitrStartTime }];
+                }
+            }
+        }
+    }
 } else {
     storage_unavailable_markup = "Storage environment " + storageEnvDomain + " is unavailable (stopped/sleeping).";
 }
-var checkSchema = api.env.control.ExecCmdById("${env.name}", session, ${targetNodes.master.id}, toJSON([{"command": checkSchemaCommand, "params": ""}]), false, "root");
-if (checkSchema.result != 0) return checkSchema;
-
 function getStorageNodeid(){
     var storageEnv = '${settings.storageName}'
     var storageEnvShortName = storageEnv.split(".")[0]
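# The filtering loop above assumes getBackupsAllEnvsJSON.sh prints JSON shaped roughly like
# the sketch below (field names inferred from the properties read in that loop; the values
# are made-up placeholders):
#
#   { "envs": { "<envName>": { "server":        "<COMPUTE_TYPE of the source DB>",
#                              "backups":       ["<backup name>", "..."],
#                              "pitr":          true,
#                              "pitrStartTime": "<timestamp>" } } }
#
# If jq is available on the storage node, `/root/getBackupsAllEnvsJSON.sh | jq '.envs'`
# is an easy way to confirm the shape before the add-on consumes it.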
"PITR Start Time", + "dependsOn": { + "backupedEnvName" : filteredPitrStartTime + } + }, { + "caption": "Time for restore", + "type": "string", + "name": "restoreTime", + "inputType": "datetime-local", + "cls": "x-form-text", + "required": true, + "tooltip": "Select specific date and time for point-in-time recovery" + } + ], + "false": [ + { + "caption": "Restore from", + "type": "list", + "name": "backupedEnvName", + "required": true, + "default": filteredEnvs[0], + "values": filteredEnvs + }, { + "caption": "Backup", + "type": "list", + "name": "backupDir", + "required": true, + "tooltip": "Select the time stamp for which you want to restore the DB dump", + "dependsOn": { + "backupedEnvName" : filteredBackups + } + } + ] } - - oResultBackups[envName] = aValues; + }); + if (checkSchema.responses[0].out == "true") { + settings.fields.push( + {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": mysql_cluster_markup} + ); + settings.fields.push( + {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": recovery_addon_markup} + ); } - } - - return oResultBackups; -} - -if (storage_unavailable_markup === "") { - settings.fields.push({ + } else { + settings.fields.push({ "caption": "Restore from", "type": "list", "name": "backupedEnvName", "required": true, - "values": envs + "default": filteredEnvs[0], + "values": filteredEnvs }, { "caption": "Backup", "type": "list", "name": "backupDir", "required": true, - "tooltip": "Select the time stamp for which you want to restore the DB dump", + "tooltip": "Select the time stamp for which you want to restore the DB dump", "dependsOn": { - "backupedEnvName" : backups + "backupedEnvName" : filteredBackups } }); - if (checkSchema.responses[0].out == "true") { - settings.fields.push( - {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": mysql_cluster_markup} - ); - settings.fields.push( - {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": recovery_addon_markup} - ); - } + if (checkSchema.responses[0].out == "true") { + settings.fields.push( + {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": mysql_cluster_markup} + ); + settings.fields.push( + {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": recovery_addon_markup} + ); + } + } } else { settings.fields.push( {"type": "displayfield", "cls": "warning", "height": 30, "hideLabel": true, "markup": storage_unavailable_markup}