wiki:S3QLBackup

Table of Contents

  1. s3ql_backup
  2. s3ql_endpoint_check

S3QL Backup Script

Following is a modified version of the s3ql_backup.sh script that was provided by the Debian package at /usr/share/doc/s3ql/examples/s3ql_backup.sh.

It expects a /etc/s3ql/SERVERNAME file listing directories to be backed up for each server and the SERVERNAME is expected to match the bucket name.

s3ql_backup

#!/bin/bash

# Abort entire script if any command fails
#set -e

# This script assumes that the bucketname and the directory under
# /media match, and it also assumes that a file with the
# same name, containing a list of directories to back up, exists
# in /etc/s3ql
CONFIG_DIR="/etc/s3ql"
# S3 server URL
S3QL_SERVER="s3c://s.qstack.advania.com:443"
# rsync command and the options used for copying into the s3ql mount
RSYNC="rsync -aHAXx --delete-during --delete-excluded --partial -v"
# mount directory for s3ql file systems
S3QL_MOUNT="/media/s3ql"
# mount directory for sshfs file systems
SSHFS_MOUNT="/media/sshfs"
# directory where the helper scripts this script depends on are located
SCRIPT_DIR="/usr/local/bin"
# This script uses lockfile which comes with procmail
# lockfile lines commented out, to enable see lines starting with ##
##LOCKFILE_BINARY="/usr/bin/lockfile"
##LOCKFILE="/var/run/lock/$(basename $0).lock"
#echo "LOCKFILE: $LOCKFILE"
# Seconds between unmount attempts
SLEEP="15"
# Minutes to sleep after the rsync before unmounting is attempted
LONG_SLEEP="5m"

# Print all arguments, joined by spaces, to standard error.
# printf is used instead of echo so that arguments such as "-n"
# (which echo would treat as an option) are printed literally.
echoerr()
{
  printf '%s\n' "$*" 1>&2
}

# Check that the script is being run by root
if [[ "$(id -u)" != "0" ]] ; then
  echoerr "You must run '$0' as root or via sudo"
  exit 1
fi

# Test if the lockfile binary can be found
##if [[ ! -e "$LOCKFILE_BINARY" ]]; then
##  echo "$LOCKFILE_BINARY not found, please install the procmail package."
##  exit 1
##fi

# check the $S3QL_MOUNT directory exists
if [[ ! -d "$S3QL_MOUNT" ]]; then
  echoerr "$S3QL_MOUNT doesn't exist"
  exit 1
fi

# check the $SSHFS_MOUNT directory exists
if [[ ! -d "$SSHFS_MOUNT" ]]; then
  echoerr "$SSHFS_MOUNT doesn't exist"
  exit 1
fi

# Take the bucket name from the first argument, or prompt for it
# interactively when no argument was given.
if [[ $1 ]]; then
  BUCKET="$1"
else
  echo "Type the bucketname and then [ENTER]:"
  # -r so backslashes in the typed name are taken literally
  read -r BUCKET
fi

# Bail out early if any s3ql mount is in the
# "Transport endpoint is not connected" state.
if ! $SCRIPT_DIR/s3ql_endpoint_check ; then
  echoerr "Problem with a disconnected endpoint"
  exit 1
fi
echo "No problem with disconnected endpoints" 

# A list of directories to back up must exist at $CONFIG_DIR/$BUCKET
BACKUP_LIST="$CONFIG_DIR/$BUCKET"
if [[ ! -f "$BACKUP_LIST" ]]; then
  echoerr "You need to create $CONFIG_DIR/$BUCKET with a list of directories to backup"
  exit 1
fi

# sshfs mounted at /media/server-name
SSHFS_SOURCE="$SSHFS_MOUNT/$BUCKET"
if [[ ! -d "$SSHFS_SOURCE" ]]; then
  echoerr "$SSHFS_SOURCE doesn't exist"
  exit 1
fi
# mount the sshfs
if ! $SCRIPT_DIR/mnt-sshfs "$BUCKET" ; then
  echoerr "Problem mounting $SSHFS_SOURCE"
  exit 1
fi
echo "Success mounting $SSHFS_MOUNT/$BUCKET"

# check the bucket exists
if [[ ! -d "$S3QL_MOUNT/$BUCKET" ]]; then
  echoerr "$S3QL_MOUNT/$BUCKET doesn't exist"
  exit 1
fi
# mount the s3ql directory
if ! $SCRIPT_DIR/mnt-s3ql "$BUCKET" ; then
  echoerr "Problem mounting $S3QL_MOUNT/$BUCKET"
  exit 1
fi
echo "Success mounting $S3QL_MOUNT/$BUCKET"

# if the $LOCKFILE exists then exit
# the lockfile is read only
# the timeout is set to 2 hours (7200 secs)
# if the lockfile is older than this it will be removed
##DATE=$(date -R)
#$LOCKFILE_BINARY -r 1 -l 21600 $LOCKFILE || { echo "$LOGFILE exists exiting $DATE" ; exit 23 ; }
##$LOCKFILE_BINARY -r 1 -l 7200 $LOCKFILE || \
##  { echo "$LOCKFILE exists exiting $DATE" ; exit 1 ; }

# Print the most recent backup directory in the current directory,
# i.e. the lexicographically last name matching the YYYY-MM-DD_HH:MM:SS
# format this script creates. Prints nothing when no backup exists.
# Replaces the original "python" heredoc, whose Python 2 "print"
# statement fails silently when "python" is Python 3, leaving
# LAST_BACKUP empty and skipping the s3qlcp deduplication.
find_last_backup()
{
  local d latest=""
  # glob expansion is sorted, so the last match is the most recent
  for d in [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:[0-9][0-9]; do
    [[ -d "$d" ]] && latest="$d"
  done
  printf '%s' "$latest"
}

# Figure out the most recent backup
cd "$S3QL_MOUNT/$BUCKET" || { echo "Cannot cd to $S3QL_MOUNT/$BUCKET" 1>&2 ; exit 1 ; }
LAST_BACKUP="$(find_last_backup)"

# Timestamp used as the name of the new backup directory.
NEW_BACKUP="$(date "+%Y-%m-%d_%H:%M:%S")"

# Unless this is the very first backup, duplicate the most recent
# backup and then make it immutable (in case the previous backup
# was interrupted prematurely).
if [[ "$LAST_BACKUP" ]]; then
    echo "Copying ${LAST_BACKUP} to ${NEW_BACKUP}..."
    s3qlcp "$LAST_BACKUP" "$NEW_BACKUP"

    echo "Locking ${LAST_BACKUP}"
    s3qllock "$LAST_BACKUP"
fi

# Make sure the root directory for the new backup exists
if [[ ! -d "$S3QL_MOUNT/$BUCKET/$NEW_BACKUP" ]]; then
  echo "$S3QL_MOUNT/$BUCKET/$NEW_BACKUP doesn't exist so creating it"
  if mkdir -p "$S3QL_MOUNT/$BUCKET/$NEW_BACKUP" ; then
    echo "Created $S3QL_MOUNT/$BUCKET/$NEW_BACKUP"
  else
    echoerr "Problem making $S3QL_MOUNT/$BUCKET/$NEW_BACKUP"
    exit 1
  fi
fi
# Read the backup list one directory per line; the original
# "for dir in $(<list)" word-split the file, which breaks paths
# containing spaces. "|| [[ -n "$dir" ]]" keeps a final line that
# lacks a trailing newline. Source and destination paths are quoted
# so they are never re-split; $RSYNC is deliberately unquoted so its
# embedded options word-split as before.
while IFS= read -r dir || [[ -n "$dir" ]]; do
  # skip blank lines and comment lines in the backup list
  [[ -z "$dir" || "$dir" == \#* ]] && continue
  mkdir -p "$S3QL_MOUNT/$BUCKET/$NEW_BACKUP$dir/" || \
    { echoerr "Problem making $S3QL_MOUNT/$BUCKET/$NEW_BACKUP$dir/" ; exit 1 ; }
  if $RSYNC "${SSHFS_SOURCE}${dir}/" "${S3QL_MOUNT}/${BUCKET}/${NEW_BACKUP}${dir}/" ; then
    echo "Success running rsync"
  else
    echoerr "Problem with rsync, try running this script again: $0 $1"
    exit 1
  fi
done < "$BACKUP_LIST"

# Make the new backup immutable
echo "Locking ${NEW_BACKUP}"
s3qllock "$NEW_BACKUP"

# cd out of the mount point so the s3ql file system can be unmounted
echo "Changing directory to $HOME so ${S3QL_MOUNT}/${BUCKET} can be unmounted"
cd "${HOME}"

# Give s3ql's background tasks time to complete, then unmount; if the
# first attempt fails, wait once more and retry with --force.
echo "Sleeping for $LONG_SLEEP"
sleep $LONG_SLEEP
if ! $SCRIPT_DIR/umnt-s3ql "${BUCKET}" ; then
  echo "Sleeping for ${LONG_SLEEP} and then using force"
  sleep ${LONG_SLEEP}
  if ! $SCRIPT_DIR/umnt-s3ql --force "${BUCKET}" ; then
    echoerr "Failure using force to unmount ${S3QL_SERVER}/${BUCKET}"
    exit 1
  fi
  echo "Success using force unmounting ${S3QL_SERVER}/${BUCKET}"
else
  echo "Success unmounting ${S3QL_SERVER}/${BUCKET}"
fi
$SCRIPT_DIR/umnt-sshfs "${BUCKET}"

# remove the lock file
##echo "Removing the lock at ${LOCKFILE}"
##rm -f "${LOCKFILE}"

# Expire old backups

# Note that expire_backups.py comes from contrib/ and is not installed
# by default when you install from the source tarball. If you have
# installed an S3QL package for your distribution, this script *may*
# be installed, and it *may* also not have the .py ending.
#expire_backups --use-s3qlrm 1 7 14 31 90 180 360

s3ql_endpoint_check

S3QL "Transport endpoint is not connected" check script

#!/bin/bash

# Check for errors like this with s3ql buckets:
# df 
# df: ‘/media/s3ql/crin1’: Transport endpoint is not connected

# Collect any mount points that df reports as disconnected. df prints
# the error (on stderr) as:  df: ‘<path>’: Transport endpoint is not connected
# so take the second field and strip the quote marks and trailing colon.
S3QL_DISCONNECTED_ENDPOINTS="$(df 2>&1 \
  | grep "Transport endpoint is not connected" \
  | awk '{ print $2 }' \
  | tr -d '‘' \
  | tr -d '’:')"
# Lazily unmount every disconnected endpoint. The original script
# exited 0 after the FIRST successful unmount, so any further
# disconnected endpoints were left untouched; now the whole list is
# processed and the script exits non-zero if any unmount failed.
if [[ "$S3QL_DISCONNECTED_ENDPOINTS" ]]; then
  UNMOUNT_STATUS=0
  for s3qlfs in $S3QL_DISCONNECTED_ENDPOINTS; do
    if fusermount -u -z "${s3qlfs}" ; then
      echo "Success unmounting ${s3qlfs}"
    else
      echo "Problem unmounting ${s3qlfs}, consider using: killall -9 mount.s3ql" 
      UNMOUNT_STATUS=1
    fi
  done
  exit $UNMOUNT_STATUS
fi
Last modified on Nov 5, 2015, 11:02:35 AM