S3QL Backup Script
Following is a modified version of the s3ql_backup.sh script provided by the Debian package at /usr/share/doc/s3ql/examples/s3ql_backup.sh.
It expects a /etc/s3ql/SERVERNAME file for each server listing the directories to be backed up, and SERVERNAME is expected to match the bucket name.
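For example, with a server / bucket named web1 (a made-up name for illustration), /etc/s3ql/web1 could contain one path per line. The script appends each path to /media/BUCKETNAME, so the entries below would back up /media/web1/etc, /media/web1/home and /media/web1/var/www:

/etc
/home
/var/www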
s3ql_backup
#!/bin/bash

# Abort entire script if any command fails
set -e

# This script assumes that the bucket name and the directory under
# /media match, and it also assumes that a file with the same name,
# containing a list of directories to back up, exists in /etc/s3ql

# Take the bucket name / directory from the first argument, otherwise prompt for it
if [[ $1 ]]; then
    BUCKET=$1
else
    echo "Type the bucketname and then [ENTER]:"
    read bucket
    BUCKET=$bucket
fi

# Backup destination (storage url)
storage_url="s3c://s.qstack.advania.com:443/$BUCKET"

# Check that a list of directories to back up exists at /etc/s3ql/$BUCKET
if [[ ! -f "/etc/s3ql/$BUCKET" ]]; then
    echo "You need to create /etc/s3ql/$BUCKET with a list of directories to backup"
    exit 1
else
    BACKUP_LIST="/etc/s3ql/$BUCKET"
fi

# Recover cache if e.g. system was shut down while fs was mounted
fsck.s3ql --backend-options="dumb-copy" --batch "$storage_url"

# Create a temporary mountpoint and mount file system
mountpoint="/tmp/s3ql_backup_$$"
mkdir "$mountpoint"
mount.s3ql --backend-options="dumb-copy" "$storage_url" "$mountpoint"

# Make sure the file system is unmounted when we are done
trap "cd /; umount.s3ql '$mountpoint'; rmdir '$mountpoint'" EXIT

# Figure out the most recent backup
cd "$mountpoint"
last_backup=`python3 <<EOF
import os
import re
backups=sorted(x for x in os.listdir('.') if re.match(r'^[\\d-]{10}_[\\d:]{8}$', x))
if backups:
    print(backups[-1])
EOF`

# Duplicate the most recent backup unless this is the first backup
new_backup=`date "+%Y-%m-%d_%H:%M:%S"`
if [ -n "$last_backup" ]; then
    echo "Copying $last_backup to $new_backup..."
    s3qlcp "$last_backup" "$new_backup"

    # Make the last backup immutable
    # (in case the previous backup was interrupted prematurely)
    s3qllock "$last_backup"
fi

# ..and update the copy
# (the rsync invocation from the original example script, kept commented out for reference)
#rsync -aHAXx --delete-during --delete-excluded --partial -v \
#    --exclude /.cache/ \
#    --exclude /.s3ql/ \
#    --exclude /.thumbnails/ \
#    --exclude /tmp/ \
#    "/home/my_username/" "./$new_backup/"

# Sync each directory listed in /etc/s3ql/$BUCKET from /media/$BUCKET into the new backup
if [[ -d $mountpoint ]]; then
    if [[ ! -d "$mountpoint/$new_backup" ]]; then
        echo "$mountpoint/$new_backup doesn't exist so creating it"
        mkdir -p "$mountpoint/$new_backup"
    fi
    for dir in $(<"${BACKUP_LIST}"); do
        mkdir -p "$mountpoint/$new_backup$dir/"
        rsync -aHAXx --delete-during --delete-excluded --partial -v \
            "/media/$BUCKET$dir/" "$mountpoint/$new_backup$dir/"
    done
else
    echo "$mountpoint doesn't exist - something has gone horribly wrong"
    exit 1
fi

# Make the new backup immutable
s3qllock "$new_backup"

# Expire old backups
# Note that expire_backups.py comes from contrib/ and is not installed
# by default when you install from the source tarball. If you have
# installed an S3QL package for your distribution, this script *may*
# be installed, and it *may* also not have the .py ending.
expire_backups --use-s3qlrm 1 7 14 31 90 180 360
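The script can be run interactively (it will prompt for the bucket name) or non-interactively by passing the bucket name as the first argument. A typical way to drive it is from cron; the install path, schedule and server name below are only illustrative assumptions:

# Back up the server / bucket "web1" (hypothetical name) by hand
/usr/local/bin/s3ql_backup web1

# Example crontab entry: run the same backup every night at 02:30
30 2 * * * /usr/local/bin/s3ql_backup web1 >> /var/log/s3ql_backup.log 2>&1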