= S3QL Backup Script =

The following is a modified version of the [http://www.rath.org/s3ql-docs/contrib.html#s3-backup-sh s3_backup.sh] script provided by the Debian package at `/usr/share/doc/s3ql/examples/s3ql_backup.sh`.

{{{
#!/bin/bash

# Abort entire script if any command fails
set -e

# This script assumes that the bucket name and the directory under
# /media match, and that a file with the same name, containing a
# list of the directories to back up, exists in /etc/s3ql

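# Hypothetical example: for a bucket called "mybucket", the data to
# back up would live under /media/mybucket, and /etc/s3ql/mybucket
# would list one path per line relative to /media/mybucket,
# e.g. /documents and /photos.
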
# Take the bucket name from the first command line argument,
# or prompt for it if none was given
if [[ $1 ]]; then
    BUCKET=$1
else
    echo "Type the bucket name and then [ENTER]:"
    read BUCKET
fi

# Backup destination (storage url)
storage_url="s3c://s.qstack.advania.com:443/$BUCKET"
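# Login credentials for this storage URL are read from the S3QL
# authentication file (~/.s3ql/authinfo2 by default).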

# Check that a list of directories to back up exists at
# /etc/s3ql/$BUCKET
if [[ ! -f "/etc/s3ql/$BUCKET" ]]; then
    echo "You need to create /etc/s3ql/$BUCKET with a list of directories to back up"
    exit 1
else
    BACKUP_LIST="/etc/s3ql/$BUCKET"
fi

# Recover cache if e.g. system was shut down while fs was mounted
fsck.s3ql --backend-options="dumb-copy" --batch "$storage_url"

# Create a temporary mountpoint and mount file system
mountpoint="/tmp/s3ql_backup_$$"
mkdir "$mountpoint"
mount.s3ql --backend-options="dumb-copy" "$storage_url" "$mountpoint"

# Make sure the file system is unmounted when we are done
trap "cd /; umount.s3ql '$mountpoint'; rmdir '$mountpoint'" EXIT

# Figure out the most recent backup
cd "$mountpoint"
last_backup=`python3 <<EOF
import os
import re
backups=sorted(x for x in os.listdir('.') if re.match(r'^[\\d-]{10}_[\\d:]{8}$', x))
if backups:
    print(backups[-1])
EOF`

# Duplicate the most recent backup unless this is the first backup
new_backup=`date "+%Y-%m-%d_%H:%M:%S"`
if [ -n "$last_backup" ]; then
    echo "Copying $last_backup to $new_backup..."
    s3qlcp "$last_backup" "$new_backup"

    # Make the last backup immutable
    # (in case the previous backup was interrupted prematurely)
    s3qllock "$last_backup"
fi

# ...and update the copy from the directories listed in $BACKUP_LIST
# (the single rsync call from the original example is kept here,
# commented out, for reference):
#rsync -aHAXx --delete-during --delete-excluded --partial -v \
#    --exclude /.cache/ \
#    --exclude /.s3ql/ \
#    --exclude /.thumbnails/ \
#    --exclude /tmp/ \
#    "/home/my_username/" "./$new_backup/"

if [[ -d "$mountpoint" ]]; then
    if [[ ! -d "$mountpoint/$new_backup" ]]; then
        echo "$mountpoint/$new_backup doesn't exist so creating it"
        mkdir -p "$mountpoint/$new_backup"
    fi
    for dir in $(<"${BACKUP_LIST}"); do
        mkdir -p "$mountpoint/$new_backup$dir/"
        rsync -aHAXx --delete-during --delete-excluded --partial -v "/media/$BUCKET$dir/" "$mountpoint/$new_backup$dir/"
    done
else
    echo "$mountpoint doesn't exist - something has gone horribly wrong"
    exit 1
fi

# Make the new backup immutable
s3qllock "$new_backup"

# Expire old backups

# Note that expire_backups.py comes from contrib/ and is not installed
# by default when you install from the source tarball. If you have
# installed an S3QL package for your distribution, this script *may*
# be installed, and it *may* also not have the .py ending.
expire_backups --use-s3qlrm 1 7 14 31 90 180 360
}}}
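
As a quick illustration of how the script is meant to be driven, something like the following would back up `/media/mybucket/documents` and `/media/mybucket/photos` (the bucket name `mybucket`, the paths and the cron schedule are placeholders only, adjust them to your setup):

{{{
# Hypothetical setup for a bucket called "mybucket":
# list the directories to back up, one per line, relative to /media/mybucket
cat > /etc/s3ql/mybucket <<'EOF'
/documents
/photos
EOF

# Run the backup, passing the bucket name as the first argument
# (without an argument the script prompts for it)
./s3ql_backup.sh mybucket

# Or run it unattended from cron, e.g. every night at 02:30:
# 30 2 * * * /usr/local/bin/s3ql_backup.sh mybucket
}}}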