пятница, 11 сентября 2009 г.

Резервирование и восстановление ZFS IV

Как-то сложно мы восстанавливали датасет. "Хлопотно это". Зачем-то клоны делать со снапшотов, переименований куча, игры с точками монтирования. Все же пул нам удалось восстановить легко и непринужденно. Нельзя ли что-нибудь втереть, или вправить, или еще что-нибудь?

Пришлось немного поиграть с процедурой восстановления. Опуская подробности и неудачи, приведем конечный результат:

server5# zfs list
NAME USED AVAIL REFER MOUNTPOINT
data 150M 19.4G 50.0M /data
data/work1 100M 19.4G 20K /data/work1
data/work1/user1 100M 19.4G 100M /data/work1/user1
rpool 14.6G 12.7G 94K /rpool
rpool/ROOT 6.60G 12.7G 18K legacy
rpool/ROOT/zfsroot 6.60G 12.7G 6.51G /
rpool/ROOT/zfsroot/var 101M 12.7G 101M /var
rpool/dump 4.00G 12.7G 4.00G -
rpool/export 117K 12.7G 20K /export
rpool/export/home 97K 12.7G 97K /export/home
rpool/swap 4G 16.6G 130M -

server5# gzcat server5.data%work2.0.zfs.gz | zfs receive -vdF data
receiving full stream of data/work2@snapshot into data/work2@snapshot
received 15.0KB stream in 1 seconds (15.0KB/sec)
receiving full stream of data/work2/user2@snapshot into data/work2/user2@snapshot
received 100MB stream in 4 seconds (25.1MB/sec)

server5# zfs list
NAME USED AVAIL REFER MOUNTPOINT
data 250M 19.3G 50.0M /data
data/work1 100M 19.3G 20K /data/work1
data/work1/user1 100M 19.3G 100M /data/work1/user1
data/work2 100M 19.3G 20K /data/work2
data/work2@snapshot 0 - 20K -
data/work2/user2 100M 19.3G 100M /data/work2/user2
data/work2/user2@snapshot 0 - 100M -
rpool 14.6G 12.7G 94K /rpool
rpool/ROOT 6.60G 12.7G 18K legacy
rpool/ROOT/zfsroot 6.60G 12.7G 6.51G /
rpool/ROOT/zfsroot/var 101M 12.7G 101M /var
rpool/dump 4.00G 12.7G 4.00G -
rpool/export 117K 12.7G 20K /export
rpool/export/home 97K 12.7G 97K /export/home
rpool/swap 4G 16.6G 130M -

server5# zfs destroy -r data/work2@snapshot

server5# zfs list
NAME USED AVAIL REFER MOUNTPOINT
data 250M 19.3G 50.0M /data
data/work1 100M 19.3G 20K /data/work1
data/work1/user1 100M 19.3G 100M /data/work1/user1
data/work2 100M 19.3G 20K /data/work2
data/work2/user2 100M 19.3G 100M /data/work2/user2
rpool 14.6G 12.7G 94K /rpool
rpool/ROOT 6.60G 12.7G 18K legacy
rpool/ROOT/zfsroot 6.60G 12.7G 6.51G /
rpool/ROOT/zfsroot/var 101M 12.7G 101M /var
rpool/dump 4.00G 12.7G 4.00G -
rpool/export 117K 12.7G 20K /export
rpool/export/home 97K 12.7G 97K /export/home
rpool/swap 4G 16.6G 130M -

Ха, а ведь датасеты восстанавливать можно и коротким путем! Кратко — датасет нужно просто восстанавливать точно так же, как и весь пул. Содержимое будет перезаписано поверх существующих данных, если таковые имеются (в существующих файловых системах).

Теперь мы практически созрели написать скрипт zfs_restore.sh:

#!/sbin/sh

#
# ZFS filesystem(s) restore from archive.
#
# ZFS archives are used for bare-metal restore
# and system cloning.
#
# Archive names can be incremental, like this:
# [hostname].[pool|dataset].n.zfs<.gz>, n=0,1,2...
# Note: Do not rename archive files! The file names are
# used for recovery purposes (the target pool/dataset
# can be derived from them).
#
# Version 1.0 (C) 2009 Y.Voinov
#
# If you specify the archive name (optionally with a directory path)
# on the command line, the script runs in non-interactive mode.
# If you do not specify the pool/dataset name either on the command
# line or in interactive mode, it is derived from the archive name.
#
# ident "@(#)zfs_restore.sh 1.0 09/11/09 YV"
#

#############
# Variables #
#############

# Snapshot name suffix expected in the backup archives.
# Change it if snapshots with this name already exist in the system.
SNAP_EXT="snapshot"
# Archiver name
ARC_NAME="gzip"
# Archive file suffix (compressed archives)
ARC_SUFFIX=".gz"
# Default archive extension (raw zfs send stream)
ext="zfs"

# OS utilities, resolved once via `which`
CUT=`which cut`
DATE=`which date`
ECHO=`which echo`
FILE=`which file`
GREP=`which grep`
GZIP=`which gzip`
ID=`which id`
PRINTF=`which printf`
SED=`which sed`
UNAME=`which uname`
WHOAMI=`which whoami`
ZFS=`which zfs`

# OS release (field after the first dot), OS name and full "name release"
OS_VER=`$UNAME -r|$CUT -f2 -d"."`
OS_NAME=`$UNAME -s|$CUT -f1 -d" "`
OS_FULL=`$UNAME -sr`

###############
# Subroutines #
###############

check_os ()
{
  # Verify the operating system: Solaris releases below 10 are rejected,
  # everything else is reported and accepted.
  $PRINTF "Checking OS... "
  case "$OS_NAME" in
    SunOS)
      if [ "$OS_VER" -lt "10" ]; then
        $ECHO "ERROR: Unsupported OS: $OS_FULL"
        $ECHO "Exiting..."
        exit 1
      fi
      ;;
  esac
  $ECHO "$OS_FULL"
}

check_root ()
{
  # Abort unless the effective user is root. The XPG4 id is preferred
  # when present (Solaris); otherwise parse `id` output, falling back
  # to whoami.
  $PRINTF "Checking super-user... "
  if [ -f /usr/xpg4/bin/id ]; then
    cur_user=`/usr/xpg4/bin/id -n -u`
  elif [ "`$ID | $CUT -f1 -d" "`" = "uid=0(root)" ]; then
    cur_user="root"
  else
    cur_user=$WHOAMI
  fi

  WHO=$cur_user
  if [ "$WHO" = "root" ]; then
    $ECHO "$WHO"
  else
    $ECHO "ERROR: You must be super-user to run this script."
    exit 1
  fi
}

check_fs_exists ()
{
  # Verify that the given ZFS pool/dataset exists; abort otherwise.
  fs_name=$1

  $ZFS list -H -o name "$fs_name" > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    $ECHO "ERROR: ZFS pool/dataset $fs_name does not exist."
    $ECHO " Please specify another ZFS."
    $ECHO "Exiting..."
    exit 1
  fi
}

archive_type ()
{
  # Classify an archive by its file-name suffix combined with a
  # `file` header probe. Prints "gzip", "zfs" or "unknown" on stdout.
  arg_file=$1

  name_is_gz=`$ECHO $arg_file | $GREP $ARC_SUFFIX$`
  head_is_gz=`$FILE $arg_file | $GREP $ARC_NAME`
  name_is_zfs=`$ECHO $arg_file | $GREP $ext$`
  head_is_zfs=`$FILE $arg_file | $GREP $ext`

  if [ ! -z "$name_is_gz" -a ! -z "$head_is_gz" ]; then
    $ECHO "$ARC_NAME"
  elif [ ! -z "$name_is_zfs" -a ! -z "$head_is_zfs" ]; then
    $ECHO "$ext"
  else
    $ECHO "unknown"
  fi
}

archive_exists ()
{
  # Validate the archive: it must exist, be readable, and be of a
  # recognized type (gzip-compressed or raw zfs stream).
  arg_arc=$1

  # Bug fix: the original used -a here, so an existing but unreadable
  # archive slipped through. It must exist AND be readable, i.e. fail
  # when either condition is violated.
  if [ ! -f "$arg_arc" -o ! -r "$arg_arc" ]; then
    $ECHO "ERROR: Archive $arg_arc does not exist"
    $ECHO " or you haven't permissions to read."
    $ECHO "Exiting..."
    exit 1
  fi

  # Bug fix: the original required the type to equal both "gzip" and
  # " zfs" simultaneously (with a stray space), a condition that can
  # never hold, so wrong-type archives were never rejected. Reject any
  # archive whose type is not recognized.
  if [ "`archive_type $arg_arc`" = "unknown" ]; then
    $ECHO "ERROR: Archive $arg_arc it has wrong type."
    $ECHO "Exiting..."
    exit 1
  fi
}

check_non_interactive ()
{
  # Select interactive vs. non-interactive operation and fill the
  # globals `archive_file' and `filesystem' either from the command
  # line or by prompting the operator.
  cmd_arc=$1
  cmd_fs=$2

  if [ "x$cmd_arc" = "x" -a "x$cmd_fs" = "x" ]; then
    # No command-line arguments at all: full interactive mode.
    interactive="1"

    $ECHO "---------------------------------------"
    $ECHO "ZFS Restore archive"
    $ECHO "---------------------------------------"
    $ECHO
    $ECHO ">>> Press [Enter] to continue or"
    $ECHO ">>> Press [Ctrl+C] to cancel operation."
    $ECHO
    read p

    # Ask for the archive to receive
    $ECHO "Input archive name (can be with path)"
    $PRINTF "and press enter: "
    read archive_file

    # Ask for the pool to restore into
    $ECHO "Input existing pool to restore"
    $PRINTF "and press enter: "
    read filesystem
  else
    # Quoted case patterns keep '?' literal, so only the exact help
    # flags match here.
    case "$cmd_arc" in
      "/?"|"/h"|"/help"|"help")
        interactive="0"
        $ECHO "Usage: $0 calls script in interactive mode."
        $ECHO " or"
        $ECHO " $0 [/source path/archive] [pool]"
        $ECHO " calls script in non-interactive mode."
        $ECHO
        $ECHO "Note: Archives will be decompressed if GZip installed."
        exit 0
        ;;
      *)
        archive_file=$cmd_arc
        filesystem=$cmd_fs
        ;;
    esac
  fi

  # When no filesystem was given, derive it from the archive name:
  # take field 2 of the dot-separated name, map '%' back to '/', and
  # keep only the top-level pool component.
  if [ "x$filesystem" = "x" ]; then
    filesystem=`$ECHO "$archive_file" | $CUT -f2 -d"." | $SED -e 's/%/\//g' | $CUT -f1 -d"/"`
  fi

  # Validate both inputs before any destructive work.
  check_fs_exists $filesystem
  archive_exists $archive_file
}

destroy_fs ()
{
  # Recursively destroy the given filesystem/snapshot tree, warning
  # (but not failing) when it does not exist.
  target=$1

  $ZFS destroy -r "$target" > /dev/null 2>&1
  if [ $? -ne 0 ]; then
    $ECHO "WARNING: Filesystem $target does not exists."
  fi
}

zfs_receive ()
{
  # Receive (restore) a pool/dataset from the archive file, streaming
  # through gzip when the archive is compressed and gzip is available.
  arg_file=$1    # Archive to receive
  arg_filesys=$2 # Target filesystem

  # Verbose output flag set in interactive mode
  if [ "$interactive" = "1" ]; then
    verb="v"
  fi

  if [ "$compress" = "1" -a "`archive_type $arg_file`" = "$ARC_NAME" ]; then
    # Compressed archive: decompress into the receive pipeline.
    $GZIP -d -c "$arg_file" | $ZFS receive -dF"$verb" "$arg_filesys"
  elif [ "`archive_type $arg_file`" = "$ext" ]; then
    # Plain zfs send stream: feed the file directly.
    $ZFS receive -dF"$verb" "$arg_filesys" < "$arg_file"
  else
    # Bug fix: previously an unusable archive (unknown type, or a gzip
    # archive while decompression is unavailable) was silently skipped,
    # leaving the operator to believe the restore succeeded.
    $ECHO "ERROR: Archive $arg_file cannot be received (unsupported type)."
  fi
}

##############
# Main block #
##############

# Checking OS
check_os

# Checking root
check_root

# Check non-interactive mode; fills $archive_file and $filesystem
check_non_interactive "$1" "$2"

$ECHO "*** BEGIN: ZFS restore for $filesystem at `$DATE`."

# Check archiver availability.
# Bug fix: the compress="0"/compress="1" assignments had been fused
# onto the $ECHO lines, so they were printed as echo arguments and
# $compress was never actually set -- gzip archives were never
# restored by zfs_receive. Also use -o (not -a): a gzip binary that
# exists but is not executable is just as unusable as a missing one.
if [ ! -f "$GZIP" -o ! -x "$GZIP" ]; then
  $ECHO "WARNING: Decompression can NOT be used. GZip not found."
  compress="0"
else
  $ECHO "Archive can be decompressed with gzip."
  compress="1"
fi

# First destroy all snapshots recursively
destroy_fs "$filesystem@$SNAP_EXT"

# Restore ZFS pool/dataset
zfs_receive "$archive_file" "$filesystem"

# Finally destroy all snapshots recursively
destroy_fs "$filesystem@$SNAP_EXT"

$ECHO "*** DONE: ZFS restore for $filesystem at `$DATE`."


Вот он - тот самый пресловутый эквивалент zfsdump/zfsrestore, который, как священный Грааль, искали UFS-ники.

Решение по резервированию и восстановлению на основе вышеприведенной логики и руководство по использованию (на двух языках) можно скачать здесь (реализовано в виде стандартного пакета Solaris, устанавливаемого в /usr/local/bin).

PS. Приведенные в статьях скрипты zfs_backup.sh/zfs_restore.sh работают с точками монтирования как локальными, так и удаленными (через NFS). Пакет поддерживает работу как с удаленного резервного сервера, так и на локальной машине, работает через SSH, позволяет выполнять удаленное сохранение/восстановление в файловый архив и файловую систему ZFS.