Merge remote-tracking branch 'marmarek/core3-backup' into core3-devel

Wojtek Porczyk 2016-10-14 15:29:37 +02:00
commit 526f2c3751
15 changed files with 1658 additions and 450 deletions

View File

@ -15,6 +15,15 @@ Options
Show this help message and exit
.. option:: --verbose, -v
Increase verbosity
.. option:: --quiet, -q
Decrease verbosity
.. option:: --verify-only
Do not restore the data, only verify backup integrity
@ -31,6 +40,10 @@ Options
Do not restore VMs that are already present on the host
.. option:: --rename-conflicting
Restore VMs that are already present on the host under different names
.. option:: --force-root
Force to run, even with root privileges
@ -56,17 +69,11 @@ Options
Restore from a backup located in a specific AppVM
.. option:: --encrypted, -e
.. option:: --passphrase-file, -p
The backup is encrypted
Read passphrase from file, or use '-' to read from stdin
.. option:: --compressed, -z
The backup is compressed
.. option:: --debug
Enable (a lot of) debug output
Authors
=======

View File

@ -1,26 +1,86 @@
.. program:: qvm-backup
=======================================================
:program:`qvm-backup` -- Create backup of specified VMs
=======================================================
:program:`qvm-backup` -- None
=============================
Synopsis
========
:command:`qvm-backup` [*options*] <*backup-dir-path*>
--------
:command:`qvm-backup` skel-manpage.py [-h] [--verbose] [--quiet] [--force-root] [--exclude EXCLUDE_LIST] [--dest-vm *APPVM*] [--encrypt] [--no-encrypt] [--passphrase-file PASS_FILE] [--enc-algo CRYPTO_ALGORITHM] [--hmac-algo HMAC_ALGORITHM] [--compress] [--compress-filter COMPRESS_FILTER] [--tmpdir *TMPDIR*] backup_location [vms [vms ...]]
Options
=======
-------
.. option:: --help, -h
Show this help message and exit
show this help message and exit
.. option:: --exclude=EXCLUDE_LIST, -x EXCLUDE_LIST
.. option:: --verbose, -v
Exclude the specified VM from backup (might be repeated)
increase verbosity
.. option:: --quiet, -q
decrease verbosity
.. option:: --force-root
force to run as root
.. option:: --exclude, -x
Exclude the specified VM from the backup (may be repeated)
.. option:: --dest-vm, -d
Specify the destination VM to which the backup will be sent (implies -e)
.. option:: --encrypt, -e
Encrypt the backup
.. option:: --no-encrypt
Skip encryption even if sending the backup to a VM
.. option:: --passphrase-file, -p
Read passphrase from a file, or use '-' to read from stdin
.. option:: --enc-algo, -E
Specify a non-default encryption algorithm. For a list of supported algorithms, execute 'openssl list-cipher-algorithms' (implies -e)
.. option:: --hmac-algo, -H
Specify a non-default HMAC algorithm. For a list of supported algorithms, execute 'openssl list-message-digest-algorithms'
.. option:: --compress, -z
Compress the backup
.. option:: --compress-filter, -Z
Specify a non-default compression filter program (default: gzip)
.. option:: --tmpdir
Specify a temporary directory (if you have at least 1GB free RAM in dom0, use of /tmp is advised) (default: /var/tmp)
Arguments
---------
The first positional parameter is the backup location (directory path, or
command to pipe backup to). After that you may specify the qubes you'd like to
backup. If not specified, all qubes with `include_in_backups` property set are
included.
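For illustration, the same operation can be driven through the :py:class:`qubes.backup.Backup` API that this tool wraps (a minimal sketch based on the test code in this commit; the qube names, passphrase and destination path below are hypothetical)::

   import qubes
   import qubes.backup

   app = qubes.Qubes()  # default qubes.xml store; dom0 only
   vms = [app.domains['work'], app.domains['personal']]  # hypothetical qubes

   backup = qubes.backup.Backup(app, vms, compressed=True, encrypted=True)
   backup.passphrase = 'example passphrase'  # the CLI reads this via --passphrase-file
   backup.target_dir = '/var/backups'        # the backup_location argument
   backup.backup_do()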
Authors
=======
-------
| Joanna Rutkowska <joanna at invisiblethingslab dot com>
| Rafal Wojtczuk <rafal at invisiblethingslab dot com>
| Marek Marczykowski <marmarek at invisiblethingslab dot com>
| Wojtek Porczyk <woju at invisiblethingslab dot com>
.. vim: ts=3 sw=3 et tw=80

View File

@ -26,6 +26,7 @@ import itertools
import logging
from qubes.utils import size_to_human
import sys
import stat
import os
import fcntl
import subprocess
@ -214,7 +215,7 @@ class SendWorker(Process):
class Backup(object):
class FileToBackup(object):
def __init__(self, file_path, subdir=None):
def __init__(self, file_path, subdir=None, name=None):
sz = qubes.storage.file.get_disk_usage(file_path)
if subdir is None:
@ -229,9 +230,16 @@ class Backup(object):
if len(subdir) > 0 and not subdir.endswith('/'):
subdir += '/'
#: real path to the file
self.path = file_path
#: size of the file
self.size = sz
#: directory in backup archive where file should be placed
self.subdir = subdir
#: use this name in the archive (aka rename)
self.name = os.path.basename(file_path)
if name is not None:
self.name = name
class VMToBackup(object):
def __init__(self, vm, files, subdir):
@ -340,11 +348,10 @@ class Backup(object):
subdir = None
vm_files = []
# TODO this is file pool specific. Change it to a more general
# solution
if vm.volumes['private'] is not None:
path_to_private_img = vm.volumes['private'].path
vm_files.append(self.FileToBackup(path_to_private_img, subdir))
path_to_private_img = vm.storage.export('private')
vm_files.append(self.FileToBackup(path_to_private_img, subdir,
'private.img'))
vm_files.append(self.FileToBackup(vm.icon_path, subdir))
vm_files.extend(self.FileToBackup(i, subdir)
@ -356,10 +363,9 @@ class Backup(object):
vm_files.append(self.FileToBackup(firewall_conf, subdir))
if vm.updateable:
# TODO this is file pool specific. Change it to a more general
# solution
path_to_root_img = vm.volumes['root'].path
vm_files.append(self.FileToBackup(path_to_root_img, subdir))
path_to_root_img = vm.storage.export('root')
vm_files.append(self.FileToBackup(path_to_root_img, subdir,
'root.img'))
files_to_backup[vm.qid] = self.VMToBackup(vm, vm_files, subdir)
# Dom0 user home
@ -592,7 +598,7 @@ class Backup(object):
backup_tempfile = os.path.join(
self.tmpdir, file_info.subdir,
os.path.basename(file_info.path))
file_info.name)
self.log.debug("Using temporary location: {}".format(
backup_tempfile))
@ -609,13 +615,27 @@ class Backup(object):
'-C', os.path.dirname(file_info.path)] +
(['--dereference'] if
file_info.subdir != "dom0-home/" else []) +
['--xform', 's:^%s:%s\\0:' % (
['--xform=s:^%s:%s\\0:' % (
os.path.basename(file_info.path),
file_info.subdir),
os.path.basename(file_info.path)
])
file_stat = os.stat(file_info.path)
if stat.S_ISBLK(file_stat.st_mode) or \
file_info.name != os.path.basename(file_info.path):
# tar doesn't handle the content of a block device, so use our
# own writer; also use it when renaming a file
assert not stat.S_ISDIR(file_stat.st_mode),\
"Renaming directories not supported"
tar_cmdline = ['python', '-m', 'qubes.tarwriter',
'--override-name=%s' % (
os.path.join(file_info.subdir, os.path.basename(
file_info.name))),
file_info.path,
backup_pipe]
if self.compressed:
tar_cmdline.insert(-1,
tar_cmdline.insert(-2,
"--use-compress-program=%s" % self.compression_filter)
self.log.debug(" ".join(tar_cmdline))
@ -859,25 +879,48 @@ class ExtractWorker2(Process):
def __init__(self, queue, base_dir, passphrase, encrypted,
progress_callback, vmproc=None,
compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
verify_only=False):
verify_only=False, relocate=None):
super(ExtractWorker2, self).__init__()
#: queue with files to extract
self.queue = queue
#: paths on the queue are relative to this dir
self.base_dir = base_dir
#: passphrase to decrypt/authenticate data
self.passphrase = passphrase
#: extract those files/directories to alternative locations (truncate,
# but do not unlink, the target beforehand); if a specific file is in
# the map, redirect it accordingly, otherwise check whether the whole
# directory is there
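# example relocation map (VM subdir and target paths are hypothetical):
#   {'appvms/work': '/var/lib/qubes/appvms/work',
#    'appvms/work/private.img': '/dev/qubes_dom0/vm-work-private'}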
self.relocate = relocate
#: is the backup encrypted?
self.encrypted = encrypted
#: is the backup compressed?
self.compressed = compressed
#: what crypto algorithm is used for encryption?
self.crypto_algorithm = crypto_algorithm
#: only verify integrity, don't extract anything
self.verify_only = verify_only
#: progress
self.blocks_backedup = 0
#: inner tar layer extraction (subprocess.Popen instance)
self.tar2_process = None
#: current inner tar archive name
self.tar2_current_file = None
#: set the size of this file when tar reports it on stderr (adjust LVM
# volume size)
self.adjust_output_size = None
#: decompressor subprocess.Popen instance
self.decompressor_process = None
#: decryptor subprocess.Popen instance
self.decryptor_process = None
#: callback reporting progress to UI
self.progress_callback = progress_callback
#: process (subprocess.Popen instance) feeding the data into
# extraction tool
self.vmproc = vmproc
#: pipe to feed the data into tar (use pipe instead of stdin,
# as stdin is used for tar control commands)
self.restore_pipe = os.path.join(self.base_dir, "restore_pipe")
self.log = logging.getLogger('qubes.backup.extract')
@ -908,9 +951,33 @@ class ExtractWorker2(Process):
debug_msg = filter(msg_re.match, new_lines)
self.log.debug('tar2_stderr: {}'.format('\n'.join(debug_msg)))
new_lines = filter(lambda x: not msg_re.match(x), new_lines)
if self.adjust_output_size:
# search for first file size reported by tar, after setting
# self.adjust_output_size (so don't look at self.tar2_stderr)
# this is used only when extracting single-file archive, so don't
# bother with checking file name
file_size_re = re.compile(r"^[^ ]+ [^ ]+/[^ ]+ *([0-9]+) .*")
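# example of a verbose listing line this regex should match (format taken
# from GNU tar verbose output; the name and size below are hypothetical):
#   -rw-r--r-- root/root 2147483648 2016-10-14 12:00 vm5/root.img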
for line in new_lines:
match = file_size_re.match(line)
if match:
file_size = match.groups()[0]
self.resize_lvm(self.adjust_output_size, file_size)
self.adjust_output_size = None
self.tar2_stderr += new_lines
def resize_lvm(self, dev, size):
# FIXME: HACK
try:
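# the command run here is e.g.:
#   sudo lvresize -f -L 2147483648B /dev/qubes_dom0/vm-work-private
# (device path is hypothetical; the size is given in bytes)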
subprocess.check_call(
['sudo', 'lvresize', '-f', '-L', str(size) + 'B', dev],
stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode == 3:
# already at the right size
pass
else:
raise
def run(self):
try:
self.__run__()
@ -929,6 +996,63 @@ class ExtractWorker2(Process):
self.log.error("ERROR: " + unicode(e))
raise e, None, exc_traceback
def handle_dir_relocations(self, dirname):
''' Relocate files in the given directory when it is already extracted
:param dirname: directory path to handle (relative to backup root),
without trailing slash
'''
for old, new in self.relocate:
if not old.startswith(dirname + '/'):
continue
# if the directory is relocated too (it most likely is), the file
# is extracted there
if dirname in self.relocate:
old = old.replace(dirname, self.relocate[dirname], 1)
try:
stat_buf = os.stat(new)
if stat.S_ISBLK(stat_buf.st_mode):
# output file is block device (LVM) - adjust its
# size, otherwise it may fail
# from lack of space
self.resize_lvm(new, stat_buf.st_size)
except OSError: # ENOENT
pass
subprocess.check_call(
['dd', 'if='+old, 'of='+new, 'conv=sparse'])
os.unlink(old)
def cleanup_tar2(self, wait=True, terminate=False):
if self.tar2_process is None:
return
if terminate:
self.tar2_process.terminate()
if wait:
self.tar2_process.wait()
elif self.tar2_process.poll() is None:
return
if self.tar2_process.returncode != 0:
self.collect_tar_output()
self.log.error(
"ERROR: unable to extract files for {0}, tar "
"output:\n {1}".
format(self.tar2_current_file,
"\n ".join(self.tar2_stderr)))
else:
# Finished extracting the tar file
self.collect_tar_output()
self.tar2_process = None
# if that was whole-directory archive, handle
# relocated files now
inner_name = os.path.splitext(self.tar2_current_file)[0]\
.replace(self.base_dir + '/', '')
if os.path.basename(inner_name) == '.':
self.handle_dir_relocations(
os.path.dirname(inner_name))
self.tar2_current_file = None
self.adjust_output_size = None
def __run__(self):
self.log.debug("Started sending thread")
self.log.debug("Moving to dir " + self.base_dir)
@ -944,27 +1068,48 @@ class ExtractWorker2(Process):
if filename.endswith('.000'):
# next file
if self.tar2_process is not None:
if self.tar2_process.wait() != 0:
self.collect_tar_output()
self.log.error(
"ERROR: unable to extract files for {0}, tar "
"output:\n {1}".
format(self.tar2_current_file,
"\n ".join(self.tar2_stderr)))
else:
# Finished extracting the tar file
self.tar2_process = None
self.tar2_current_file = None
self.cleanup_tar2(wait=True, terminate=False)
inner_name = filename.rstrip('.000').replace(
self.base_dir + '/', '')
redirect_stdout = None
if self.relocate and inner_name in self.relocate:
# TODO: add `dd conv=sparse` when removing tar layer
tar2_cmdline = ['tar',
'-%sMvvOf' % ("t" if self.verify_only else "x"),
self.restore_pipe,
inner_name]
output_file = self.relocate[inner_name]
try:
stat_buf = os.stat(output_file)
if stat.S_ISBLK(stat_buf.st_mode):
# output file is block device (LVM) - adjust its
# size during extraction, otherwise it may fail
# from lack of space
self.adjust_output_size = output_file
except OSError: # ENOENT
pass
redirect_stdout = open(output_file, 'w')
elif self.relocate and \
os.path.dirname(inner_name) in self.relocate:
tar2_cmdline = ['tar',
'-%sMf' % ("t" if self.verify_only else "x"),
self.restore_pipe,
'-C', self.relocate[os.path.dirname(inner_name)],
# strip all directories - leave only final filename
'--strip-components', str(inner_name.count(os.sep)),
inner_name]
else:
tar2_cmdline = ['tar',
'-%sMkf' % ("t" if self.verify_only else "x"),
self.restore_pipe,
inner_name]
tar2_cmdline = ['tar',
'-%sMkvf' % ("t" if self.verify_only else "x"),
self.restore_pipe,
os.path.relpath(filename.rstrip('.000'))]
self.log.debug("Running command " + unicode(tar2_cmdline))
self.tar2_process = subprocess.Popen(tar2_cmdline,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
stdout=redirect_stdout)
fcntl.fcntl(self.tar2_process.stderr.fileno(), fcntl.F_SETFL,
fcntl.fcntl(self.tar2_process.stderr.fileno(),
fcntl.F_GETFL) | os.O_NONBLOCK)
@ -1038,11 +1183,9 @@ class ExtractWorker2(Process):
details = "\n".join(self.tar2_stderr)
else:
details = "%s failed" % run_error
self.tar2_process.terminate()
self.tar2_process.wait()
self.tar2_process = None
self.log.error("Error while processing '{}': {}".format(
self.tar2_current_file, details))
self.cleanup_tar2(wait=True, terminate=True)
# Delete the file as we don't need it anymore
self.log.debug("Removing file " + filename)
@ -1050,23 +1193,7 @@ class ExtractWorker2(Process):
os.unlink(self.restore_pipe)
if self.tar2_process is not None:
if filename == QUEUE_ERROR:
self.tar2_process.terminate()
self.tar2_process.wait()
elif self.tar2_process.wait() != 0:
self.collect_tar_output()
raise qubes.exc.QubesException(
"unable to extract files for {0}.{1} Tar command "
"output: %s".
format(self.tar2_current_file,
(" Perhaps the backup is encrypted?"
if not self.encrypted else "",
"\n".join(self.tar2_stderr))))
else:
# Finished extracting the tar file
self.tar2_process = None
self.cleanup_tar2(wait=True, terminate=(filename == QUEUE_ERROR))
self.log.debug("Finished extracting thread")
@ -1074,12 +1201,12 @@ class ExtractWorker3(ExtractWorker2):
def __init__(self, queue, base_dir, passphrase, encrypted,
progress_callback, vmproc=None,
compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
compression_filter=None, verify_only=False):
compression_filter=None, verify_only=False, relocate=None):
super(ExtractWorker3, self).__init__(queue, base_dir, passphrase,
encrypted,
progress_callback, vmproc,
compressed, crypto_algorithm,
verify_only)
verify_only, relocate)
self.compression_filter = compression_filter
os.unlink(self.restore_pipe)
@ -1101,21 +1228,40 @@ class ExtractWorker3(ExtractWorker2):
# next file
if self.tar2_process is not None:
input_pipe.close()
if self.tar2_process.wait() != 0:
self.collect_tar_output()
self.log.error(
"ERROR: unable to extract files for {0}, tar "
"output:\n {1}".
format(self.tar2_current_file,
"\n ".join(self.tar2_stderr)))
else:
# Finished extracting the tar file
self.tar2_process = None
self.tar2_current_file = None
self.cleanup_tar2(wait=True, terminate=False)
inner_name = filename.rstrip('.000').replace(
self.base_dir + '/', '')
redirect_stdout = None
if self.relocate and inner_name in self.relocate:
# TODO: add dd conv=sparse when removing tar layer
tar2_cmdline = ['tar',
'-%svvO' % ("t" if self.verify_only else "x"),
inner_name]
output_file = self.relocate[inner_name]
try:
stat_buf = os.stat(output_file)
if stat.S_ISBLK(stat_buf.st_mode):
# output file is block device (LVM) - adjust its
# size during extraction, otherwise it may fail
# from lack of space
self.adjust_output_size = output_file
except OSError: # ENOENT
pass
redirect_stdout = open(output_file, 'w')
elif self.relocate and \
os.path.dirname(inner_name) in self.relocate:
tar2_cmdline = ['tar',
'-%s' % ("t" if self.verify_only else "x"),
'-C', self.relocate[os.path.dirname(inner_name)],
# strip all directories - leave only final filename
'--strip-components', str(inner_name.count(os.sep)),
inner_name]
else:
tar2_cmdline = ['tar',
'-%sk' % ("t" if self.verify_only else "x"),
inner_name]
tar2_cmdline = ['tar',
'-%sk' % ("t" if self.verify_only else "x"),
os.path.relpath(filename.rstrip('.000'))]
if self.compressed:
if self.compression_filter:
tar2_cmdline.insert(-1,
@ -1140,12 +1286,14 @@ class ExtractWorker3(ExtractWorker2):
self.tar2_process = subprocess.Popen(
tar2_cmdline,
stdin=self.decryptor_process.stdout,
stdout=redirect_stdout,
stderr=subprocess.PIPE)
input_pipe = self.decryptor_process.stdin
else:
self.tar2_process = subprocess.Popen(
tar2_cmdline,
stdin=subprocess.PIPE,
stdout=redirect_stdout,
stderr=subprocess.PIPE)
input_pipe = self.tar2_process.stdin
@ -1184,11 +1332,9 @@ class ExtractWorker3(ExtractWorker2):
self.decryptor_process.terminate()
self.decryptor_process.wait()
self.decryptor_process = None
self.tar2_process.terminate()
self.tar2_process.wait()
self.tar2_process = None
self.log.error("Error while processing '{}': {}".format(
self.tar2_current_file, details))
self.cleanup_tar2(wait=True, terminate=True)
# Delete the file as we don't need it anymore
self.log.debug("Removing file " + filename)
@ -1201,20 +1347,7 @@ class ExtractWorker3(ExtractWorker2):
self.decryptor_process.terminate()
self.decryptor_process.wait()
self.decryptor_process = None
self.tar2_process.terminate()
self.tar2_process.wait()
elif self.tar2_process.wait() != 0:
self.collect_tar_output()
raise qubes.exc.QubesException(
"unable to extract files for {0}.{1} Tar command "
"output: %s".
format(self.tar2_current_file,
(" Perhaps the backup is encrypted?"
if not self.encrypted else "",
"\n".join(self.tar2_stderr))))
else:
# Finished extracting the tar file
self.tar2_process = None
self.cleanup_tar2(terminate=(filename == QUEUE_ERROR))
self.log.debug("Finished extracting thread")
@ -1260,6 +1393,8 @@ class BackupRestoreOptions(object):
self.rename_conflicting = True
#: list of VM names to exclude
self.exclude = []
#: restore VMs into selected storage pool
self.override_pool = None
class BackupRestore(object):
@ -1304,6 +1439,7 @@ class BackupRestore(object):
self.netvm = None
self.name = vm.name
self.orig_template = None
self.restored_vm = None
@property
def good_to_go(self):
@ -1576,7 +1712,7 @@ class BackupRestore(object):
)
return header_data
def _start_inner_extraction_worker(self, queue):
def _start_inner_extraction_worker(self, queue, relocate):
"""Start a worker process, extracting inner layer of bacup archive,
extract them to :py:attr:`tmpdir`.
End the data by pushing QUEUE_FINISHED or QUEUE_ERROR to the queue.
@ -1596,7 +1732,10 @@ class BackupRestore(object):
'crypto_algorithm': self.header_data.crypto_algorithm,
'verify_only': self.options.verify_only,
'progress_callback': self.progress_callback,
'relocate': relocate,
}
self.log.debug('Starting extraction worker in {}, file relocation '
'map: {!r}'.format(self.tmpdir, relocate))
format_version = self.header_data.version
if format_version == 2:
extract_proc = ExtractWorker2(**extractor_params)
@ -1626,7 +1765,7 @@ class BackupRestore(object):
queue.put("qubes.xml.000")
queue.put(QUEUE_FINISHED)
extract_proc = self._start_inner_extraction_worker(queue)
extract_proc = self._start_inner_extraction_worker(queue, None)
extract_proc.join()
if extract_proc.exitcode != 0:
raise qubes.exc.QubesException(
@ -1643,7 +1782,7 @@ class BackupRestore(object):
os.unlink(os.path.join(self.tmpdir, 'qubes.xml'))
return backup_app
def _restore_vm_dirs(self, vms_dirs, vms_size):
def _restore_vm_dirs(self, vms_dirs, vms_size, relocate):
# Currently each VM consists of at most 7 archives (count
# file_to_backup calls in backup_prepare()), but add some safety
# margin for further extensions. Each archive is divided into 100MB
@ -1658,12 +1797,14 @@ class BackupRestore(object):
# retrieve backup from the backup stream (either VM, or dom0 file)
(retrieve_proc, filelist_pipe, error_pipe) = \
self._start_retrieval_process(vms_dirs, limit_count, vms_size)
self._start_retrieval_process(
vms_dirs, limit_count, vms_size)
to_extract = Queue()
# extract data retrieved by retrieve_proc
extract_proc = self._start_inner_extraction_worker(to_extract)
extract_proc = self._start_inner_extraction_worker(
to_extract, relocate)
try:
filename = None
@ -1721,7 +1862,7 @@ class BackupRestore(object):
if retrieve_proc.wait() != 0:
raise qubes.exc.QubesException(
"unable to read the qubes backup file {0} ({1}): {2}"
"unable to read the qubes backup file {0}: {1}"
.format(self.backup_location, error_pipe.read(
MAX_STDERR_BYTES)))
# wait for other processes (if any)
@ -1959,11 +2100,10 @@ class BackupRestore(object):
"updbl": {"func": "'Yes' if vm.updateable else ''"},
"template": {"func": "'n/a' if not hasattr(vm, 'template') is None "
"template": {"func": "'n/a' if not hasattr(vm, 'template') "
"else vm_info.template"},
"netvm": {"func": "'n/a' if vm.provides_network else\
('*' if vm.property_is_default('netvm') else '') +\
"netvm": {"func": "('*' if vm.property_is_default('netvm') else '') +\
vm_info.netvm if vm_info.netvm is not None "
"else '-'"},
@ -2074,25 +2214,57 @@ class BackupRestore(object):
"*** Error while copying file {0} to {1}".format(backup_src_dir,
dst_dir))
@staticmethod
def _templates_first(vms):
def key_function(instance):
if isinstance(instance, qubes.vm.BaseVM):
return isinstance(instance, qubes.vm.templatevm.TemplateVM)
elif hasattr(instance, 'vm'):
return key_function(instance.vm)
else:
return 0
return sorted(vms,
key=key_function,
reverse=True)
def restore_do(self, restore_info):
'''
High level workflow:
1. Create VMs object in host collection (qubes.xml)
2. Create them on disk (vm.create_on_disk)
3. Restore VM data, overriding/converting VM files
4. Apply possible fixups and save qubes.xml
:param restore_info:
:return:
'''
# FIXME handle locking
self._restore_vms_metadata(restore_info)
# Perform VM restoration in backup order
vms_dirs = []
relocate = {}
vms_size = 0
vms = {}
for vm_info in restore_info.values():
assert isinstance(vm_info, self.VMToRestore)
if not vm_info.vm:
continue
if not vm_info.good_to_go:
continue
vm = vm_info.vm
if self.header_data.version >= 2:
if vm.features['backup-size']:
vms_size += int(vm.features['backup-size'])
vms_dirs.append(vm.features['backup-path'])
vms[vm.name] = vm
for vm_info in self._templates_first(restore_info.values()):
vm = vm_info.restored_vm
if vm:
vms_size += int(vm_info.size)
vms_dirs.append(vm_info.subdir)
relocate[vm_info.subdir.rstrip('/')] = vm.dir_path
for name, volume in vm.volumes.items():
if not volume.save_on_stop:
continue
export_path = vm.storage.export(name)
backup_path = os.path.join(
vm_info.vm.dir_path, name + '.img')
if backup_path != export_path:
relocate[
os.path.join(vm_info.subdir, name + '.img')] = \
export_path
if self.header_data.version >= 2:
if 'dom0' in restore_info.keys() and \
@ -2101,7 +2273,8 @@ class BackupRestore(object):
vms_size += restore_info['dom0'].size
try:
self._restore_vm_dirs(vms_dirs=vms_dirs, vms_size=vms_size)
self._restore_vm_dirs(vms_dirs=vms_dirs, vms_size=vms_size,
relocate=relocate)
except qubes.exc.QubesException:
if self.options.verify_only:
raise
@ -2111,6 +2284,22 @@ class BackupRestore(object):
"continuing anyway to restore at least some "
"VMs")
else:
for vm_info in self._templates_first(restore_info.values()):
vm = vm_info.restored_vm
if vm:
try:
self._restore_vm_dir_v1(vm_info.vm.dir_path,
os.path.dirname(vm.dir_path))
except qubes.exc.QubesException as e:
if self.options.verify_only:
raise
else:
self.log.error(
"Failed to restore VM '{}': {}".format(
vm.name, str(e)))
vm.remove_from_disk()
del self.app.domains[vm]
if self.options.verify_only:
self.log.warning(
"Backup verification not supported for this backup format.")
@ -2119,117 +2308,24 @@ class BackupRestore(object):
shutil.rmtree(self.tmpdir)
return
# First load templates, then other VMs
for vm in sorted(vms.values(),
key=lambda x: isinstance(x, qubes.vm.templatevm.TemplateVM),
reverse=True):
if self.canceled:
# only break the loop to save qubes.xml
# with already restored VMs
break
self.log.info("-> Restoring {0}...".format(vm.name))
retcode = subprocess.call(
["mkdir", "-p", os.path.dirname(vm.dir_path)])
if retcode != 0:
self.log.error("*** Cannot create directory: {0}?!".format(
vm.dir_path))
self.log.warning("Skipping VM {}...".format(vm.name))
for vm_info in self._templates_first(restore_info.values()):
if not vm_info.restored_vm:
continue
kwargs = {}
if hasattr(vm, 'template'):
template = restore_info[vm.name].template
# handle potentially renamed template
if template in restore_info \
and restore_info[template].good_to_go:
template = restore_info[template].name
kwargs['template'] = template
new_vm = None
vm_name = restore_info[vm.name].name
try:
# first only minimal set, later clone_properties
# will be called
new_vm = self.app.add_new_vm(
vm.__class__,
name=vm_name,
label=vm.label,
installed_by_rpm=False,
**kwargs)
if os.path.exists(new_vm.dir_path):
move_to_path = tempfile.mkdtemp('', os.path.basename(
new_vm.dir_path), os.path.dirname(new_vm.dir_path))
try:
os.rename(new_vm.dir_path, move_to_path)
self.log.warning(
"*** Directory {} already exists! It has "
"been moved to {}".format(new_vm.dir_path,
move_to_path))
except OSError:
self.log.error(
"*** Directory {} already exists and "
"cannot be moved!".format(new_vm.dir_path))
self.log.warning("Skipping VM {}...".format(
vm.name))
continue
if self.header_data.version == 1:
self._restore_vm_dir_v1(vm.dir_path,
os.path.dirname(new_vm.dir_path))
else:
shutil.move(os.path.join(self.tmpdir,
vm.features['backup-path']),
new_vm.dir_path)
new_vm.storage.verify()
except Exception as err:
self.log.error("ERROR: {0}".format(err))
self.log.warning("*** Skipping VM: {0}".format(vm.name))
if new_vm:
del self.app.domains[new_vm.qid]
continue
# remove no longer needed backup metadata
if 'backup-content' in vm.features:
del vm.features['backup-content']
del vm.features['backup-size']
del vm.features['backup-path']
try:
# exclude VM references - handled manually according to
# restore options
proplist = [prop for prop in new_vm.property_list()
if prop.clone and prop.__name__ not in
['template', 'netvm', 'dispvm_netvm']]
new_vm.clone_properties(vm, proplist=proplist)
except Exception as err:
self.log.error("ERROR: {0}".format(err))
self.log.warning("*** Some VM property will not be "
"restored")
try:
new_vm.fire_event('domain-restore')
vm_info.restored_vm.fire_event('domain-restore')
except Exception as err:
self.log.error("ERROR during appmenu restore: "
"{0}".format(err))
"{0}".format(err))
self.log.warning(
"*** VM '{0}' will not have appmenus".format(vm.name))
"*** VM '{0}' will not have appmenus".format(vm_info.name))
# Set network dependencies - only non-default netvm setting
for vm in vms.values():
vm_info = restore_info[vm.name]
vm_name = vm_info.name
try:
host_vm = self.app.domains[vm_name]
except KeyError:
# Failed/skipped VM
continue
if not vm.property_is_default('netvm'):
if vm_info.netvm in restore_info:
host_vm.netvm = restore_info[vm_info.netvm].name
else:
host_vm.netvm = vm_info.netvm
vm_info.restored_vm.storage.verify()
except Exception as err:
self.log.error("ERROR: {0}".format(err))
if vm_info.restored_vm:
vm_info.restored_vm.remove_from_disk()
del self.app.domains[vm_info.restored_vm]
self.app.save()
@ -2279,4 +2375,114 @@ class BackupRestore(object):
self.log.info("-> Done. Please install updates for all the restored "
"templates.")
def _restore_vms_metadata(self, restore_info):
vms = {}
for vm_info in restore_info.values():
assert isinstance(vm_info, self.VMToRestore)
if not vm_info.vm:
continue
if not vm_info.good_to_go:
continue
vm = vm_info.vm
vms[vm.name] = vm
# First load templates, then other VMs
for vm in self._templates_first(vms.values()):
if self.canceled:
# only break the loop to save qubes.xml
# with already restored VMs
break
self.log.info("-> Restoring {0}...".format(vm.name))
kwargs = {}
if hasattr(vm, 'template'):
template = restore_info[vm.name].template
# handle potentially renamed template
if template in restore_info \
and restore_info[template].good_to_go:
template = restore_info[template].name
kwargs['template'] = template
new_vm = None
vm_name = restore_info[vm.name].name
try:
# first only minimal set, later clone_properties
# will be called
cls = self.app.get_vm_class(vm.__class__.__name__)
new_vm = self.app.add_new_vm(
cls,
name=vm_name,
label=vm.label,
installed_by_rpm=False,
**kwargs)
if os.path.exists(new_vm.dir_path):
move_to_path = tempfile.mkdtemp('', os.path.basename(
new_vm.dir_path), os.path.dirname(new_vm.dir_path))
try:
os.rename(new_vm.dir_path, move_to_path)
self.log.warning(
"*** Directory {} already exists! It has "
"been moved to {}".format(new_vm.dir_path,
move_to_path))
except OSError:
self.log.error(
"*** Directory {} already exists and "
"cannot be moved!".format(new_vm.dir_path))
self.log.warning("Skipping VM {}...".format(
vm.name))
continue
except Exception as err:
self.log.error("ERROR: {0}".format(err))
self.log.warning("*** Skipping VM: {0}".format(vm.name))
if new_vm:
del self.app.domains[new_vm.qid]
continue
# remove no longer needed backup metadata
if 'backup-content' in vm.features:
del vm.features['backup-content']
del vm.features['backup-size']
del vm.features['backup-path']
try:
# exclude VM references - handled manually according to
# restore options
proplist = [prop for prop in new_vm.property_list()
if prop.clone and prop.__name__ not in
['template', 'netvm', 'dispvm_netvm']]
new_vm.clone_properties(vm, proplist=proplist)
except Exception as err:
self.log.error("ERROR: {0}".format(err))
self.log.warning("*** Some VM property will not be "
"restored")
if not self.options.verify_only:
try:
# have it here, to (maybe) patch storage config before
# creating child VMs (template first)
# TODO: adjust volumes config - especially size
new_vm.create_on_disk(pool=self.options.override_pool)
except qubes.exc.QubesException as e:
self.log.warning("Failed to create VM {}: {}".format(
vm.name, str(e)))
del self.app.domains[new_vm]
continue
restore_info[vm.name].restored_vm = new_vm
# Set network dependencies - only non-default netvm setting
for vm in vms.values():
vm_info = restore_info[vm.name]
vm_name = vm_info.name
try:
host_vm = self.app.domains[vm_name]
except KeyError:
# Failed/skipped VM
continue
if not vm.property_is_default('netvm'):
if vm_info.netvm in restore_info:
host_vm.netvm = restore_info[vm_info.netvm].name
else:
host_vm.netvm = vm_info.netvm
# vim:sw=4:et:

View File

@ -181,6 +181,10 @@ class Core2Qubes(qubes.Qubes):
"true":
kwargs[attr] = value
kwargs['hvm'] = "HVm" in vm_class_name
kwargs['provides_network'] = \
vm_class_name in ('QubesNetVm', 'QubesProxyVm')
if vm_class_name == 'QubesNetVm':
kwargs['netvm'] = None
vm = self.add_new_vm(vm_class,
qid=int(element.get('qid')), **kwargs)
services = element.get('services')

View File

@ -463,6 +463,15 @@ class Storage(object):
for target in parsed_xml.xpath(
"//domain/devices/disk/target")])
def export(self, volume):
''' Helper function to export volume (pool.export(volume))'''
assert isinstance(volume, (Volume, basestring)), \
"You need to pass a Volume or pool name as str"
if isinstance(volume, Volume):
return self.pools[volume.name].export(volume)
else:
return self.pools[volume].export(self.vm.volumes[volume])
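# as used elsewhere in this commit: vm.storage.export('private') yields a
# path (plain file or block device, depending on the pool) that the backup
# code reads directly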
class Pool(object):
''' A Pool is used to manage different kind of volumes (File

View File

@ -100,7 +100,12 @@ class ThinPool(qubes.storage.Pool):
def export(self, volume):
''' Returns an object that can be `open()`. '''
return '/dev/' + volume.vid
devpath = '/dev/' + volume.vid
if not os.access(devpath, os.R_OK):
# FIXME: convert to udev rules, and drop after introducing qubesd
subprocess.check_call(['sudo', 'chgrp', 'qubes', devpath])
subprocess.check_call(['sudo', 'chmod', 'g+rw', devpath])
return devpath
def init_volume(self, vm, volume_config):
''' Initialize a :py:class:`qubes.storage.Volume` from `volume_config`.

qubes/tarwriter.py (new file, 206 lines)
View File

@ -0,0 +1,206 @@
#!/usr/bin/python2
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import functools
import subprocess
import tarfile
import io
BUF_SIZE = 409600
class TarSparseInfo(tarfile.TarInfo):
def __init__(self, name="", sparsemap=None):
super(TarSparseInfo, self).__init__(name)
if sparsemap is not None:
self.type = tarfile.GNUTYPE_SPARSE
self.sparsemap = list(sparsemap)
# compact size
self.size = functools.reduce(lambda x, y: x+y[1], sparsemap, 0)
else:
self.sparsemap = []
@property
def realsize(self):
if len(self.sparsemap):
return self.sparsemap[-1][0] + self.sparsemap[-1][1]
else:
return self.size
def sparse_header_chunk(self, index):
if index < len(self.sparsemap):
return ''.join([
tarfile.itn(self.sparsemap[index][0], 12, tarfile.GNU_FORMAT),
tarfile.itn(self.sparsemap[index][1], 12, tarfile.GNU_FORMAT),
])
else:
return '\0' * 12 * 2
def get_gnu_header(self):
'''Part placed in 'prefix' field of posix header'''
parts = [
tarfile.itn(self.mtime, 12, tarfile.GNU_FORMAT), # atime
tarfile.itn(self.mtime, 12, tarfile.GNU_FORMAT), # ctime
tarfile.itn(0, 12, tarfile.GNU_FORMAT), # offset
tarfile.stn('', 4), # longnames
'\0', # unused_pad2
]
parts += [self.sparse_header_chunk(i) for i in range(4)]
parts += [
'\1' if len(self.sparsemap) > 4 else '\0', # isextended
tarfile.itn(self.realsize, 12, tarfile.GNU_FORMAT), # realsize
]
return ''.join(parts)
def get_info(self, encoding, errors):
info = super(TarSparseInfo, self).get_info(encoding, errors)
# place GNU extension into the 'prefix' field of the header
info['prefix'] = self.get_gnu_header()
return info
def tobuf(self, format=tarfile.DEFAULT_FORMAT, encoding=tarfile.ENCODING,
errors="strict"):
# pylint: disable=redefined-builtin
header_buf = super(TarSparseInfo, self).tobuf(format, encoding, errors)
if len(self.sparsemap) > 4:
return header_buf + ''.join(self.create_ext_sparse_headers())
else:
return header_buf
def create_ext_sparse_headers(self):
for ext_hdr in range(4, len(self.sparsemap), 21):
sparse_parts = [self.sparse_header_chunk(i) for i in
range(ext_hdr, ext_hdr+21)]
sparse_parts += '\1' if ext_hdr+21 < len(self.sparsemap) else '\0'
yield tarfile.stn(''.join(sparse_parts), 512)
def get_sparse_map(input_file):
'''
Return a map of the file regions where actual data is present, ignoring
zeroed blocks. The last entry of the map spans to the end of the file, even
if that part is zero-sized (when the file ends with zeros).
This function is performance critical.
:param input_file: io.File object
:return: iterable of (offset, size)
'''
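# worked example: for a 1536-byte file whose first 512-byte block holds data
# and whose remaining two blocks are zeros, this yields (0, 512) and then the
# terminating (1536, 0) entry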
zero_block = bytearray(tarfile.BLOCKSIZE)
buf = bytearray(BUF_SIZE)
in_data_block = False
data_block_start = 0
buf_start_offset = 0
while True:
buf_len = input_file.readinto(buf)
if not buf_len:
break
for offset in range(0, buf_len, tarfile.BLOCKSIZE):
if buf[offset:offset+tarfile.BLOCKSIZE] == zero_block:
if in_data_block:
in_data_block = False
yield (data_block_start,
buf_start_offset+offset-data_block_start)
else:
if not in_data_block:
in_data_block = True
data_block_start = buf_start_offset+offset
buf_start_offset += buf_len
if in_data_block:
yield (data_block_start, buf_start_offset-data_block_start)
else:
# always emit last slice to the input end - otherwise extracted file
# will be truncated
yield (buf_start_offset, 0)
def copy_sparse_data(input_stream, output_stream, sparse_map):
'''Copy data blocks from input to output according to sparse_map
:param input_stream: io.IOBase input instance
:param output_stream: io.IOBase output instance
:param sparse_map: iterable of (offset, size)
'''
buf = bytearray(BUF_SIZE)
for chunk in sparse_map:
input_stream.seek(chunk[0])
left = chunk[1]
while left:
if left > BUF_SIZE:
read = input_stream.readinto(buf)
output_stream.write(buf[:read])
else:
buf_trailer = input_stream.read(left)
read = len(buf_trailer)
output_stream.write(buf_trailer)
left -= read
if not read:
raise Exception('premature EOF')
def finalize(output):
'''Write EOF blocks'''
output.write('\0' * 512)
output.write('\0' * 512)
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--override-name', action='store', dest='override_name',
help='use this name in tar header')
parser.add_argument('--use-compress-program', default=None,
metavar='COMMAND', action='store', dest='use_compress_program',
help='Filter data through COMMAND.')
parser.add_argument('input_file',
help='input file name')
parser.add_argument('output_file', default='-', nargs='?',
help='output file name')
args = parser.parse_args(args)
input_file = io.open(args.input_file, 'rb')
sparse_map = list(get_sparse_map(input_file))
header_name = args.input_file
if args.override_name:
header_name = args.override_name
tar_info = TarSparseInfo(header_name, sparse_map)
if args.output_file == '-':
output = io.open('/dev/stdout', 'wb')
else:
output = io.open(args.output_file, 'wb')
if args.use_compress_program:
compress = subprocess.Popen([args.use_compress_program],
stdin=subprocess.PIPE, stdout=output)
output = compress.stdin
else:
compress = None
output.write(tar_info.tobuf(tarfile.GNU_FORMAT))
copy_sparse_data(input_file, output, sparse_map)
finalize(output)
input_file.close()
output.close()
if compress is not None:
compress.wait()
return compress.returncode
return 0
if __name__ == '__main__':
main()
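The writer is normally spawned as a subprocess (``python -m qubes.tarwriter``), as backup.py above does, but main() also accepts an argument list directly. A minimal usage sketch, with a hypothetical input file, member name and output path:

import qubes.tarwriter

# write a sparse-aware GNU tar archive of a single file, storing it under
# the member name 'vm5/private.img'
qubes.tarwriter.main(['--override-name=vm5/private.img',
                      '/var/lib/qubes/appvms/work/private.img',
                      '/tmp/private.tar'])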

View File

@ -33,27 +33,26 @@
don't run the tests.
"""
import __builtin__
import collections
from distutils import spawn
import functools
import multiprocessing
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import unittest
import __builtin__
from distutils import spawn
import lxml.etree
import time
import qubes.backup
import qubes.config
import qubes.devices
import qubes.events
import qubes.backup
import qubes.exc
import qubes.vm.standalonevm
@ -580,6 +579,25 @@ class SystemTestsMixin(object):
else:
os.unlink(dirpath)
@staticmethod
def _remove_vm_disk_lvm(prefix=VMPREFIX):
''' Remove LVM volumes with given prefix
This is "a bit" drastic, as it removes volumes regardless of volume
group, thin pool etc. But we assume no important data on test system.
'''
try:
volumes = subprocess.check_output(
['sudo', 'lvs', '--noheadings', '-o', 'vg_name,name',
'--separator', '/'])
if ('/' + prefix) not in volumes:
return
subprocess.check_call(['sudo', 'lvremove', '-f'] +
[vol.strip() for vol in volumes.splitlines()
if ('/' + prefix) in vol],
stdout=open(os.devnull, 'w'))
except subprocess.CalledProcessError:
pass
@classmethod
def remove_vms(cls, vms):
@ -624,6 +642,7 @@ class SystemTestsMixin(object):
vmnames.add(name)
for vmname in vmnames:
cls._remove_vm_disk(vmname)
cls._remove_vm_disk_lvm(prefix)
def qrexec_policy(self, service, source, destination, allow=True):
"""
@ -771,192 +790,6 @@ class SystemTestsMixin(object):
shutil.rmtree(mountpoint)
subprocess.check_call(['sudo', 'losetup', '-d', loopdev])
# noinspection PyAttributeOutsideInit
class BackupTestsMixin(SystemTestsMixin):
class BackupErrorHandler(logging.Handler):
def __init__(self, errors_queue, level=logging.NOTSET):
super(BackupTestsMixin.BackupErrorHandler, self).__init__(level)
self.errors_queue = errors_queue
def emit(self, record):
self.errors_queue.put(record.getMessage())
def setUp(self):
super(BackupTestsMixin, self).setUp()
try:
self.init_default_template(self.template)
except AttributeError:
self.init_default_template()
self.error_detected = multiprocessing.Queue()
self.verbose = False
if self.verbose:
print >>sys.stderr, "-> Creating backupvm"
self.backupdir = os.path.join(os.environ["HOME"], "test-backup")
if os.path.exists(self.backupdir):
shutil.rmtree(self.backupdir)
os.mkdir(self.backupdir)
self.error_handler = self.BackupErrorHandler(self.error_detected,
level=logging.WARNING)
backup_log = logging.getLogger('qubes.backup')
backup_log.addHandler(self.error_handler)
def tearDown(self):
super(BackupTestsMixin, self).tearDown()
shutil.rmtree(self.backupdir)
backup_log = logging.getLogger('qubes.backup')
backup_log.removeHandler(self.error_handler)
def fill_image(self, path, size=None, sparse=False):
block_size = 4096
if self.verbose:
print >>sys.stderr, "-> Filling %s" % path
f = open(path, 'w+')
if size is None:
f.seek(0, 2)
size = f.tell()
f.seek(0)
for block_num in xrange(size/block_size):
f.write('a' * block_size)
if sparse:
f.seek(block_size, 1)
f.close()
# NOTE: this was create_basic_vms
def create_backup_vms(self):
template = self.app.default_template
vms = []
vmname = self.make_vm_name('test-net')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testnet = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=template, provides_network=True, label='red')
testnet.create_on_disk()
testnet.features['services/ntpd'] = True
vms.append(testnet)
self.fill_image(testnet.volumes['private'].path, 20*1024*1024)
vmname = self.make_vm_name('test1')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm1 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=template, label='red')
testvm1.uses_default_netvm = False
testvm1.netvm = testnet
testvm1.create_on_disk()
vms.append(testvm1)
self.fill_image(testvm1.volumes['private'].path, 100*1024*1024)
vmname = self.make_vm_name('testhvm1')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm2 = self.app.add_new_vm(qubes.vm.standalonevm.StandaloneVM,
name=vmname,
hvm=True,
label='red')
testvm2.create_on_disk()
self.fill_image(testvm2.volumes['root'].path, 1024 * 1024 * 1024, True)
vms.append(testvm2)
vmname = self.make_vm_name('template')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm3 = self.app.add_new_vm(qubes.vm.templatevm.TemplateVM,
name=vmname, label='red')
testvm3.create_on_disk()
self.fill_image(testvm3.volumes['root'].path, 100 * 1024 * 1024, True)
vms.append(testvm3)
vmname = self.make_vm_name('custom')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm4 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=testvm3, label='red')
testvm4.create_on_disk()
vms.append(testvm4)
self.app.save()
return vms
def make_backup(self, vms, target=None, expect_failure=False, **kwargs):
if target is None:
target = self.backupdir
try:
backup = qubes.backup.Backup(self.app, vms, **kwargs)
except qubes.exc.QubesException as e:
if not expect_failure:
self.fail("QubesException during backup_prepare: %s" % str(e))
else:
raise
backup.passphrase = 'qubes'
backup.target_dir = target
try:
backup.backup_do()
except qubes.exc.QubesException as e:
if not expect_failure:
self.fail("QubesException during backup_do: %s" % str(e))
else:
raise
# FIXME why?
#self.reload_db()
def restore_backup(self, source=None, appvm=None, options=None,
expect_errors=None):
if source is None:
backupfile = os.path.join(self.backupdir,
sorted(os.listdir(self.backupdir))[-1])
else:
backupfile = source
with self.assertNotRaises(qubes.exc.QubesException):
restore_op = qubes.backup.BackupRestore(
self.app, backupfile, appvm, "qubes")
if options:
for key, value in options.iteritems():
setattr(restore_op.options, key, value)
restore_info = restore_op.get_restore_info()
if self.verbose:
print restore_op.get_restore_summary(restore_info)
with self.assertNotRaises(qubes.exc.QubesException):
restore_op.restore_do(restore_info)
# maybe someone forgot to call .save()
self.reload_db()
errors = []
if expect_errors is None:
expect_errors = []
else:
self.assertFalse(self.error_detected.empty(),
"Restore errors expected, but none detected")
while not self.error_detected.empty():
current_error = self.error_detected.get()
if any(map(current_error.startswith, expect_errors)):
continue
errors.append(current_error)
self.assertTrue(len(errors) == 0,
"Error(s) detected during backup_restore_do: %s" %
'\n'.join(errors))
if not appvm and not os.path.isdir(backupfile):
os.unlink(backupfile)
def create_sparse(self, path, size):
f = open(path, "w")
f.truncate(size)
f.close()
def load_tests(loader, tests, pattern): # pylint: disable=unused-argument
# discard any tests from this module, because it hosts base classes
@ -976,6 +809,7 @@ def load_tests(loader, tests, pattern): # pylint: disable=unused-argument
'qubes.tests.vm.mix.net',
'qubes.tests.vm.adminvm',
'qubes.tests.app',
'qubes.tests.tarwriter',
'qubes.tests.tools.qvm_device',
'qubes.tests.tools.qvm_firewall',
'qubes.tests.tools.qvm_ls',

View File

@ -22,24 +22,229 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import hashlib
import logging
import multiprocessing
import os
import shutil
import sys
import qubes
import qubes.backup
import qubes.exc
import qubes.storage.lvm
import qubes.tests
import qubes.tests.storage_lvm
import qubes.vm
import qubes.vm.appvm
import qubes.vm.templatevm
import qubes.vm.qubesvm
class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
def test_000_basic_backup(self):
vms = self.create_backup_vms()
self.make_backup(vms)
self.remove_vms(reversed(vms))
self.restore_backup()
# noinspection PyAttributeOutsideInit
class BackupTestsMixin(qubes.tests.SystemTestsMixin):
class BackupErrorHandler(logging.Handler):
def __init__(self, errors_queue, level=logging.NOTSET):
super(BackupTestsMixin.BackupErrorHandler, self).__init__(level)
self.errors_queue = errors_queue
def emit(self, record):
self.errors_queue.put(record.getMessage())
def setUp(self):
super(BackupTestsMixin, self).setUp()
try:
self.init_default_template(self.template)
except AttributeError:
self.init_default_template()
self.error_detected = multiprocessing.Queue()
self.log.debug("Creating backupvm")
self.backupdir = os.path.join(os.environ["HOME"], "test-backup")
if os.path.exists(self.backupdir):
shutil.rmtree(self.backupdir)
os.mkdir(self.backupdir)
self.error_handler = self.BackupErrorHandler(self.error_detected,
level=logging.WARNING)
backup_log = logging.getLogger('qubes.backup')
backup_log.addHandler(self.error_handler)
def tearDown(self):
super(BackupTestsMixin, self).tearDown()
shutil.rmtree(self.backupdir)
backup_log = logging.getLogger('qubes.backup')
backup_log.removeHandler(self.error_handler)
def fill_image(self, path, size=None, sparse=False):
block_size = 4096
self.log.debug("Filling %s" % path)
f = open(path, 'w+')
if size is None:
f.seek(0, 2)
size = f.tell()
f.seek(0)
for block_num in range(size/block_size):
if sparse:
f.seek(block_size, 1)
f.write('a' * block_size)
f.close()
# NOTE: this was create_basic_vms
def create_backup_vms(self, pool=None):
template = self.app.default_template
vms = []
vmname = self.make_vm_name('test-net')
self.log.debug("Creating %s" % vmname)
testnet = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=template, provides_network=True,
label='red')
testnet.create_on_disk(pool=pool)
testnet.features['services/ntpd'] = True
vms.append(testnet)
self.fill_image(testnet.storage.export('private'), 20*1024*1024)
vmname = self.make_vm_name('test1')
self.log.debug("Creating %s" % vmname)
testvm1 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=template, label='red')
testvm1.uses_default_netvm = False
testvm1.netvm = testnet
testvm1.create_on_disk(pool=pool)
vms.append(testvm1)
self.fill_image(testvm1.storage.export('private'), 100 * 1024 * 1024)
vmname = self.make_vm_name('testhvm1')
self.log.debug("Creating %s" % vmname)
testvm2 = self.app.add_new_vm(qubes.vm.standalonevm.StandaloneVM,
name=vmname,
hvm=True,
label='red')
testvm2.create_on_disk(pool=pool)
self.fill_image(testvm2.storage.export('root'), 1024 * 1024 * 1024, \
True)
vms.append(testvm2)
vmname = self.make_vm_name('template')
self.log.debug("Creating %s" % vmname)
testvm3 = self.app.add_new_vm(qubes.vm.templatevm.TemplateVM,
name=vmname, label='red')
testvm3.create_on_disk(pool=pool)
self.fill_image(testvm3.storage.export('root'), 100 * 1024 * 1024, True)
vms.append(testvm3)
vmname = self.make_vm_name('custom')
self.log.debug("Creating %s" % vmname)
testvm4 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
name=vmname, template=testvm3, label='red')
testvm4.create_on_disk(pool=pool)
vms.append(testvm4)
self.app.save()
return vms
def make_backup(self, vms, target=None, expect_failure=False, **kwargs):
if target is None:
target = self.backupdir
try:
backup = qubes.backup.Backup(self.app, vms, **kwargs)
except qubes.exc.QubesException as e:
if not expect_failure:
self.fail("QubesException during backup_prepare: %s" % str(e))
else:
raise
backup.passphrase = 'qubes'
backup.target_dir = target
try:
backup.backup_do()
except qubes.exc.QubesException as e:
if not expect_failure:
self.fail("QubesException during backup_do: %s" % str(e))
else:
raise
# FIXME why?
#self.reload_db()
def restore_backup(self, source=None, appvm=None, options=None,
expect_errors=None):
if source is None:
backupfile = os.path.join(self.backupdir,
sorted(os.listdir(self.backupdir))[-1])
else:
backupfile = source
with self.assertNotRaises(qubes.exc.QubesException):
restore_op = qubes.backup.BackupRestore(
self.app, backupfile, appvm, "qubes")
if options:
for key, value in options.items():
setattr(restore_op.options, key, value)
restore_info = restore_op.get_restore_info()
self.log.debug(restore_op.get_restore_summary(restore_info))
with self.assertNotRaises(qubes.exc.QubesException):
restore_op.restore_do(restore_info)
# maybe someone forgot to call .save()
self.reload_db()
errors = []
if expect_errors is None:
expect_errors = []
else:
self.assertFalse(self.error_detected.empty(),
"Restore errors expected, but none detected")
while not self.error_detected.empty():
current_error = self.error_detected.get()
if any(map(current_error.startswith, expect_errors)):
continue
errors.append(current_error)
self.assertTrue(len(errors) == 0,
"Error(s) detected during backup_restore_do: %s" %
'\n'.join(errors))
if not appvm and not os.path.isdir(backupfile):
os.unlink(backupfile)
def create_sparse(self, path, size):
f = open(path, "w")
f.truncate(size)
f.close()
def vm_checksum(self, vms):
hashes = {}
for vm in vms:
assert isinstance(vm, qubes.vm.qubesvm.QubesVM)
hashes[vm.name] = {}
for name, volume in vm.volumes.items():
if not volume.rw or not volume.save_on_stop:
continue
vol_path = vm.storage.get_pool(volume).export(volume)
hasher = hashlib.sha1()
with open(vol_path) as afile:
for buf in iter(lambda: afile.read(4096000), b''):
hasher.update(buf)
hashes[vm.name][name] = hasher.hexdigest()
return hashes
def assertCorrectlyRestored(self, orig_vms, orig_hashes):
''' Verify if restored VMs are identical to those before backup.
:param orig_vms: collection of original QubesVM objects
:param orig_hashes: result of :py:meth:`vm_checksum` on original VMs,
before backup
:return:
'''
for vm in orig_vms:
self.assertIn(vm.name, self.app.domains)
restored_vm = self.app.domains[vm.name]
for prop in ('name', 'kernel',
@ -69,42 +274,56 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
vm.name, prop))
for dev_class in vm.devices.keys():
for dev in vm.devices[dev_class]:
self.assertIn(dev, restored_vm.devices[dev_class])
self.assertIn(dev, restored_vm.devices[dev_class],
"VM {} - {} device not restored".format(
vm.name, dev_class))
# TODO: compare disk images
if orig_hashes:
hashes = self.vm_checksum([restored_vm])[restored_vm.name]
self.assertEqual(orig_hashes[vm.name], hashes,
"VM {} - disk images are not properly restored".format(
vm.name))
class TC_00_Backup(BackupTestsMixin, qubes.tests.QubesTestCase):
def test_000_basic_backup(self):
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms)
self.remove_vms(reversed(vms))
self.restore_backup()
self.assertCorrectlyRestored(vms, orig_hashes)
self.remove_vms(reversed(vms))
def test_001_compressed_backup(self):
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms, compressed=True)
self.remove_vms(reversed(vms))
self.restore_backup()
for vm in vms:
self.assertIn(vm.name, self.app.domains)
self.assertCorrectlyRestored(vms, orig_hashes)
def test_002_encrypted_backup(self):
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms, encrypted=True)
self.remove_vms(reversed(vms))
self.restore_backup()
for vm in vms:
self.assertIn(vm.name, self.app.domains)
self.assertCorrectlyRestored(vms, orig_hashes)
def test_003_compressed_encrypted_backup(self):
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms, compressed=True, encrypted=True)
self.remove_vms(reversed(vms))
self.restore_backup()
for vm in vms:
self.assertIn(vm.name, self.app.domains)
self.assertCorrectlyRestored(vms, orig_hashes)
def test_004_sparse_multipart(self):
vms = []
vmname = self.make_vm_name('testhvm2')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
self.log.debug("Creating %s" % vmname)
hvmtemplate = self.app.add_new_vm(
qubes.vm.templatevm.TemplateVM, name=vmname, hvm=True, label='red')
@ -112,27 +331,33 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.fill_image(
os.path.join(hvmtemplate.dir_path, '00file'),
195 * 1024 * 1024 - 4096 * 3)
self.fill_image(hvmtemplate.volumes['private'].path,
self.fill_image(hvmtemplate.storage.export('private'),
195 * 1024 * 1024 - 4096 * 3)
self.fill_image(hvmtemplate.volumes['root'].path, 1024 * 1024 * 1024,
self.fill_image(hvmtemplate.storage.export('root'), 1024 * 1024 * 1024,
sparse=True)
vms.append(hvmtemplate)
self.app.save()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms)
self.remove_vms(reversed(vms))
self.restore_backup()
for vm in vms:
self.assertIn(vm.name, self.app.domains)
self.assertCorrectlyRestored(vms, orig_hashes)
# TODO check vm.backup_timestamp
def test_005_compressed_custom(self):
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms, compression_filter="bzip2")
self.remove_vms(reversed(vms))
self.restore_backup()
for vm in vms:
self.assertIn(vm.name, self.app.domains)
self.assertCorrectlyRestored(vms, orig_hashes)
def test_010_selective_restore(self):
# create backup with internal dependencies (template, netvm etc)
# try restoring only AppVMs (but not templates, netvms) - should
# handle according to options set
self.skipTest('test not implemented')
def test_100_backup_dom0_no_restore(self):
# do not write it into dom0 home itself...
@ -147,6 +372,7 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
:return:
"""
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms)
self.remove_vms(reversed(vms))
test_dir = vms[0].dir_path
@ -158,6 +384,7 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
'*** Directory {} already exists! It has been moved'.format(
test_dir)
])
self.assertCorrectlyRestored(vms, orig_hashes)
def test_210_auto_rename(self):
"""
@ -176,8 +403,59 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
if vm.netvm and not vm.property_is_default('netvm'):
self.assertEqual(restored_vm.netvm.name, vm.netvm.name + '1')
def _find_pool(self, volume_group, thin_pool):
''' Returns the pool matching the specified ``volume_group`` &
``thin_pool``, or None.
'''
pools = [p for p in self.app.pools
if issubclass(p.__class__, qubes.storage.lvm.ThinPool)]
for pool in pools:
if pool.volume_group == volume_group \
and pool.thin_pool == thin_pool:
return pool
return None
class TC_10_BackupVMMixin(qubes.tests.BackupTestsMixin):
@qubes.tests.storage_lvm.skipUnlessLvmPoolExists
def test_300_backup_lvm(self):
volume_group, thin_pool = \
qubes.tests.storage_lvm.DEFAULT_LVM_POOL.split('/', 1)
self.pool = self._find_pool(volume_group, thin_pool)
if not self.pool:
self.pool = self.app.add_pool(
**qubes.tests.storage_lvm.POOL_CONF)
self.created_pool = True
vms = self.create_backup_vms(pool=self.pool)
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms)
self.remove_vms(reversed(vms))
self.restore_backup()
self.assertCorrectlyRestored(vms, orig_hashes)
self.remove_vms(reversed(vms))
@qubes.tests.storage_lvm.skipUnlessLvmPoolExists
def test_301_restore_to_lvm(self):
volume_group, thin_pool = \
qubes.tests.storage_lvm.DEFAULT_LVM_POOL.split('/', 1)
self.pool = self._find_pool(volume_group, thin_pool)
if not self.pool:
self.pool = self.app.add_pool(
**qubes.tests.storage_lvm.POOL_CONF)
self.created_pool = True
vms = self.create_backup_vms()
orig_hashes = self.vm_checksum(vms)
self.make_backup(vms)
self.remove_vms(reversed(vms))
self.restore_backup(options={'override_pool': self.pool.name})
self.assertCorrectlyRestored(vms, orig_hashes)
for vm in vms:
vm = self.app.domains[vm.name]
for volume in vm.volumes.values():
if volume.save_on_stop:
self.assertEqual(volume.pool, self.pool.name)
self.remove_vms(reversed(vms))
class TC_10_BackupVMMixin(BackupTestsMixin):
def setUp(self):
super(TC_10_BackupVMMixin, self).setUp()
self.backupvm = self.app.add_new_vm(


@ -31,6 +31,7 @@ import sys
import re
import qubes.tests
import qubes.tests.int.backup
QUBESXML_R2B2 = '''
<QubesVmCollection updatevm="3" default_kernel="3.7.6-2" default_netvm="3" default_fw_netvm="2" default_template="1" clockvm="2">
@ -143,7 +144,8 @@ compressed={compressed}
compression-filter=gzip
'''
class TC_00_BackupCompatibility(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
class TC_00_BackupCompatibility(
qubes.tests.int.backup.BackupTestsMixin, qubes.tests.QubesTestCase):
def tearDown(self):
self.remove_test_vms(prefix="test-")

qubes/tests/tarwriter.py Normal file

@ -0,0 +1,147 @@
#!/usr/bin/python2
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
import tempfile
import shutil
import qubes.tarwriter
import qubes.tests
class TC_00_TarWriter(qubes.tests.QubesTestCase):
def setUp(self):
super(TC_00_TarWriter, self).setUp()
self.input_path = tempfile.mktemp()
self.output_path = tempfile.mktemp()
self.extract_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.input_path):
os.unlink(self.input_path)
if os.path.exists(self.output_path):
os.unlink(self.output_path)
if os.path.exists(self.extract_dir):
shutil.rmtree(self.extract_dir)
return super(TC_00_TarWriter, self).tearDown()
def assertTarExtractable(self, expected_name=None):
if expected_name is None:
expected_name = self.input_path
with self.assertNotRaises(subprocess.CalledProcessError):
tar_output = subprocess.check_output(
['tar', 'xvf', self.output_path],
cwd=self.extract_dir,
stderr=subprocess.STDOUT)
expected_output = expected_name + '\n'
if expected_name[0] == '/':
expected_output = (
'tar: Removing leading `/\' from member names\n' +
expected_output)
self.assertEqual(tar_output, expected_output)
extracted_path = os.path.join(self.extract_dir,
expected_name.lstrip('/'))
with self.assertNotRaises(subprocess.CalledProcessError):
subprocess.check_call(
['diff', '-q', self.input_path, extracted_path])
# make sure the file is still sparse
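        # (st_blocks counts the blocks actually allocated on disk -- 512-byte
        # units on Linux -- so if the extracted file matches the original's
        # st_blocks and st_size, its holes were preserved by tar)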
orig_stat = os.stat(self.input_path)
extracted_stat = os.stat(extracted_path)
self.assertEqual(orig_stat.st_blocks, extracted_stat.st_blocks)
self.assertEqual(orig_stat.st_size, extracted_stat.st_size)
def write_sparse_chunks(self, num_chunks):
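        # writes num_chunks data chunks of 4096 bytes at 8192-byte intervals,
        # leaving a 4096-byte hole between consecutive chunks, so any file
        # with more than one chunk is sparse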
with open(self.input_path, 'w') as f:
for i in range(num_chunks):
f.seek(8192 * i)
f.write('a' * 4096)
def test_000_simple(self):
self.write_sparse_chunks(1)
with open(self.input_path, 'w') as f:
f.write('a' * 4096)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_001_simple_sparse2(self):
self.write_sparse_chunks(2)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_002_simple_sparse3(self):
# tar header contains info about 4 chunks, check for off-by-one errors
self.write_sparse_chunks(3)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_003_simple_sparse4(self):
# tar header contains info about 4 chunks, check for off-by-one errors
self.write_sparse_chunks(4)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_004_simple_sparse5(self):
# tar header contains info about 4 chunks, check for off-by-one errors
self.write_sparse_chunks(5)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_005_simple_sparse24(self):
# tar header contains info about 4 chunks, next header contains 21 of
# them, check for off-by-one errors
self.write_sparse_chunks(24)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_006_simple_sparse25(self):
# tar header contains info about 4 chunks, next header contains 21 of
# them, check for off-by-one errors
self.write_sparse_chunks(25)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_007_simple_sparse26(self):
# tar header contains info about 4 chunks, next header contains 21 of
# them, check for off-by-one errors
self.write_sparse_chunks(26)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_010_override_name(self):
self.write_sparse_chunks(1)
qubes.tarwriter.main(['--override-name',
'different-name', self.input_path, self.output_path])
self.assertTarExtractable(expected_name='different-name')
def test_011_empty(self):
self.write_sparse_chunks(0)
qubes.tarwriter.main([self.input_path, self.output_path])
self.assertTarExtractable()
def test_012_gzip(self):
self.write_sparse_chunks(0)
qubes.tarwriter.main([
'--use-compress-program=gzip', self.input_path, self.output_path])
with self.assertNotRaises(subprocess.CalledProcessError):
subprocess.check_call(['gzip', '--test', self.output_path])
self.assertTarExtractable()
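The chunk counts in the tests above track the sparse-header layout spelled out in their comments: the base tar header describes up to 4 sparse chunks and each follow-up sparse header describes 21 more, so 4/5 and 25/26 chunks are the off-by-one boundaries being probed. A minimal sketch of that arithmetic (illustrative only, not part of the commit):

    def sparse_headers_needed(num_chunks):
        # base tar header describes up to 4 sparse chunks; each additional
        # sparse header describes up to 21 more (per the test comments above)
        if num_chunks <= 4:
            return 1
        return 1 + (num_chunks - 4 + 20) // 21

    for n in (3, 4, 5, 24, 25, 26):
        # 3 -> 1, 4 -> 1, 5 -> 2, 24 -> 2, 25 -> 2, 26 -> 3 header blocks
        print("%d chunks need %d header blocks" % (n, sparse_headers_needed(n)))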

qubes/tools/qvm_backup.py Normal file

@ -0,0 +1,185 @@
#!/usr/bin/python2
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import getpass
import locale
import os
import sys
import qubes.backup
import qubes.tools
import qubes.utils
parser = qubes.tools.QubesArgumentParser(want_force_root=True)
parser.add_argument("--exclude", "-x", action="append",
dest="exclude_list", default=[],
help="Exclude the specified VM from the backup (may be "
"repeated)")
parser.add_argument("--dest-vm", "-d", action="store",
dest="appvm", default=None,
help="Specify the destination VM to which the backup "
"will be sent (implies -e)")
parser.add_argument("--encrypt", "-e", action="store_true", dest="encrypted",
default=False,
help="Encrypt the backup")
parser.add_argument("--no-encrypt", action="store_true",
dest="no_encrypt", default=False,
help="Skip encryption even if sending the backup to a "
"VM")
parser.add_argument("--passphrase-file", "-p", action="store",
dest="pass_file", default=None,
help="Read passphrase from a file, or use '-' to read "
"from stdin")
parser.add_argument("--enc-algo", "-E", action="store",
dest="crypto_algorithm", default=None,
help="Specify a non-default encryption algorithm. For a "
"list of supported algorithms, execute 'openssl "
"list-cipher-algorithms' (implies -e)")
parser.add_argument("--hmac-algo", "-H", action="store",
dest="hmac_algorithm", default=None,
help="Specify a non-default HMAC algorithm. For a list "
"of supported algorithms, execute 'openssl "
"list-message-digest-algorithms'")
parser.add_argument("--compress", "-z", action="store_true", dest="compressed",
default=False,
help="Compress the backup")
parser.add_argument("--compress-filter", "-Z", action="store",
dest="compression_filter", default=False,
help="Specify a non-default compression filter program "
"(default: gzip)")
parser.add_argument("--tmpdir", action="store", dest="tmpdir", default=None,
help="Specify a temporary directory (if you have at least "
"1GB free RAM in dom0, use of /tmp is advised) ("
"default: /var/tmp)")
parser.add_argument("backup_location", action="store",
help="Backup location (directory path, or command to pipe backup to)")
parser.add_argument("vms", nargs="*", action=qubes.tools.VmNameAction,
help="Backup only those VMs")
def main(args=None):
args = parser.parse_args(args)
appvm = None
if args.appvm:
try:
appvm = args.app.domains[args.appvm]
except KeyError:
parser.error('no such domain: {!r}'.format(args.appvm))
args.app.log.info(("NOTE: VM {} will be excluded because it is "
"the backup destination.").format(args.appvm))
if appvm:
args.exclude_list.append(appvm.name)
if args.appvm or args.crypto_algorithm:
args.encrypted = True
if args.no_encrypt:
args.encrypted = False
try:
backup = qubes.backup.Backup(args.app,
args.domains if args.domains else None,
exclude_list=args.exclude_list)
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
# unreachable - error_runtime will raise SystemExit
return 1
backup.target_dir = args.backup_location
if not appvm:
if os.path.isdir(args.backup_location):
stat = os.statvfs(args.backup_location)
else:
stat = os.statvfs(os.path.dirname(args.backup_location))
backup_fs_free_sz = stat.f_bsize * stat.f_bavail
print()
if backup.total_backup_bytes > backup_fs_free_sz:
parser.error_runtime("Not enough space available on the "
"backup filesystem!")
args.app.log.info("Available space: {0}".format(
qubes.utils.size_to_human(backup_fs_free_sz)))
else:
stat = os.statvfs('/var/tmp')
backup_fs_free_sz = stat.f_bsize * stat.f_bavail
print()
if backup_fs_free_sz < 1000000000:
parser.error_runtime("Not enough space available "
"on the local filesystem (1GB required for temporary files)!")
if not appvm.is_running():
appvm.start()
if not args.encrypted:
args.app.log.info("WARNING: The backup will NOT be encrypted!")
if args.pass_file is not None:
pass_f = open(args.pass_file) if args.pass_file != "-" else sys.stdin
passphrase = pass_f.readline().rstrip()
if pass_f is not sys.stdin:
pass_f.close()
else:
if raw_input("Do you want to proceed? [y/N] ").upper() != "Y":
return 0
prompt = ("Please enter the passphrase that will be used to {}verify "
"the backup: ").format('encrypt and ' if args.encrypted else '')
passphrase = getpass.getpass(prompt)
if getpass.getpass("Enter again for verification: ") != passphrase:
parser.error_runtime("Passphrase mismatch!")
backup.encrypted = args.encrypted
backup.compressed = args.compressed
if args.compression_filter:
backup.compression_filter = args.compression_filter
encoding = sys.stdin.encoding or locale.getpreferredencoding()
backup.passphrase = passphrase.decode(encoding)
if args.hmac_algorithm:
backup.hmac_algorithm = args.hmac_algorithm
if args.crypto_algorithm:
backup.crypto_algorithm = args.crypto_algorithm
if args.tmpdir:
backup.tmpdir = args.tmpdir
if appvm:
backup.target_vm = appvm
try:
backup.backup_do()
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
print()
args.app.log.info("Backup completed.")
return 0
if __name__ == '__main__':
main()
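For illustration, a minimal sketch of driving this tool's main() directly rather than via the qvm-backup wrapper (module path as packaged in the spec file below; the backup directory, passphrase file and qube names are hypothetical):

    import qubes.tools.qvm_backup as qvm_backup

    # compressed, encrypted backup of two qubes, passphrase read from a file
    qvm_backup.main([
        '--compress', '--encrypt',
        '--passphrase-file', '/home/user/backup-pass',
        '/var/backups/qubes',      # backup_location (hypothetical path)
        'work', 'personal',        # qubes to back up (hypothetical names)
    ])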


@ -0,0 +1,261 @@
#!/usr/bin/python2
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import getpass
import locale
import sys
import qubes.backup
import qubes.tools
import qubes.utils
parser = qubes.tools.QubesArgumentParser(want_force_root=True)
parser.add_argument("--verify-only", action="store_true",
dest="verify_only", default=False,
help="Verify backup integrity without restoring any "
"data")
parser.add_argument("--skip-broken", action="store_true", dest="skip_broken",
default=False,
help="Do not restore VMs that have missing TemplateVMs "
"or NetVMs")
parser.add_argument("--ignore-missing", action="store_true",
dest="ignore_missing", default=False,
help="Restore VMs even if their associated TemplateVMs "
"and NetVMs are missing")
parser.add_argument("--skip-conflicting", action="store_true",
dest="skip_conflicting", default=False,
help="Do not restore VMs that are already present on "
"the host")
parser.add_argument("--rename-conflicting", action="store_true",
dest="rename_conflicting", default=False,
help="Restore VMs that are already present on the host "
"under different names")
parser.add_argument("--replace-template", action="append",
dest="replace_template", default=[],
help="Restore VMs using another TemplateVM; syntax: "
"old-template-name:new-template-name (may be "
"repeated)")
parser.add_argument("-x", "--exclude", action="append", dest="exclude",
default=[],
help="Skip restore of specified VM (may be repeated)")
parser.add_argument("--skip-dom0-home", action="store_false", dest="dom0_home",
default=True,
help="Do not restore dom0 user home directory")
parser.add_argument("--ignore-username-mismatch", action="store_true",
dest="ignore_username_mismatch", default=False,
help="Ignore dom0 username mismatch when restoring home "
"directory")
parser.add_argument("-d", "--dest-vm", action="store", dest="appvm",
help="Specify VM containing the backup to be restored")
parser.add_argument("-p", "--passphrase-file", action="store",
dest="pass_file", default=None,
help="Read passphrase from file, or use '-' to read from stdin")
parser.add_argument('backup_location', action='store',
help="Backup directory name, or command to pipe from")
parser.add_argument('vms', nargs='*', action='store', default=[],
help='Restore only those VMs')
def handle_broken(app, args, restore_info):
there_are_conflicting_vms = False
there_are_missing_templates = False
there_are_missing_netvms = False
dom0_username_mismatch = False
for vm_info in restore_info.values():
assert isinstance(vm_info, qubes.backup.BackupRestore.VMToRestore)
if qubes.backup.BackupRestore.VMToRestore.EXCLUDED in vm_info.problems:
continue
if qubes.backup.BackupRestore.VMToRestore.MISSING_TEMPLATE in \
vm_info.problems:
there_are_missing_templates = True
if qubes.backup.BackupRestore.VMToRestore.MISSING_NETVM in \
vm_info.problems:
there_are_missing_netvms = True
if qubes.backup.BackupRestore.VMToRestore.ALREADY_EXISTS in \
vm_info.problems:
there_are_conflicting_vms = True
if qubes.backup.BackupRestore.Dom0ToRestore.USERNAME_MISMATCH in \
vm_info.problems:
dom0_username_mismatch = True
if there_are_conflicting_vms:
app.log.error(
"*** There are VMs with conflicting names on the host! ***")
if args.skip_conflicting:
app.log.error(
"Those VMs will not be restored. "
"The host VMs will NOT be overwritten.")
else:
raise qubes.exc.QubesException(
"Remove VMs with conflicting names from the host "
"before proceeding.\n"
"Or use --skip-conflicting to restore only those VMs that "
"do not exist on the host.\n"
"Or use --rename-conflicting to restore those VMs under "
"modified names (with numbers at the end).")
app.log.info("The above VMs will be copied and added to your system.")
app.log.info("Exisiting VMs will NOT be removed.")
if there_are_missing_templates:
app.log.warning("*** One or more TemplateVMs are missing on the "
"host! ***")
if not (args.skip_broken or args.ignore_missing):
raise qubes.exc.QubesException(
"Install them before proceeding with the restore."
"Or pass: --skip-broken or --ignore-missing.")
elif args.skip_broken:
app.log.warning("Skipping broken entries: VMs that depend on "
"missing TemplateVMs will NOT be restored.")
elif args.ignore_missing:
app.log.warning("Ignoring missing entries: VMs that depend "
"on missing TemplateVMs will NOT be restored.")
else:
raise qubes.exc.QubesException(
"INTERNAL ERROR! Please report this to the Qubes OS team!")
if there_are_missing_netvms:
app.log.warning("*** One or more NetVMs are missing on the "
"host! ***")
if not (args.skip_broken or args.ignore_missing):
raise qubes.exc.QubesException(
"Install them before proceeding with the restore."
"Or pass: --skip-broken or --ignore-missing.")
elif args.skip_broken:
app.log.warning("Skipping broken entries: VMs that depend on "
"missing NetVMs will NOT be restored.")
elif args.ignore_missing:
app.log.warning("Ignoring missing entries: VMs that depend "
"on missing NetVMs will NOT be restored.")
else:
raise qubes.exc.QubesException(
"INTERNAL ERROR! Please report this to the Qubes OS team!")
if 'dom0' in restore_info.keys() and args.dom0_home:
if dom0_username_mismatch:
app.log.warning("*** Dom0 username mismatch! This can break "
"some settings! ***")
if not args.ignore_username_mismatch:
raise qubes.exc.QubesException(
"Skip restoring the dom0 home directory "
"(--skip-dom0-home), or pass "
"--ignore-username-mismatch to continue anyway.")
else:
app.log.warning("Continuing as directed.")
app.log.warning("NOTE: Before restoring the dom0 home directory, "
"a new directory named "
"'home-pre-restore-<current-time>' will be "
"created inside the dom0 home directory. If any "
"restored files conflict with existing files, "
"the existing files will be moved to this new "
"directory.")
def main(args=None):
# pylint: disable=too-many-return-statements
args = parser.parse_args(args)
appvm = None
if args.appvm:
try:
appvm = args.app.domains[args.appvm]
except KeyError:
parser.error('no such domain: {!r}'.format(args.appvm))
if args.pass_file is not None:
pass_f = open(args.pass_file) if args.pass_file != "-" else sys.stdin
passphrase = pass_f.readline().rstrip()
if pass_f is not sys.stdin:
pass_f.close()
else:
passphrase = getpass.getpass("Please enter the passphrase to verify "
"and (if encrypted) decrypt the backup: ")
encoding = sys.stdin.encoding or locale.getpreferredencoding()
# pylint: disable=redefined-variable-type
passphrase = passphrase.decode(encoding)
args.app.log.info("Checking backup content...")
try:
backup = qubes.backup.BackupRestore(args.app, args.backup_location,
appvm, passphrase)
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
# unreachable - error_runtime will raise SystemExit
return 1
if args.ignore_missing:
backup.options.use_default_template = True
backup.options.use_default_netvm = True
if args.replace_template:
backup.options.replace_template = args.replace_template
if args.rename_conflicting:
backup.options.rename_conflicting = True
if not args.dom0_home:
backup.options.dom0_home = False
if args.ignore_username_mismatch:
backup.options.ignore_username_mismatch = True
if args.exclude:
backup.options.exclude = args.exclude
if args.verify_only:
backup.options.verify_only = True
restore_info = None
try:
restore_info = backup.get_restore_info()
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
print(backup.get_restore_summary(restore_info))
try:
handle_broken(args.app, args, restore_info)
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
if args.pass_file is None:
if raw_input("Do you want to proceed? [y/N] ").upper() != "Y":
exit(0)
try:
backup.restore_do(restore_info)
except qubes.exc.QubesException as e:
parser.error_runtime(str(e))
if __name__ == '__main__':
main()
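And a matching sketch for the restore tool (module path as packaged in the spec file below; the backup path, passphrase file and template names are placeholders):

    import qubes.tools.qvm_backup_restore as qvm_backup_restore

    # integrity check only; reading the passphrase from a file also skips the
    # interactive "Do you want to proceed?" prompt
    qvm_backup_restore.main([
        '--verify-only',
        '--passphrase-file', '/home/user/backup-pass',
        '/var/backups/qubes/backup-2016.bin',
    ])

    # real restore: rename qubes that clash with existing ones and map a
    # missing template onto an installed one
    qvm_backup_restore.main([
        '--rename-conflicting',
        '--replace-template', 'fedora-23:fedora-24',
        '--passphrase-file', '/home/user/backup-pass',
        '/var/backups/qubes/backup-2016.bin',
    ])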


@ -1559,7 +1559,7 @@ def _patch_pool_config(config, pool=None, pools=None):
name = config['name']
if pool and is_exportable:
if pool and is_exportable and config['pool'] == 'default':
config['pool'] = str(pool)
elif pool and not is_exportable:
pass


@ -220,6 +220,7 @@ fi
%{python_sitelib}/qubes/exc.py*
%{python_sitelib}/qubes/log.py*
%{python_sitelib}/qubes/rngdoc.py*
%{python_sitelib}/qubes/tarwriter.py*
%{python_sitelib}/qubes/utils.py*
%dir %{python_sitelib}/qubes/vm
@ -250,6 +251,8 @@ fi
%{python_sitelib}/qubes/tools/qubes_prefs.py*
%{python_sitelib}/qubes/tools/qvm_block.py*
%{python_sitelib}/qubes/tools/qubes_lvm.py*
%{python_sitelib}/qubes/tools/qvm_backup.py*
%{python_sitelib}/qubes/tools/qvm_backup_restore.py*
%{python_sitelib}/qubes/tools/qvm_create.py*
%{python_sitelib}/qubes/tools/qvm_device.py*
%{python_sitelib}/qubes/tools/qvm_features.py*
@ -288,6 +291,7 @@ fi
%{python_sitelib}/qubes/tests/storage.py*
%{python_sitelib}/qubes/tests/storage_file.py*
%{python_sitelib}/qubes/tests/storage_lvm.py*
%{python_sitelib}/qubes/tests/tarwriter.py*
%dir %{python_sitelib}/qubes/tests/vm
%{python_sitelib}/qubes/tests/vm/__init__.py*