From 68ed06a2005c3f4639313605c1f6111409348c69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 14 Jul 2017 04:14:46 +0200 Subject: [PATCH 01/17] Don't try to set 'created-by-' tag when cloning VM This tag can't be set from outside of qubesd. --- qubesadmin/app.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qubesadmin/app.py b/qubesadmin/app.py index f8a5424..f0ca78f 100644 --- a/qubesadmin/app.py +++ b/qubesadmin/app.py @@ -344,6 +344,8 @@ class QubesBase(qubesadmin.base.PropertyHolder): raise for tag in src_vm.tags: + if tag.startswith('created-by-'): + continue try: dst_vm.tags.add(tag) except qubesadmin.exc.QubesException as e: From 268a3453a9b08c4a7ac9c97ca07a3da4f0c1fd76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 14 Jul 2017 04:15:47 +0200 Subject: [PATCH 02/17] backup: initial support for backup restore over Admin API The code is mostly copied from core-admin. QubesOS/qubes-issues#1214 --- ci/pylintrc | 1 + qubesadmin/backup/__init__.py | 1918 +++++++++++++++++++++++++++++++++ qubesadmin/backup/core2.py | 290 +++++ qubesadmin/backup/core3.py | 145 +++ 4 files changed, 2354 insertions(+) create mode 100644 qubesadmin/backup/__init__.py create mode 100644 qubesadmin/backup/core2.py create mode 100644 qubesadmin/backup/core3.py diff --git a/ci/pylintrc b/ci/pylintrc index 7583cd8..17f7984 100644 --- a/ci/pylintrc +++ b/ci/pylintrc @@ -8,6 +8,7 @@ disable= bad-continuation, duplicate-code, fixme, + cyclic-import, locally-disabled, locally-enabled diff --git a/qubesadmin/backup/__init__.py b/qubesadmin/backup/__init__.py new file mode 100644 index 0000000..2d935b7 --- /dev/null +++ b/qubesadmin/backup/__init__.py @@ -0,0 +1,1918 @@ +# -*- encoding: utf8 -*- +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2017 Marek Marczykowski-Górecki +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU 
Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program; if not, see . + +'''Qubes backup''' + +import collections +import errno +import fcntl +import functools +import grp +import logging +import multiprocessing +from multiprocessing import Queue, Process +import os +import pwd +import re +import shutil +import subprocess +import sys +import tempfile +import termios +import time + +import qubesadmin +import qubesadmin.vm +from qubesadmin.devices import DeviceAssignment +from qubesadmin.exc import QubesException +from qubesadmin.utils import size_to_human + +# must be picklable +QUEUE_FINISHED = "!!!FINISHED" +QUEUE_ERROR = "!!!ERROR" + +HEADER_FILENAME = 'backup-header' +DEFAULT_CRYPTO_ALGORITHM = 'aes-256-cbc' +# 'scrypt' is not exactly HMAC algorithm, but a tool we use to +# integrity-protect the data +DEFAULT_HMAC_ALGORITHM = 'scrypt' +DEFAULT_COMPRESSION_FILTER = 'gzip' +# Maximum size of error message get from process stderr (including VM process) +MAX_STDERR_BYTES = 1024 +# header + qubes.xml max size +HEADER_QUBES_XML_MAX_SIZE = 1024 * 1024 +# hmac file max size - regardless of backup format version! 
+HMAC_MAX_SIZE = 4096 + +BLKSIZE = 512 + +_re_alphanum = re.compile(r'^[A-Za-z0-9-]*$') + +class BackupCanceledError(QubesException): + '''Exception raised when backup/restore was cancelled''' + def __init__(self, msg, tmpdir=None): + super(BackupCanceledError, self).__init__(msg) + self.tmpdir = tmpdir + +class BackupHeader(object): + '''Structure describing backup-header file included as the first file in + backup archive + ''' + header_keys = { + 'version': 'version', + 'encrypted': 'encrypted', + 'compressed': 'compressed', + 'compression-filter': 'compression_filter', + 'crypto-algorithm': 'crypto_algorithm', + 'hmac-algorithm': 'hmac_algorithm', + 'backup-id': 'backup_id' + } + bool_options = ['encrypted', 'compressed'] + int_options = ['version'] + + def __init__(self, + header_data=None, + version=None, + encrypted=None, + compressed=None, + compression_filter=None, + hmac_algorithm=None, + crypto_algorithm=None, + backup_id=None): + # repeat the list to help code completion... + self.version = version + self.encrypted = encrypted + self.compressed = compressed + # Options introduced in backup format 3+, which always have a header, + # so no need for fallback in function parameter + self.compression_filter = compression_filter + self.hmac_algorithm = hmac_algorithm + self.crypto_algorithm = crypto_algorithm + self.backup_id = backup_id + + if header_data is not None: + self.load(header_data) + + def load(self, untrusted_header_text): + """Parse backup header file. + + :param untrusted_header_text: header content + :type untrusted_header_text: basestring + + .. warning:: + This function may be exposed to not yet verified header, + so is security critical. 
+ """ + try: + untrusted_header_text = untrusted_header_text.decode('ascii') + except UnicodeDecodeError: + raise QubesException( + "Non-ASCII characters in backup header") + for untrusted_line in untrusted_header_text.splitlines(): + if untrusted_line.count('=') != 1: + raise QubesException("Invalid backup header") + key, value = untrusted_line.strip().split('=', 1) + if not _re_alphanum.match(key): + raise QubesException("Invalid backup header (" + "key)") + if key not in self.header_keys.keys(): + # Ignoring unknown option + continue + if not _re_alphanum.match(value): + raise QubesException("Invalid backup header (" + "value)") + if getattr(self, self.header_keys[key]) is not None: + raise QubesException( + "Duplicated header line: {}".format(key)) + if key in self.bool_options: + value = value.lower() in ["1", "true", "yes"] + elif key in self.int_options: + value = int(value) + setattr(self, self.header_keys[key], value) + + self.validate() + + def validate(self): + '''Validate header data, according to header version''' + if self.version == 1: + # header not really present + pass + elif self.version in [2, 3, 4]: + expected_attrs = ['version', 'encrypted', 'compressed', + 'hmac_algorithm'] + if self.encrypted and self.version < 4: + expected_attrs += ['crypto_algorithm'] + if self.version >= 3 and self.compressed: + expected_attrs += ['compression_filter'] + if self.version >= 4: + expected_attrs += ['backup_id'] + for key in expected_attrs: + if getattr(self, key) is None: + raise QubesException( + "Backup header lack '{}' info".format(key)) + else: + raise QubesException( + "Unsupported backup version {}".format(self.version)) + + def save(self, filename): + '''Save backup header into a file''' + with open(filename, "w") as f_header: + # make sure 'version' is the first key + f_header.write('version={}\n'.format(self.version)) + for key, attr in self.header_keys.items(): + if key == 'version': + continue + if getattr(self, attr) is None: + continue + 
f_header.write("{!s}={!s}\n".format(key, getattr(self, attr))) + +def launch_proc_with_pty(args, stdin=None, stdout=None, stderr=None, echo=True): + """Similar to pty.fork, but handle stdin/stdout according to parameters + instead of connecting to the pty + + :return tuple (subprocess.Popen, pty_master) + """ + + def set_ctty(ctty_fd, master_fd): + '''Set controlling terminal''' + os.setsid() + os.close(master_fd) + fcntl.ioctl(ctty_fd, termios.TIOCSCTTY, 0) + if not echo: + termios_p = termios.tcgetattr(ctty_fd) + # termios_p.c_lflags + termios_p[3] &= ~termios.ECHO + termios.tcsetattr(ctty_fd, termios.TCSANOW, termios_p) + (pty_master, pty_slave) = os.openpty() + p = subprocess.Popen(args, stdin=stdin, stdout=stdout, + stderr=stderr, + preexec_fn=lambda: set_ctty(pty_slave, pty_master)) + os.close(pty_slave) + return p, open(pty_master, 'wb+', buffering=0) + +def launch_scrypt(action, input_name, output_name, passphrase): + ''' + Launch 'scrypt' process, pass passphrase to it and return + subprocess.Popen object. 
+ + :param action: 'enc' or 'dec' + :param input_name: input path or '-' for stdin + :param output_name: output path or '-' for stdout + :param passphrase: passphrase + :return: subprocess.Popen object + ''' + command_line = ['scrypt', action, input_name, output_name] + (p, pty) = launch_proc_with_pty(command_line, + stdin=subprocess.PIPE if input_name == '-' else None, + stdout=subprocess.PIPE if output_name == '-' else None, + stderr=subprocess.PIPE, + echo=False) + if action == 'enc': + prompts = (b'Please enter passphrase: ', b'Please confirm passphrase: ') + else: + prompts = (b'Please enter passphrase: ',) + for prompt in prompts: + actual_prompt = p.stderr.read(len(prompt)) + if actual_prompt != prompt: + raise QubesException( + 'Unexpected prompt from scrypt: {}'.format(actual_prompt)) + pty.write(passphrase.encode('utf-8') + b'\n') + pty.flush() + # save it here, so garbage collector would not close it (which would kill + # the child) + p.pty = pty + return p + +class ExtractWorker3(Process): + '''Process for handling inner tar layer of backup archive''' + # pylint: disable=too-many-instance-attributes + def __init__(self, queue, base_dir, passphrase, encrypted, + progress_callback, vmproc=None, + compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM, + compression_filter=None, verify_only=False, handlers=None): + '''Start inner tar extraction worker + + The purpose of this class is to process files extracted from outer + archive layer and pass to appropriate handlers. Input files are given + through a queue. Insert :py:obj:`QUEUE_FINISHED` or + :py:obj:`QUEUE_ERROR` to end data processing (either cleanly, + or forcefully). + + Handlers are given as a map filename -> (data_func, size_func), + where data_func is called with file-like object to process, + and size_func is called with file size as argument. Note that + data_func and size_func may be called simultaneusly, in a different + processes. 
+ + :param multiprocessing.Queue queue: a queue with filenames to + process; those files needs to be given as full path, inside *base_dir* + :param str base_dir: directory where all files to process live + :param str passphrase: passphrase to decrypt the data + :param bool encrypted: is encryption applied? + :param callable progress_callback: report extraction progress + :param subprocess.Popen vmproc: process extracting outer layer, + given here to monitor + it for failures (when it exits with non-zero exit code, inner layer + processing is stopped) + :param bool compressed: is the data compressed? + :param str crypto_algorithm: encryption algorithm, either `scrypt` or an + algorithm supported by openssl + :param str compression_filter: compression program, `gzip` by default + :param bool verify_only: only verify data integrity, do not extract + :param dict handlers: handlers for actual data + ''' + super(ExtractWorker3, self).__init__() + #: queue with files to extract + self.queue = queue + #: paths on the queue are relative to this dir + self.base_dir = base_dir + #: passphrase to decrypt/authenticate data + self.passphrase = passphrase + #: handlers for files; it should be dict filename -> (data_function, + # size_function), + # where data_function will get file-like object as the only argument and + # might be called in a separate process (multiprocessing.Process), + # and size_function will get file size (when known) in bytes + self.handlers = handlers + #: is the backup encrypted? + self.encrypted = encrypted + #: is the backup compressed? + self.compressed = compressed + #: what crypto algorithm is used for encryption? 
+ self.crypto_algorithm = crypto_algorithm + #: only verify integrity, don't extract anything + self.verify_only = verify_only + #: progress + self.blocks_backedup = 0 + #: inner tar layer extraction (subprocess.Popen instance) + self.tar2_process = None + #: current inner tar archive name + self.tar2_current_file = None + #: call size_func handler for this file when tar report it on stderr + self.adjust_output_size = None + #: decompressor subprocess.Popen instance + self.decompressor_process = None + #: decryptor subprocess.Popen instance + self.decryptor_process = None + #: data import multiprocessing.Process instance + self.import_process = None + #: callback reporting progress to UI + self.progress_callback = progress_callback + #: process (subprocess.Popen instance) feeding the data into + # extraction tool + self.vmproc = vmproc + + self.log = logging.getLogger('qubesadmin.backup.extract') + self.stderr_encoding = sys.stderr.encoding or 'utf-8' + self.tar2_stderr = [] + self.compression_filter = compression_filter + + @staticmethod + def handle_streams(stream_in, streams_out, processes, size_limit=None, + progress_callback=None): + ''' + Copy stream_in to all streams_out and monitor all mentioned processes. + If any of them terminate with non-zero code, interrupt the process. Copy + at most `size_limit` data (if given). 
+ + :param stream_in: file-like object to read data from + :param streams_out: dict of file-like objects to write data to + :param processes: dict of subprocess.Popen objects to monitor + :param size_limit: int maximum data amount to process + :param progress_callback: callable function to report progress, will be + given copied data size (it should accumulate internally) + :return: failed process name, failed stream name, "size_limit" or None ( + no error) + ''' + buffer_size = 409600 + bytes_copied = 0 + while True: + if size_limit: + to_copy = min(buffer_size, size_limit - bytes_copied) + if to_copy <= 0: + return "size_limit" + else: + to_copy = buffer_size + buf = stream_in.read(to_copy) + if not buf: + # done + return None + + if callable(progress_callback): + progress_callback(len(buf)) + for name, stream in streams_out.items(): + if stream is None: + continue + try: + stream.write(buf) + except IOError: + return name + bytes_copied += len(buf) + + for name, proc in processes.items(): + if proc is None: + continue + if isinstance(proc, Process): + if not proc.is_alive() and proc.exitcode != 0: + return name + elif proc.poll(): + return name + + def collect_tar_output(self): + '''Retrieve tar stderr and handle it appropriately + + Log errors, process file size if requested. + This use :py:attr:`tar2_process`. 
+ ''' + if not self.tar2_process.stderr: + return + + if self.tar2_process.poll() is None: + try: + new_lines = self.tar2_process.stderr \ + .read(MAX_STDERR_BYTES).splitlines() + except IOError as e: + if e.errno == errno.EAGAIN: + return + else: + raise + else: + new_lines = self.tar2_process.stderr.readlines() + + new_lines = [x.decode(self.stderr_encoding) for x in new_lines] + + msg_re = re.compile(r".*#[0-9].*restore_pipe") + debug_msg = [msg for msg in new_lines if msg_re.match(msg)] + self.log.debug('tar2_stderr: %s', '\n'.join(debug_msg)) + new_lines = [msg for msg in new_lines if not msg_re.match(msg)] + if self.adjust_output_size: + # search for first file size reported by tar after setting + # self.adjust_output_size (so don't look at self.tar2_stderr) + # this is used only when extracting single-file archive, so don't + # bother with checking file name + file_size_re = re.compile(r"^[^ ]+ [^ ]+/[^ ]+ *([0-9]+) .*") + for line in new_lines: + match = file_size_re.match(line) + if match: + file_size = match.groups()[0] + self.adjust_output_size(file_size) + self.adjust_output_size = None + self.tar2_stderr += new_lines + + def run(self): + try: + self.__run__() + except Exception: + # Cleanup children + for process in [self.decompressor_process, + self.decryptor_process, + self.tar2_process]: + if process: + try: + process.terminate() + except OSError: + pass + process.wait() + self.log.exception('ERROR') + raise + + def handle_dir(self, dirname): + ''' Relocate files in given director when it's already extracted + + :param dirname: directory path to handle (relative to backup root), + without trailing slash + ''' + for fname, (data_func, size_func) in self.handlers.items(): + if not fname.startswith(dirname + '/'): + continue + if size_func is not None: + size_func(os.path.getsize(fname)) + with open(fname, 'rb') as input_file: + data_func(input_file) + os.unlink(fname) + shutil.rmtree(dirname) + + def cleanup_tar2(self, wait=True, terminate=False): + 
'''Cleanup running :py:attr:`tar2_process` + + :param wait: wait for it termination, otherwise method exit early if + process is still running + :param terminate: terminate the process if still running + ''' + if self.tar2_process is None: + return + if terminate: + if self.import_process is not None: + self.tar2_process.terminate() + self.import_process.terminate() + if wait: + self.tar2_process.wait() + if self.import_process is not None: + self.import_process.join() + elif self.tar2_process.poll() is None: + return + self.collect_tar_output() + if self.tar2_process.stderr: + self.tar2_process.stderr.close() + if self.tar2_process.returncode != 0: + self.log.error( + "ERROR: unable to extract files for %s, tar " + "output:\n %s", + self.tar2_current_file, + "\n ".join(self.tar2_stderr)) + else: + # Finished extracting the tar file + # if that was whole-directory archive, handle + # relocated files now + inner_name = self.tar2_current_file.rsplit('.', 1)[0] \ + .replace(self.base_dir + '/', '') + if os.path.basename(inner_name) == '.': + self.handle_dir( + os.path.dirname(inner_name)) + self.tar2_current_file = None + self.adjust_output_size = None + self.tar2_process = None + + @staticmethod + def _data_func_wrapper(close_fds, data_func, data_stream): + '''Close not needed file descriptors, then call data_func( + data_stream). + + This is to prevent holding write end of a pipe in subprocess, + preventing EOF transfer. 
+ ''' + for fd in close_fds: + if fd == data_stream.fileno(): + continue + try: + os.close(fd) + except OSError: + pass + return data_func(data_stream) + + + def __run__(self): + self.log.debug("Started sending thread") + self.log.debug("Moving to dir " + self.base_dir) + os.chdir(self.base_dir) + + filename = None + + input_pipe = None + for filename in iter(self.queue.get, None): + if filename in (QUEUE_FINISHED, QUEUE_ERROR): + break + + assert isinstance(filename, str) + + self.log.debug("Extracting file " + filename) + + if filename.endswith('.000'): + # next file + if self.tar2_process is not None: + input_pipe.close() + self.cleanup_tar2(wait=True, terminate=False) + + inner_name = filename[:-len('.000')].replace( + self.base_dir + '/', '') + redirect_stdout = None + if os.path.basename(inner_name) == '.': + if (inner_name in self.handlers or + any(x.startswith(os.path.dirname(inner_name) + '/') + for x in self.handlers)): + tar2_cmdline = ['tar', + '-%s' % ("t" if self.verify_only else "x"), + inner_name] + else: + # ignore this directory + tar2_cmdline = None + elif inner_name in self.handlers: + tar2_cmdline = ['tar', + '-%svvO' % ("t" if self.verify_only else "x"), + inner_name] + redirect_stdout = subprocess.PIPE + else: + # no handlers for this file, ignore it + tar2_cmdline = None + + if tar2_cmdline is None: + # ignore the file + os.remove(filename) + continue + + if self.compressed: + if self.compression_filter: + tar2_cmdline.insert(-1, + "--use-compress-program=%s" % + self.compression_filter) + else: + tar2_cmdline.insert(-1, "--use-compress-program=%s" % + DEFAULT_COMPRESSION_FILTER) + + self.log.debug("Running command " + str(tar2_cmdline)) + if self.encrypted: + # Start decrypt + self.decryptor_process = subprocess.Popen( + ["openssl", "enc", + "-d", + "-" + self.crypto_algorithm, + "-pass", + "pass:" + self.passphrase], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + + self.tar2_process = subprocess.Popen( + tar2_cmdline, + 
stdin=self.decryptor_process.stdout, + stdout=redirect_stdout, + stderr=subprocess.PIPE) + self.decryptor_process.stdout.close() + input_pipe = self.decryptor_process.stdin + else: + self.tar2_process = subprocess.Popen( + tar2_cmdline, + stdin=subprocess.PIPE, + stdout=redirect_stdout, + stderr=subprocess.PIPE) + input_pipe = self.tar2_process.stdin + + if inner_name in self.handlers: + assert redirect_stdout is subprocess.PIPE + data_func, size_func = self.handlers[inner_name] + self.import_process = multiprocessing.Process( + target=self._data_func_wrapper, + args=([input_pipe.fileno()], + data_func, self.tar2_process.stdout)) + self.import_process.start() + self.tar2_process.stdout.close() + self.adjust_output_size = size_func + + fcntl.fcntl(self.tar2_process.stderr.fileno(), fcntl.F_SETFL, + fcntl.fcntl(self.tar2_process.stderr.fileno(), + fcntl.F_GETFL) | os.O_NONBLOCK) + self.tar2_stderr = [] + elif not self.tar2_process: + # Extracting of the current archive failed, skip to the next + # archive + os.remove(filename) + continue + else: + (basename, ext) = os.path.splitext(self.tar2_current_file) + previous_chunk_number = int(ext[1:]) + expected_filename = basename + '.%03d' % ( + previous_chunk_number+1) + if expected_filename != filename: + self.cleanup_tar2(wait=True, terminate=True) + self.log.error( + 'Unexpected file in archive: %s, expected %s', + filename, expected_filename) + os.remove(filename) + continue + self.log.debug("Releasing next chunck") + + self.tar2_current_file = filename + + input_file = open(filename, 'rb') + + run_error = self.handle_streams( + input_file, + {'target': input_pipe}, + {'vmproc': self.vmproc, + 'addproc': self.tar2_process, + 'data_import': self.import_process, + 'decryptor': self.decryptor_process, + }, + progress_callback=self.progress_callback) + input_file.close() + if run_error: + if run_error == "target": + self.collect_tar_output() + details = "\n".join(self.tar2_stderr) + else: + details = "%s failed" % 
run_error + if self.decryptor_process: + self.decryptor_process.terminate() + self.decryptor_process.wait() + self.decryptor_process = None + self.log.error('Error while processing \'%s\': %s', + self.tar2_current_file, details) + self.cleanup_tar2(wait=True, terminate=True) + + # Delete the file as we don't need it anymore + self.log.debug('Removing file %s', filename) + os.remove(filename) + + if self.tar2_process is not None: + input_pipe.close() + if filename == QUEUE_ERROR: + if self.decryptor_process: + self.decryptor_process.terminate() + self.decryptor_process.wait() + self.decryptor_process = None + self.cleanup_tar2(terminate=(filename == QUEUE_ERROR)) + + self.log.debug('Finished extracting thread') + + +def get_supported_hmac_algo(hmac_algorithm=None): + '''Generate a list of supported hmac algorithms + + :param hmac_algorithm: default algorithm, if given, it is placed as a + first element + ''' + # Start with provided default + if hmac_algorithm: + yield hmac_algorithm + if hmac_algorithm != 'scrypt': + yield 'scrypt' + proc = subprocess.Popen(['openssl', 'list-message-digest-algorithms'], + stdout=subprocess.PIPE) + try: + for algo in proc.stdout.readlines(): + algo = algo.decode('ascii') + if '=>' in algo: + continue + yield algo.strip() + finally: + proc.terminate() + proc.wait() + proc.stdout.close() + +class BackupApp(object): + '''Interface for backup collection''' + # pylint: disable=too-few-public-methods + def __init__(self, qubes_xml): + '''Initialize BackupApp object and load qubes.xml into it''' + self.store = qubes_xml + self.domains = {} + self.globals = {} + self.load() + + def load(self): + '''Load qubes.xml''' + raise NotImplementedError + +class BackupVM(object): + '''Interface for a single VM in the backup''' + # pylint: disable=too-few-public-methods + def __init__(self): + '''Initialize empty BackupVM object''' + #: VM class + self.klass = 'AppVM' + #: VM name + self.name = None + #: VM template + self.template = None + #: VM label 
+ self.label = None + #: VM properties + self.properties = {} + #: VM features (key/value), aka services in core2 + self.features = {} + #: VM tags + self.tags = set() + #: VM devices - dict with key=devtype, value=dict of devices ( + # key=ident, value=options) + self.devices = collections.defaultdict(dict) + #: VM path in the backup + self.backup_path = None + #: size of the VM + self.size = 0 + + @property + def included_in_backup(self): + '''Report whether a VM is included in the backup''' + return False + +class BackupRestoreOptions(object): + '''Options for restore operation''' + # pylint: disable=too-few-public-methods + def __init__(self): + #: use default NetVM if the one referenced in backup do not exists on + # the host + self.use_default_netvm = True + #: set NetVM to "none" if the one referenced in backup do not exists + # on the host + self.use_none_netvm = False + #: set template to default if the one referenced in backup do not + # exists on the host + self.use_default_template = True + #: use default kernel if the one referenced in backup do not exists + # on the host + self.use_default_kernel = True + #: restore dom0 home + self.dom0_home = True + #: restore dom0 home even if username is different + self.ignore_username_mismatch = False + #: do not restore data, only verify backup integrity + self.verify_only = False + #: automatically rename VM during restore, when it would conflict + # with existing one + self.rename_conflicting = True + #: list of VM names to exclude + self.exclude = [] + #: restore VMs into selected storage pool + self.override_pool = None + +class BackupRestore(object): + """Usage: + + >>> restore_op = BackupRestore(...) 
+ >>> # adjust restore_op.options here + >>> restore_info = restore_op.get_restore_info() + >>> # manipulate restore_info to select VMs to restore here + >>> restore_op.restore_do(restore_info) + """ + + class VMToRestore(object): + '''Information about a single VM to be restored''' + # pylint: disable=too-few-public-methods + #: VM excluded from restore by user + EXCLUDED = object() + #: VM with such name already exists on the host + ALREADY_EXISTS = object() + #: NetVM used by the VM does not exists on the host + MISSING_NETVM = object() + #: TemplateVM used by the VM does not exists on the host + MISSING_TEMPLATE = object() + #: Kernel used by the VM does not exists on the host + MISSING_KERNEL = object() + + def __init__(self, vm): + assert isinstance(vm, BackupVM) + self.vm = vm + self.name = vm.name + self.subdir = vm.backup_path + self.size = vm.size + self.problems = set() + self.template = vm.template + if vm.properties.get('netvm', None): + self.netvm = vm.properties['netvm'] + else: + self.netvm = None + self.orig_template = None + self.restored_vm = None + + @property + def good_to_go(self): + '''Is the VM ready for restore?''' + return len(self.problems) == 0 + + class Dom0ToRestore(VMToRestore): + '''Information about dom0 home to restore''' + # pylint: disable=too-few-public-methods + #: backup was performed on system with different dom0 username + USERNAME_MISMATCH = object() + + def __init__(self, vm, subdir=None): + super(BackupRestore.Dom0ToRestore, self).__init__(vm) + if subdir: + self.subdir = subdir + self.username = os.path.basename(subdir) + + def __init__(self, app, backup_location, backup_vm, passphrase): + super(BackupRestore, self).__init__() + + #: qubes.Qubes instance + self.app = app + + #: options how the backup should be restored + self.options = BackupRestoreOptions() + + #: VM from which backup should be retrieved + self.backup_vm = backup_vm + if backup_vm and backup_vm.qid == 0: + self.backup_vm = None + + #: backup path, 
inside VM pointed by :py:attr:`backup_vm` + self.backup_location = backup_location + + #: passphrase protecting backup integrity and optionally decryption + self.passphrase = passphrase + + #: temporary directory used to extract the data before moving to the + # final location + self.tmpdir = tempfile.mkdtemp(prefix="restore", dir="/var/tmp") + + #: list of processes (Popen objects) to kill on cancel + self.processes_to_kill_on_cancel = [] + + #: is the backup operation canceled + self.canceled = False + + #: report restore progress, called with one argument - percents of + # data restored + # FIXME: convert to float [0,1] + self.progress_callback = None + + self.log = logging.getLogger('qubesadmin.backup') + + #: basic information about the backup + self.header_data = self._retrieve_backup_header() + + #: VMs included in the backup + self.backup_app = self._process_qubes_xml() + + def _start_retrieval_process(self, filelist, limit_count, limit_bytes): + """Retrieve backup stream and extract it to :py:attr:`tmpdir` + + :param filelist: list of files to extract; listing directory name + will extract the whole directory; use empty list to extract the whole + archive + :param limit_count: maximum number of files to extract + :param limit_bytes: maximum size of extracted data + :return: a touple of (Popen object of started process, file-like + object for reading extracted files list, file-like object for reading + errors) + """ + + vmproc = None + if self.backup_vm is not None: + # If APPVM, STDOUT is a PIPE + vmproc = self.backup_vm.run_service('qubes.Restore') + vmproc.stdin.write( + (self.backup_location.replace("\r", "").replace("\n", + "") + "\n").encode()) + vmproc.stdin.flush() + + # Send to tar2qfile the VMs that should be extracted + vmproc.stdin.write((" ".join(filelist) + "\n").encode()) + vmproc.stdin.flush() + self.processes_to_kill_on_cancel.append(vmproc) + + backup_stdin = vmproc.stdout + tar1_command = ['/usr/libexec/qubes/qfile-dom0-unpacker', + 
str(os.getuid()), self.tmpdir, '-v'] + else: + backup_stdin = open(self.backup_location, 'rb') + + tar1_command = ['tar', + '-ixv', + '-C', self.tmpdir] + filelist + + tar1_env = os.environ.copy() + tar1_env['UPDATES_MAX_BYTES'] = str(limit_bytes) + tar1_env['UPDATES_MAX_FILES'] = str(limit_count) + self.log.debug("Run command" + str(tar1_command)) + command = subprocess.Popen( + tar1_command, + stdin=backup_stdin, + stdout=vmproc.stdin if vmproc else subprocess.PIPE, + stderr=subprocess.PIPE, + env=tar1_env) + backup_stdin.close() + self.processes_to_kill_on_cancel.append(command) + + # qfile-dom0-unpacker output filelist on stderr + # and have stdout connected to the VM), while tar output filelist + # on stdout + if self.backup_vm: + filelist_pipe = command.stderr + # let qfile-dom0-unpacker hold the only open FD to the write end of + # pipe, otherwise qrexec-client will not receive EOF when + # qfile-dom0-unpacker terminates + vmproc.stdin.close() + else: + filelist_pipe = command.stdout + + if self.backup_vm: + error_pipe = vmproc.stderr + else: + error_pipe = command.stderr + return command, filelist_pipe, error_pipe + + def _verify_hmac(self, filename, hmacfile, algorithm=None): + '''Verify hmac of a file using given algorithm. + + If algorithm is not specified, use the one from backup header ( + :py:attr:`header_data`). + + Raise :py:exc:`QubesException` on failure, return :py:obj:`True` on + success. + + 'scrypt' algorithm is supported only for header file; hmac file is + encrypted (and integrity protected) version of plain header. + + :param filename: path to file to be verified + :param hmacfile: path to hmac file for *filename* + :param algorithm: override algorithm + ''' + def load_hmac(hmac_text): + '''Parse hmac output by openssl. + + Return just hmac, without filename and other metadata. 
+ ''' + if any(ord(x) not in range(128) for x in hmac_text): + raise QubesException( + "Invalid content of {}".format(hmacfile)) + hmac_text = hmac_text.strip().split("=") + if len(hmac_text) > 1: + hmac_text = hmac_text[1].strip() + else: + raise QubesException( + "ERROR: invalid hmac file content") + + return hmac_text + if algorithm is None: + algorithm = self.header_data.hmac_algorithm + passphrase = self.passphrase.encode('utf-8') + self.log.debug("Verifying file %s", filename) + + if os.stat(os.path.join(self.tmpdir, hmacfile)).st_size > \ + HMAC_MAX_SIZE: + raise QubesException('HMAC file {} too large'.format( + hmacfile)) + + if hmacfile != filename + ".hmac": + raise QubesException( + "ERROR: expected hmac for {}, but got {}". + format(filename, hmacfile)) + + if algorithm == 'scrypt': + # in case of 'scrypt' _verify_hmac is only used for backup header + assert filename == HEADER_FILENAME + self._verify_and_decrypt(hmacfile, HEADER_FILENAME + '.dec') + f_name = os.path.join(self.tmpdir, filename) + with open(f_name, 'rb') as f_one: + with open(f_name + '.dec', 'rb') as f_two: + if f_one.read() != f_two.read(): + raise QubesException( + 'Invalid hmac on {}'.format(filename)) + else: + return True + + with open(os.path.join(self.tmpdir, filename), 'rb') as f_input: + hmac_proc = subprocess.Popen( + ["openssl", "dgst", "-" + algorithm, "-hmac", passphrase], + stdin=f_input, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + hmac_stdout, hmac_stderr = hmac_proc.communicate() + + if hmac_stderr: + raise QubesException( + "ERROR: verify file {0}: {1}".format(filename, hmac_stderr)) + else: + self.log.debug("Loading hmac for file %s", filename) + with open(os.path.join(self.tmpdir, hmacfile), 'r', + encoding='ascii') as f_hmac: + hmac = load_hmac(f_hmac.read()) + + if hmac and load_hmac(hmac_stdout.decode('ascii')) == hmac: + os.unlink(os.path.join(self.tmpdir, hmacfile)) + self.log.debug( + "File verification OK -> Sending file %s", filename) + return True + 
else: + raise QubesException( + "ERROR: invalid hmac for file {0}: {1}. " + "Is the passphrase correct?". + format(filename, load_hmac(hmac_stdout.decode('ascii')))) + + def _verify_and_decrypt(self, filename, output=None): + '''Handle scrypt-wrapped file + + Decrypt the file, and verify its integrity - both tasks handled by + 'scrypt' tool. Filename (without extension) is also validated. + + :param filename: Input file name (relative to :py:attr:`tmpdir`), + needs to have `.enc` or `.hmac` extension + :param output: Output file name (relative to :py:attr:`tmpdir`), + use :py:obj:`None` to use *filename* without extension + :return: *filename* without extension + ''' + assert filename.endswith('.enc') or filename.endswith('.hmac') + fullname = os.path.join(self.tmpdir, filename) + (origname, _) = os.path.splitext(filename) + if output: + fulloutput = os.path.join(self.tmpdir, output) + else: + fulloutput = os.path.join(self.tmpdir, origname) + if origname == HEADER_FILENAME: + passphrase = u'{filename}!{passphrase}'.format( + filename=origname, + passphrase=self.passphrase) + else: + passphrase = u'{backup_id}!{filename}!{passphrase}'.format( + backup_id=self.header_data.backup_id, + filename=origname, + passphrase=self.passphrase) + try: + p = launch_scrypt('dec', fullname, fulloutput, passphrase) + except OSError as err: + raise QubesException('failed to decrypt {}: {!s}'.format( + fullname, err)) + (_, stderr) = p.communicate() + if hasattr(p, 'pty'): + p.pty.close() + if p.returncode != 0: + os.unlink(fulloutput) + raise QubesException('failed to decrypt {}: {}'.format( + fullname, stderr)) + # encrypted file is no longer needed + os.unlink(fullname) + return origname + + def _retrieve_backup_header_files(self, files, allow_none=False): + '''Retrieve backup header. + + Start retrieval process (possibly involving network access from + another VM). Returns a collection of retrieved file paths. 
+ ''' + (retrieve_proc, filelist_pipe, error_pipe) = \ + self._start_retrieval_process( + files, len(files), 1024 * 1024) + filelist = filelist_pipe.read() + filelist_pipe.close() + retrieve_proc_returncode = retrieve_proc.wait() + if retrieve_proc in self.processes_to_kill_on_cancel: + self.processes_to_kill_on_cancel.remove(retrieve_proc) + extract_stderr = error_pipe.read(MAX_STDERR_BYTES) + error_pipe.close() + + # wait for other processes (if any) + for proc in self.processes_to_kill_on_cancel: + if proc.wait() != 0: + raise QubesException( + "Backup header retrieval failed (exit code {})".format( + proc.wait()) + ) + + if retrieve_proc_returncode != 0: + if not filelist and 'Not found in archive' in extract_stderr: + if allow_none: + return None + else: + raise QubesException( + "unable to read the qubes backup file {0} ({1}): {2}". + format( + self.backup_location, + retrieve_proc.wait(), + extract_stderr + )) + actual_files = filelist.decode('ascii').splitlines() + if sorted(actual_files) != sorted(files): + raise QubesException( + 'unexpected files in archive: got {!r}, expected {!r}'.format( + actual_files, files + )) + for fname in files: + if not os.path.exists(os.path.join(self.tmpdir, fname)): + if allow_none: + return None + else: + raise QubesException( + 'Unable to retrieve file {} from backup {}: {}'.format( + fname, self.backup_location, extract_stderr + ) + ) + return files + + def _retrieve_backup_header(self): + """Retrieve backup header and qubes.xml. 
Only backup header is + analyzed, qubes.xml is left as-is + (not even verified/decrypted/uncompressed) + + :return header_data + :rtype :py:class:`BackupHeader` + """ + + if not self.backup_vm and os.path.exists( + os.path.join(self.backup_location, 'qubes.xml')): + # backup format version 1 doesn't have header + header_data = BackupHeader() + header_data.version = 1 + return header_data + + header_files = self._retrieve_backup_header_files( + ['backup-header', 'backup-header.hmac'], allow_none=True) + + if not header_files: + # R2-Beta3 didn't have backup header, so if none is found, + # assume it's version=2 and use values present at that time + header_data = BackupHeader( + version=2, + # place explicitly this value, because it is what format_version + # 2 have + hmac_algorithm='SHA1', + crypto_algorithm='aes-256-cbc', + # TODO: set encrypted to something... + ) + else: + filename = HEADER_FILENAME + hmacfile = HEADER_FILENAME + '.hmac' + self.log.debug("Got backup header and hmac: %s, %s", + filename, hmacfile) + + file_ok = False + hmac_algorithm = DEFAULT_HMAC_ALGORITHM + for hmac_algo in get_supported_hmac_algo(hmac_algorithm): + try: + if self._verify_hmac(filename, hmacfile, hmac_algo): + file_ok = True + break + except QubesException as err: + self.log.debug( + 'Failed to verify %s using %s: %r', + hmacfile, hmac_algo, err) + # Ignore exception here, try the next algo + if not file_ok: + raise QubesException( + "Corrupted backup header (hmac verification " + "failed). Is the password correct?") + filename = os.path.join(self.tmpdir, filename) + with open(filename, 'rb') as f_header: + header_data = BackupHeader(f_header.read()) + os.unlink(filename) + + return header_data + + def _start_inner_extraction_worker(self, queue, handlers): + """Start a worker process, extracting inner layer of bacup archive, + extract them to :py:attr:`tmpdir`. + End the data by pushing QUEUE_FINISHED or QUEUE_ERROR to the queue. 
+ + :param queue :py:class:`Queue` object to handle files from + """ + + # Setup worker to extract encrypted data chunks to the restore dirs + # Create the process here to pass it options extracted from + # backup header + extractor_params = { + 'queue': queue, + 'base_dir': self.tmpdir, + 'passphrase': self.passphrase, + 'encrypted': self.header_data.encrypted, + 'compressed': self.header_data.compressed, + 'crypto_algorithm': self.header_data.crypto_algorithm, + 'verify_only': self.options.verify_only, + 'progress_callback': self.progress_callback, + 'handlers': handlers, + } + self.log.debug( + 'Starting extraction worker in %s, file handlers map: %s', + self.tmpdir, repr(handlers)) + format_version = self.header_data.version + if format_version in [3, 4]: + extractor_params['compression_filter'] = \ + self.header_data.compression_filter + if format_version == 4: + # encryption already handled + extractor_params['encrypted'] = False + extract_proc = ExtractWorker3(**extractor_params) + else: + raise NotImplementedError( + "Backup format version %d not supported" % format_version) + extract_proc.start() + return extract_proc + + @staticmethod + def _save_qubes_xml(path, stream): + '''Handler for qubes.xml.000 content - just save the data to a file''' + with open(path, 'wb') as f_qubesxml: + f_qubesxml.write(stream.read()) + + def _process_qubes_xml(self): + """Verify, unpack and load qubes.xml. Possibly convert its format if + necessary. It expect that :py:attr:`header_data` is already populated, + and :py:meth:`retrieve_backup_header` was called. 
+ """ + if self.header_data.version == 1: + raise NotImplementedError('Backup format version 1 not supported') + elif self.header_data.version in [2, 3]: + self._retrieve_backup_header_files( + ['qubes.xml.000', 'qubes.xml.000.hmac']) + self._verify_hmac("qubes.xml.000", "qubes.xml.000.hmac") + else: + self._retrieve_backup_header_files(['qubes.xml.000.enc']) + self._verify_and_decrypt('qubes.xml.000.enc') + + queue = Queue() + queue.put("qubes.xml.000") + queue.put(QUEUE_FINISHED) + + qubes_xml_path = os.path.join(self.tmpdir, 'qubes-restored.xml') + handlers = { + 'qubes.xml': ( + functools.partial(self._save_qubes_xml, qubes_xml_path), + None) + } + extract_proc = self._start_inner_extraction_worker(queue, handlers) + extract_proc.join() + if extract_proc.exitcode != 0: + raise QubesException( + "unable to extract the qubes backup. " + "Check extracting process errors.") + + if self.header_data.version in [2, 3]: + from qubesadmin.backup.core2 import Core2Qubes + backup_app = Core2Qubes(qubes_xml_path) + elif self.header_data.version in [4]: + from qubesadmin.backup.core3 import Core3Qubes + backup_app = Core3Qubes(qubes_xml_path) + else: + raise QubesException( + 'Unsupported qubes.xml format version: {}'.format( + self.header_data.version)) + # Not needed anymore - all the data stored in backup_app + os.unlink(qubes_xml_path) + return backup_app + + def _restore_vm_data(self, vms_dirs, vms_size, handlers): + '''Restore data of VMs + + :param vms_dirs: list of directories to extract (skip others) + :param vms_size: expected size (abort if source stream exceed this + value) + :param handlers: handlers for restored files - see + :py:class:`ExtractWorker3` for details + ''' + # Currently each VM consists of at most 7 archives (count + # file_to_backup calls in backup_prepare()), but add some safety + # margin for further extensions. Each archive is divided into 100MB + # chunks. Additionally each file have own hmac file. 
So assume upper + # limit as 2*(10*COUNT_OF_VMS+TOTAL_SIZE/100MB) + limit_count = str(2 * (10 * len(vms_dirs) + + int(vms_size / (100 * 1024 * 1024)))) + + self.log.debug("Working in temporary dir: %s", self.tmpdir) + self.log.info("Extracting data: %s to restore", size_to_human(vms_size)) + + # retrieve backup from the backup stream (either VM, or dom0 file) + (retrieve_proc, filelist_pipe, error_pipe) = \ + self._start_retrieval_process( + vms_dirs, limit_count, vms_size) + + to_extract = Queue() + + # extract data retrieved by retrieve_proc + extract_proc = self._start_inner_extraction_worker( + to_extract, handlers) + + try: + filename = None + hmacfile = None + nextfile = None + while True: + if self.canceled: + break + if not extract_proc.is_alive(): + retrieve_proc.terminate() + retrieve_proc.wait() + if retrieve_proc in self.processes_to_kill_on_cancel: + self.processes_to_kill_on_cancel.remove(retrieve_proc) + # wait for other processes (if any) + for proc in self.processes_to_kill_on_cancel: + proc.wait() + break + if nextfile is not None: + filename = nextfile + else: + filename = filelist_pipe.readline().decode('ascii').strip() + + self.log.debug("Getting new file: %s", filename) + + if not filename or filename == "EOF": + break + + # if reading archive directly with tar, wait for next filename - + # tar prints filename before processing it, so wait for + # the next one to be sure that whole file was extracted + if not self.backup_vm: + nextfile = filelist_pipe.readline().decode('ascii').strip() + + if self.header_data.version in [2, 3]: + if not self.backup_vm: + hmacfile = nextfile + nextfile = filelist_pipe.readline().\ + decode('ascii').strip() + else: + hmacfile = filelist_pipe.readline().\ + decode('ascii').strip() + + if self.canceled: + break + + self.log.debug("Getting hmac: %s", hmacfile) + if not hmacfile or hmacfile == "EOF": + # Premature end of archive, either of tar1_command or + # vmproc exited with error + break + else: # 
self.header_data.version == 4 + if not filename.endswith('.enc'): + raise qubesadmin.exc.QubesException( + 'Invalid file extension found in archive: {}'. + format(filename)) + + if not any(filename.startswith(x) for x in vms_dirs): + self.log.debug("Ignoring VM not selected for restore") + os.unlink(os.path.join(self.tmpdir, filename)) + if hmacfile: + os.unlink(os.path.join(self.tmpdir, hmacfile)) + continue + + if self.header_data.version in [2, 3]: + self._verify_hmac(filename, hmacfile) + else: + # _verify_and_decrypt will write output to a file with + # '.enc' extension cut off. This is safe because: + # - `scrypt` tool will override output, so if the file was + # already there (received from the VM), it will be removed + # - incoming archive extraction will refuse to override + # existing file, so if `scrypt` already created one, + # it can not be manipulated by the VM + # - when the file is retrieved from the VM, it appears at + # the final form - if it's visible, VM have no longer + # influence over its content + # + # This all means that if the file was correctly verified + # + decrypted, we will surely access the right file + filename = self._verify_and_decrypt(filename) + to_extract.put(os.path.join(self.tmpdir, filename)) + + if self.canceled: + raise BackupCanceledError("Restore canceled", + tmpdir=self.tmpdir) + + if retrieve_proc.wait() != 0: + raise QubesException( + "unable to read the qubes backup file {0}: {1}" + .format(self.backup_location, error_pipe.read( + MAX_STDERR_BYTES))) + # wait for other processes (if any) + for proc in self.processes_to_kill_on_cancel: + proc.wait() + if proc.returncode != 0: + raise QubesException( + "Backup completed, " + "but VM sending it reported an error (exit code {})". 
+ format(proc.returncode)) + + if filename and filename != "EOF": + raise QubesException( + "Premature end of archive, the last file was %s" % filename) + except: + to_extract.put(QUEUE_ERROR) + extract_proc.join() + raise + else: + to_extract.put(QUEUE_FINISHED) + finally: + error_pipe.close() + filelist_pipe.close() + + self.log.debug("Waiting for the extraction process to finish...") + extract_proc.join() + self.log.debug("Extraction process finished with code: %s", + extract_proc.exitcode) + if extract_proc.exitcode != 0: + raise QubesException( + "unable to extract the qubes backup. " + "Check extracting process errors.") + + def new_name_for_conflicting_vm(self, orig_name, restore_info): + '''Generate new name for conflicting VM + + Add a number suffix, until the name is unique. If no unique name can + be found using this strategy, return :py:obj:`None` + ''' + number = 1 + if len(orig_name) > 29: + orig_name = orig_name[0:29] + new_name = orig_name + while (new_name in restore_info.keys() or + new_name in [x.name for x in restore_info.values()] or + new_name in self.app.domains): + new_name = str('{}{}'.format(orig_name, number)) + number += 1 + if number == 100: + # give up + return None + return new_name + + def restore_info_verify(self, restore_info): + '''Verify restore info - validate VM dependencies, name conflicts + etc. 
+ ''' + for vm in restore_info.keys(): + if vm in ['dom0']: + continue + + vm_info = restore_info[vm] + assert isinstance(vm_info, self.VMToRestore) + + vm_info.problems.clear() + if vm in self.options.exclude: + vm_info.problems.add(self.VMToRestore.EXCLUDED) + + if not self.options.verify_only and \ + vm_info.name in self.app.domains: + if self.options.rename_conflicting: + new_name = self.new_name_for_conflicting_vm( + vm, restore_info + ) + if new_name is not None: + vm_info.name = new_name + else: + vm_info.problems.add(self.VMToRestore.ALREADY_EXISTS) + else: + vm_info.problems.add(self.VMToRestore.ALREADY_EXISTS) + + # check template + if vm_info.template: + template_name = vm_info.template + try: + host_template = self.app.domains[template_name] + except KeyError: + host_template = None + present_on_host = (host_template and + isinstance(host_template, qubesadmin.vm.TemplateVM)) + present_in_backup = (template_name in restore_info.keys() and + restore_info[template_name].good_to_go and + restore_info[template_name].vm.klass == + 'TemplateVM') + if not present_on_host and not present_in_backup: + if self.options.use_default_template and \ + self.app.default_template: + if vm_info.orig_template is None: + vm_info.orig_template = template_name + vm_info.template = self.app.default_template.name + else: + vm_info.problems.add( + self.VMToRestore.MISSING_TEMPLATE) + + # check netvm + if vm_info.vm.properties.get('netvm', None) is not None: + netvm_name = vm_info.netvm + + try: + netvm_on_host = self.app.domains[netvm_name] + except KeyError: + netvm_on_host = None + + present_on_host = (netvm_on_host is not None + and netvm_on_host.provides_network) + present_in_backup = (netvm_name in restore_info.keys() and + restore_info[netvm_name].good_to_go and + restore_info[netvm_name].vm.properties.get( + 'provides_network', False)) + if not present_on_host and not present_in_backup: + if self.options.use_default_netvm: + del vm_info.vm.properties['netvm'] + elif 
self.options.use_none_netvm: + vm_info.netvm = None + else: + vm_info.problems.add(self.VMToRestore.MISSING_NETVM) + + return restore_info + + def get_restore_info(self): + '''Get restore info + + Return information about what is included in the backup. + That dictionary can be adjusted to select what VM should be restore. + ''' + # Format versions: + # 1 - Qubes R1, Qubes R2 beta1, beta2 + # 2 - Qubes R2 beta3+ + # 3 - Qubes R2+ + # 4 - Qubes R4+ + + vms_to_restore = {} + + for vm in self.backup_app.domains.values(): + if vm.klass == 'AdminVM': + # Handle dom0 as special case later + continue + if vm.included_in_backup: + self.log.debug("%s is included in backup", vm.name) + + vms_to_restore[vm.name] = self.VMToRestore(vm) + + if vm.template is not None: + templatevm_name = vm.template + vms_to_restore[vm.name].template = templatevm_name + + vms_to_restore = self.restore_info_verify(vms_to_restore) + + # ...and dom0 home + if self.options.dom0_home and \ + self.backup_app.domains['dom0'].included_in_backup: + vm = self.backup_app.domains['dom0'] + vms_to_restore['dom0'] = self.Dom0ToRestore(vm) + local_user = grp.getgrnam('qubes').gr_mem[0] + + if vms_to_restore['dom0'].username != local_user: + if not self.options.ignore_username_mismatch: + vms_to_restore['dom0'].problems.add( + self.Dom0ToRestore.USERNAME_MISMATCH) + + return vms_to_restore + + @staticmethod + def get_restore_summary(restore_info): + '''Return a ASCII formatted table with restore info summary''' + fields = { + "name": {'func': lambda vm: vm.name}, + + "type": {'func': lambda vm: vm.klass}, + + "template": {'func': lambda vm: + 'n/a' if vm.template is None else vm.template}, + + "netvm": {'func': lambda vm: + '(default)' if 'netvm' not in vm.properties else + '-' if vm.properties['netvm'] is None else + vm.properties['netvm']}, + + "label": {'func': lambda vm: vm.label}, + } + + fields_to_display = ['name', 'type', 'template', + 'netvm', 'label'] + + # First calculate the maximum width of each 
field we want to display + total_width = 0 + for field in fields_to_display: + fields[field]['max_width'] = len(field) + for vm_info in restore_info.values(): + if vm_info.vm: + # noinspection PyUnusedLocal + field_len = len(str(fields[field]["func"](vm_info.vm))) + if field_len > fields[field]['max_width']: + fields[field]['max_width'] = field_len + total_width += fields[field]['max_width'] + + summary = "" + summary += "The following VMs are included in the backup:\n" + summary += "\n" + + # Display the header + for field in fields_to_display: + # noinspection PyTypeChecker + fmt = "{{0:-^{0}}}-+".format(fields[field]["max_width"] + 1) + summary += fmt.format('-') + summary += "\n" + for field in fields_to_display: + # noinspection PyTypeChecker + fmt = "{{0:>{0}}} |".format(fields[field]["max_width"] + 1) + summary += fmt.format(field) + summary += "\n" + for field in fields_to_display: + # noinspection PyTypeChecker + fmt = "{{0:-^{0}}}-+".format(fields[field]["max_width"] + 1) + summary += fmt.format('-') + summary += "\n" + + for vm_info in restore_info.values(): + assert isinstance(vm_info, BackupRestore.VMToRestore) + # Skip non-VM here + if not vm_info.vm: + continue + # noinspection PyUnusedLocal + summary_line = "" + for field in fields_to_display: + # noinspection PyTypeChecker + fmt = "{{0:>{0}}} |".format(fields[field]["max_width"] + 1) + summary_line += fmt.format(fields[field]["func"](vm_info.vm)) + + if BackupRestore.VMToRestore.EXCLUDED in vm_info.problems: + summary_line += " <-- Excluded from restore" + elif BackupRestore.VMToRestore.ALREADY_EXISTS in vm_info.problems: + summary_line += \ + " <-- A VM with the same name already exists on the host!" + elif BackupRestore.VMToRestore.MISSING_TEMPLATE in \ + vm_info.problems: + summary_line += " <-- No matching template on the host " \ + "or in the backup found!" 
+ elif BackupRestore.VMToRestore.MISSING_NETVM in \ + vm_info.problems: + summary_line += " <-- No matching netvm on the host " \ + "or in the backup found!" + else: + if vm_info.template != vm_info.vm.template: + summary_line += " <-- Template change to '{}'".format( + vm_info.template) + if vm_info.name != vm_info.vm.name: + summary_line += " <-- Will be renamed to '{}'".format( + vm_info.name) + + summary += summary_line + "\n" + + if 'dom0' in restore_info.keys(): + summary_line = "" + for field in fields_to_display: + # noinspection PyTypeChecker + fmt = "{{0:>{0}}} |".format(fields[field]["max_width"] + 1) + if field == "name": + summary_line += fmt.format("Dom0") + elif field == "type": + summary_line += fmt.format("Home") + else: + summary_line += fmt.format("") + if BackupRestore.Dom0ToRestore.USERNAME_MISMATCH in \ + restore_info['dom0'].problems: + summary_line += " <-- username in backup and dom0 mismatch" + + summary += summary_line + "\n" + + return summary + + @staticmethod + def _templates_first(vms): + '''Sort templates befor other VM types (AppVM etc)''' + def key_function(instance): + '''Key function for :py:func:`sorted`''' + if isinstance(instance, BackupVM): + return instance.klass == 'TemplateVM' + elif hasattr(instance, 'vm'): + return key_function(instance.vm) + return 0 + return sorted(vms, + key=key_function, + reverse=True) + + + def _handle_dom0(self, backup_path): + '''Extract dom0 home''' + local_user = grp.getgrnam('qubes').gr_mem[0] + home_dir = pwd.getpwnam(local_user).pw_dir + backup_dom0_home_dir = os.path.join(self.tmpdir, backup_path) + restore_home_backupdir = "home-pre-restore-{0}".format( + time.strftime("%Y-%m-%d-%H%M%S")) + + self.log.info("Restoring home of user '%s'...", local_user) + self.log.info("Existing files/dirs backed up in '%s' dir", + restore_home_backupdir) + os.mkdir(home_dir + '/' + restore_home_backupdir) + for f_name in os.listdir(backup_dom0_home_dir): + home_file = home_dir + '/' + f_name + if 
os.path.exists(home_file): + os.rename(home_file, + home_dir + '/' + restore_home_backupdir + '/' + f_name) + if self.header_data.version == 1: + subprocess.call( + ["cp", "-nrp", "--reflink=auto", + backup_dom0_home_dir + '/' + f_name, home_file]) + elif self.header_data.version >= 2: + shutil.move(backup_dom0_home_dir + '/' + f_name, home_file) + retcode = subprocess.call(['sudo', 'chown', '-R', + local_user, home_dir]) + if retcode != 0: + self.log.error("*** Error while setting home directory owner") + + def restore_do(self, restore_info): + ''' + + High level workflow: + 1. Create VMs object in host collection (qubes.xml) + 2. Create them on disk (vm.create_on_disk) + 3. Restore VM data, overriding/converting VM files + 4. Apply possible fixups and save qubes.xml + + :param restore_info: + :return: + ''' + + if self.header_data.version == 1: + raise NotImplementedError('Backup format version 1 not supported') + + restore_info = self.restore_info_verify(restore_info) + + self._restore_vms_metadata(restore_info) + + # Perform VM restoration in backup order + vms_dirs = [] + handlers = {} + vms_size = 0 + for vm_info in self._templates_first(restore_info.values()): + vm = vm_info.restored_vm + if vm and vm_info.subdir: + vms_size += int(vm_info.size) + vms_dirs.append(vm_info.subdir) + for name, volume in vm.volumes.items(): + if not volume.save_on_stop: + continue + data_func = volume.import_data + size_func = volume.resize + handlers[os.path.join(vm_info.subdir, name + '.img')] = \ + (data_func, size_func) + # TODO applications whitelist + # TODO firewall + + if 'dom0' in restore_info.keys() and \ + restore_info['dom0'].good_to_go: + vms_dirs.append(os.path.dirname(restore_info['dom0'].subdir)) + vms_size += restore_info['dom0'].size + handlers[restore_info['dom0'].subdir] = (self._handle_dom0, None) + try: + self._restore_vm_data(vms_dirs=vms_dirs, vms_size=vms_size, + handlers=handlers) + except QubesException: + if self.options.verify_only: + raise + else: + 
self.log.warning( + "Some errors occurred during data extraction, " + "continuing anyway to restore at least some " + "VMs") + + if self.options.verify_only: + shutil.rmtree(self.tmpdir) + return + + if self.canceled: + raise BackupCanceledError("Restore canceled", + tmpdir=self.tmpdir) + + shutil.rmtree(self.tmpdir) + self.log.info("-> Done. Please install updates for all the restored " + "templates.") + + def _restore_vms_metadata(self, restore_info): + '''Restore VM metadata + + Create VMs, set their properties etc. + ''' + vms = {} + for vm_info in restore_info.values(): + assert isinstance(vm_info, self.VMToRestore) + if not vm_info.vm: + continue + if not vm_info.good_to_go: + continue + vm = vm_info.vm + vms[vm.name] = vm + + # First load templates, then other VMs + for vm in self._templates_first(vms.values()): + if self.canceled: + return + self.log.info("-> Restoring %s...", vm.name) + kwargs = {} + if vm.template: + template = restore_info[vm.name].template + # handle potentially renamed template + if template in restore_info \ + and restore_info[template].good_to_go: + template = restore_info[template].name + kwargs['template'] = template + + new_vm = None + vm_name = restore_info[vm.name].name + + try: + # first only create VMs, later setting may require other VMs + # be already created + new_vm = self.app.add_new_vm( + vm.klass, + name=vm_name, + label=vm.label, + pool=self.options.override_pool, + **kwargs) + except Exception: # pylint: disable=broad-except + self.log.exception('Error restoring VM %s, skipping', vm.name) + if new_vm: + del self.app.domains[new_vm.name] + continue + + restore_info[vm.name].restored_vm = new_vm + + for vm in vms.values(): + if self.canceled: + return + + new_vm = restore_info[vm.name].restored_vm + if not new_vm: + # skipped/failed + continue + + for prop, value in vm.properties.items(): + # exclude VM references - handled manually according to + # restore options + if prop in ['template', 'netvm', 'default_dispvm']: + 
continue + try: + setattr(new_vm, prop, value) + except Exception: # pylint: disable=broad-except + self.log.exception('Error setting %s.%s to %s', + vm.name, prop, value) + + for feature, value in vm.features.items(): + try: + new_vm.features[feature] = value + except Exception: # pylint: disable=broad-except + self.log.exception('Error setting %s.features[%s] to %s', + vm.name, feature, value) + + for tag in vm.tags: + try: + new_vm.tags.add(tag) + except Exception: # pylint: disable=broad-except + self.log.exception('Error adding tag %s to %s', + tag, vm.name) + + for bus in vm.devices: + for backend_domain, ident in vm.devices[bus]: + options = vm.devices[bus][(backend_domain, ident)] + assignment = DeviceAssignment( + backend_domain=backend_domain, + ident=ident, + options=options, + persistent=True) + try: + new_vm.devices[bus].attach(assignment) + except Exception: # pylint: disable=broad-except + self.log.exception('Error attaching device %s:%s to %s', + bus, ident, vm.name) + + # Set VM dependencies - only non-default setting + for vm in vms.values(): + vm_info = restore_info[vm.name] + vm_name = vm_info.name + try: + host_vm = self.app.domains[vm_name] + except KeyError: + # Failed/skipped VM + continue + + if 'netvm' in vm.properties: + if vm_info.netvm in restore_info: + value = restore_info[vm_info.netvm].name + else: + value = vm_info.netvm + + try: + host_vm.netvm = value + except Exception: # pylint: disable=broad-except + self.log.exception('Error setting %s.%s to %s', + vm.name, 'netvm', value) + + if 'default_dispvm' in vm.properties: + if vm.properties['default_dispvm'] in restore_info: + value = restore_info[vm.properties[ + 'default_dispvm']].name + else: + value = vm.properties['default_dispvm'] + + try: + host_vm.default_dispvm = value + except Exception: # pylint: disable=broad-except + self.log.exception('Error setting %s.%s to %s', + vm.name, 'default_dispvm', value) diff --git a/qubesadmin/backup/core2.py b/qubesadmin/backup/core2.py new 
file mode 100644 index 0000000..f21a717 --- /dev/null +++ b/qubesadmin/backup/core2.py @@ -0,0 +1,290 @@ +# -*- encoding: utf8 -*- +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2017 Marek Marczykowski-Górecki +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program; if not, see . + +'''Parser for core2 qubes.xml''' + +import ast +import xml.parsers +import logging +import lxml.etree + +import qubesadmin.backup + +service_to_feature = { + 'ntpd': 'service.ntpd', + 'qubes-update-check': 'check-updates', + 'meminfo-writer': 'services.meminfo-writer', +} + +class Core2VM(qubesadmin.backup.BackupVM): + '''VM object''' + # pylint: disable=too-few-public-methods + def __init__(self): + super(Core2VM, self).__init__() + self.backup_content = False + + @property + def included_in_backup(self): + return self.backup_content + +class Core2Qubes(qubesadmin.backup.BackupApp): + '''Parsed qubes.xml''' + def __init__(self, store=None): + if store is None: + raise ValueError("store path required") + self.qid_map = {} + self.log = logging.getLogger('qubesadmin.backup.core2') + super(Core2Qubes, self).__init__(store) + + def load_globals(self, element): + '''Load global settings + + :param element: XML element containing global settings (root node) + ''' + default_netvm = element.get("default_netvm") + if default_netvm is not None: + self.globals['default_netvm'] = self.qid_map[int(default_netvm)] 
\ + if default_netvm != "None" else None + + # default_fw_netvm = element.get("default_fw_netvm") + # if default_fw_netvm is not None: + # self.globals['default_fw_netvm'] = \ + # self.qid_map[int(default_fw_netvm)] \ + # if default_fw_netvm != "None" else None + + updatevm = element.get("updatevm") + if updatevm is not None: + self.globals['updatevm'] = self.qid_map[int(updatevm)] \ + if updatevm != "None" else None + + clockvm = element.get("clockvm") + if clockvm is not None: + self.globals['clockvm'] = self.qid_map[int(clockvm)] \ + if clockvm != "None" else None + + default_template = element.get("default_template") + self.globals['default_template'] = self.qid_map[int(default_template)] \ + if default_template.lower() != "none" else None + + + def set_netvm_dependency(self, element): + '''Set dependencies between VMs''' + kwargs = {} + attr_list = ("name", "uses_default_netvm", "netvm_qid") + + for attribute in attr_list: + kwargs[attribute] = element.get(attribute) + + vm = self.domains[kwargs["name"]] + + # netvm property + if element.get("uses_default_netvm") is None: + uses_default_netvm = True + else: + uses_default_netvm = ( + True if element.get("uses_default_netvm") == "True" else False) + if not uses_default_netvm: + netvm_qid = element.get("netvm_qid") + if netvm_qid is None or netvm_qid == "none": + vm.properties['netvm'] = None + else: + vm.properties['netvm'] = self.qid_map[int(netvm_qid)] + + # And DispVM netvm, translated to default_dispvm + if element.get("uses_default_dispvm_netvm") is None: + uses_default_dispvm_netvm = True + else: + uses_default_dispvm_netvm = ( + True if element.get("uses_default_dispvm_netvm") == "True" + else False) + if not uses_default_dispvm_netvm: + dispvm_netvm_qid = element.get("dispvm_netvm_qid") + if dispvm_netvm_qid is None or dispvm_netvm_qid == "none": + dispvm_netvm = None + else: + dispvm_netvm = self.qid_map[int(dispvm_netvm_qid)] + else: + dispvm_netvm = vm.properties.get('netvm', self.globals[ + 
'default_netvm']) + + if dispvm_netvm != self.globals['default_netvm']: + if dispvm_netvm: + dispvm_tpl_name = 'disp-{}'.format(dispvm_netvm) + else: + dispvm_tpl_name = 'disp-no-netvm' + + vm.properties['default_dispvm'] = dispvm_tpl_name + + if dispvm_tpl_name not in self.domains: + vm = Core2VM() + vm.name = dispvm_tpl_name + vm.label = 'red' + vm.properties['netvm'] = dispvm_netvm + vm.properties['dispvm_allowed'] = True + vm.backup_content = True + vm.backup_path = None + self.domains[vm.name] = vm + # TODO: add support for #2075 + # TODO: set qrexec policy based on dispvm_netvm value + + def import_core2_vm(self, element): + '''Parse a single VM from given XML node + + This method load only VM properties not depending on other VMs + (other than template). VM connections are set later. + :param element: XML node + ''' + vm_class_name = element.tag + vm = Core2VM() + vm.name = element.get('name') + vm.label = element.get('label', 'red') + self.domains[vm.name] = vm + kwargs = {} + if vm_class_name in ["QubesTemplateVm", "QubesTemplateHVm"]: + vm.klass = "TemplateVM" + elif element.get('template_qid').lower() == "none": + kwargs['dir_path'] = element.get('dir_path') + vm.klass = "StandaloneVM" + else: + kwargs['dir_path'] = element.get('dir_path') + vm.template = \ + self.qid_map[int(element.get('template_qid'))] + vm.klass = "AppVM" + # simple attributes + for attr, default in { + #'installed_by_rpm': 'False', + 'include_in_backups': 'True', + 'qrexec_timeout': '60', + 'vcpus': '2', + 'memory': '400', + 'maxmem': '4000', + 'default_user': 'user', + 'debug': 'False', + 'mac': None, + 'autostart': 'False'}.items(): + value = element.get(attr) + if value and value != default: + vm.properties[attr] = value + # attributes with default value + for attr in ["kernel", "kernelopts"]: + value = element.get(attr) + if value and value.lower() == "none": + value = None + value_is_default = element.get( + "uses_default_{}".format(attr)) + if value_is_default and 
value_is_default.lower() != \ + "true": + vm.properties[attr] = value + vm.properties['hvm'] = "HVm" in vm_class_name + if vm_class_name in ('QubesNetVm', 'QubesProxyVm'): + vm.properties['provides_network'] = True + if vm_class_name == 'QubesNetVm': + vm.properties['netvm'] = None + if element.get('internal', False) == 'True': + vm.features['internal'] = True + + services = element.get('services') + if services: + services = ast.literal_eval(services) + else: + services = {} + for service, value in services.items(): + feature = service + for repl_service, repl_feature in \ + service_to_feature.items(): + if repl_service == service: + feature = repl_feature + vm.features[feature] = value + + vm.backup_content = element.get('backup_content', False) == 'True' + vm.backup_path = element.get('backup_path', None) + vm.size = element.get('backup_size', 0) + + pci_strictreset = element.get('pci_strictreset', True) + pcidevs = element.get('pcidevs') + if pcidevs: + pcidevs = ast.literal_eval(pcidevs) + for pcidev in pcidevs: + if not pci_strictreset: + vm.devices['pci'][('dom0', pcidev.replace(':', '_'))] = { + 'no-strict-reset': True} + else: + vm.devices['pci'][('dom0', pcidev.replace(':', '_'))] = {} + + def load(self): + with open(self.store) as fh: + try: + # pylint: disable=no-member + tree = lxml.etree.parse(fh) + except (EnvironmentError, # pylint: disable=broad-except + xml.parsers.expat.ExpatError) as err: + self.log.error(err) + return False + + self.globals['default_kernel'] = tree.getroot().get("default_kernel") + + vm_classes = ["AdminVM", "TemplateVm", "TemplateHVm", + "AppVm", "HVm", "NetVm", "ProxyVm"] + + # First build qid->name map + for vm_class_name in vm_classes: + vms_of_class = tree.findall("Qubes" + vm_class_name) + for element in vms_of_class: + qid = element.get('qid', None) + name = element.get('name', None) + if qid and name: + self.qid_map[int(qid)] = name + + # Qubes R2 din't have dom0 in qubes.xml + if 0 not in self.qid_map: + vm = Core2VM() 
+ vm.name = 'dom0' + vm.klass = 'AdminVM' + vm.label = 'black' + self.domains['dom0'] = vm + self.qid_map[0] = 'dom0' + + # Then load all VMs - since we have qid_map, no need to preserve + # specific load older. + for vm_class_name in vm_classes: + vms_of_class = tree.findall("Qubes" + vm_class_name) + for element in vms_of_class: + self.import_core2_vm(element) + + # ... and load other VMs + for vm_class_name in ["AppVm", "HVm", "NetVm", "ProxyVm"]: + vms_of_class = tree.findall("Qubes" + vm_class_name) + # first non-template based, then template based + sorted_vms_of_class = sorted(vms_of_class, + key=lambda x: str(x.get('template_qid')).lower() != "none") + for element in sorted_vms_of_class: + self.import_core2_vm(element) + + # and load other defaults (default netvm, updatevm etc) + self.load_globals(tree.getroot()) + + # After importing all VMs, set netvm references, in the same order + for vm_class_name in vm_classes: + for element in tree.findall("Qubes" + vm_class_name): + try: + self.set_netvm_dependency(element) + except (ValueError, LookupError) as err: + self.log.error("VM %s: failed to set netvm dependency: %s", + element.get('name'), err) diff --git a/qubesadmin/backup/core3.py b/qubesadmin/backup/core3.py new file mode 100644 index 0000000..a975d00 --- /dev/null +++ b/qubesadmin/backup/core3.py @@ -0,0 +1,145 @@ +# -*- encoding: utf8 -*- +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2017 Marek Marczykowski-Górecki +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program; if not, see . + +'''Parser for core2 qubes.xml''' + +import xml.parsers +import logging +import lxml.etree + +import qubesadmin.backup + +class Core3VM(qubesadmin.backup.BackupVM): + '''VM object''' + # pylint: disable=too-few-public-methods + @property + def included_in_backup(self): + return self.backup_path is not None + +class Core3Qubes(qubesadmin.backup.BackupApp): + '''Parsed qubes.xml''' + def __init__(self, store=None): + if store is None: + raise ValueError("store path required") + self.log = logging.getLogger('qubesadmin.backup.core3') + self.labels = {} + super(Core3Qubes, self).__init__(store) + + @staticmethod + def get_property(xml_obj, prop): + '''Get property of given object (XML node) + + Object can be any PropertyHolder serialized to XML - in practice + :py:class:`BaseVM` or :py:class:`Qubes`. + ''' + xml_prop = xml_obj.findall('./property[@name=\'{}\']'.format(prop)) + if not xml_prop: + raise KeyError(prop) + return xml_prop[0].text + + def load_labels(self, labels_element): + '''Load labels table''' + for node in labels_element.findall('label'): + ident = node.get('id') + assert ident is not None + self.labels[ident] = node.text + + + def load_globals(self, globals_element): + '''Load global settings + + :param globals_element: XML element containing global settings + ''' + for node in globals_element.findall('property'): + name = node.get('name') + assert name is not None + self.globals[name] = node.text + + def import_core3_vm(self, element): + '''Parse a single VM from given XML node + + This method load only VM properties not depending on other VMs + (other than template). VM connections are set later. 
+ :param element: XML node + ''' + vm = Core3VM() + vm.klass = element.get('class') + + for node in element.findall('./properties/property'): + name = node.get('name') + assert name is not None + vm.properties[name] = node.text + + for node in element.findall('./features/feature'): + name = node.get('name') + assert name is not None + vm.features[name] = False if node.text is None else node.text + + for node in element.findall('./tags/tag'): + name = node.get('name') + assert name is not None + vm.tags.add(name) + + for bus_node in element.findall('./devices'): + bus_name = bus_node.get('class') + assert bus_name is not None + for node in bus_node.findall('./device'): + backend_domain = node.get('backend-domain') + ident = node.get('id') + options = {} + for opt_node in node.findall('./option'): + opt_name = opt_node.get('name') + options[opt_name] = opt_node.text + vm.devices[bus_name][(backend_domain, ident)] = options + + # extract base properties + if vm.klass == 'AdminVM': + vm.name = 'dom0' + else: + vm.name = vm.properties.pop('name') + vm.label = self.labels[vm.properties.pop('label')] + vm.template = vm.properties.pop('template', None) + # skip UUID and qid, will be generated during restore + vm.properties.pop('uuid', None) + vm.properties.pop('qid', None) + + if vm.features.pop('backup-content', False): + vm.backup_path = vm.features.pop('backup-path', None) + vm.size = vm.features.pop('backup-size', 0) + + self.domains[vm.name] = vm + + def load(self): + with open(self.store) as fh: + try: + # pylint: disable=no-member + tree = lxml.etree.parse(fh) + except (EnvironmentError, # pylint: disable=broad-except + xml.parsers.expat.ExpatError) as err: + self.log.error(err) + return False + + self.load_labels(tree.find('./labels')) + + for element in tree.findall('./domains/domain'): + self.import_core3_vm(element) + + # and load other defaults (default netvm, updatevm etc) + self.load_globals(tree.find('./properties')) From 
e6d3425047517cda3729ac229f963829b6d7e06d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 14 Jul 2017 04:19:02 +0200 Subject: [PATCH 03/17] tests/backup: tests for backup-restore code Based on "backup compatibility" tests, which manually assemble the backup. This is because we don't have access to actual backup creation code here. QubesOS/qubes-issues#1214 --- ci/requirements.txt | 2 + qubesadmin/tests/backup/__init__.py | 279 +++++ .../tests/backup/backupcompatibility.py | 972 ++++++++++++++++++ 3 files changed, 1253 insertions(+) create mode 100644 qubesadmin/tests/backup/__init__.py create mode 100644 qubesadmin/tests/backup/backupcompatibility.py diff --git a/ci/requirements.txt b/ci/requirements.txt index 97dfab5..8ce179d 100644 --- a/ci/requirements.txt +++ b/ci/requirements.txt @@ -5,3 +5,5 @@ pylint sphinx codecov python-daemon +mock +lxml diff --git a/qubesadmin/tests/backup/__init__.py b/qubesadmin/tests/backup/__init__.py new file mode 100644 index 0000000..34a8a25 --- /dev/null +++ b/qubesadmin/tests/backup/__init__.py @@ -0,0 +1,279 @@ +# -*- encoding: utf8 -*- +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2017 Marek Marczykowski-Górecki +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program; if not, see . 
+import hashlib +import logging + +import multiprocessing +import os + +import shutil + +import qubesadmin.backup +import qubesadmin.exc +import qubesadmin.tests + +SIGNATURE_LEN = 512 + +class BackupTestCase(qubesadmin.tests.QubesTestCase): + class BackupErrorHandler(logging.Handler): + def __init__(self, errors_queue, level=logging.NOTSET): + super(BackupTestCase.BackupErrorHandler, self).__init__(level) + self.errors_queue = errors_queue + + def emit(self, record): + self.errors_queue.put(record.getMessage()) + + def make_vm_name(self, name): + try: + return super(BackupTestCase, self).make_vm_name(name) + except AttributeError: + return 'test-' + name + + def setUp(self): + super(BackupTestCase, self).setUp() + self.error_detected = multiprocessing.Queue() + self.log = logging.getLogger('qubesadmin.tests.backup') + self.log.debug("Creating backupvm") + + self.backupdir = os.path.join(os.environ["HOME"], "test-backup") + if os.path.exists(self.backupdir): + shutil.rmtree(self.backupdir) + os.mkdir(self.backupdir) + + self.error_handler = self.BackupErrorHandler(self.error_detected, + level=logging.WARNING) + backup_log = logging.getLogger('qubesadmin.backup') + backup_log.addHandler(self.error_handler) + + def tearDown(self): + super(BackupTestCase, self).tearDown() + shutil.rmtree(self.backupdir) + + backup_log = logging.getLogger('qubes.backup') + backup_log.removeHandler(self.error_handler) + + def fill_image(self, path, size=None, sparse=False, signature=b''): + block_size = 4096 + + self.log.debug("Filling %s" % path) + f = open(path, 'wb+') + if size is None: + f.seek(0, 2) + size = f.tell() + f.seek(0) + f.write(signature) + f.write(b'\0' * (SIGNATURE_LEN - len(signature))) + + for block_num in range(int(size/block_size)): + if sparse: + f.seek(block_size, 1) + f.write(b'a' * block_size) + + f.close() + + # NOTE: this was create_basic_vms + def create_backup_vms(self, pool=None): + template = self.app.default_template + + vms = [] + vmname = 
self.make_vm_name('test-net') + self.log.debug("Creating %s" % vmname) + testnet = self.app.add_new_vm('AppVM', + name=vmname, + label='red') + testnet.provides_network = True + testnet.create_on_disk(pool=pool) + testnet.features['services/ntpd'] = True + vms.append(testnet) + self.fill_image(testnet.storage.export('private'), 20*1024*1024) + + vmname = self.make_vm_name('test1') + self.log.debug("Creating %s" % vmname) + testvm1 = self.app.add_new_vm('AppVM', + name=vmname, template=template, label='red') + testvm1.uses_default_netvm = False + testvm1.netvm = testnet + testvm1.create_on_disk(pool=pool) + vms.append(testvm1) + self.fill_image(testvm1.storage.export('private'), 100 * 1024 * 1024) + + vmname = self.make_vm_name('testhvm1') + self.log.debug("Creating %s" % vmname) + testvm2 = self.app.add_new_vm('StandaloneVM', + name=vmname, + label='red') + testvm2.hvm = True + testvm2.create_on_disk(pool=pool) + self.fill_image(testvm2.storage.export('root'), 1024 * 1024 * 1024, \ + True) + vms.append(testvm2) + + vmname = self.make_vm_name('template') + self.log.debug("Creating %s" % vmname) + testvm3 = self.app.add_new_vm('TemplateVM', + name=vmname, label='red') + testvm3.create_on_disk(pool=pool) + self.fill_image(testvm3.storage.export('root'), 100 * 1024 * 1024, True) + vms.append(testvm3) + + vmname = self.make_vm_name('custom') + self.log.debug("Creating %s" % vmname) + testvm4 = self.app.add_new_vm('AppVM', + name=vmname, template=testvm3, label='red') + testvm4.create_on_disk(pool=pool) + vms.append(testvm4) + + self.app.save() + + return vms + + def make_backup(self, vms, target=None, expect_failure=False, **kwargs): + if target is None: + target = self.backupdir + try: + backup = qubesadmin.backup.Backup(self.app, vms, **kwargs) + except qubesadmin.exc.QubesException as e: + if not expect_failure: + self.fail("QubesException during backup_prepare: %s" % str(e)) + else: + raise + + if 'passphrase' not in kwargs: + backup.passphrase = 'qubes' + 
backup.target_dir = target + + try: + backup.backup_do() + except qubesadmin.exc.QubesException as e: + if not expect_failure: + self.fail("QubesException during backup_do: %s" % str(e)) + else: + raise + + def restore_backup(self, source=None, appvm=None, options=None, + expect_errors=None, manipulate_restore_info=None, + passphrase='qubes'): + if source is None: + backupfile = os.path.join(self.backupdir, + sorted(os.listdir(self.backupdir))[-1]) + else: + backupfile = source + + with self.assertNotRaises(qubesadmin.exc.QubesException): + restore_op = qubesadmin.backup.BackupRestore( + self.app, backupfile, appvm, passphrase) + if options: + for key, value in options.items(): + setattr(restore_op.options, key, value) + restore_info = restore_op.get_restore_info() + if callable(manipulate_restore_info): + restore_info = manipulate_restore_info(restore_info) + self.log.debug(restore_op.get_restore_summary(restore_info)) + + with self.assertNotRaises(qubesadmin.exc.QubesException): + restore_op.restore_do(restore_info) + + errors = [] + if expect_errors is None: + expect_errors = [] + else: + self.assertFalse(self.error_detected.empty(), + "Restore errors expected, but none detected") + while not self.error_detected.empty(): + current_error = self.error_detected.get() + if any(map(current_error.startswith, expect_errors)): + continue + errors.append(current_error) + self.assertTrue(len(errors) == 0, + "Error(s) detected during backup_restore_do: %s" % + '\n'.join(errors)) + if not appvm and not os.path.isdir(backupfile): + os.unlink(backupfile) + + def create_sparse(self, path, size, signature=b''): + f = open(path, "wb") + f.write(signature) + f.write(b'\0' * (SIGNATURE_LEN - len(signature))) + f.truncate(size) + f.close() + + def vm_checksum(self, vms): + hashes = {} + for vm in vms: + assert isinstance(vm, qubesadmin.vm.QubesVM) + hashes[vm.name] = {} + for name, volume in vm.volumes.items(): + if not volume.rw or not volume.save_on_stop: + continue + vol_path = 
vm.storage.get_pool(volume).export(volume) + hasher = hashlib.sha1() + with open(vol_path, 'rb') as afile: + for buf in iter(lambda: afile.read(4096000), b''): + hasher.update(buf) + hashes[vm.name][name] = hasher.hexdigest() + return hashes + + def assertCorrectlyRestored(self, orig_vms, orig_hashes): + ''' Verify if restored VMs are identical to those before backup. + + :param orig_vms: collection of original QubesVM objects + :param orig_hashes: result of :py:meth:`vm_checksum` on original VMs, + before backup + :return: + ''' + for vm in orig_vms: + self.assertIn(vm.name, self.app.domains) + restored_vm = self.app.domains[vm.name] + for prop in ('name', 'kernel', + 'memory', 'maxmem', 'kernelopts', + 'services', 'vcpus', 'features' + 'include_in_backups', 'default_user', 'qrexec_timeout', + 'autostart', 'pci_strictreset', 'debug', + 'internal'): + if not hasattr(vm, prop): + continue + self.assertEqual( + getattr(vm, prop), getattr(restored_vm, prop), + "VM {} - property {} not properly restored".format( + vm.name, prop)) + for prop in ('netvm', 'template', 'label'): + if not hasattr(vm, prop): + continue + orig_value = getattr(vm, prop) + restored_value = getattr(restored_vm, prop) + if orig_value and restored_value: + self.assertEqual(orig_value.name, restored_value.name, + "VM {} - property {} not properly restored".format( + vm.name, prop)) + else: + self.assertEqual(orig_value, restored_value, + "VM {} - property {} not properly restored".format( + vm.name, prop)) + for dev_class in vm.devices.keys(): + for dev in vm.devices[dev_class]: + self.assertIn(dev, restored_vm.devices[dev_class], + "VM {} - {} device not restored".format( + vm.name, dev_class)) + + if orig_hashes: + hashes = self.vm_checksum([restored_vm])[restored_vm.name] + self.assertEqual(orig_hashes[vm.name], hashes, + "VM {} - disk images are not properly restored".format( + vm.name)) diff --git a/qubesadmin/tests/backup/backupcompatibility.py b/qubesadmin/tests/backup/backupcompatibility.py 
new file mode 100644 index 0000000..48a24e4 --- /dev/null +++ b/qubesadmin/tests/backup/backupcompatibility.py @@ -0,0 +1,972 @@ +# -*- encoding: utf8 -*- +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2014 Marek Marczykowski-Górecki +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public License +# as published by the Free Software Foundation; either version 2.1 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +import functools +import tempfile +from multiprocessing import Queue + +import os +import subprocess + +try: + import unittest.mock as mock +except ImportError: + import mock +import re + +import multiprocessing + +import sys + +import qubesadmin.backup.core2 +import qubesadmin.storage +import qubesadmin.tests +import qubesadmin.tests.backup + +QUBESXML_R2B2 = ''' + + + + + + + + + + + + + + + + + +''' + +QUBESXML_R2 = ''' + + + + + + + + + + + + + + + + + + +''' + +MANGLED_SUBDIRS_R2 = { + "test-work": "vm5", + "test-template-clone": "vm9", + "test-custom-template-appvm": "vm10", + "test-standalonevm": "vm11", + "test-testproxy": "vm12", + "test-testhvm": "vm14", + "test-net": "vm16", +} + +APPTEMPLATE_R2B2 = ''' +[Desktop Entry] +Name=%VMNAME%: {name} +GenericName=%VMNAME%: {name} +GenericName[ca]=%VMNAME%: Navegador web +GenericName[cs]=%VMNAME%: Webový prohlížeč +GenericName[es]=%VMNAME%: Navegador web +GenericName[fa]=%VMNAME%: مرورر اینترنتی +GenericName[fi]=%VMNAME%: WWW-selain +GenericName[fr]=%VMNAME%: Navigateur Web +GenericName[hu]=%VMNAME%: Webböngésző +GenericName[it]=%VMNAME%: Browser Web +GenericName[ja]=%VMNAME%: ウェブ・ブラウザ +GenericName[ko]=%VMNAME%: 웹 브라우저 +GenericName[nb]=%VMNAME%: Nettleser +GenericName[nl]=%VMNAME%: Webbrowser +GenericName[nn]=%VMNAME%: Nettlesar +GenericName[no]=%VMNAME%: Nettleser +GenericName[pl]=%VMNAME%: Przeglądarka WWW +GenericName[pt]=%VMNAME%: Navegador Web +GenericName[pt_BR]=%VMNAME%: Navegador Web +GenericName[sk]=%VMNAME%: Internetový prehliadač +GenericName[sv]=%VMNAME%: Webbläsare +Comment={comment} +Comment[ca]=Navegueu per el web +Comment[cs]=Prohlížení stránek World Wide Webu +Comment[de]=Im Internet surfen +Comment[es]=Navegue por la web +Comment[fa]=صفحات شبه جهانی اینترنت را مرور نمایید +Comment[fi]=Selaa Internetin WWW-sivuja +Comment[fr]=Navigue sur Internet +Comment[hu]=A világháló böngészése +Comment[it]=Esplora il web +Comment[ja]=ウェブを閲覧します +Comment[ko]=웹을 돌아 다닙니다 +Comment[nb]=Surf på nettet 
+Comment[nl]=Verken het internet +Comment[nn]=Surf på nettet +Comment[no]=Surf på nettet +Comment[pl]=Przeglądanie stron WWW +Comment[pt]=Navegue na Internet +Comment[pt_BR]=Navegue na Internet +Comment[sk]=Prehliadanie internetu +Comment[sv]=Surfa på webben +Exec=qvm-run -q --tray -a %VMNAME% '{command} %u' +Categories=Network;WebBrowser; +X-Qubes-VmName=%VMNAME% +Icon=%VMDIR%/icon.png +''' + +QUBESXML_R1 = ''' + +''' + +BACKUP_HEADER_R2 = '''version=3 +hmac-algorithm=SHA512 +crypto-algorithm=aes-256-cbc +encrypted={encrypted} +compressed={compressed} +compression-filter=gzip +''' + +parsed_qubes_xml_r2 = { + 'domains': { + 'dom0': { + 'klass': 'AdminVM', + 'label': 'black', + 'properties': {}, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 'fedora-20-x64': { + 'klass': 'TemplateVM', + 'label': 'black', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + }, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 'netvm': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'memory': '200', + 'netvm': None, + 'default_dispvm': 'disp-no-netvm', + 'provides_network': True}, + 'devices': { + 'pci': { + ('dom0', '02_00.0'): {}, + ('dom0', '03_00.0'): {}, + } + }, + 'tags': set(), + 'features': { + 'service.ntpd': False, + 'services.meminfo-writer': False + }, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'firewallvm': { + 'klass': 'AppVM', + 'label': 'green', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'memory': '200', + 'provides_network': True + }, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'fedora-20-x64-dvm': { + 'klass': 'AppVM', + 'label': 
'gray', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'vcpus': '1' + }, + 'devices': {}, + 'tags': set(), + 'features': { + 'internal': True, 'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'banking': { + 'klass': 'AppVM', + 'label': 'green', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'personal': { + 'klass': 'AppVM', + 'label': 'yellow', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'untrusted': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'netvm': 'test-testproxy', + 'default_dispvm': 'disp-test-testproxy', + }, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': None, + 'included_in_backup': False, + }, + 'testproxy2': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'memory': '200', + 'provides_network': True}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'test-template-clone', + 'backup_path': None, + 'included_in_backup': False, + }, + 'test-testproxy': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': False, + 'maxmem': '1535', + 'memory': '200', + 'provides_network': True}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': 'servicevms/test-testproxy', + 'included_in_backup': True, + }, + 'test-testhvm': { + 'klass': 'StandaloneVM', + 'label': 'purple', + 'properties': {'hvm': True, 'memory': 
'512'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': False}, + 'template': None, + 'backup_path': 'appvms/test-testhvm', + 'included_in_backup': True, + 'root_size': 209715712, + }, + 'test-work': { + 'klass': 'AppVM', + 'label': 'green', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'fedora-20-x64', + 'backup_path': 'appvms/test-work', + 'included_in_backup': True, + }, + 'test-template-clone': { + 'klass': 'TemplateVM', + 'label': 'green', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': None, + 'backup_path': 'vm-templates/test-template-clone', + 'included_in_backup': True, + }, + 'test-custom-template-appvm': { + 'klass': 'AppVM', + 'label': 'yellow', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': 'test-template-clone', + 'backup_path': 'appvms/test-custom-template-appvm', + 'included_in_backup': True, + }, + 'test-standalonevm': { + 'klass': 'StandaloneVM', + 'label': 'blue', + 'properties': {'hvm': False, 'maxmem': '1535'}, + 'devices': {}, + 'tags': set(), + 'features': {'services.meminfo-writer': True}, + 'template': None, + 'backup_path': 'appvms/test-standalonevm', + 'included_in_backup': True, + 'root_size': 209715712, + }, + 'test-net': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': {'hvm': False, + 'maxmem': '1535', + 'memory': '200', + 'netvm': None, + 'default_dispvm': 'disp-no-netvm', + 'provides_network': True}, + 'devices': { + 'pci': { + ('dom0', '02_00.0'): {}, + ('dom0', '03_00.0'): {}, + } + }, + 'tags': set(), + 'features': { + 'service.ntpd': False, + 'services.meminfo-writer': False + }, + 'template': 'fedora-20-x64', + 'backup_path': 'servicevms/test-net', + 'included_in_backup': True, + }, + 
'disp-no-netvm': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'netvm': None, + 'dispvm_allowed': True, + }, + 'devices': {}, + 'features': {}, + 'tags': set(), + 'template': None, # default + 'backup_path': None, + 'included_in_backup': True, + }, + 'disp-test-testproxy': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'netvm': 'test-testproxy', + 'dispvm_allowed': True, + }, + 'devices': {}, + 'features': {}, + 'tags': set(), + 'template': None, # default + 'backup_path': None, + 'included_in_backup': True, + }, + }, + 'globals': { + 'default_template': 'fedora-20-x64', + 'default_kernel': '3.7.6-2', + 'default_netvm': 'firewallvm', + 'clockvm': 'netvm', + 'updatevm': 'firewallvm' + }, +} + + +class TC_00_QubesXML(qubesadmin.tests.QubesTestCase): + + def assertCorrectlyConverted(self, xml_data, expected_data): + with tempfile.NamedTemporaryFile() as qubes_xml: + qubes_xml.file.write(xml_data.encode()) + backup_app = qubesadmin.backup.core2.Core2Qubes(qubes_xml.name) + self.assertCountEqual(backup_app.domains.keys(), + expected_data['domains'].keys()) + for vm in expected_data['domains']: + self.assertEqual(backup_app.domains[vm].name, vm) + self.assertEqual(backup_app.domains[vm].properties, + expected_data['domains'][vm]['properties'], vm) + for devtype in expected_data['domains'][vm]['devices']: + self.assertEqual(backup_app.domains[vm].devices[devtype], + expected_data['domains'][vm]['devices'][devtype], vm) + self.assertEqual(backup_app.domains[vm].tags, + expected_data['domains'][vm]['tags'], vm) + self.assertEqual(backup_app.domains[vm].features, + expected_data['domains'][vm]['features'], vm) + self.assertEqual(backup_app.domains[vm].template, + expected_data['domains'][vm]['template'], vm) + self.assertEqual(backup_app.domains[vm].klass, + expected_data['domains'][vm]['klass'], vm) + self.assertEqual(backup_app.domains[vm].label, + expected_data['domains'][vm]['label'], vm) + self.assertEqual(backup_app.domains[vm].backup_path, + 
expected_data['domains'][vm]['backup_path'], vm) + self.assertEqual(backup_app.domains[vm].included_in_backup, + expected_data['domains'][vm]['included_in_backup'], vm) + + self.assertEqual(backup_app.globals, expected_data['globals']) + + def test_000_qubes_xml_r2(self): + self.assertCorrectlyConverted(QUBESXML_R2, parsed_qubes_xml_r2) + +# backup code use multiprocessing, synchronize with main process +class AppProxy(object): + def __init__(self, app, sync_queue): + self._app = app + self._sync_queue = sync_queue + + def qubesd_call(self, dest, method, arg=None, payload=None, + payload_stream=None): + if payload_stream: + signature_bin = payload_stream.read(512) + payload = signature_bin.split(b'\0', 1)[0] + subprocess.call(['cat'], stdin=payload_stream, + stdout=subprocess.DEVNULL) + payload_stream.close() + self._sync_queue.put((dest, method, arg, payload)) + return self._app.qubesd_call(dest, method, arg, payload) + + +class MockVolume(qubesadmin.storage.Volume): + def __init__(self, import_data_queue, *args, **kwargs): + super(MockVolume, self).__init__(*args, **kwargs) + self.app = AppProxy(self.app, import_data_queue) + + +class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): + + storage_pool = None + + def tearDown(self): + try: + for vm in self.app.domains: + if vm.name.startswith('test-'): + del self.app.domains[vm.name] + except: + pass + super(TC_10_BackupCompatibility, self).tearDown() + + def create_whitelisted_appmenus(self, filename): + with open(filename, "w") as f: + f.write("gnome-terminal.desktop\n") + f.write("nautilus.desktop\n") + f.write("firefox.desktop\n") + f.write("mozilla-thunderbird.desktop\n") + f.write("libreoffice-startcenter.desktop\n") + + def create_appmenus(self, dir, template, list): + for name in list: + with open(os.path.join(dir, name + ".desktop"), "w") as f: + f.write(template.format(name=name, comment=name, command=name)) + + def create_private_img(self, filename): + signature = 
'/'.join(os.path.splitext(filename)[0].split('/')[-2:]) + self.create_sparse(filename, 2*2**30, signature=signature.encode()) + #subprocess.check_call(["/usr/sbin/mkfs.ext4", "-q", "-F", filename]) + + def create_volatile_img(self, filename): + self.create_sparse(filename, 11.5*2**30) + # here used to be sfdisk call with "0,1024,S\n,10240,L\n" input, + # but since sfdisk folks like to change command arguments in + # incompatible way, have an partition table verbatim here + ptable = ( + '\x00\x00\x00\x00\x00\x00\x00\x00\xab\x39\xd5\xd4\x00\x00\x20\x00' + '\x00\x21\xaa\x82\x82\x28\x08\x00\x00\x00\x00\x00\x00\x20\xaa\x00' + '\x82\x29\x15\x83\x9c\x79\x08\x00\x00\x20\x00\x00\x01\x40\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\x55' + ) + with open(filename, 'r+') as f: + f.seek(0x1b0) + f.write(ptable) + + # TODO: mkswap + + def fullpath(self, name): + return os.path.join(self.backupdir, name) + + def create_v1_files(self, r2b2=False): + appmenus_list = [ + "firefox", "gnome-terminal", "evince", "evolution", + "mozilla-thunderbird", "libreoffice-startcenter", "nautilus", + "gedit", "gpk-update-viewer", "gpk-application" + ] + + os.mkdir(self.fullpath("appvms")) + os.mkdir(self.fullpath("servicevms")) + os.mkdir(self.fullpath("vm-templates")) + + # normal AppVM + os.mkdir(self.fullpath("appvms/test-work")) + self.create_whitelisted_appmenus(self.fullpath( + "appvms/test-work/whitelisted-appmenus.list")) + os.symlink("/usr/share/qubes/icons/green.png", + self.fullpath("appvms/test-work/icon.png")) + self.create_private_img(self.fullpath("appvms/test-work/private.img")) + + # StandaloneVM + os.mkdir(self.fullpath("appvms/test-standalonevm")) + self.create_whitelisted_appmenus(self.fullpath( + "appvms/test-standalonevm/whitelisted-appmenus.list")) + os.symlink("/usr/share/qubes/icons/blue.png", + self.fullpath("appvms/test-standalonevm/icon.png")) + 
self.create_private_img(self.fullpath( + "appvms/test-standalonevm/private.img")) + self.create_sparse( + self.fullpath("appvms/test-standalonevm/root.img"), 10*2**30) + self.fill_image(self.fullpath("appvms/test-standalonevm/root.img"), + 100*1024*1024, True, + signature=b'test-standalonevm/root') + os.mkdir(self.fullpath("appvms/test-standalonevm/apps.templates")) + self.create_appmenus(self.fullpath("appvms/test-standalonevm/apps" + ".templates"), + APPTEMPLATE_R2B2, + appmenus_list) + os.mkdir(self.fullpath("appvms/test-standalonevm/kernels")) + for k_file in ["initramfs", "vmlinuz", "modules.img"]: + self.fill_image(self.fullpath("appvms/test-standalonevm/kernels/" + + k_file), 10*1024*1024) + + # VM based on custom template + subprocess.check_call( + ["/bin/cp", "-a", self.fullpath("appvms/test-work"), + self.fullpath("appvms/test-custom-template-appvm")]) + # override for correct signature + self.create_private_img( + self.fullpath("appvms/test-custom-template-appvm/private.img")) + + # HVM + if r2b2: + subprocess.check_call( + ["/bin/cp", "-a", self.fullpath("appvms/test-standalonevm"), + self.fullpath("appvms/test-testhvm")]) + # override for correct signature + self.create_private_img( + self.fullpath("appvms/test-testhvm/private.img")) + self.fill_image(self.fullpath("appvms/test-testhvm/root.img"), + 100*1024*1024, True, + signature=b'test-testhvm/root') + + # ProxyVM + os.mkdir(self.fullpath("servicevms/test-testproxy")) + self.create_whitelisted_appmenus(self.fullpath( + "servicevms/test-testproxy/whitelisted-appmenus.list")) + self.create_private_img( + self.fullpath("servicevms/test-testproxy/private.img")) + + # NetVM + os.mkdir(self.fullpath("servicevms/test-net")) + self.create_whitelisted_appmenus(self.fullpath( + "servicevms/test-net/whitelisted-appmenus.list")) + self.create_private_img( + self.fullpath("servicevms/test-net/private.img")) + + # Custom template + os.mkdir(self.fullpath("vm-templates/test-template-clone")) + 
self.create_private_img( + self.fullpath("vm-templates/test-template-clone/private.img")) + self.create_sparse(self.fullpath( + "vm-templates/test-template-clone/root-cow.img"), 10*2**30) + self.create_sparse(self.fullpath( + "vm-templates/test-template-clone/root.img"), 10*2**30) + self.fill_image(self.fullpath( + "vm-templates/test-template-clone/root.img"), 1*2**30, True, + signature=b'test-template-clone/root') + self.create_volatile_img(self.fullpath( + "vm-templates/test-template-clone/volatile.img")) + subprocess.check_call([ + "/bin/tar", "cS", + "-f", self.fullpath( + "vm-templates/test-template-clone/clean-volatile.img.tar"), + "-C", self.fullpath("vm-templates/test-template-clone"), + "volatile.img"]) + self.create_whitelisted_appmenus(self.fullpath( + "vm-templates/test-template-clone/whitelisted-appmenus.list")) + self.create_whitelisted_appmenus(self.fullpath( + "vm-templates/test-template-clone/vm-whitelisted-appmenus.list")) + if r2b2: + self.create_whitelisted_appmenus(self.fullpath( + "vm-templates/test-template-clone/netvm-whitelisted-appmenus" + ".list")) + os.symlink("/usr/share/qubes/icons/green.png", + self.fullpath("vm-templates/test-template-clone/icon.png")) + os.mkdir( + self.fullpath("vm-templates/test-template-clone/apps.templates")) + self.create_appmenus( + self.fullpath("vm-templates/test-template-clone/apps.templates"), + APPTEMPLATE_R2B2, + appmenus_list) + os.mkdir(self.fullpath("vm-templates/test-template-clone/apps")) + self.create_appmenus( + self.fullpath("vm-templates/test-template-clone/apps"), + APPTEMPLATE_R2B2.replace("%VMNAME%", "test-template-clone") + .replace("%VMDIR%", self.fullpath( + "vm-templates/test-template-clone")), + appmenus_list) + + def calculate_hmac(self, f_name, algorithm="sha512", password="qubes"): + with open(self.fullpath(f_name), "r") as f_data: + with open(self.fullpath(f_name+".hmac"), "w") as f_hmac: + subprocess.check_call( + ["openssl", "dgst", "-"+algorithm, "-hmac", password], + 
stdin=f_data, stdout=f_hmac) + + def append_backup_stream(self, f_name, stream, basedir=None): + if not basedir: + basedir = self.backupdir + subprocess.check_call(["tar", "-cO", "--posix", "-C", basedir, + f_name], + stdout=stream) + + def handle_v3_file(self, f_name, subdir, stream, compressed=True, + encrypted=True): + # create inner archive + tar_cmdline = ["tar", "-Pc", '--sparse', + '-C', self.fullpath(os.path.dirname(f_name)), + '--xform', 's:^%s:%s\\0:' % ( + os.path.basename(f_name), + subdir), + os.path.basename(f_name) + ] + if compressed: + tar_cmdline.insert(-1, "--use-compress-program=%s" % "gzip") + tar = subprocess.Popen(tar_cmdline, stdout=subprocess.PIPE) + if encrypted: + encryptor = subprocess.Popen( + ["openssl", "enc", "-e", "-aes-256-cbc", "-pass", "pass:qubes"], + stdin=tar.stdout, + stdout=subprocess.PIPE) + tar.stdout.close() + data = encryptor.stdout + else: + data = tar.stdout + + stage1_dir = self.fullpath(os.path.join("stage1", subdir)) + if not os.path.exists(stage1_dir): + os.makedirs(stage1_dir) + subprocess.check_call(["split", "--numeric-suffixes", + "--suffix-length=3", + "--bytes="+str(100*1024*1024), "-", + os.path.join(stage1_dir, + os.path.basename(f_name+"."))], + stdin=data) + data.close() + + for part in sorted(os.listdir(stage1_dir)): + if not re.match( + r"^{}.[0-9][0-9][0-9]$".format(os.path.basename(f_name)), + part): + continue + part_with_dir = os.path.join(subdir, part) + self.calculate_hmac(os.path.join("stage1", part_with_dir)) + self.append_backup_stream(part_with_dir, stream, + basedir=self.fullpath("stage1")) + self.append_backup_stream(part_with_dir+".hmac", stream, + basedir=self.fullpath("stage1")) + + def create_v3_backup(self, encrypted=True, compressed=True): + """ + Create "backup format 3" backup - used in R2 and R3.0 + + :param encrypted: Should the backup be encrypted + :param compressed: Should the backup be compressed + :return: + """ + output = open(self.fullpath("backup.bin"), "w") + with 
open(self.fullpath("backup-header"), "w") as f: + f.write(BACKUP_HEADER_R2.format( + encrypted=str(encrypted), + compressed=str(compressed) + )) + self.calculate_hmac("backup-header") + self.append_backup_stream("backup-header", output) + self.append_backup_stream("backup-header.hmac", output) + with open(self.fullpath("qubes.xml"), "w") as f: + if encrypted: + qubesxml = QUBESXML_R2 + for vmname, subdir in MANGLED_SUBDIRS_R2.items(): + qubesxml = re.sub(r"[a-z-]*/{}".format(vmname), + subdir, qubesxml) + f.write(qubesxml) + else: + f.write(QUBESXML_R2) + + self.handle_v3_file("qubes.xml", "", output, encrypted=encrypted, + compressed=compressed) + + self.create_v1_files(r2b2=True) + for vm_type in ["appvms", "servicevms"]: + for vm_name in os.listdir(self.fullpath(vm_type)): + vm_dir = os.path.join(vm_type, vm_name) + for f_name in os.listdir(self.fullpath(vm_dir)): + if encrypted: + subdir = MANGLED_SUBDIRS_R2[vm_name] + else: + subdir = vm_dir + self.handle_v3_file( + os.path.join(vm_dir, f_name), + subdir+'/', output, encrypted=encrypted) + + for vm_name in os.listdir(self.fullpath("vm-templates")): + vm_dir = os.path.join("vm-templates", vm_name) + if encrypted: + subdir = MANGLED_SUBDIRS_R2[vm_name] + else: + subdir = vm_dir + self.handle_v3_file( + os.path.join(vm_dir, "."), + subdir+'/', output, encrypted=encrypted) + + output.close() + + def setup_expected_calls(self, parsed_qubes_xml, templates_map=None): + if templates_map is None: + templates_map = {} + + extra_vm_list_lines = [] + for name, vm in parsed_qubes_xml['domains'].items(): + if not vm['included_in_backup']: + continue + + if self.storage_pool: + self.app.expected_calls[ + ('dom0', 'admin.vm.CreateInPool.' + vm['klass'], + templates_map.get(vm['template'], vm['template']), + 'name={} label={} pool={}'.format( + name, vm['label'], self.storage_pool).encode())] = \ + b'0\0' + else: + self.app.expected_calls[ + ('dom0', 'admin.vm.Create.' 
+ vm['klass'], + templates_map.get(vm['template'], vm['template']), + 'name={} label={}'.format(name, vm['label']).encode())] =\ + b'0\0' + extra_vm_list_lines.append( + '{} class={} state=Halted\n'.format(name, vm['klass']).encode()) + if vm['backup_path']: + self.app.expected_calls[ + (name, 'admin.vm.volume.List', None, None)] = \ + b'0\0root\nprivate\nvolatile\n' + if vm['klass'] == 'AppVM': + self.app.expected_calls[ + (name, 'admin.vm.volume.Info', 'root', None)] = \ + b'0\0' \ + b'pool=default\n' \ + b'vid=' + name.encode() + b'/root\n' \ + b'size=1024\n' \ + b'usage=512\n' \ + b'rw=False\n' \ + b'snap_on_start=True\n' \ + b'save_on_stop=False\n' \ + b'source=\n' \ + b'internal=True\n' \ + b'revisions_to_keep=3\n' + else: + self.app.expected_calls[ + (name, 'admin.vm.volume.Info', 'root', None)] = \ + b'0\0' \ + b'pool=default\n' \ + b'vid=' + name.encode() + b'/root\n' \ + b'size=1024\n' \ + b'usage=512\n' \ + b'rw=True\n' \ + b'snap_on_start=False\n' \ + b'save_on_stop=True\n' \ + b'internal=True\n' \ + b'revisions_to_keep=3\n' + self.app.expected_calls[ + (name, 'admin.vm.volume.Resize', 'root', + str(vm.get('root_size', 2147484160)).encode())] = \ + b'0\0' + self.app.expected_calls[ + (name, 'admin.vm.volume.Import', 'root', + name.encode() + b'/root')] = \ + b'0\0' + + self.app.expected_calls[ + (name, 'admin.vm.volume.Info', 'private', None)] = \ + b'0\0' \ + b'pool=default\n' \ + b'vid=' + name.encode() + b'/private\n' \ + b'size=1024\n' \ + b'usage=512\n' \ + b'rw=True\n' \ + b'snap_on_start=False\n' \ + b'save_on_stop=True\n' \ + b'revisions_to_keep=3\n' + self.app.expected_calls[ + (name, 'admin.vm.volume.Resize', 'private', b'2147483648')] = \ + b'0\0' + self.app.expected_calls[ + (name, 'admin.vm.volume.Import', 'private', + name.encode() + b'/private')] = \ + b'0\0' + self.app.expected_calls[ + (name, 'admin.vm.volume.Info', 'volatile', None)] = \ + b'0\0' \ + b'pool=default\n' \ + b'vid=' + name.encode() + b'/root\n' \ + b'size=1024\n' \ + 
b'usage=512\n' \ + b'rw=True\n' \ + b'snap_on_start=False\n' \ + b'save_on_stop=False\n' \ + b'revisions_to_keep=3\n' + + for prop, value in vm['properties'].items(): + self.app.expected_calls[ + (name, 'admin.vm.property.Set', prop, + str(value).encode() if value is not None else b'')] = b'0\0' + + for bus, devices in vm['devices'].items(): + for (backend_domain, ident), options in devices.items(): + all_options = options.copy() + all_options['persistent'] = True + encoded_options = ' '.join('{}={}'.format(key, value) for + key, value in all_options.items()).encode() + self.app.expected_calls[ + (name, 'admin.vm.device.{}.Attach'.format(bus), + '{}+{}'.format(backend_domain, ident), + encoded_options)] = b'0\0' + + for feature, value in vm['features'].items(): + if value is False: + value = '' + self.app.expected_calls[ + (name, 'admin.vm.feature.Set', feature, + str(value).encode())] = b'0\0' + + orig_admin_vm_list = self.app.expected_calls[ + ('dom0', 'admin.vm.List', None, None)] + self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = \ + [orig_admin_vm_list] + \ + [orig_admin_vm_list + b''.join(extra_vm_list_lines)] * \ + len(extra_vm_list_lines) + + def test_210_r2(self): + self.create_v3_backup(False) + self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = ( + b'0\0dom0 class=AdminVM state=Running\n' + b'fedora-25 class=TemplateVM state=Halted\n' + b'testvm class=AppVM state=Running\n' + ) + self.app.expected_calls[ + ('dom0', 'admin.property.Get', 'default_template', None)] = \ + b'0\0default=no type=vm fedora-25' + self.setup_expected_calls(parsed_qubes_xml_r2, templates_map={ + 'fedora-20-x64': 'fedora-25' + }) + + qubesd_calls_queue = multiprocessing.Queue() + + with mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)): + self.restore_backup(self.fullpath("backup.bin"), options={ + 'use-default-template': True, + 'use-default-netvm': True, + }) + + # retrieve calls from other 
multiprocess.Process instances + while not qubesd_calls_queue.empty(): + call_args = qubesd_calls_queue.get() + self.app.qubesd_call(*call_args) + qubesd_calls_queue.close() + + self.assertAllCalled() + + def test_220_r2_encrypted(self): + self.create_v3_backup(True) + + self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = ( + b'0\0dom0 class=AdminVM state=Running\n' + b'fedora-25 class=TemplateVM state=Halted\n' + b'testvm class=AppVM state=Running\n' + ) + self.app.expected_calls[ + ('dom0', 'admin.property.Get', 'default_template', None)] = \ + b'0\0default=no type=vm fedora-25' + self.setup_expected_calls(parsed_qubes_xml_r2, templates_map={ + 'fedora-20-x64': 'fedora-25' + }) + + qubesd_calls_queue = multiprocessing.Queue() + + with mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)): + self.restore_backup(self.fullpath("backup.bin"), options={ + 'use-default-template': True, + 'use-default-netvm': True, + }) + + # retrieve calls from other multiprocess.Process instances + while not qubesd_calls_queue.empty(): + call_args = qubesd_calls_queue.get() + self.app.qubesd_call(*call_args) + qubesd_calls_queue.close() + + self.assertAllCalled() + + +class TC_11_BackupCompatibilityIntoLVM(TC_10_BackupCompatibility): + storage_pool = 'some-pool' + + + def restore_backup(self, source=None, appvm=None, options=None, + expect_errors=None, manipulate_restore_info=None, + passphrase='qubes'): + if options is None: + options = {} + options['override_pool'] = self.storage_pool + super(TC_11_BackupCompatibilityIntoLVM, self).restore_backup(source, + appvm, options, expect_errors, manipulate_restore_info) From 2ab31e63dc1b440826948869d3660a1a58debb9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sat, 15 Jul 2017 14:12:39 +0200 Subject: [PATCH 04/17] Exclude 'qubesadmin.backup' module from python2 install --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py 
index 1df7133..6283ecd 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ import sys exclude=[] if sys.version_info[0:2] < (3, 4): exclude += ['qubesadmin.tools', 'qubesadmin.tests.tools'] + exclude += ['qubesadmin.backup', 'qubesadmin.tests.backup'] if sys.version_info[0:2] < (3, 5): exclude += ['qubesadmin.events'] From a91372a919e0c291a952651c5abd3deea6bb2287 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 16 Jul 2017 01:10:03 +0200 Subject: [PATCH 05/17] devices,features: fix bool values handling API define False value serialized as '' and True as 'True'. Do not serialize 0 as '' (features) or True as 'yes' (devices). --- qubesadmin/devices.py | 2 +- qubesadmin/features.py | 2 +- qubesadmin/tests/devices.py | 4 ++-- qubesadmin/tests/tools/qvm_device.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/qubesadmin/devices.py b/qubesadmin/devices.py index e0192ba..1d0bf5b 100644 --- a/qubesadmin/devices.py +++ b/qubesadmin/devices.py @@ -144,7 +144,7 @@ class DeviceCollection(object): options = device_assignment.options.copy() if device_assignment.persistent: - options['persistent'] = 'yes' + options['persistent'] = 'True' options_str = ' '.join('{}={}'.format(opt, val) for opt, val in sorted(options.items())) self._vm.qubesd_call(None, diff --git a/qubesadmin/features.py b/qubesadmin/features.py index cd33d59..4ed8ef8 100644 --- a/qubesadmin/features.py +++ b/qubesadmin/features.py @@ -42,7 +42,7 @@ class Features(object): self.vm.qubesd_call(self.vm.name, 'admin.vm.feature.Remove', key) def __setitem__(self, key, value): - if not value: + if value is False: # False value needs to be serialized as empty string self.vm.qubesd_call(self.vm.name, 'admin.vm.feature.Set', key, b'') else: diff --git a/qubesadmin/tests/devices.py b/qubesadmin/tests/devices.py index f169e09..7c3f2ed 100644 --- a/qubesadmin/tests/devices.py +++ b/qubesadmin/tests/devices.py @@ -129,7 +129,7 @@ class 
TC_00_DeviceCollection(qubesadmin.tests.QubesTestCase): def test_022_attach_persistent(self): self.app.expected_calls[ ('test-vm', 'admin.vm.device.test.Attach', 'test-vm2+dev1', - b'persistent=yes')] = b'0\0' + b'persistent=True')] = b'0\0' assign = qubesadmin.devices.DeviceAssignment( self.app.domains['test-vm2'], 'dev1') assign.persistent = True @@ -139,7 +139,7 @@ class TC_00_DeviceCollection(qubesadmin.tests.QubesTestCase): def test_023_attach_persistent_options(self): self.app.expected_calls[ ('test-vm', 'admin.vm.device.test.Attach', 'test-vm2+dev1', - b'persistent=yes ro=True')] = b'0\0' + b'persistent=True ro=True')] = b'0\0' assign = qubesadmin.devices.DeviceAssignment( self.app.domains['test-vm2'], 'dev1') assign.persistent = True diff --git a/qubesadmin/tests/tools/qvm_device.py b/qubesadmin/tests/tools/qvm_device.py index 4d70ef3..d0cdace 100644 --- a/qubesadmin/tests/tools/qvm_device.py +++ b/qubesadmin/tests/tools/qvm_device.py @@ -87,7 +87,7 @@ class TC_00_qvm_device(qubesadmin.tests.QubesTestCase): None, None)] = b'0\0' self.app.expected_calls[('test-vm3', 'admin.vm.device.test.List', None, None)] = \ - b'0\0test-vm1+dev1 persistent=yes\n' + b'0\0test-vm1+dev1 persistent=True\n' with qubesadmin.tests.tools.StdoutBuffer() as buf: qubesadmin.tools.qvm_device.main( @@ -144,7 +144,7 @@ class TC_00_qvm_device(qubesadmin.tests.QubesTestCase): def test_011_attach_persistent(self): ''' Test attach action ''' self.app.expected_calls[('test-vm2', 'admin.vm.device.test.Attach', - 'test-vm1+dev1', b'persistent=yes')] = b'0\0' + 'test-vm1+dev1', b'persistent=True')] = b'0\0' qubesadmin.tools.qvm_device.main( ['test', 'attach', '-p', 'test-vm2', 'test-vm1:dev1'], app=self.app) From 525f8dc7f394c78135473dc635b75032500ee822 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 16 Jul 2017 01:15:02 +0200 Subject: [PATCH 06/17] tests/backup: use smaller images This will be less realistic (private.img of 2MB?!), but makes tests much 
quicker. And since tar is used to make files sparse, we don't really test multi-part archives anyway. --- .../tests/backup/backupcompatibility.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/qubesadmin/tests/backup/backupcompatibility.py b/qubesadmin/tests/backup/backupcompatibility.py index 48a24e4..5569f0a 100644 --- a/qubesadmin/tests/backup/backupcompatibility.py +++ b/qubesadmin/tests/backup/backupcompatibility.py @@ -315,7 +315,7 @@ parsed_qubes_xml_r2 = { 'template': None, 'backup_path': 'appvms/test-testhvm', 'included_in_backup': True, - 'root_size': 209715712, + 'root_size': 2097664, }, 'test-work': { 'klass': 'AppVM', @@ -360,7 +360,7 @@ parsed_qubes_xml_r2 = { 'template': None, 'backup_path': 'appvms/test-standalonevm', 'included_in_backup': True, - 'root_size': 209715712, + 'root_size': 2097664, }, 'test-net': { 'klass': 'AppVM', @@ -512,11 +512,11 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): def create_private_img(self, filename): signature = '/'.join(os.path.splitext(filename)[0].split('/')[-2:]) - self.create_sparse(filename, 2*2**30, signature=signature.encode()) + self.create_sparse(filename, 2*2**20, signature=signature.encode()) #subprocess.check_call(["/usr/sbin/mkfs.ext4", "-q", "-F", filename]) def create_volatile_img(self, filename): - self.create_sparse(filename, 11.5*2**30) + self.create_sparse(filename, 11.5*2**20) # here used to be sfdisk call with "0,1024,S\n,10240,L\n" input, # but since sfdisk folks like to change command arguments in # incompatible way, have an partition table verbatim here @@ -566,7 +566,7 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.create_sparse( self.fullpath("appvms/test-standalonevm/root.img"), 10*2**30) self.fill_image(self.fullpath("appvms/test-standalonevm/root.img"), - 100*1024*1024, True, + 1024*1024, True, signature=b'test-standalonevm/root') 
os.mkdir(self.fullpath("appvms/test-standalonevm/apps.templates")) self.create_appmenus(self.fullpath("appvms/test-standalonevm/apps" @@ -576,7 +576,7 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): os.mkdir(self.fullpath("appvms/test-standalonevm/kernels")) for k_file in ["initramfs", "vmlinuz", "modules.img"]: self.fill_image(self.fullpath("appvms/test-standalonevm/kernels/" - + k_file), 10*1024*1024) + + k_file), 1024*1024) # VM based on custom template subprocess.check_call( @@ -595,7 +595,7 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.create_private_img( self.fullpath("appvms/test-testhvm/private.img")) self.fill_image(self.fullpath("appvms/test-testhvm/root.img"), - 100*1024*1024, True, + 1024*1024, True, signature=b'test-testhvm/root') # ProxyVM @@ -617,11 +617,11 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.create_private_img( self.fullpath("vm-templates/test-template-clone/private.img")) self.create_sparse(self.fullpath( - "vm-templates/test-template-clone/root-cow.img"), 10*2**30) + "vm-templates/test-template-clone/root-cow.img"), 10*2**20) self.create_sparse(self.fullpath( - "vm-templates/test-template-clone/root.img"), 10*2**30) + "vm-templates/test-template-clone/root.img"), 10*2**20) self.fill_image(self.fullpath( - "vm-templates/test-template-clone/root.img"), 1*2**30, True, + "vm-templates/test-template-clone/root.img"), 1*2**20, True, signature=b'test-template-clone/root') self.create_volatile_img(self.fullpath( "vm-templates/test-template-clone/volatile.img")) @@ -827,7 +827,7 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): b'revisions_to_keep=3\n' self.app.expected_calls[ (name, 'admin.vm.volume.Resize', 'root', - str(vm.get('root_size', 2147484160)).encode())] = \ + str(vm.get('root_size', 2097664)).encode())] = \ b'0\0' self.app.expected_calls[ (name, 'admin.vm.volume.Import', 'root', @@ -846,7 +846,7 @@ class 
TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): b'save_on_stop=True\n' \ b'revisions_to_keep=3\n' self.app.expected_calls[ - (name, 'admin.vm.volume.Resize', 'private', b'2147483648')] = \ + (name, 'admin.vm.volume.Resize', 'private', b'2097152')] = \ b'0\0' self.app.expected_calls[ (name, 'admin.vm.volume.Import', 'private', From 04ad224a9d672ba100aea6c86bd2633ff6482ba2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Sun, 16 Jul 2017 05:22:13 +0200 Subject: [PATCH 07/17] tests: add v4 (Qubes 4.0) backup format tests, move qubes.xml Move qubes.xml to be tested into separate files. The backup tests script is long enouch already. --- ci/coveragerc | 2 + .../tests/backup/backupcompatibility.py | 636 ++++++++++++++++-- qubesadmin/tests/backup/v3-qubes.xml | 18 + qubesadmin/tests/backup/v4-qubes.xml | 526 +++++++++++++++ setup.py | 3 + 5 files changed, 1134 insertions(+), 51 deletions(-) create mode 100644 qubesadmin/tests/backup/v3-qubes.xml create mode 100644 qubesadmin/tests/backup/v4-qubes.xml diff --git a/ci/coveragerc b/ci/coveragerc index 37b61cc..98a5244 100644 --- a/ci/coveragerc +++ b/ci/coveragerc @@ -1,3 +1,5 @@ [run] source = qubesadmin omit = qubesadmin/tests/* +# breaks backup tests for unknown reason +# concurrency=multiprocessing diff --git a/qubesadmin/tests/backup/backupcompatibility.py b/qubesadmin/tests/backup/backupcompatibility.py index 5569f0a..d5b98c6 100644 --- a/qubesadmin/tests/backup/backupcompatibility.py +++ b/qubesadmin/tests/backup/backupcompatibility.py @@ -20,11 +20,15 @@ # import functools import tempfile +import unittest +from distutils import spawn from multiprocessing import Queue import os import subprocess +import logging + try: import unittest.mock as mock except ImportError: @@ -32,54 +36,15 @@ except ImportError: import re import multiprocessing - +import pkg_resources import sys import qubesadmin.backup.core2 +import qubesadmin.backup.core3 import qubesadmin.storage import 
qubesadmin.tests import qubesadmin.tests.backup -QUBESXML_R2B2 = ''' - - - - - - - - - - - - - - - - - -''' - -QUBESXML_R2 = ''' - - - - - - - - - - - - - - - - - - -''' MANGLED_SUBDIRS_R2 = { "test-work": "vm5", @@ -90,6 +55,16 @@ MANGLED_SUBDIRS_R2 = { "test-testhvm": "vm14", "test-net": "vm16", } +MANGLED_SUBDIRS_R4 = { + "test-work": "vm3", + "test-fedora-25-clone": "vm7", + "test-custom-template-appvm": "vm31", + "test-standalonevm": "vm4", + "test-proxy": "vm30", + "test-hvm": "vm9", + "test-net": "vm6", + "test-d8test": "vm20", +} APPTEMPLATE_R2B2 = ''' [Desktop Entry] @@ -153,6 +128,14 @@ compressed={compressed} compression-filter=gzip ''' +BACKUP_HEADER_R4 = '''version=4 +hmac-algorithm=scrypt +encrypted=True +compressed={compressed} +compression-filter=gzip +backup-id=20161020T123455-1234 +''' + parsed_qubes_xml_r2 = { 'domains': { 'dom0': { @@ -424,13 +407,314 @@ parsed_qubes_xml_r2 = { }, } +parsed_qubes_xml_v4 = { + 'domains': { + 'dom0': { + 'klass': 'AdminVM', + 'label': 'black', + 'properties': {}, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 'fedora-25': { + 'klass': 'TemplateVM', + 'label': 'black', + 'properties': {}, + 'devices': {}, + 'tags': {'created-by-test-work'}, + 'features': { + 'gui': '1', + 'qrexec': 'True', + 'updates-available': False + }, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 'fedora-25-lvm': { + 'klass': 'TemplateVM', + 'label': 'black', + 'properties': { + 'maxmem': '4000', + }, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 'debian-8': { + 'klass': 'TemplateVM', + 'label': 'black', + 'properties': {}, + 'devices': {}, + 'tags': {'created-by-dom0'}, + 'features': { + 'gui': '1', + 'qrexec': 'True', + 'updates-available': False}, + 'template': None, + 'backup_path': None, + 'included_in_backup': False, + }, + 
'sys-net': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': 'False', + 'kernelopts': 'nopat i8042.nokbd i8042.noaux', + 'maxmem': '300', + 'memory': '300', + 'netvm': None, + 'default_user': 'user', + 'provides_network': 'True'}, + 'devices': { + 'pci': { + ('dom0', '02_00.0'): {}, + } + }, + 'tags': set(), + 'features': { + 'service.clocksync': '1', + 'service.meminfo-writer': False + }, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'sys-firewall': { + 'klass': 'AppVM', + 'label': 'green', + 'properties': { + 'autostart': 'True', + 'memory': '500', + 'provides_network': 'True' + }, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'test-d8test': { + 'klass': 'AppVM', + 'label': 'gray', + 'properties': {'debug': 'True', 'kernel': None}, + 'devices': {}, + 'tags': {'created-by-dom0'}, + 'features': {}, + 'template': 'debian-8', + 'backup_path': 'appvms/test-d8test', + 'included_in_backup': True, + }, + 'fedora-25-dvm': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'dispvm_allowed': 'True', + 'vcpus': '1', + }, + 'devices': {}, + 'tags': set(), + 'features': { + 'internal': '1', 'service.meminfo-writer': '1'}, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'fedora-25-clone-dvm': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'vcpus': '1', + 'dispvm_allowed': 'True', + }, + 'devices': {}, + 'tags': set(), + 'features': { + 'internal': '1', 'service.meminfo-writer': '1'}, + 'template': 'test-fedora-25-clone', + 'backup_path': None, + 'included_in_backup': False, + }, + 'vault': { + 'klass': 'AppVM', + 'label': 'black', + 'properties': {'hvm': 'False', 'maxmem': '1536', 'netvm': None}, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'personal': { + 'klass': 'AppVM', 
+ 'label': 'yellow', + 'properties': {'netvm': 'sys-firewall'}, + 'devices': {}, + 'tags': set(), + 'features': { + 'feat1': '1', + 'feat2': False, + 'feat32': '1', + 'featdis': False, + 'xxx': '1' + }, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'untrusted': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'netvm': None, + 'backup_timestamp': '1474318497', + 'default_dispvm': 'fedora-25-clone-dvm', + }, + 'devices': {}, + 'tags': set(), + 'features': {'service.meminfo-writer': '1'}, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'sys-usb': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'hvm': 'False', + 'autostart': 'True', + 'maxmem': '400', + 'provides_network': 'True', + }, + 'devices': {}, + 'tags': set(), + 'features': { + 'service.meminfo-writer': False, + 'service.network-manager': False, + }, + 'template': 'fedora-25', + 'backup_path': None, + 'included_in_backup': False, + }, + 'test-proxy': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': {'netvm': 'sys-net', 'provides_network': 'True'}, + 'devices': {}, + 'tags': {'created-by-dom0'}, + 'features': {}, + 'template': 'debian-8', + 'backup_path': 'appvms/test-proxy', + 'included_in_backup': True, + }, + 'test-hvm': { + 'klass': 'StandaloneVM', + 'label': 'purple', + 'properties': {'hvm': 'True', 'maxmem': '4000'}, + 'devices': {}, + 'tags': set(), + 'features': {'service.meminfo-writer': False}, + 'template': None, + 'backup_path': 'appvms/test-hvm', + 'included_in_backup': True, + 'root_size': 2097664, + }, + 'test-work': { + 'klass': 'AppVM', + 'label': 'green', + 'properties': { + 'ip': '192.168.0.1', + 'maxmem': '4000', + 'memory': '400'}, + 'devices': {}, + 'tags': {'tag1', 'tag2'}, + 'features': {'service.meminfo-writer': '1'}, + 'template': 'fedora-25', + 'backup_path': 'appvms/test-work', + 'included_in_backup': True, + }, + 'test-fedora-25-clone': { + 'klass': 'TemplateVM', + 
'label': 'black', + 'properties': {'maxmem': '4000'}, + 'devices': {}, + 'tags': set(), + 'features': {'service.meminfo-writer': '1'}, + 'template': None, + 'backup_path': 'vm-templates/test-fedora-25-clone', + 'included_in_backup': True, + }, + 'test-custom-template-appvm': { + 'klass': 'AppVM', + 'label': 'yellow', + 'properties': {'debug': 'True', 'kernel': None}, + 'devices': {}, + 'tags': {'created-by-dom0'}, + 'features': {}, + 'template': 'test-fedora-25-clone', + 'backup_path': 'appvms/test-custom-template-appvm', + 'included_in_backup': True, + }, + 'test-standalonevm': { + 'klass': 'StandaloneVM', + 'label': 'blue', + 'properties': {'maxmem': '4000'}, + 'devices': {}, + 'tags': set(), + 'features': {}, + 'template': None, + 'backup_path': 'appvms/test-standalonevm', + 'included_in_backup': True, + 'root_size': 2097664, + }, + 'test-net': { + 'klass': 'AppVM', + 'label': 'red', + 'properties': { + 'maxmem': '300', + 'memory': '300', + 'netvm': None, + 'provides_network': 'True' + }, + 'devices': { + 'pci': { + ('dom0', '03_00.0'): {}, + } + }, + 'tags': set(), + 'features': { + 'service.ntpd': False, + 'service.meminfo-writer': False + }, + 'template': 'fedora-25', + 'backup_path': 'appvms/test-net', + 'included_in_backup': True, + }, + }, + 'globals': { + 'default_template': 'fedora-25', + 'default_kernel': '4.9.31-17', + 'default_netvm': 'sys-firewall', + 'default_dispvm': 'fedora-25-dvm', + #'default_fw_netvm': 'sys-net', + 'clockvm': 'sys-net', + 'updatevm': 'sys-firewall' + }, +} + class TC_00_QubesXML(qubesadmin.tests.QubesTestCase): - def assertCorrectlyConverted(self, xml_data, expected_data): - with tempfile.NamedTemporaryFile() as qubes_xml: - qubes_xml.file.write(xml_data.encode()) - backup_app = qubesadmin.backup.core2.Core2Qubes(qubes_xml.name) + def assertCorrectlyConverted(self, backup_app, expected_data): self.assertCountEqual(backup_app.domains.keys(), expected_data['domains'].keys()) for vm in expected_data['domains']: @@ -458,7 +742,19 
@@ class TC_00_QubesXML(qubesadmin.tests.QubesTestCase): self.assertEqual(backup_app.globals, expected_data['globals']) def test_000_qubes_xml_r2(self): - self.assertCorrectlyConverted(QUBESXML_R2, parsed_qubes_xml_r2) + xml_data = pkg_resources.resource_string(__name__, 'v3-qubes.xml') + with tempfile.NamedTemporaryFile() as qubes_xml: + qubes_xml.file.write(xml_data) + backup_app = qubesadmin.backup.core2.Core2Qubes(qubes_xml.name) + self.assertCorrectlyConverted(backup_app, parsed_qubes_xml_r2) + + def test_010_qubes_xml_r4(self): + self.maxDiff = None + xml_data = pkg_resources.resource_string(__name__, 'v4-qubes.xml') + with tempfile.NamedTemporaryFile() as qubes_xml: + qubes_xml.file.write(xml_data) + backup_app = qubesadmin.backup.core3.Core3Qubes(qubes_xml.name) + self.assertCorrectlyConverted(backup_app, parsed_qubes_xml_v4) # backup code use multiprocessing, synchronize with main process class AppProxy(object): @@ -655,6 +951,91 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): "vm-templates/test-template-clone")), appmenus_list) + def create_v4_files(self): + appmenus_list = [ + "firefox", "gnome-terminal", "evince", "evolution", + "mozilla-thunderbird", "libreoffice-startcenter", "nautilus", + "gedit", "gpk-update-viewer", "gpk-application" + ] + + os.mkdir(self.fullpath("appvms")) + os.mkdir(self.fullpath("vm-templates")) + + # normal AppVMs + for vm in ('test-work', 'test-d8test', 'test-proxy', + 'test-custom-template-appvm', 'test-net'): + os.mkdir(self.fullpath('appvms/{}'.format(vm))) + self.create_whitelisted_appmenus(self.fullpath( + 'appvms/{}/whitelisted-appmenus.list'.format(vm))) + self.create_private_img(self.fullpath('appvms/{}/private.img'.format( + vm))) + + # StandaloneVMs + for vm in ('test-standalonevm', 'test-hvm'): + os.mkdir(self.fullpath('appvms/{}'.format(vm))) + self.create_whitelisted_appmenus(self.fullpath( + 'appvms/{}/whitelisted-appmenus.list'.format(vm))) + self.create_private_img(self.fullpath( + 
'appvms/{}/private.img'.format(vm))) + self.create_sparse( + self.fullpath('appvms/{}/root.img'.format(vm)), 10*2**30) + self.fill_image(self.fullpath('appvms/{}/root.img'.format(vm)), + 1024*1024, True, + signature='{}/root'.format(vm).encode()) + + # only for Linux one + os.mkdir(self.fullpath('appvms/test-standalonevm/apps.templates')) + self.create_appmenus( + self.fullpath('appvms/test-standalonevm/apps.templates'), + APPTEMPLATE_R2B2, + appmenus_list) + + # Custom template + os.mkdir(self.fullpath("vm-templates/test-fedora-25-clone")) + self.create_private_img( + self.fullpath("vm-templates/test-fedora-25-clone/private.img")) + self.create_sparse(self.fullpath( + "vm-templates/test-fedora-25-clone/root.img"), 10*2**20) + self.fill_image(self.fullpath( + "vm-templates/test-fedora-25-clone/root.img"), 1*2**20, True, + signature=b'test-fedora-25-clone/root') + self.create_volatile_img(self.fullpath( + "vm-templates/test-fedora-25-clone/volatile.img")) + self.create_whitelisted_appmenus(self.fullpath( + "vm-templates/test-fedora-25-clone/whitelisted-appmenus.list")) + self.create_whitelisted_appmenus(self.fullpath( + "vm-templates/test-fedora-25-clone/vm-whitelisted-appmenus.list")) + os.mkdir( + self.fullpath("vm-templates/test-fedora-25-clone/apps.templates")) + self.create_appmenus( + self.fullpath("vm-templates/test-fedora-25-clone/apps.templates"), + APPTEMPLATE_R2B2, + appmenus_list) + os.mkdir(self.fullpath("vm-templates/test-fedora-25-clone/apps")) + self.create_appmenus( + self.fullpath("vm-templates/test-fedora-25-clone/apps"), + APPTEMPLATE_R2B2.replace("%VMNAME%", "test-fedora-25-clone") + .replace("%VMDIR%", self.fullpath( + "vm-templates/test-fedora-25-clone")), + appmenus_list) + + def scrypt_encrypt(self, f_name, output_name=None, password='qubes', + basedir=None): + if basedir is None: + basedir = self.backupdir + if output_name is None: + output_name = f_name + '.enc' + if f_name == 'backup-header': + scrypt_pass = 'backup-header!' 
+ password + else: + scrypt_pass = '20161020T123455-1234!{}!{}'.format(f_name, password) + p = subprocess.Popen(['scrypt', 'enc', '-P', '-t', '0.1', + os.path.join(basedir, f_name), os.path.join(basedir, output_name)], + stdin=subprocess.PIPE) + p.communicate(scrypt_pass.encode()) + assert p.wait() == 0 + return output_name + def calculate_hmac(self, f_name, algorithm="sha512", password="qubes"): with open(self.fullpath(f_name), "r") as f_data: with open(self.fullpath(f_name+".hmac"), "w") as f_hmac: @@ -715,6 +1096,42 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.append_backup_stream(part_with_dir+".hmac", stream, basedir=self.fullpath("stage1")) + def handle_v4_file(self, f_name, subdir, stream, compressed=True): + # create inner archive + tar_cmdline = ["tar", "-Pc", '--sparse', + '-C', self.fullpath(os.path.dirname(f_name)), + '--xform', 's:^%s:%s\\0:' % ( + os.path.basename(f_name), + subdir), + os.path.basename(f_name) + ] + if compressed: + tar_cmdline.insert(-1, "--use-compress-program=%s" % "gzip") + tar = subprocess.Popen(tar_cmdline, stdout=subprocess.PIPE) + data = tar.stdout + + stage1_dir = self.fullpath(os.path.join("stage1", subdir)) + if not os.path.exists(stage1_dir): + os.makedirs(stage1_dir) + subprocess.check_call(["split", "--numeric-suffixes", + "--suffix-length=3", + "--bytes="+str(100*1024*1024), "-", + os.path.join(stage1_dir, + os.path.basename(f_name+"."))], + stdin=data) + data.close() + + for part in sorted(os.listdir(stage1_dir)): + if not re.match( + r"^{}.[0-9][0-9][0-9]$".format(os.path.basename(f_name)), + part): + continue + part_with_dir = os.path.join(subdir, part) + f_name = self.scrypt_encrypt(part_with_dir, + basedir=self.fullpath('stage1')) + self.append_backup_stream(f_name, stream, + basedir=self.fullpath("stage1")) + def create_v3_backup(self, encrypted=True, compressed=True): """ Create "backup format 3" backup - used in R2 and R3.0 @@ -732,15 +1149,15 @@ class 
TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.calculate_hmac("backup-header") self.append_backup_stream("backup-header", output) self.append_backup_stream("backup-header.hmac", output) - with open(self.fullpath("qubes.xml"), "w") as f: + with open(self.fullpath("qubes.xml"), "wb") as f: + qubesxml = pkg_resources.resource_string(__name__, 'v3-qubes.xml') if encrypted: - qubesxml = QUBESXML_R2 for vmname, subdir in MANGLED_SUBDIRS_R2.items(): - qubesxml = re.sub(r"[a-z-]*/{}".format(vmname), - subdir, qubesxml) + qubesxml = re.sub(r"[a-z-]*/{}".format(vmname).encode(), + subdir.encode(), qubesxml) f.write(qubesxml) else: - f.write(QUBESXML_R2) + f.write(qubesxml) self.handle_v3_file("qubes.xml", "", output, encrypted=encrypted, compressed=compressed) @@ -770,6 +1187,44 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): output.close() + def create_v4_backup(self, compressed=True): + """ + Create "backup format 4" backup - used in R4.0 + + :param compressed: Should the backup be compressed + :return: + """ + output = open(self.fullpath("backup.bin"), "w") + with open(self.fullpath("backup-header"), "w") as f: + f.write(BACKUP_HEADER_R4.format( + compressed=str(compressed) + )) + self.scrypt_encrypt("backup-header", output_name='backup-header.hmac') + self.append_backup_stream("backup-header", output) + self.append_backup_stream("backup-header.hmac", output) + with open(self.fullpath("qubes.xml"), "wb") as f: + qubesxml = pkg_resources.resource_string(__name__, 'v4-qubes.xml') + for vmname, subdir in MANGLED_SUBDIRS_R4.items(): + qubesxml = re.sub( + r'backup-path">[a-z-]*/{}'.format(vmname).encode(), + ('backup-path">' + subdir).encode(), + qubesxml) + f.write(qubesxml) + + self.handle_v4_file("qubes.xml", "", output, compressed=compressed) + + self.create_v4_files() + for vm_type in ["appvms", "vm-templates"]: + for vm_name in os.listdir(self.fullpath(vm_type)): + vm_dir = os.path.join(vm_type, vm_name) + for f_name 
in os.listdir(self.fullpath(vm_dir)): + subdir = MANGLED_SUBDIRS_R4[vm_name] + self.handle_v4_file( + os.path.join(vm_dir, f_name), + subdir+'/', output, compressed=compressed) + + output.close() + def setup_expected_calls(self, parsed_qubes_xml, templates_map=None): if templates_map is None: templates_map = {} @@ -887,6 +1342,10 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): (name, 'admin.vm.feature.Set', feature, str(value).encode())] = b'0\0' + for tag in vm['tags']: + self.app.expected_calls[ + (name, 'admin.vm.tag.Set', tag, None)] = b'0\0' + orig_admin_vm_list = self.app.expected_calls[ ('dom0', 'admin.vm.List', None, None)] self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = \ @@ -957,6 +1416,81 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.assertAllCalled() + @unittest.skipUnless(spawn.find_executable('scrypt'), + "scrypt not installed") + def test_230_r4(self): + self.create_v4_backup(False) + self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = ( + b'0\0dom0 class=AdminVM state=Running\n' + b'fedora-25 class=TemplateVM state=Halted\n' + b'testvm class=AppVM state=Running\n' + b'sys-net class=AppVM state=Running\n' + ) + self.app.expected_calls[ + ('dom0', 'admin.property.Get', 'default_template', None)] = \ + b'0\0default=no type=vm fedora-25' + self.app.expected_calls[ + ('sys-net', 'admin.vm.property.Get', 'provides_network', None)] = \ + b'0\0default=no type=bool True' + self.setup_expected_calls(parsed_qubes_xml_v4, templates_map={ + 'debian-8': 'fedora-25' + }) + + qubesd_calls_queue = multiprocessing.Queue() + + with mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)): + self.restore_backup(self.fullpath("backup.bin"), options={ + 'use-default-template': True, + 'use-default-netvm': True, + }) + + # retrieve calls from other multiprocess.Process instances + while not qubesd_calls_queue.empty(): + call_args = 
qubesd_calls_queue.get() + self.app.qubesd_call(*call_args) + qubesd_calls_queue.close() + + self.assertAllCalled() + + @unittest.skipUnless(spawn.find_executable('scrypt'), + "scrypt not installed") + def test_230_r4_compressed(self): + self.create_v4_backup(True) + + self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = ( + b'0\0dom0 class=AdminVM state=Running\n' + b'fedora-25 class=TemplateVM state=Halted\n' + b'testvm class=AppVM state=Running\n' + b'sys-net class=AppVM state=Running\n' + ) + self.app.expected_calls[ + ('dom0', 'admin.property.Get', 'default_template', None)] = \ + b'0\0default=no type=vm fedora-25' + self.app.expected_calls[ + ('sys-net', 'admin.vm.property.Get', 'provides_network', None)] = \ + b'0\0default=no type=bool True' + self.setup_expected_calls(parsed_qubes_xml_v4, templates_map={ + 'debian-8': 'fedora-25' + }) + + qubesd_calls_queue = multiprocessing.Queue() + + with mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)): + self.restore_backup(self.fullpath("backup.bin"), options={ + 'use-default-template': True, + 'use-default-netvm': True, + }) + + # retrieve calls from other multiprocess.Process instances + while not qubesd_calls_queue.empty(): + call_args = qubesd_calls_queue.get() + self.app.qubesd_call(*call_args) + qubesd_calls_queue.close() + + self.assertAllCalled() + class TC_11_BackupCompatibilityIntoLVM(TC_10_BackupCompatibility): storage_pool = 'some-pool' diff --git a/qubesadmin/tests/backup/v3-qubes.xml b/qubesadmin/tests/backup/v3-qubes.xml new file mode 100644 index 0000000..1d60230 --- /dev/null +++ b/qubesadmin/tests/backup/v3-qubes.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/qubesadmin/tests/backup/v4-qubes.xml b/qubesadmin/tests/backup/v4-qubes.xml new file mode 100644 index 0000000..f02c4ac --- /dev/null +++ b/qubesadmin/tests/backup/v4-qubes.xml @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + sys-net + fedora-25-dvm + 4.9.31-17 + 
sys-firewall + fedora-25 + sys-firewall + + + + + True + + label-5 + test-d8test + 20 + 9204481c-7e37-42a6-8a66-b4cc63d65f11 + debian-8 + + + True + appvms/test-d8test + 20971520 + + + + + + + + + + + + + + + + label-1 + test-proxy + 30 + 367c64e6-ab8c-42df-91b6-c9d4c7d015f2 + debian-8 + True + sys-net + + + True + appvms/test-proxy + 209715200 + + + + + + + + + + + + + + + + label-8 + debian-8 + 16 + 0e2fa953-016f-4486-9e36-c9b386fc2bac + + + True + + 1 + + + + + + + + + + + + + + + + label-8 + + + + + + + + + + True + + label-3 + test-custom-template-appvm + 31 + 1bdb7b32-8a4b-4bb7-8da0-6b06494f642c + test-fedora-25-clone + + + True + appvms/test-custom-template-appvm + 20971520 + + + + + + + + + + + + + + + + + label-8 + 4000 + test-fedora-25-clone + 7 + e3b8d458-8c4c-4ccb-b00d-b7ae454cb08f + + + 1 + True + vm-templates/test-fedora-25-clone + 2097152000 + + + + + + + + + + + + + + True + label-1 + fedora-25-clone-dvm + 10 + 30daa6b5-693b-460a-9da7-99078a2d9d14 + test-fedora-25-clone + 1 + + + 1 + 1 + + + + + + + + + + + + + + True + label-1 + fedora-25-dvm + 10 + 30daa6b5-693b-460a-9da7-99078a2d9d14 + fedora-25 + 1 + + + 1 + 1 + + + + + + + + + + + + + + label-8 + 4000 + fedora-25-lvm + 14 + 20785bf4-42fa-4035-ab95-c1bf054c153a + + + + + + + + + + + + + + + label-8 + fedora-25 + 8 + 51870d8e-2e71-41d2-9582-30661f611004 + + + True + + 1 + + + + + + + + + + + + + + + + True + label-7 + 4000 + test-hvm + 9 + 9909066b-0f03-4725-ad9e-fa3561d5566e + + + + True + appvms/test-hvm + 2097152000 + + + + + + + + + + + + + + 1474318497 + label-1 + untrusted + 11 + 359b8e38-9e50-46a3-a42c-8d3bb15d3890 + fedora-25-clone-dvm + + fedora-25 + + + 1 + + + + + + + + + + + + + + label-3 + personal + 22 + efa8ddc4-6661-4231-9bfd-ef34907da358 + sys-firewall + fedora-25 + + + 1 + 1 + + + 1 + + + + + + + + + + + + + + True + label-4 + 500 + sys-firewall + True + 21 + 40f0775a-c259-44bc-be57-6148c67c42c7 + fedora-25 + + + + + + + + + + + + + + + + user + False + label-1 + 300 + 300 + 
sys-net + + True + nopat i8042.nokbd i8042.noaux + 2 + eb8b1680-d4fa-449b-8baa-b5146d9b62b7 + fedora-25 + + + + 1 + + + + + + + + + + + + + + + + + label-1 + 300 + test-net + + 300 + True + 6 + d5e9a792-9247-4f6f-a936-b7c65a1adfac + fedora-25 + + + + + True + appvms/test-net + 209715200 + + + + + + + + + + + + + + + + True + label-1 + False + 400 + sys-usb + 5 + 0da5616e-f2db-4c17-9c0d-cdef2e728344 + fedora-25 + True + + + + + + + + + + + + + + + + + + + + + + + + + label-8 + vault + + 13 + d5284828-988d-46e2-8388-a09c495475e3 + fedora-25 + False + 1536 + + + + + + + + + + + + + + + 192.168.0.1 + label-4 + 4000 + 400 + test-work + 3 + 07c17d1e-0982-417d-b19e-a81d51bed423 + fedora-25 + + + 1 + True + appvms/test-work + 2097152000 + + + + + + + + + + + + + + + + + label-6 + 4000 + test-standalonevm + 4 + e8034b8a-29b3-4f02-b8cb-05cd74a4bb68 + + + True + appvms/test-standalonevm + 2097152000 + + + + + + + + + + + diff --git a/setup.py b/setup.py index 6283ecd..25b04dd 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,9 @@ if __name__ == '__main__': license='LGPL2.1+', url='https://www.qubes-os.org/', packages=setuptools.find_packages(exclude=exclude), + package_data={ + 'qubesadmin.tests.backup': ['*.xml'], + }, entry_points={ 'console_scripts': list(get_console_scripts()), 'qubesadmin.vm': [ From f1036c27a7bfff7aad42f56fe138085d8f83aec7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 02:33:25 +0200 Subject: [PATCH 08/17] backup: add firewall and appmenus list handling --- qubesadmin/backup/__init__.py | 23 ++++++++- qubesadmin/backup/core2.py | 97 +++++++++++++++++++++++++++++++++++ qubesadmin/backup/core3.py | 17 ++++++ 3 files changed, 135 insertions(+), 2 deletions(-) diff --git a/qubesadmin/backup/__init__.py b/qubesadmin/backup/__init__.py index 2d935b7..3dad6cd 100644 --- a/qubesadmin/backup/__init__.py +++ b/qubesadmin/backup/__init__.py @@ -442,6 +442,9 @@ class ExtractWorker3(Process): for fname, 
(data_func, size_func) in self.handlers.items(): if not fname.startswith(dirname + '/'): continue + if not os.path.exists(fname): + # for example firewall.xml + continue if size_func is not None: size_func(os.path.getsize(fname)) with open(fname, 'rb') as input_file: @@ -741,6 +744,10 @@ class BackupVM(object): '''Report whether a VM is included in the backup''' return False + def handle_firewall_xml(self, vm, stream): + '''Import appropriate format of firewall.xml''' + raise NotImplementedError + class BackupRestoreOptions(object): '''Options for restore operation''' # pylint: disable=too-few-public-methods @@ -1717,6 +1724,15 @@ class BackupRestore(object): if retcode != 0: self.log.error("*** Error while setting home directory owner") + def _handle_appmenus_list(self, vm, stream): + '''Handle whitelisted-appmenus.list file''' + try: + subprocess.check_call( + ['qvm-appmenus', '--set-whitelist=-', vm.name], + stdin=stream) + except subprocess.CalledProcessError: + self.log.exception('Failed to set application list for %s', vm.name) + def restore_do(self, restore_info): ''' @@ -1753,8 +1769,11 @@ class BackupRestore(object): size_func = volume.resize handlers[os.path.join(vm_info.subdir, name + '.img')] = \ (data_func, size_func) - # TODO applications whitelist - # TODO firewall + handlers[os.path.join(vm_info.subdir, 'firewall.xml')] = ( + functools.partial(vm_info.vm.handle_firewall_xml, vm), None) + handlers[os.path.join(vm_info.subdir, + 'whitelisted-appmenus.list')] = ( + functools.partial(self._handle_appmenus_list, vm), None) if 'dom0' in restore_info.keys() and \ restore_info['dom0'].good_to_go: diff --git a/qubesadmin/backup/core2.py b/qubesadmin/backup/core2.py index f21a717..a6326a4 100644 --- a/qubesadmin/backup/core2.py +++ b/qubesadmin/backup/core2.py @@ -25,6 +25,7 @@ import xml.parsers import logging import lxml.etree +from qubesadmin.firewall import Rule, Action, Proto, DstHost, SpecialTarget import qubesadmin.backup service_to_feature = { @@ 
-44,6 +45,102 @@ class Core2VM(qubesadmin.backup.BackupVM): def included_in_backup(self): return self.backup_content + @staticmethod + def rule_from_xml_v1(node, action): + '''Parse single rule in old XML format (pre Qubes 4.0) + + :param node: XML node for the rule + :param action: action to apply (in old format it wasn't part of the + rule itself) + ''' + netmask = node.get('netmask') + if netmask is None: + netmask = 32 + else: + netmask = int(netmask) + address = node.get('address') + if address: + dsthost = DstHost(address, netmask) + else: + dsthost = None + + proto = node.get('proto') + + port = node.get('port') + toport = node.get('toport') + if port and toport: + dstports = port + '-' + toport + elif port: + dstports = port + else: + dstports = None + + # backward compatibility: protocol defaults to TCP if port is specified + if dstports and not proto: + proto = 'tcp' + + if proto == 'any': + proto = None + + expire = node.get('expire') + + kwargs = { + 'action': action, + } + if dsthost: + kwargs['dsthost'] = dsthost + if dstports: + kwargs['dstports'] = dstports + if proto: + kwargs['proto'] = proto + if expire: + kwargs['expire'] = expire + + return Rule(None, **kwargs) + + + def handle_firewall_xml(self, vm, stream): + '''Load old (Qubes < 4.0) firewall XML format''' + try: + tree = lxml.etree.parse(stream) # pylint: disable=no-member + xml_root = tree.getroot() + policy_v1 = xml_root.get('policy') + assert policy_v1 in ('allow', 'deny') + default_policy_is_accept = (policy_v1 == 'allow') + rules = [] + + def _translate_action(key): + '''Translate action name''' + if xml_root.get(key, policy_v1) == 'allow': + return Action.accept + return Action.drop + + rules.append(Rule(None, + action=_translate_action('dns'), + specialtarget=SpecialTarget('dns'))) + + rules.append(Rule(None, + action=_translate_action('icmp'), + proto=Proto.icmp)) + + if default_policy_is_accept: + rule_action = Action.drop + else: + rule_action = Action.accept + + for element in 
xml_root: + rule = self.rule_from_xml_v1(element, rule_action) + rules.append(rule) + if default_policy_is_accept: + rules.append(Rule(None, action='accept')) + else: + rules.append(Rule(None, action='drop')) + + vm.firewall.rules = rules + except: # pylint: disable=bare-except + vm.log.exception('Failed to set firewall') + + class Core2Qubes(qubesadmin.backup.BackupApp): '''Parsed qubes.xml''' def __init__(self, store=None): diff --git a/qubesadmin/backup/core3.py b/qubesadmin/backup/core3.py index a975d00..7a32ca8 100644 --- a/qubesadmin/backup/core3.py +++ b/qubesadmin/backup/core3.py @@ -25,6 +25,7 @@ import logging import lxml.etree import qubesadmin.backup +import qubesadmin.firewall class Core3VM(qubesadmin.backup.BackupVM): '''VM object''' @@ -33,6 +34,22 @@ class Core3VM(qubesadmin.backup.BackupVM): def included_in_backup(self): return self.backup_path is not None + def handle_firewall_xml(self, vm, stream): + '''Load new (Qubes >= 4.0) firewall XML format''' + try: + tree = lxml.etree.parse(stream) # pylint: disable=no-member + xml_root = tree.getroot() + rules = [] + for rule_node in xml_root.findall('./rules/rule'): + rule_opts = {} + for rule_opt in rule_node.findall('./properties/property'): + rule_opts[rule_opt.get('name')] = rule_opt.text + rules.append(qubesadmin.firewall.Rule(None, **rule_opts)) + + vm.firewall.rules = rules + except: # pylint: disable=bare-except + vm.log.exception('Failed to set firewall') + class Core3Qubes(qubesadmin.backup.BackupApp): '''Parsed qubes.xml''' def __init__(self, store=None): From 51f77d5834cbd2733f3dd06ccdf8af48a56fd5ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 02:33:50 +0200 Subject: [PATCH 09/17] firewall: fix handling DstHost.prefixlen=0 Do not silently convert it into 32 or 128 netmask. And also do not include it actual rule (it's no-op check). 
--- qubesadmin/firewall.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/qubesadmin/firewall.py b/qubesadmin/firewall.py index b95f6d0..a1067c9 100644 --- a/qubesadmin/firewall.py +++ b/qubesadmin/firewall.py @@ -88,7 +88,10 @@ class DstHost(RuleOption): # add prefix length to bare IP addresses try: socket.inet_pton(socket.AF_INET6, value) - self.prefixlen = prefixlen or 128 + if prefixlen is not None: + self.prefixlen = prefixlen + else: + self.prefixlen = 128 if self.prefixlen < 0 or self.prefixlen > 128: raise ValueError( 'netmask for IPv6 must be between 0 and 128') @@ -100,7 +103,10 @@ class DstHost(RuleOption): if value.count('.') != 3: raise ValueError( 'Invalid number of dots in IPv4 address') - self.prefixlen = prefixlen or 32 + if prefixlen is not None: + self.prefixlen = prefixlen + else: + self.prefixlen = 32 if self.prefixlen < 0 or self.prefixlen > 32: raise ValueError( 'netmask for IPv4 must be between 0 and 32') @@ -137,6 +143,10 @@ class DstHost(RuleOption): @property def rule(self): '''API representation of this rule element''' + if self.prefixlen == 0 and self.type != 'dsthost': + # 0.0.0.0/0 or ::/0, doesn't limit to any particular host, + # so skip it + return None return self.type + '=' + str(self) From 137e1ed877a9c0df52a69b8124dcf317502de634 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 02:35:29 +0200 Subject: [PATCH 10/17] tests: backup: add firewall.xml and appmenus checking --- .../tests/backup/backupcompatibility.py | 161 ++++++++++++++++-- qubesadmin/tests/backup/v3-firewall.xml | 7 + qubesadmin/tests/backup/v4-firewall.xml | 43 +++++ 3 files changed, 201 insertions(+), 10 deletions(-) create mode 100644 qubesadmin/tests/backup/v3-firewall.xml create mode 100644 qubesadmin/tests/backup/v4-firewall.xml diff --git a/qubesadmin/tests/backup/backupcompatibility.py b/qubesadmin/tests/backup/backupcompatibility.py index d5b98c6..f23dda9 100644 --- 
a/qubesadmin/tests/backup/backupcompatibility.py +++ b/qubesadmin/tests/backup/backupcompatibility.py @@ -41,6 +41,7 @@ import sys import qubesadmin.backup.core2 import qubesadmin.backup.core3 +import qubesadmin.firewall import qubesadmin.storage import qubesadmin.tests import qubesadmin.tests.backup @@ -779,6 +780,11 @@ class MockVolume(qubesadmin.storage.Volume): super(MockVolume, self).__init__(*args, **kwargs) self.app = AppProxy(self.app, import_data_queue) +class MockFirewall(qubesadmin.firewall.Firewall): + def __init__(self, import_data_queue, *args, **kwargs): + super(MockFirewall, self).__init__(*args, **kwargs) + self.vm.app = AppProxy(self.vm.app, import_data_queue) + class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): @@ -843,13 +849,17 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): os.mkdir(self.fullpath("servicevms")) os.mkdir(self.fullpath("vm-templates")) - # normal AppVM + # normal AppVM, with firewall os.mkdir(self.fullpath("appvms/test-work")) self.create_whitelisted_appmenus(self.fullpath( "appvms/test-work/whitelisted-appmenus.list")) os.symlink("/usr/share/qubes/icons/green.png", self.fullpath("appvms/test-work/icon.png")) self.create_private_img(self.fullpath("appvms/test-work/private.img")) + with open(self.fullpath("appvms/test-work/firewall.xml"), "wb") as \ + f_firewall: + f_firewall.write( + pkg_resources.resource_string(__name__, 'v3-firewall.xml')) # StandaloneVM os.mkdir(self.fullpath("appvms/test-standalonevm")) @@ -970,6 +980,12 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.create_private_img(self.fullpath('appvms/{}/private.img'.format( vm))) + # setup firewall only on one VM + with open(self.fullpath("appvms/test-work/firewall.xml"), "wb") as \ + f_firewall: + f_firewall.write( + pkg_resources.resource_string(__name__, 'v4-firewall.xml')) + # StandaloneVMs for vm in ('test-standalonevm', 'test-hvm'): 
os.mkdir(self.fullpath('appvms/{}'.format(vm))) @@ -1346,6 +1362,17 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.app.expected_calls[ (name, 'admin.vm.tag.Set', tag, None)] = b'0\0' + if vm['backup_path']: + appmenus = ( + b'gnome-terminal.desktop\n' + b'nautilus.desktop\n' + b'firefox.desktop\n' + b'mozilla-thunderbird.desktop\n' + b'libreoffice-startcenter.desktop\n' + ) + self.app.expected_calls[ + (name, 'appmenus', None, appmenus)] = b'0\0' + orig_admin_vm_list = self.app.expected_calls[ ('dom0', 'admin.vm.List', None, None)] self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = \ @@ -1353,6 +1380,9 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): [orig_admin_vm_list + b''.join(extra_vm_list_lines)] * \ len(extra_vm_list_lines) + def mock_appmenus(self, queue, vm, stream): + queue.put((vm.name, 'appmenus', None, stream.read())) + def test_210_r2(self): self.create_v3_backup(False) self.app.expected_calls[('dom0', 'admin.vm.List', None, None)] = ( @@ -1366,15 +1396,44 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.setup_expected_calls(parsed_qubes_xml_r2, templates_map={ 'fedora-20-x64': 'fedora-25' }) - + firewall_data = ( + 'action=accept specialtarget=dns\n' + 'action=accept proto=icmp\n' + 'action=accept proto=tcp dstports=22-22\n' + 'action=accept proto=tcp dstports=9418-9418\n' + 'action=accept proto=tcp dst4=192.168.0.1/32 dstports=1234-1234\n' + 'action=accept proto=tcp dsthost=fedorahosted.org dstports=443-443\n' + 'action=accept proto=tcp dsthost=xenbits.xen.org dstports=80-80\n' + 'action=drop\n' + ) + self.app.expected_calls[ + ('test-work', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' + self.app.expected_calls[ + ('test-custom-template-appvm', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' qubesd_calls_queue = multiprocessing.Queue() - with mock.patch('qubesadmin.storage.Volume', - 
functools.partial(MockVolume, qubesd_calls_queue)): + patches = [ + mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.backup.BackupRestore._handle_appmenus_list', + functools.partial(self.mock_appmenus, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.firewall.Firewall', + functools.partial(MockFirewall, qubesd_calls_queue)), + ] + for patch in patches: + patch.start() + try: self.restore_backup(self.fullpath("backup.bin"), options={ 'use-default-template': True, 'use-default-netvm': True, }) + finally: + for patch in patches: + patch.stop() # retrieve calls from other multiprocess.Process instances while not qubesd_calls_queue.empty(): @@ -1398,15 +1457,45 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.setup_expected_calls(parsed_qubes_xml_r2, templates_map={ 'fedora-20-x64': 'fedora-25' }) + firewall_data = ( + 'action=accept specialtarget=dns\n' + 'action=accept proto=icmp\n' + 'action=accept proto=tcp dstports=22-22\n' + 'action=accept proto=tcp dstports=9418-9418\n' + 'action=accept proto=tcp dst4=192.168.0.1/32 dstports=1234-1234\n' + 'action=accept proto=tcp dsthost=fedorahosted.org dstports=443-443\n' + 'action=accept proto=tcp dsthost=xenbits.xen.org dstports=80-80\n' + 'action=drop\n' + ) + self.app.expected_calls[ + ('test-work', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' + self.app.expected_calls[ + ('test-custom-template-appvm', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' qubesd_calls_queue = multiprocessing.Queue() - with mock.patch('qubesadmin.storage.Volume', - functools.partial(MockVolume, qubesd_calls_queue)): + patches = [ + mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.backup.BackupRestore._handle_appmenus_list', + functools.partial(self.mock_appmenus, qubesd_calls_queue)), + mock.patch( + 
'qubesadmin.firewall.Firewall', + functools.partial(MockFirewall, qubesd_calls_queue)), + ] + for patch in patches: + patch.start() + try: self.restore_backup(self.fullpath("backup.bin"), options={ 'use-default-template': True, 'use-default-netvm': True, }) + finally: + for patch in patches: + patch.stop() # retrieve calls from other multiprocess.Process instances while not qubesd_calls_queue.empty(): @@ -1435,15 +1524,41 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.setup_expected_calls(parsed_qubes_xml_v4, templates_map={ 'debian-8': 'fedora-25' }) + firewall_data = ( + 'action=accept specialtarget=dns\n' + 'action=accept proto=icmp\n' + 'action=accept proto=tcp dstports=22-22\n' + 'action=accept proto=tcp dsthost=www.qubes-os.org ' + 'dstports=443-443\n' + 'action=accept proto=tcp dst4=192.168.0.0/24\n' + 'action=drop\n' + ) + self.app.expected_calls[ + ('test-work', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' qubesd_calls_queue = multiprocessing.Queue() - with mock.patch('qubesadmin.storage.Volume', - functools.partial(MockVolume, qubesd_calls_queue)): + patches = [ + mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.backup.BackupRestore._handle_appmenus_list', + functools.partial(self.mock_appmenus, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.firewall.Firewall', + functools.partial(MockFirewall, qubesd_calls_queue)), + ] + for patch in patches: + patch.start() + try: self.restore_backup(self.fullpath("backup.bin"), options={ 'use-default-template': True, 'use-default-netvm': True, }) + finally: + for patch in patches: + patch.stop() # retrieve calls from other multiprocess.Process instances while not qubesd_calls_queue.empty(): @@ -1473,15 +1588,41 @@ class TC_10_BackupCompatibility(qubesadmin.tests.backup.BackupTestCase): self.setup_expected_calls(parsed_qubes_xml_v4, templates_map={ 'debian-8': 'fedora-25' }) + firewall_data 
= ( + 'action=accept specialtarget=dns\n' + 'action=accept proto=icmp\n' + 'action=accept proto=tcp dstports=22-22\n' + 'action=accept proto=tcp dsthost=www.qubes-os.org ' + 'dstports=443-443\n' + 'action=accept proto=tcp dst4=192.168.0.0/24\n' + 'action=drop\n' + ) + self.app.expected_calls[ + ('test-work', 'admin.vm.firewall.Set', None, + firewall_data.encode())] = b'0\0' qubesd_calls_queue = multiprocessing.Queue() - with mock.patch('qubesadmin.storage.Volume', - functools.partial(MockVolume, qubesd_calls_queue)): + patches = [ + mock.patch('qubesadmin.storage.Volume', + functools.partial(MockVolume, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.backup.BackupRestore._handle_appmenus_list', + functools.partial(self.mock_appmenus, qubesd_calls_queue)), + mock.patch( + 'qubesadmin.firewall.Firewall', + functools.partial(MockFirewall, qubesd_calls_queue)), + ] + for patch in patches: + patch.start() + try: self.restore_backup(self.fullpath("backup.bin"), options={ 'use-default-template': True, 'use-default-netvm': True, }) + finally: + for patch in patches: + patch.stop() # retrieve calls from other multiprocess.Process instances while not qubesd_calls_queue.empty(): diff --git a/qubesadmin/tests/backup/v3-firewall.xml b/qubesadmin/tests/backup/v3-firewall.xml new file mode 100644 index 0000000..94a09a0 --- /dev/null +++ b/qubesadmin/tests/backup/v3-firewall.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/qubesadmin/tests/backup/v4-firewall.xml b/qubesadmin/tests/backup/v4-firewall.xml new file mode 100644 index 0000000..6e2cbd0 --- /dev/null +++ b/qubesadmin/tests/backup/v4-firewall.xml @@ -0,0 +1,43 @@ + + + + + accept + dns + + + + + accept + icmp + + + + + accept + tcp + 22 + + + + + accept + www.qubes-os.org + tcp + 443 + + + + + accept + 192.168.0.0/24 + tcp + + + + + drop + + + + From f0151d73b30b35bae8a124ea14440a3d2344e583 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 15:53:14 +0200 Subject: [PATCH 
11/17] tools: add qvm-backup-restore Frontend tool for backup restore code. Fixes QubesOS/qubes-issues#1214 --- doc/manpages/qvm-backup-restore.rst | 4 - qubesadmin/tools/qvm_backup_restore.py | 259 +++++++++++++++++++++++++ 2 files changed, 259 insertions(+), 4 deletions(-) create mode 100644 qubesadmin/tools/qvm_backup_restore.py diff --git a/doc/manpages/qvm-backup-restore.rst b/doc/manpages/qvm-backup-restore.rst index bdc837f..2b8eda5 100644 --- a/doc/manpages/qvm-backup-restore.rst +++ b/doc/manpages/qvm-backup-restore.rst @@ -53,10 +53,6 @@ Options Restore VMs that are already present on the host under different names -.. option:: --force-root - - Force to run, even with root privileges - .. option:: --replace-template=REPLACE_TEMPLATE Restore VMs using another template, syntax: diff --git a/qubesadmin/tools/qvm_backup_restore.py b/qubesadmin/tools/qvm_backup_restore.py new file mode 100644 index 0000000..85e40be --- /dev/null +++ b/qubesadmin/tools/qvm_backup_restore.py @@ -0,0 +1,259 @@ +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2016 Marek Marczykowski-Górecki +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +'''Console frontend for backup restore code''' + +import getpass +import sys + +import qubesadmin.backup +import qubesadmin.exc +import qubesadmin.tools +import qubesadmin.utils + +parser = qubesadmin.tools.QubesArgumentParser() + +parser.add_argument("--verify-only", action="store_true", + dest="verify_only", default=False, + help="Verify backup integrity without restoring any " + "data") + +parser.add_argument("--skip-broken", action="store_true", dest="skip_broken", + default=False, + help="Do not restore VMs that have missing TemplateVMs " + "or NetVMs") + +parser.add_argument("--ignore-missing", action="store_true", + dest="ignore_missing", default=False, + help="Restore VMs even if their associated TemplateVMs " + "and NetVMs are missing") + +parser.add_argument("--skip-conflicting", action="store_true", + dest="skip_conflicting", default=False, + help="Do not restore VMs that are already present on " + "the host") + +parser.add_argument("--rename-conflicting", action="store_true", + dest="rename_conflicting", default=False, + help="Restore VMs that are already present on the host " + "under different names") + +parser.add_argument("--replace-template", action="append", + dest="replace_template", default=[], + help="Restore VMs using another TemplateVM; syntax: " + "old-template-name:new-template-name (may be " + "repeated)") + +parser.add_argument("-x", "--exclude", action="append", dest="exclude", + default=[], + help="Skip restore of specified VM (may be repeated)") + +parser.add_argument("--skip-dom0-home", action="store_false", dest="dom0_home", + default=True, + help="Do not restore dom0 user home directory") + +parser.add_argument("--ignore-username-mismatch", action="store_true", + dest="ignore_username_mismatch", default=False, + help="Ignore dom0 username mismatch when restoring home " + "directory") + +parser.add_argument("-d", "--dest-vm", action="store", dest="appvm", + help="Specify VM containing the backup to be restored") + 
+parser.add_argument("-p", "--passphrase-file", action="store", + dest="pass_file", default=None, + help="Read passphrase from file, or use '-' to read from stdin") + +parser.add_argument('backup_location', action='store', + help="Backup directory name, or command to pipe from") + +parser.add_argument('vms', nargs='*', action='store', default='[]', + help='Restore only those VMs') + + +def handle_broken(app, args, restore_info): + '''Display information about problems with VMs selected for restore''' + there_are_conflicting_vms = False + there_are_missing_templates = False + there_are_missing_netvms = False + dom0_username_mismatch = False + + for vm_info in restore_info.values(): + assert isinstance(vm_info, qubesadmin.backup.BackupRestore.VMToRestore) + if qubesadmin.backup.BackupRestore.VMToRestore.EXCLUDED in \ + vm_info.problems: + continue + if qubesadmin.backup.BackupRestore.VMToRestore.MISSING_TEMPLATE in \ + vm_info.problems: + there_are_missing_templates = True + if qubesadmin.backup.BackupRestore.VMToRestore.MISSING_NETVM in \ + vm_info.problems: + there_are_missing_netvms = True + if qubesadmin.backup.BackupRestore.VMToRestore.ALREADY_EXISTS in \ + vm_info.problems: + there_are_conflicting_vms = True + if qubesadmin.backup.BackupRestore.Dom0ToRestore.USERNAME_MISMATCH in \ + vm_info.problems: + dom0_username_mismatch = True + + + if there_are_conflicting_vms: + app.log.error( + "*** There are VMs with conflicting names on the host! ***") + if args.skip_conflicting: + app.log.error( + "Those VMs will not be restored. 
" + "The host VMs will NOT be overwritten.") + else: + raise qubesadmin.exc.QubesException( + "Remove VMs with conflicting names from the host " + "before proceeding.\n" + "Or use --skip-conflicting to restore only those VMs that " + "do not exist on the host.\n" + "Or use --rename-conflicting to restore those VMs under " + "modified names (with numbers at the end).") + + app.log.info("The above VMs will be copied and added to your system.") + app.log.info("Existing VMs will NOT be removed.") + + if there_are_missing_templates: + app.log.warning("*** One or more TemplateVMs are missing on the " + "host! ***") + if not (args.skip_broken or args.ignore_missing): + raise qubesadmin.exc.QubesException( + "Install them before proceeding with the restore. " + "Or pass: --skip-broken or --ignore-missing.") + elif args.skip_broken: + app.log.warning("Skipping broken entries: VMs that depend on " + "missing TemplateVMs will NOT be restored.") + elif args.ignore_missing: + app.log.warning("Ignoring missing entries: VMs that depend " + "on missing TemplateVMs will NOT be restored.") + else: + raise qubesadmin.exc.QubesException( + "INTERNAL ERROR! Please report this to the Qubes OS team!") + + if there_are_missing_netvms: + app.log.warning("*** One or more NetVMs are missing on the " + "host! ***") + if not (args.skip_broken or args.ignore_missing): + raise qubesadmin.exc.QubesException( + "Install them before proceeding with the restore. " + "Or pass: --skip-broken or --ignore-missing.") + elif args.skip_broken: + app.log.warning("Skipping broken entries: VMs that depend on " + "missing NetVMs will NOT be restored.") + elif args.ignore_missing: + app.log.warning("Ignoring missing entries: VMs that depend " + "on missing NetVMs will NOT be restored.") + else: + raise qubesadmin.exc.QubesException( + "INTERNAL ERROR! 
Please report this to the Qubes OS team!") + + if 'dom0' in restore_info.keys() and args.dom0_home: + if dom0_username_mismatch: + app.log.warning("*** Dom0 username mismatch! This can break " + "some settings! ***") + if not args.ignore_username_mismatch: + raise qubesadmin.exc.QubesException( + "Skip restoring the dom0 home directory " + "(--skip-dom0-home), or pass " + "--ignore-username-mismatch to continue anyway.") + else: + app.log.warning("Continuing as directed.") + app.log.warning("NOTE: Before restoring the dom0 home directory, " + "a new directory named " + "'home-pre-restore-' will be " + "created inside the dom0 home directory. If any " + "restored files conflict with existing files, " + "the existing files will be moved to this new " + "directory.") + +def main(args=None): + '''Main function of qvm-backup-restore''' + # pylint: disable=too-many-return-statements + args = parser.parse_args(args) + + appvm = None + if args.appvm: + try: + appvm = args.app.domains[args.appvm] + except KeyError: + parser.error('no such domain: {!r}'.format(args.appvm)) + + if args.pass_file is not None: + pass_f = open(args.pass_file) if args.pass_file != "-" else sys.stdin + passphrase = pass_f.readline().rstrip() + if pass_f is not sys.stdin: + pass_f.close() + else: + passphrase = getpass.getpass("Please enter the passphrase to verify " + "and (if encrypted) decrypt the backup: ") + + args.app.log.info("Checking backup content...") + + try: + backup = qubesadmin.backup.BackupRestore(args.app, args.backup_location, + appvm, passphrase) + except qubesadmin.exc.QubesException as e: + parser.error_runtime(str(e)) + # unreachable - error_runtime will raise SystemExit + return 1 + + if args.ignore_missing: + backup.options.use_default_template = True + backup.options.use_default_netvm = True + if args.replace_template: + backup.options.replace_template = args.replace_template + if args.rename_conflicting: + backup.options.rename_conflicting = True + if not args.dom0_home: + 
backup.options.dom0_home = False + if args.ignore_username_mismatch: + backup.options.ignore_username_mismatch = True + if args.exclude: + backup.options.exclude = args.exclude + if args.verify_only: + backup.options.verify_only = True + + restore_info = None + try: + restore_info = backup.get_restore_info() + except qubesadmin.exc.QubesException as e: + parser.error_runtime(str(e)) + + print(backup.get_restore_summary(restore_info)) + + try: + handle_broken(args.app, args, restore_info) + except qubesadmin.exc.QubesException as e: + parser.error_runtime(str(e)) + + if args.pass_file is None: + if input("Do you want to proceed? [y/N] ").upper() != "Y": + exit(0) + + try: + backup.restore_do(restore_info) + except qubesadmin.exc.QubesException as e: + parser.error_runtime(str(e)) + +if __name__ == '__main__': + main() From ca399c1a5a64cca5d3da85bdea5e5106ffb553df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 16:15:39 +0200 Subject: [PATCH 12/17] app: call admin.vm.volume.Import as root This is needed to write LVM data. 
--- qubesadmin/app.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/qubesadmin/app.py b/qubesadmin/app.py index f0ca78f..7316a85 100644 --- a/qubesadmin/app.py +++ b/qubesadmin/app.py @@ -445,11 +445,12 @@ class QubesLocal(QubesBase): if not os.path.exists(method_path): raise qubesadmin.exc.QubesDaemonCommunicationError( '{} not found'.format(method_path)) - qrexec_call_env = os.environ.copy() - qrexec_call_env['QREXEC_REMOTE_DOMAIN'] = 'dom0' - qrexec_call_env['QREXEC_REQUESTED_TARGET'] = dest - proc = subprocess.Popen([method_path, arg], stdin=payload_stream, - stdout=subprocess.PIPE, env=qrexec_call_env) + command = ['env', 'QREXEC_REMOTE_DOMAIN=dom0', + 'QREXEC_REQUESTED_TARGET=' + dest, method_path, arg] + if os.getuid() != 0: + command.insert(0, 'sudo') + proc = subprocess.Popen(command, stdin=payload_stream, + stdout=subprocess.PIPE) payload_stream.close() (return_data, _) = proc.communicate() return self._parse_qubesd_response(return_data) From ced735b4766cb579591c8d0f938a17841d38a54c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 19:32:30 +0200 Subject: [PATCH 13/17] backup: do not show full stacktrace to the user Opt for a simple one-liner error messages, instead of meaningless stack trace (it's most of the time about qubesd responding with error, so the stack trace of actual problem is elsewhere). 
--- qubesadmin/backup/__init__.py | 43 ++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/qubesadmin/backup/__init__.py b/qubesadmin/backup/__init__.py index 3dad6cd..6feaac3 100644 --- a/qubesadmin/backup/__init__.py +++ b/qubesadmin/backup/__init__.py @@ -1731,7 +1731,7 @@ class BackupRestore(object): ['qvm-appmenus', '--set-whitelist=-', vm.name], stdin=stream) except subprocess.CalledProcessError: - self.log.exception('Failed to set application list for %s', vm.name) + self.log.error('Failed to set application list for %s', vm.name) def restore_do(self, restore_info): ''' @@ -1845,8 +1845,9 @@ class BackupRestore(object): label=vm.label, pool=self.options.override_pool, **kwargs) - except Exception: # pylint: disable=broad-except - self.log.exception('Error restoring VM %s, skipping', vm.name) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error restoring VM %s, skipping: %s', + vm.name, err) if new_vm: del self.app.domains[new_vm.name] continue @@ -1869,23 +1870,23 @@ class BackupRestore(object): continue try: setattr(new_vm, prop, value) - except Exception: # pylint: disable=broad-except - self.log.exception('Error setting %s.%s to %s', - vm.name, prop, value) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error setting %s.%s to %s: %s', + vm.name, prop, value, err) for feature, value in vm.features.items(): try: new_vm.features[feature] = value - except Exception: # pylint: disable=broad-except - self.log.exception('Error setting %s.features[%s] to %s', - vm.name, feature, value) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error setting %s.features[%s] to %s: %s', + vm.name, feature, value, err) for tag in vm.tags: try: new_vm.tags.add(tag) - except Exception: # pylint: disable=broad-except - self.log.exception('Error adding tag %s to %s', - tag, vm.name) + except Exception as err: # pylint: disable=broad-except + 
self.log.error('Error adding tag %s to %s: %s', + tag, vm.name, err) for bus in vm.devices: for backend_domain, ident in vm.devices[bus]: @@ -1897,9 +1898,9 @@ class BackupRestore(object): persistent=True) try: new_vm.devices[bus].attach(assignment) - except Exception: # pylint: disable=broad-except - self.log.exception('Error attaching device %s:%s to %s', - bus, ident, vm.name) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error attaching device %s:%s to %s: %s', + bus, ident, vm.name, err) # Set VM dependencies - only non-default setting for vm in vms.values(): @@ -1919,9 +1920,9 @@ class BackupRestore(object): try: host_vm.netvm = value - except Exception: # pylint: disable=broad-except - self.log.exception('Error setting %s.%s to %s', - vm.name, 'netvm', value) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error setting %s.%s to %s: %s', + vm.name, 'netvm', value, err) if 'default_dispvm' in vm.properties: if vm.properties['default_dispvm'] in restore_info: @@ -1932,6 +1933,6 @@ class BackupRestore(object): try: host_vm.default_dispvm = value - except Exception: # pylint: disable=broad-except - self.log.exception('Error setting %s.%s to %s', - vm.name, 'default_dispvm', value) + except Exception as err: # pylint: disable=broad-except + self.log.error('Error setting %s.%s to %s: %s', + vm.name, 'default_dispvm', value, err) From 96d4a2f06658da5629c34a5161ac1585343bb3d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 20:40:08 +0200 Subject: [PATCH 14/17] backup: change 'hvm' property to 'virt_mode' QubesOS/qubes-issues#2912 --- qubesadmin/backup/core2.py | 2 +- .../tests/backup/backupcompatibility.py | 30 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/qubesadmin/backup/core2.py b/qubesadmin/backup/core2.py index a6326a4..e48197c 100644 --- a/qubesadmin/backup/core2.py +++ b/qubesadmin/backup/core2.py @@ -289,7 
+289,7 @@ class Core2Qubes(qubesadmin.backup.BackupApp): if value_is_default and value_is_default.lower() != \ "true": vm.properties[attr] = value - vm.properties['hvm'] = "HVm" in vm_class_name + vm.properties['virt_mode'] = 'hvm' if "HVm" in vm_class_name else 'pv' if vm_class_name in ('QubesNetVm', 'QubesProxyVm'): vm.properties['provides_network'] = True if vm_class_name == 'QubesNetVm': diff --git a/qubesadmin/tests/backup/backupcompatibility.py b/qubesadmin/tests/backup/backupcompatibility.py index f23dda9..7759108 100644 --- a/qubesadmin/tests/backup/backupcompatibility.py +++ b/qubesadmin/tests/backup/backupcompatibility.py @@ -154,7 +154,7 @@ parsed_qubes_xml_r2 = { 'klass': 'TemplateVM', 'label': 'black', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', }, 'devices': {}, @@ -168,7 +168,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'red', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'memory': '200', 'netvm': None, @@ -193,7 +193,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'green', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'memory': '200', 'provides_network': True @@ -209,7 +209,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'gray', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'vcpus': '1' }, @@ -224,7 +224,7 @@ parsed_qubes_xml_r2 = { 'banking': { 'klass': 'AppVM', 'label': 'green', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -235,7 +235,7 @@ parsed_qubes_xml_r2 = { 'personal': { 'klass': 'AppVM', 'label': 'yellow', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -247,7 +247,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'red', 'properties': { 
- 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'netvm': 'test-testproxy', 'default_dispvm': 'disp-test-testproxy', @@ -263,7 +263,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'red', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'memory': '200', 'provides_network': True}, @@ -278,7 +278,7 @@ parsed_qubes_xml_r2 = { 'klass': 'AppVM', 'label': 'red', 'properties': { - 'hvm': False, + 'virt_mode': 'pv', 'maxmem': '1535', 'memory': '200', 'provides_network': True}, @@ -292,7 +292,7 @@ parsed_qubes_xml_r2 = { 'test-testhvm': { 'klass': 'StandaloneVM', 'label': 'purple', - 'properties': {'hvm': True, 'memory': '512'}, + 'properties': {'virt_mode': 'hvm', 'memory': '512'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': False}, @@ -304,7 +304,7 @@ parsed_qubes_xml_r2 = { 'test-work': { 'klass': 'AppVM', 'label': 'green', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -315,7 +315,7 @@ parsed_qubes_xml_r2 = { 'test-template-clone': { 'klass': 'TemplateVM', 'label': 'green', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -326,7 +326,7 @@ parsed_qubes_xml_r2 = { 'test-custom-template-appvm': { 'klass': 'AppVM', 'label': 'yellow', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -337,7 +337,7 @@ parsed_qubes_xml_r2 = { 'test-standalonevm': { 'klass': 'StandaloneVM', 'label': 'blue', - 'properties': {'hvm': False, 'maxmem': '1535'}, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535'}, 'devices': {}, 'tags': set(), 'features': {'services.meminfo-writer': True}, @@ -349,7 +349,7 @@ parsed_qubes_xml_r2 = { 
'test-net': { 'klass': 'AppVM', 'label': 'red', - 'properties': {'hvm': False, + 'properties': {'virt_mode': 'pv', 'maxmem': '1535', 'memory': '200', 'netvm': None, From f2fa613dce03389dbab45b8dc387a5fc3b27c28d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 23:30:37 +0200 Subject: [PATCH 15/17] backup: use 'cat' instead of read-write loop in python The most important part is fixing resize handling - call size_func before data_func, but after tar gets initial data (and output file size). But other than that, it makes the process a little faster. QubesOS/qubes-issues#1214 --- qubesadmin/backup/__init__.py | 210 +++++++++++++++++----------------- 1 file changed, 105 insertions(+), 105 deletions(-) diff --git a/qubesadmin/backup/__init__.py b/qubesadmin/backup/__init__.py index 6feaac3..132ce1c 100644 --- a/qubesadmin/backup/__init__.py +++ b/qubesadmin/backup/__init__.py @@ -64,6 +64,8 @@ HMAC_MAX_SIZE = 4096 BLKSIZE = 512 _re_alphanum = re.compile(r'^[A-Za-z0-9-]*$') +_tar_msg_re = re.compile(r".*#[0-9].*restore_pipe") +_tar_file_size_re = re.compile(r"^[^ ]+ [^ ]+/[^ ]+ *([0-9]+) .*") class BackupCanceledError(QubesException): '''Exception raised when backup/restore was cancelled''' @@ -305,8 +307,8 @@ class ExtractWorker3(Process): self.tar2_process = None #: current inner tar archive name self.tar2_current_file = None - #: call size_func handler for this file when tar report it on stderr - self.adjust_output_size = None + #: cat process feeding tar2_process + self.tar2_feeder = None #: decompressor subprocess.Popen instance self.decompressor_process = None #: decryptor subprocess.Popen instance @@ -324,57 +326,6 @@ class ExtractWorker3(Process): self.tar2_stderr = [] self.compression_filter = compression_filter - @staticmethod - def handle_streams(stream_in, streams_out, processes, size_limit=None, - progress_callback=None): - ''' - Copy stream_in to all streams_out and monitor all mentioned processes. 
- If any of them terminate with non-zero code, interrupt the process. Copy - at most `size_limit` data (if given). - - :param stream_in: file-like object to read data from - :param streams_out: dict of file-like objects to write data to - :param processes: dict of subprocess.Popen objects to monitor - :param size_limit: int maximum data amount to process - :param progress_callback: callable function to report progress, will be - given copied data size (it should accumulate internally) - :return: failed process name, failed stream name, "size_limit" or None ( - no error) - ''' - buffer_size = 409600 - bytes_copied = 0 - while True: - if size_limit: - to_copy = min(buffer_size, size_limit - bytes_copied) - if to_copy <= 0: - return "size_limit" - else: - to_copy = buffer_size - buf = stream_in.read(to_copy) - if not buf: - # done - return None - - if callable(progress_callback): - progress_callback(len(buf)) - for name, stream in streams_out.items(): - if stream is None: - continue - try: - stream.write(buf) - except IOError: - return name - bytes_copied += len(buf) - - for name, proc in processes.items(): - if proc is None: - continue - if isinstance(proc, Process): - if not proc.is_alive() and proc.exitcode != 0: - return name - elif proc.poll(): - return name - def collect_tar_output(self): '''Retrieve tar stderr and handle it appropriately @@ -398,22 +349,9 @@ class ExtractWorker3(Process): new_lines = [x.decode(self.stderr_encoding) for x in new_lines] - msg_re = re.compile(r".*#[0-9].*restore_pipe") - debug_msg = [msg for msg in new_lines if msg_re.match(msg)] + debug_msg = [msg for msg in new_lines if _tar_msg_re.match(msg)] self.log.debug('tar2_stderr: %s', '\n'.join(debug_msg)) - new_lines = [msg for msg in new_lines if not msg_re.match(msg)] - if self.adjust_output_size: - # search for first file size reported by tar after setting - # self.adjust_output_size (so don't look at self.tar2_stderr) - # this is used only when extracting single-file archive, so 
don't - # bother with checking file name - file_size_re = re.compile(r"^[^ ]+ [^ ]+/[^ ]+ *([0-9]+) .*") - for line in new_lines: - match = file_size_re.match(line) - if match: - file_size = match.groups()[0] - self.adjust_output_size(file_size) - self.adjust_output_size = None + new_lines = [msg for msg in new_lines if not _tar_msg_re.match(msg)] self.tar2_stderr += new_lines def run(self): @@ -490,26 +428,98 @@ class ExtractWorker3(Process): self.handle_dir( os.path.dirname(inner_name)) self.tar2_current_file = None - self.adjust_output_size = None self.tar2_process = None - @staticmethod - def _data_func_wrapper(close_fds, data_func, data_stream): - '''Close not needed file descriptors, then call data_func( - data_stream). + def _data_import_wrapper(self, close_fds, data_func, size_func, + tar2_process): + '''Close not needed file descriptors, handle output size reported + by tar (if needed) then call data_func(tar2_process.stdout). This is to prevent holding write end of a pipe in subprocess, preventing EOF transfer. ''' for fd in close_fds: - if fd == data_stream.fileno(): + if fd in (tar2_process.stdout.fileno(), + tar2_process.stderr.fileno()): continue try: os.close(fd) except OSError: pass - return data_func(data_stream) + # retrieve file size from tar's stderr; warning: we do + # not read data from tar's stdout at this point, it will + # hang if it tries to output file content before + # reporting its size on stderr first + if size_func: + # process lines on stderr until we get file size + # search for first file size reported by tar - + # this is used only when extracting single-file archive, so don't + # bother with checking file name + # Also, this needs to be called before anything is retrieved + # from tar stderr, otherwise the process may deadlock waiting for + # size (at this point nothing is retrieving data from tar stdout + # yet, so it will hang on write() when the output pipe fill up). 
+ while True: + line = tar2_process.stderr.readline() + line = line.decode() + if _tar_msg_re.match(line): + self.log.debug('tar2_stderr: %s', line) + else: + match = _tar_file_size_re.match(line) + if match: + file_size = match.groups()[0] + size_func(file_size) + break + else: + self.log.warning( + 'unexpected tar output (no file size report): %s', + line) + + return data_func(tar2_process.stdout) + + def feed_tar2(self, filename, input_pipe): + '''Feed data from *filename* to *input_pipe* + + Start a cat process to do that (do not block this process). Cat + subprocess instance will be in :py:attr:`tar2_feeder` + ''' + assert self.tar2_feeder is None + + self.tar2_feeder = subprocess.Popen(['cat', filename], + stdout=input_pipe) + + def check_processes(self, processes): + '''Check if any process failed. + + And if so, wait for other relevant processes to cleanup. + ''' + run_error = None + for name, proc in processes.items(): + if proc is None: + continue + + if isinstance(proc, Process): + if not proc.is_alive() and proc.exitcode != 0: + run_error = name + break + elif proc.poll(): + run_error = name + break + + if run_error: + if run_error == "target": + self.collect_tar_output() + details = "\n".join(self.tar2_stderr) + else: + details = "%s failed" % run_error + if self.decryptor_process: + self.decryptor_process.terminate() + self.decryptor_process.wait() + self.decryptor_process = None + self.log.error('Error while processing \'%s\': %s', + self.tar2_current_file, details) + self.cleanup_tar2(wait=True, terminate=True) def __run__(self): self.log.debug("Started sending thread") @@ -596,20 +606,19 @@ class ExtractWorker3(Process): stderr=subprocess.PIPE) input_pipe = self.tar2_process.stdin + self.feed_tar2(filename, input_pipe) + if inner_name in self.handlers: assert redirect_stdout is subprocess.PIPE data_func, size_func = self.handlers[inner_name] self.import_process = multiprocessing.Process( - target=self._data_func_wrapper, + 
target=self._data_import_wrapper, + args=([input_pipe.fileno()], - data_func, self.tar2_process.stdout)) + data_func, size_func, self.tar2_process)) + self.import_process.start() self.tar2_process.stdout.close() - self.adjust_output_size = size_func - fcntl.fcntl(self.tar2_process.stderr.fileno(), fcntl.F_SETFL, - fcntl.fcntl(self.tar2_process.stderr.fileno(), - fcntl.F_GETFL) | os.O_NONBLOCK) self.tar2_stderr = [] elif not self.tar2_process: # Extracting of the current archive failed, skip to the next @@ -628,35 +637,26 @@ filename, expected_filename) os.remove(filename) continue + self.log.debug("Releasing next chunk") + self.feed_tar2(filename, input_pipe) self.tar2_current_file = filename - input_file = open(filename, 'rb') + self.tar2_feeder.wait() + # check if any process failed + processes = { + 'target': self.tar2_feeder, + 'vmproc': self.vmproc, + 'addproc': self.tar2_process, + 'data_import': self.import_process, + 'decryptor': self.decryptor_process, + } + self.check_processes(processes) + self.tar2_feeder = None - run_error = self.handle_streams( - input_file, - {'target': input_pipe}, - {'vmproc': self.vmproc, - 'addproc': self.tar2_process, - 'data_import': self.import_process, - 'decryptor': self.decryptor_process, - }, - progress_callback=self.progress_callback) - input_file.close() - if run_error: - if run_error == "target": - self.collect_tar_output() - details = "\n".join(self.tar2_stderr) - else: - details = "%s failed" % run_error - if self.decryptor_process: - self.decryptor_process.terminate() - self.decryptor_process.wait() - self.decryptor_process = None - self.log.error('Error while processing \'%s\': %s', - self.tar2_current_file, details) - self.cleanup_tar2(wait=True, terminate=True) + if callable(self.progress_callback): + self.progress_callback(os.path.getsize(filename)) # Delete the file as we don't need it anymore self.log.debug('Removing file %s', filename) From ce2215c6032943d3f39641ba9173457f798a8ac8 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 23:32:31 +0200 Subject: [PATCH 16/17] backup: improve error logging Include VM and volume name in data-related error messages. QubesOS/qubes-issues#1214 --- qubesadmin/backup/__init__.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/qubesadmin/backup/__init__.py b/qubesadmin/backup/__init__.py index 132ce1c..66c8f54 100644 --- a/qubesadmin/backup/__init__.py +++ b/qubesadmin/backup/__init__.py @@ -1733,6 +1733,22 @@ class BackupRestore(object): except subprocess.CalledProcessError: self.log.error('Failed to set application list for %s', vm.name) + def _handle_volume_data(self, vm, volume, stream): + '''Wrap volume data import with logging''' + try: + volume.import_data(stream) + except Exception as err: # pylint: disable=broad-except + self.log.error('Failed to restore volume %s of VM %s: %s', + volume.name, vm.name, err) + + def _handle_volume_size(self, vm, volume, size): + '''Wrap volume resize with logging''' + try: + volume.resize(size) + except Exception as err: # pylint: disable=broad-except + self.log.error('Failed to resize volume %s of VM %s: %s', + volume.name, vm.name, err) + def restore_do(self, restore_info): ''' @@ -1765,8 +1781,10 @@ class BackupRestore(object): for name, volume in vm.volumes.items(): if not volume.save_on_stop: continue - data_func = volume.import_data - size_func = volume.resize + data_func = functools.partial( + self._handle_volume_data, vm, volume) + size_func = functools.partial( + self._handle_volume_size, vm, volume) handlers[os.path.join(vm_info.subdir, name + '.img')] = \ (data_func, size_func) handlers[os.path.join(vm_info.subdir, 'firewall.xml')] = ( From e7ee06936a36f00ead21fcbdb93e055c0967b607 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 17 Jul 2017 23:34:03 +0200 Subject: [PATCH 17/17] tools/qvm-backup-restore: handle VMs selection, not 
only exclusion QubesOS/qubes-issues#1214 --- qubesadmin/tools/qvm_backup_restore.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qubesadmin/tools/qvm_backup_restore.py b/qubesadmin/tools/qvm_backup_restore.py index 85e40be..b240367 100644 --- a/qubesadmin/tools/qvm_backup_restore.py +++ b/qubesadmin/tools/qvm_backup_restore.py @@ -239,6 +239,11 @@ def main(args=None): except qubesadmin.exc.QubesException as e: parser.error_runtime(str(e)) + if args.vms: + backup.options.exclude += [vm for vm in restore_info + if vm not in args.vms] + restore_info = backup.restore_info_verify(restore_info) + print(backup.get_restore_summary(restore_info)) try: