#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013-2017 Marek Marczykowski-Górecki
#                          <marmarek@invisiblethingslab.com>
# Copyright (C) 2013 Olivier Médoc <o_medoc@yahoo.fr>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
#

import asyncio
import datetime
import fcntl
import functools
import grp
import itertools
import logging
import os
import pwd
import re
import shutil
import stat
import string
import subprocess
import tempfile
import termios
import time

from .utils import size_to_human
import qubes
import qubes.storage
import qubes.storage.file
import qubes.vm.templatevm

QUEUE_ERROR = "ERROR"

QUEUE_FINISHED = "FINISHED"

HEADER_FILENAME = 'backup-header'
DEFAULT_CRYPTO_ALGORITHM = 'aes-256-cbc'
# 'scrypt' is not exactly an HMAC algorithm, but a tool we use to
# integrity-protect the data
DEFAULT_HMAC_ALGORITHM = 'scrypt'
DEFAULT_COMPRESSION_FILTER = 'gzip'
CURRENT_BACKUP_FORMAT_VERSION = '4'
# Maximum size of error message read from process stderr (including VM
# process)
MAX_STDERR_BYTES = 1024
# header + qubes.xml max size
HEADER_QUBES_XML_MAX_SIZE = 1024 * 1024
# hmac file max size - regardless of backup format version!
HMAC_MAX_SIZE = 4096

BLKSIZE = 512

_re_alphanum = re.compile(r'^[A-Za-z0-9-]*$')


class BackupCanceledError(qubes.exc.QubesException):
    def __init__(self, msg, tmpdir=None):
        super(BackupCanceledError, self).__init__(msg)
        self.tmpdir = tmpdir


class BackupHeader:
    '''Structure describing the backup-header file, included as the first
    file in the backup archive
    '''
    # pylint: disable=too-few-public-methods
    header_keys = {
        'version': 'version',
        'encrypted': 'encrypted',
        'compressed': 'compressed',
        'compression-filter': 'compression_filter',
        'crypto-algorithm': 'crypto_algorithm',
        'hmac-algorithm': 'hmac_algorithm',
        'backup-id': 'backup_id'
    }
    bool_options = ['encrypted', 'compressed']
    int_options = ['version']

    def __init__(self,
            version=None,
            encrypted=None,
            compressed=None,
            compression_filter=None,
            hmac_algorithm=None,
            crypto_algorithm=None,
            backup_id=None):
        # repeat the list to help code completion...
        self.version = version
        self.encrypted = encrypted
        self.compressed = compressed
        # Options introduced in backup format 3+, which always have a header,
        # so no need for fallback in function parameter
        self.compression_filter = compression_filter
        self.hmac_algorithm = hmac_algorithm
        self.crypto_algorithm = crypto_algorithm
        self.backup_id = backup_id

    def save(self, filename):
        with open(filename, "w") as f_header:
            # make sure 'version' is the first key
            f_header.write('version={}\n'.format(self.version))
            for key, attr in self.header_keys.items():
                if key == 'version':
                    continue
                if getattr(self, attr) is None:
                    continue
                f_header.write("{!s}={!s}\n".format(key, getattr(self, attr)))


class SendWorker:
    # pylint: disable=too-few-public-methods
    def __init__(self, queue, base_dir, backup_stdout):
        super(SendWorker, self).__init__()
        self.queue = queue
        self.base_dir = base_dir
        self.backup_stdout = backup_stdout
        self.log = logging.getLogger('qubes.backup')

    @asyncio.coroutine
    def run(self):
        self.log.debug("Started sending thread")

        while True:
            filename = yield from self.queue.get()
            if filename in (QUEUE_FINISHED, QUEUE_ERROR):
                break

            self.log.debug("Sending file {}".format(filename))
            # This tar, used for sending data out, needs to be as simple
            # and featureless as possible. It will not be verified before
            # untaring.
            tar_final_cmd = ["tar", "-cO", "--posix",
                             "-C", self.base_dir, filename]
            # pylint: disable=not-an-iterable
            final_proc = yield from asyncio.create_subprocess_exec(
                *tar_final_cmd,
                stdout=self.backup_stdout)
            retcode = yield from final_proc.wait()
            if retcode >= 2:
                # handle only exit code 2 (tar fatal error) or
                # greater (call failed?)
                raise qubes.exc.QubesException(
                    "ERROR: Failed to write the backup, out of disk space? "
                    "Check console output or ~/.xsession-errors for details.")

            # Delete the file as we don't need it anymore
            self.log.debug("Removing file {}".format(filename))
            os.remove(os.path.join(self.base_dir, filename))

        self.log.debug("Finished sending thread")


@asyncio.coroutine
def launch_proc_with_pty(args, stdin=None, stdout=None, stderr=None,
        echo=True):
    """Similar to pty.fork, but handles stdin/stdout according to the
    parameters instead of connecting them to the pty

    :return tuple (subprocess.Popen, pty_master)
    """

    def set_ctty(ctty_fd, master_fd):
        os.setsid()
        os.close(master_fd)
        fcntl.ioctl(ctty_fd, termios.TIOCSCTTY, 0)
        if not echo:
            termios_p = termios.tcgetattr(ctty_fd)
            # termios_p.c_lflags
            termios_p[3] &= ~termios.ECHO
            termios.tcsetattr(ctty_fd, termios.TCSANOW, termios_p)
    (pty_master, pty_slave) = os.openpty()
    # pylint: disable=not-an-iterable
    p = yield from asyncio.create_subprocess_exec(*args,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        preexec_fn=lambda: set_ctty(pty_slave, pty_master))
    os.close(pty_slave)
    return p, open(pty_master, 'wb+', buffering=0)


@asyncio.coroutine
def launch_scrypt(action, input_name, output_name, passphrase):
    '''
    Launch the 'scrypt' process, pass the passphrase to it and return the
    subprocess.Popen object.

    :param action: 'enc' or 'dec'
    :param input_name: input path or '-' for stdin
    :param output_name: output path or '-' for stdout
    :param passphrase: passphrase
    :type passphrase: bytes
    :return: subprocess.Popen object
    '''
    command_line = ['scrypt', action, input_name, output_name]
    (p, pty) = yield from launch_proc_with_pty(command_line,
        stdin=subprocess.PIPE if input_name == '-' else None,
        stdout=subprocess.PIPE if output_name == '-' else None,
        stderr=subprocess.PIPE,
        echo=False)
    if action == 'enc':
        prompts = (b'Please enter passphrase: ', b'Please confirm passphrase: ')
    else:
        prompts = (b'Please enter passphrase: ',)
    for prompt in prompts:
        actual_prompt = yield from p.stderr.read(len(prompt))
        if actual_prompt != prompt:
            raise qubes.exc.QubesException(
                'Unexpected prompt from scrypt: {}'.format(actual_prompt))
        pty.write(passphrase + b'\n')
        pty.flush()
    # save the pty here, so the garbage collector does not close it (which
    # would kill the child)
    p.pty = pty
    return p
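
# Minimal usage sketch (inside a coroutine; assumes the 'scrypt' tool is
# installed; the paths and passphrase below are illustrative):
#
#   scrypt = yield from launch_scrypt(
#       'enc', '/tmp/plain', '/tmp/plain.enc', b'my passphrase')
#   retcode = yield from scrypt.wait()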


class Backup:
    '''Backup operation manager. Usage:

    >>> app = qubes.Qubes()
    >>> # optional - you can use 'None' to use default list (based on
    >>> # vm.include_in_backups property)
    >>> vms = [app.domains[name] for name in ['my-vm1', 'my-vm2', 'my-vm3']]
    >>> exclude_vms = []
    >>> options = {
    >>>     'compressed': True,
    >>>     'passphrase': 'This is very weak backup passphrase',
    >>>     'target_vm': app.domains['sys-usb'],
    >>>     'target_dir': '/media/disk',
    >>> }
    >>> backup_op = Backup(app, vms, exclude_vms, **options)
    >>> print(backup_op.get_backup_summary())
    >>> asyncio.get_event_loop().run_until_complete(backup_op.backup_do())

    See attributes of this object for all available options.

    '''
    # pylint: disable=too-many-instance-attributes
    class FileToBackup:
        # pylint: disable=too-few-public-methods
        def __init__(self, file_path, subdir=None, name=None, size=None):
            if size is None:
                size = qubes.storage.file.get_disk_usage(file_path)

            if subdir is None:
                abs_file_path = os.path.abspath(file_path)
                abs_base_dir = os.path.abspath(
                    qubes.config.system_path["qubes_base_dir"]) + '/'
                abs_file_dir = os.path.dirname(abs_file_path) + '/'
                (nothing, directory, subdir) = \
                    abs_file_dir.partition(abs_base_dir)
                assert nothing == ""
                assert directory == abs_base_dir
            else:
                if subdir and not subdir.endswith('/'):
                    subdir += '/'

            #: real path to the file
            self.path = file_path
            #: size of the file
            self.size = size
            #: directory in backup archive where file should be placed
            self.subdir = subdir
            #: use this name in the archive (aka rename)
            self.name = os.path.basename(file_path)
            if name is not None:
                self.name = name
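
        # Illustrative example of the subdir computation above: with
        # file_path '/var/lib/qubes/appvms/work/firewall.xml' and
        # qubes_base_dir '/var/lib/qubes', subdir becomes 'appvms/work/'
        # (the paths are made up; the base dir value is an assumption).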

    class VMToBackup:
        # pylint: disable=too-few-public-methods
        def __init__(self, vm, files, subdir):
            self.vm = vm
            self.files = files
            self.subdir = subdir

        @property
        def size(self):
            return functools.reduce(lambda x, y: x + y.size, self.files, 0)

    def __init__(self, app, vms_list=None, exclude_list=None, **kwargs):
        """
        If vms_list is None, use the default list based on the
        vm.include_in_backups property; exclude_list is always applied
        """
        super(Backup, self).__init__()

        #: size of a single backup chunk (the data stream is split into
        #: chunks of at most this many bytes)
        self.chunk_size = 100 * 1024 * 1024
        #: progress of the backup - bytes handled of the current VM
        self._current_vm_bytes = 0
        #: progress of the backup - bytes handled of finished VMs
        self._done_vms_bytes = 0
        #: total backup size (set by :py:meth:`get_files_to_backup`)
        self.total_backup_bytes = 0
        #: application object
        self.app = app
        #: directory for temporary files - set after creating the directory
        self.tmpdir = None

        # Backup settings - defaults
        #: should the backup be compressed?
        self.compressed = True
        #: what passphrase should be used to integrity protect (and encrypt)
        #: the backup; required
        self.passphrase = None
        #: custom compression filter; a program which processes stdin to
        #: stdout
        self.compression_filter = DEFAULT_COMPRESSION_FILTER
        #: VM to which the backup should be sent (if any)
        self.target_vm = None
        #: directory to save the backup in (either in dom0 or the target VM,
        #: depending on :py:attr:`target_vm`)
        self.target_dir = None
        #: callback for progress reporting. Will be called with one argument
        #: - progress in percent
        self.progress_callback = None
        self.last_progress_time = time.time()
        #: backup ID, needs to be unique (for a given user),
        #: not necessarily unpredictable; automatically generated
        self.backup_id = datetime.datetime.now().strftime(
            '%Y%m%dT%H%M%S-' + str(os.getpid()))

        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise AttributeError(key)

        self.log = logging.getLogger('qubes.backup')

        if exclude_list is None:
            exclude_list = []

        if vms_list is None:
            vms_list = [vm for vm in app.domains if vm.include_in_backups]

        # Apply exclude list
        self.vms_for_backup = [vm for vm in vms_list
                               if vm.name not in exclude_list]

        self._files_to_backup = self.get_files_to_backup()

    def __del__(self):
        if self.tmpdir and os.path.exists(self.tmpdir):
            shutil.rmtree(self.tmpdir)

    def get_files_to_backup(self):
        files_to_backup = {}
        for vm in self.vms_for_backup:
            if vm.qid == 0:
                # handle dom0 later
                continue

            subdir = 'vm%d/' % vm.qid

            vm_files = []
            for name, volume in vm.volumes.items():
                if not volume.save_on_stop:
                    continue
                vm_files.append(self.FileToBackup(
                    volume.export(),
                    subdir,
                    name + '.img',
                    volume.usage))

            vm_files.extend(self.FileToBackup(i, subdir)
                for i in vm.fire_event('backup-get-files'))

            firewall_conf = os.path.join(vm.dir_path, vm.firewall_conf)
            if os.path.exists(firewall_conf):
                vm_files.append(self.FileToBackup(firewall_conf, subdir))

            if not vm_files:
                # subdir/ is needed in the tar file, otherwise restore
                # of a (Disp)VM without any backed up files is going
                # to fail. Adding a zero-sized file here happens to be
                # more straightforward than adding an empty directory.
                empty = self.FileToBackup("/var/run/qubes/empty", subdir)
                assert empty.size == 0
                vm_files.append(empty)

            files_to_backup[vm.qid] = self.VMToBackup(vm, vm_files, subdir)

        # Dom0 user home
        if 0 in [vm.qid for vm in self.vms_for_backup]:
            local_user = grp.getgrnam('qubes').gr_mem[0]
            home_dir = pwd.getpwnam(local_user).pw_dir
            # Home dir should have only user-owned files, so fix it now
            # to prevent permissions problems - some root-owned files can
            # be left after 'sudo bash' and similar commands
            subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])

            home_to_backup = [
                self.FileToBackup(home_dir, 'dom0-home/')]
            vm_files = home_to_backup

            files_to_backup[0] = self.VMToBackup(self.app.domains[0],
                vm_files,
                os.path.join('dom0-home', os.path.basename(home_dir)))

        self.total_backup_bytes = functools.reduce(
            lambda x, y: x + y.size, files_to_backup.values(), 0)
        return files_to_backup
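
    # Illustrative shape of the result (qids, volumes and paths depend on
    # the actual system; the names here are made up):
    #
    #   {7: VMToBackup(vm=<work>, files=[private.img, firewall.xml],
    #        subdir='vm7/'),
    #    0: VMToBackup(vm=<dom0>, files=[<home dir>],
    #        subdir='dom0-home/user')}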

    def get_backup_summary(self):
        summary = ""

        fields_to_display = [
            {"name": "VM", "width": 16},
            {"name": "type", "width": 12},
            {"name": "size", "width": 12}
        ]

        # Display the header
        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"
        for field in fields_to_display:
            fmt = "{{0:>{0}}} |".format(field["width"] + 1)
            summary += fmt.format(field["name"])
        summary += "\n"
        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        files_to_backup = self._files_to_backup

        for qid, vm_info in files_to_backup.items():
            summary_line = ""
            fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
            summary_line += fmt.format(vm_info.vm.name)

            fmt = "{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
            if qid == 0:
                summary_line += fmt.format("User home")
            elif isinstance(vm_info.vm, qubes.vm.templatevm.TemplateVM):
                summary_line += fmt.format("Template VM")
            else:
                summary_line += fmt.format("VM" + (" + Sys" if
                    vm_info.vm.updateable else ""))

            vm_size = vm_info.size

            fmt = "{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
            summary_line += fmt.format(size_to_human(vm_size))

            if qid != 0 and vm_info.vm.is_running():
                summary_line += " <-- The VM is running, backup will contain " \
                                "its state from before its start!"

            summary += summary_line + "\n"

        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        summary += fmt.format("Total size:")
        fmt = "{{0:>{0}}} |".format(
            fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2][
                "width"] + 1)
        summary += fmt.format(size_to_human(self.total_backup_bytes))
        summary += "\n"

        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        vms_not_for_backup = [vm.name for vm in self.app.domains
                              if vm not in self.vms_for_backup]
        summary += "VMs not selected for backup:\n - " + "\n - ".join(
            sorted(vms_not_for_backup)) + "\n"

        return summary

    @asyncio.coroutine
    def _prepare_backup_header(self):
        header_file_path = os.path.join(self.tmpdir, HEADER_FILENAME)
        backup_header = BackupHeader(
            version=CURRENT_BACKUP_FORMAT_VERSION,
            hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
            encrypted=True,
            compressed=self.compressed,
            compression_filter=self.compression_filter,
            backup_id=self.backup_id,
        )
        backup_header.save(header_file_path)
        # Start encrypting; scrypt will also handle integrity
        # protection
        scrypt_passphrase = '{filename}!'.format(
            filename=HEADER_FILENAME).encode() + self.passphrase
        scrypt = yield from launch_scrypt(
            'enc', header_file_path, header_file_path + '.hmac',
            scrypt_passphrase)

        retcode = yield from scrypt.wait()
        if retcode:
            scrypt_stderr = yield from scrypt.stderr.read()
            raise qubes.exc.QubesException(
                "Failed to compute hmac of header file: "
                + scrypt_stderr.decode('ascii', errors='ignore'))
        return HEADER_FILENAME, HEADER_FILENAME + ".hmac"
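
    # Note: the file name is baked into the scrypt passphrase above
    # ('backup-header!<passphrase>'), so a file cannot be renamed or
    # substituted for another without failing integrity verification on
    # restore.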

    def _send_progress_update(self):
        if not self.total_backup_bytes:
            return
        if callable(self.progress_callback):
            if time.time() - self.last_progress_time >= 1:  # avoid flooding
                progress = (
                    100 * (self._done_vms_bytes + self._current_vm_bytes) /
                    self.total_backup_bytes)
                self.last_progress_time = time.time()
                # pylint: disable=not-callable
                self.progress_callback(progress)

    def _add_vm_progress(self, bytes_done):
        self._current_vm_bytes += bytes_done
        self._send_progress_update()
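
    # A progress_callback is any callable taking the overall percentage as
    # a float, e.g. (illustrative):
    #
    #   backup_op.progress_callback = lambda pct: print('%.1f%%' % pct)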

    @asyncio.coroutine
    def _split_and_send(self, input_stream, file_basename,
            output_queue):
        '''Split *input_stream* into parts of max *chunk_size* bytes and send
        to *output_queue*.

        :param input_stream: stream (asyncio reader stream) of data to split
        :param file_basename: basename (i.e. without part number and '.enc')
            of output files
        :param output_queue: asyncio.Queue instance to put produced files to
            - queue will get only filenames of written chunks
        '''
        # Keep producing chunks until the input stream is exhausted
        # (handle_streams returns None) or any subprocess fails
        i = 0
        run_error = "size_limit"
        scrypt = None
        while run_error == "size_limit":
            # Prepare the next chunk, named '<basename>.NNN.enc'
            chunkfile = file_basename + ".%03d.enc" % i
            i += 1

            # Start encrypting; scrypt will also handle integrity
            # protection
            scrypt_passphrase = \
                '{backup_id}!{filename}!'.format(
                    backup_id=self.backup_id,
                    filename=os.path.relpath(chunkfile[:-4],
                        self.tmpdir)).encode() + self.passphrase
            try:
                scrypt = yield from launch_scrypt(
                    "enc", "-", chunkfile, scrypt_passphrase)

                run_error = yield from handle_streams(
                    input_stream,
                    scrypt.stdin,
                    self.chunk_size,
                    self._add_vm_progress
                )

                self.log.debug(
                    "handle_streams returned: {}".format(run_error))
            except:
                if scrypt is not None:
                    scrypt.terminate()
                raise

            scrypt.stdin.close()
            yield from scrypt.wait()
            self.log.debug("scrypt return code: {}".format(
                scrypt.returncode))

            # Send the chunk to the backup target
            yield from output_queue.put(
                os.path.relpath(chunkfile, self.tmpdir))
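
    # The per-chunk scrypt passphrase thus looks like (illustrative values):
    #
    #   b'20231125T054113-1234!vm7/private.img.000!<user passphrase>'
    #
    # binding the backup ID and the chunk's path inside the archive to its
    # integrity protection.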

    @asyncio.coroutine
    def _wrap_and_send_files(self, files_to_backup, output_queue):
        for vm_info in files_to_backup:
            for file_info in vm_info.files:

                self.log.debug("Backing up {}".format(file_info))

                backup_tempfile = os.path.join(
                    self.tmpdir, file_info.subdir,
                    file_info.name)
                self.log.debug("Using temporary location: {}".format(
                    backup_tempfile))

                # Ensure the temporary directory exists
                if not os.path.isdir(os.path.dirname(backup_tempfile)):
                    os.makedirs(os.path.dirname(backup_tempfile))

                # The first tar cmd can use any complex features we want.
                # Files will be verified before untaring this.
                # Prefix the path in archive with file_info.subdir to have it
                # verified during untar
                tar_cmdline = (["tar", "-Pc", '--sparse',
                    '-C', os.path.dirname(file_info.path)] +
                    (['--dereference'] if
                        file_info.subdir != "dom0-home/" else []) +
                    ['--xform=s:^%s:%s\\0:' % (
                        os.path.basename(file_info.path),
                        file_info.subdir),
                        os.path.basename(file_info.path)
                    ])
                file_stat = os.stat(file_info.path)
                if stat.S_ISBLK(file_stat.st_mode) or \
                        file_info.name != os.path.basename(file_info.path):
                    # tar doesn't handle the content of a block device; use
                    # our own tar writer. Also use our tar writer when
                    # renaming the file.
                    assert not stat.S_ISDIR(file_stat.st_mode), \
                        "Renaming directories not supported"
                    tar_cmdline = ['python3', '-m', 'qubes.tarwriter',
                        '--override-name=%s' % (
                            os.path.join(file_info.subdir, os.path.basename(
                                file_info.name))),
                        file_info.path]
                if self.compressed:
                    tar_cmdline.insert(-2,
                        "--use-compress-program=%s" % self.compression_filter)

                self.log.debug(" ".join(tar_cmdline))

                # Pipe: tar-sparse | scrypt | tar | backup_target
                # TODO: log/handle stderr
                # pylint: disable=not-an-iterable
                tar_sparse = yield from asyncio.create_subprocess_exec(
                    *tar_cmdline, stdout=subprocess.PIPE)

                try:
                    yield from self._split_and_send(
                        tar_sparse.stdout,
                        backup_tempfile,
                        output_queue)
                except:
                    try:
                        tar_sparse.terminate()
                    except ProcessLookupError:
                        pass
                    raise

                yield from tar_sparse.wait()
                if tar_sparse.returncode:
                    raise qubes.exc.QubesException(
                        'Failed to archive {} file'.format(file_info.path))

            # This VM done, update progress
            self._done_vms_bytes += vm_info.size
            self._current_vm_bytes = 0
            self._send_progress_update()

        yield from output_queue.put(QUEUE_FINISHED)
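
    # The tar --xform above rewrites archive member names, prefixing them
    # with the VM's subdir; illustratively, 'private.img' with subdir 'vm7/'
    # is stored as 'vm7/private.img' (the names are made up).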

    @staticmethod
    @asyncio.coroutine
    def _monitor_process(proc, error_message):
        try:
            yield from proc.wait()
        except:
            proc.terminate()
            raise

        if proc.returncode:
            if proc.stderr is not None:
                proc_stderr = (yield from proc.stderr.read())
                proc_stderr = proc_stderr.decode('ascii', errors='ignore')
                proc_stderr = ''.join(
                    c for c in proc_stderr if c in string.printable and
                    c not in '\r\n%{}')
                error_message += ': ' + proc_stderr
            raise qubes.exc.QubesException(error_message)

    @staticmethod
    @asyncio.coroutine
    def _cancel_on_error(future, previous_task):
        '''If a later element of the chain fails, cancel the previous one
        to avoid deadlock.
        When an earlier element of the chain fails, it will be handled by
        :py:meth:`backup_do`.

        The chain is:
        :py:meth:`_wrap_and_send_files` -> :py:class:`SendWorker` -> vmproc
        '''
        try:
            yield from future
        except:  # pylint: disable=bare-except
            previous_task.cancel()

    @asyncio.coroutine
    def backup_do(self):
        # pylint: disable=too-many-statements
        if self.passphrase is None:
            raise qubes.exc.QubesException("No passphrase set")
        if not isinstance(self.passphrase, bytes):
            self.passphrase = self.passphrase.encode('utf-8')
        qubes_xml = self.app.store
        self.tmpdir = tempfile.mkdtemp()
        shutil.copy(qubes_xml, os.path.join(self.tmpdir, 'qubes.xml'))
        qubes_xml = os.path.join(self.tmpdir, 'qubes.xml')
        backup_app = qubes.Qubes(qubes_xml, offline_mode=True)
        backup_app.events_enabled = False

        files_to_backup = self._files_to_backup
        # make sure backup_content isn't set initially
        for vm in backup_app.domains:
            vm.events_enabled = False
            vm.features['backup-content'] = False

        for qid, vm_info in files_to_backup.items():
            # VM is included in the backup
            backup_app.domains[qid].features['backup-content'] = True
            backup_app.domains[qid].features['backup-path'] = vm_info.subdir
            backup_app.domains[qid].features['backup-size'] = vm_info.size
        backup_app.save()
        del backup_app

        vmproc = None
        if self.target_vm is not None:
            # Prepare the backup target (Qubes service call)
            # If APPVM, STDOUT is a PIPE
            read_fd, write_fd = os.pipe()
            vmproc = yield from self.target_vm.run_service('qubes.Backup',
                stdin=read_fd,
                stderr=subprocess.PIPE,
                stdout=subprocess.DEVNULL)
            os.close(read_fd)
            os.write(write_fd, (self.target_dir.
                replace("\r", "").replace("\n", "") + "\n").encode())
            backup_stdout = write_fd
        else:
            # Prepare the backup target (local file)
            if os.path.isdir(self.target_dir):
                backup_target = self.target_dir + "/qubes-{0}". \
                    format(time.strftime("%Y-%m-%dT%H%M%S"))
            else:
                backup_target = self.target_dir

                # The parent directory of the target file must already exist
                if not os.path.exists(os.path.dirname(self.target_dir)):
                    raise qubes.exc.QubesException(
                        "ERROR: the backup directory for {0} does not exist".
                        format(self.target_dir))

            # If not APPVM, STDOUT is a local file
            backup_stdout = open(backup_target, 'wb')

        # Tar with tape length does not deal well with stdout
        # (close stdout between two tapes)
        # For this reason, we will use named pipes instead
        self.log.debug("Working in {}".format(self.tmpdir))

        self.log.debug("Will backup: {}".format(files_to_backup))

        header_files = yield from self._prepare_backup_header()

        # Setup worker to send encrypted data chunks to the backup_target
        to_send = asyncio.Queue(10)
        send_proc = SendWorker(to_send, self.tmpdir, backup_stdout)
        send_task = asyncio.ensure_future(send_proc.run())

        vmproc_task = None
        if vmproc is not None:
            vmproc_task = asyncio.ensure_future(
                self._monitor_process(vmproc,
                    'Writing backup to VM {} failed'.format(
                        self.target_vm.name)))
            asyncio.ensure_future(self._cancel_on_error(
                vmproc_task, send_task))

        for file_name in header_files:
            yield from to_send.put(file_name)

        qubes_xml_info = self.VMToBackup(
            None,
            [self.FileToBackup(qubes_xml, '')],
            ''
        )
        inner_archive_task = asyncio.ensure_future(
            self._wrap_and_send_files(
                itertools.chain([qubes_xml_info], files_to_backup.values()),
                to_send
            ))
        asyncio.ensure_future(
            self._cancel_on_error(send_task, inner_archive_task))

        try:
            try:
                yield from inner_archive_task
            except:
                yield from to_send.put(QUEUE_ERROR)
                # in fact we may be handling CancelledError, induced by an
                # exception in send_task or vmproc_task (and propagated by
                # the self._cancel_on_error call above); in such a case this
                # yield from will raise that exception, covering the
                # CancelledError - this is intended behaviour
                if vmproc_task:
                    yield from vmproc_task
                yield from send_task
                raise

            yield from send_task

        finally:
            if isinstance(backup_stdout, int):
                os.close(backup_stdout)
            else:
                backup_stdout.close()
            try:
                if vmproc_task:
                    yield from vmproc_task
            finally:
                shutil.rmtree(self.tmpdir)

        # Save date of last backup, only when backup succeeded
        for qid, vm_info in files_to_backup.items():
            if vm_info.vm:
                vm_info.vm.backup_timestamp = \
                    int(datetime.datetime.now().strftime('%s'))

        self.app.save()


@asyncio.coroutine
def handle_streams(stream_in, stream_out, size_limit=None,
        progress_callback=None):
    '''
    Copy stream_in to stream_out. Copy at most `size_limit` bytes (if given).

    :param stream_in: StreamReader object to read data from
    :param stream_out: StreamWriter object to write data to
    :param size_limit: int maximum data amount to process
    :param progress_callback: callable function to report progress, will be
        given copied data size (it should accumulate internally)
    :return: "size_limit" or None (no error)
    '''
    buffer_size = 409600
    bytes_copied = 0
    while True:
        if size_limit:
            to_copy = min(buffer_size, size_limit - bytes_copied)
            if to_copy <= 0:
                return "size_limit"
        else:
            to_copy = buffer_size
        buf = yield from stream_in.read(to_copy)
        if not buf:
            # done
            break

        if callable(progress_callback):
            progress_callback(len(buf))
        stream_out.write(buf)
        bytes_copied += len(buf)
    return None
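
# Minimal usage sketch of handle_streams (inside a coroutine; 'reader' and
# 'writer' stand for asyncio stream objects, e.g. one subprocess's stdout
# and another's stdin - both names are illustrative):
#
#   result = yield from handle_streams(reader, writer, size_limit=1024 * 1024)
#   if result == "size_limit":
#       ...  # more data remains in 'reader'; start another output chunk
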
# vim:sw=4:et: