#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013-2015  Marek Marczykowski-Górecki
#                          <marmarek@invisiblethingslab.com>
# Copyright (C) 2013  Olivier Médoc <o_medoc@yahoo.fr>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from __future__ import unicode_literals
from qubes import QubesException, QubesVmCollection
from qubes import QubesVmClasses
from qubes import system_path, vm_files
from qubesutils import size_to_human, print_stdout, print_stderr, get_disk_usage

import sys
import os
import fcntl
import subprocess
import re
import shutil
import tempfile
import time
import grp
import pwd
import errno
import datetime
from multiprocessing import Queue, Process

BACKUP_DEBUG = False

HEADER_FILENAME = 'backup-header'
DEFAULT_CRYPTO_ALGORITHM = 'aes-256-cbc'
DEFAULT_HMAC_ALGORITHM = 'SHA512'
DEFAULT_COMPRESSION_FILTER = 'gzip'
CURRENT_BACKUP_FORMAT_VERSION = '3'
# Maximum size of error message read from a process stderr (including the
# VM process)
MAX_STDERR_BYTES = 1024
# backup-header + qubes.xml maximum size
HEADER_QUBES_XML_MAX_SIZE = 1024 * 1024

# global state for backup_cancel()
running_backup_operation = None


class BackupOperationInfo:
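    """Mutable state of the backup/restore operation currently in progress.

    backup_cancel() flips ``canceled`` and terminates every process listed
    in ``processes_to_kill_on_cancel``; ``tmpdir_to_remove`` points at the
    staging directory the operation cleans up itself.
    """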
    def __init__(self):
        self.canceled = False
        self.processes_to_kill_on_cancel = []
        self.tmpdir_to_remove = None


class BackupCanceledError(QubesException):
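    """Raised when a backup/restore operation is canceled.

    ``tmpdir``, when set, names a temporary directory left behind for the
    caller to clean up.
    """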
    def __init__(self, msg, tmpdir=None):
        super(BackupCanceledError, self).__init__(msg)
        self.tmpdir = tmpdir


class BackupHeader:
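    """Names of the keys allowed in the backup-header file.

    bool_options and int_options list the keys whose values must be
    coerced to bool/int when parsed (see parse_backup_header()).
    """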
    version = 'version'
    encrypted = 'encrypted'
    compressed = 'compressed'
    compression_filter = 'compression-filter'
    crypto_algorithm = 'crypto-algorithm'
    hmac_algorithm = 'hmac-algorithm'
    bool_options = ['encrypted', 'compressed']
    int_options = ['version']


def file_to_backup(file_path, subdir=None):
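    """Describe a single file/directory to back up.

    Returns a one-element list holding the path, its on-disk size, and the
    subdirectory under which it should be placed in the archive. If subdir
    is None, it is derived from the path's location relative to the Qubes
    base directory.
    """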
    sz = get_disk_usage(file_path)

    if subdir is None:
        abs_file_path = os.path.abspath(file_path)
        abs_base_dir = os.path.abspath(system_path["qubes_base_dir"]) + '/'
        abs_file_dir = os.path.dirname(abs_file_path) + '/'
        (nothing, directory, subdir) = abs_file_dir.partition(abs_base_dir)
        assert nothing == ""
        assert directory == abs_base_dir
    else:
        if len(subdir) > 0 and not subdir.endswith('/'):
            subdir += '/'
    return [{"path": file_path, "size": sz, "subdir": subdir}]


def backup_cancel():
    """
    Cancel currently running backup/restore operation

    @return: True if any operation was signaled
    """
    if running_backup_operation is None:
        return False

    running_backup_operation.canceled = True
    for proc in running_backup_operation.processes_to_kill_on_cancel:
        try:
            proc.terminate()
        except:
            pass
    return True


def backup_prepare(vms_list=None, exclude_list=None,
                   print_callback=print_stdout, hide_vm_names=True):
    """
    If vms_list is None, include all VMs considered sensible to back up;
    exclude_list is applied in either case.
    """
    files_to_backup = file_to_backup(system_path["qubes_store_filename"])

    if exclude_list is None:
        exclude_list = []

    qvm_collection = QubesVmCollection()
    qvm_collection.lock_db_for_writing()
    qvm_collection.load()

    if vms_list is None:
        all_vms = [vm for vm in qvm_collection.values()]
        selected_vms = [vm for vm in all_vms if vm.include_in_backups]
        appvms_to_backup = [vm for vm in selected_vms if
                            vm.is_appvm() and not vm.internal]
        netvms_to_backup = [vm for vm in selected_vms if
                            vm.is_netvm() and not vm.qid == 0]
        template_vms_worth_backingup = [vm for vm in selected_vms if (
            vm.is_template() and vm.include_in_backups)]
        dom0 = [qvm_collection[0]]

        vms_list = appvms_to_backup + netvms_to_backup + \
            template_vms_worth_backingup + dom0

    vms_for_backup = vms_list
    # Apply exclude list
    if exclude_list:
        vms_for_backup = [vm for vm in vms_list if vm.name not in exclude_list]

    there_are_running_vms = False

    fields_to_display = [
        {"name": "VM", "width": 16},
        {"name": "type", "width": 12},
        {"name": "size", "width": 12}
    ]

    # Display the header
    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt = "{{0:>{0}}} |".format(f["width"] + 1)
        s += fmt.format(f["name"])
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    files_to_backup_index = 0
    for vm in vms_for_backup:
        if vm.is_template():
            # handle templates later
            continue
        if vm.qid == 0:
            # handle dom0 later
            continue

        if hide_vm_names:
            subdir = 'vm%d/' % vm.qid
        else:
            subdir = None

        if vm.private_img is not None:
            files_to_backup += file_to_backup(vm.private_img, subdir)

        if vm.is_appvm():
            files_to_backup += file_to_backup(vm.icon_path, subdir)
        if vm.updateable:
            if os.path.exists(vm.dir_path + "/apps.templates"):
                # template
                files_to_backup += file_to_backup(
                    vm.dir_path + "/apps.templates", subdir)
            else:
                # standalone VM
                files_to_backup += file_to_backup(vm.dir_path + "/apps", subdir)

            if os.path.exists(vm.dir_path + "/kernels"):
                files_to_backup += file_to_backup(vm.dir_path + "/kernels",
                                                  subdir)
        if os.path.exists(vm.firewall_conf):
            files_to_backup += file_to_backup(vm.firewall_conf, subdir)
        if 'appmenus_whitelist' in vm_files and \
                os.path.exists(os.path.join(vm.dir_path,
                                            vm_files['appmenus_whitelist'])):
            files_to_backup += file_to_backup(
                os.path.join(vm.dir_path, vm_files['appmenus_whitelist']),
                subdir)

        if vm.updateable:
            files_to_backup += file_to_backup(vm.root_img, subdir)

        s = ""
        fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format(vm.name)

        fmt = "{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        if vm.is_netvm():
            s += fmt.format("NetVM" + (" + Sys" if vm.updateable else ""))
        else:
            s += fmt.format("AppVM" + (" + Sys" if vm.updateable else ""))

        vm_size = reduce(lambda x, y: x + y["size"],
                         files_to_backup[files_to_backup_index:],
                         0)
        files_to_backup_index = len(files_to_backup)

        fmt = "{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(vm_size))

        if vm.is_running():
            s += " <-- The VM is running, please shut it down before " \
                 "proceeding with the backup!"
            there_are_running_vms = True

        print_callback(s)

    for vm in vms_for_backup:
        if not vm.is_template():
            # already handled
            continue
        if vm.qid == 0:
            # handle dom0 later
            continue
        vm_sz = vm.get_disk_utilization()
        if hide_vm_names:
            template_subdir = 'vm%d/' % vm.qid
        else:
            template_subdir = os.path.relpath(
                vm.dir_path,
                system_path["qubes_base_dir"]) + '/'
        template_to_backup = [{"path": vm.dir_path + '/.',
                               "size": vm_sz,
                               "subdir": template_subdir}]
        files_to_backup += template_to_backup

        s = ""
        fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format(vm.name)

        fmt = "{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        s += fmt.format("Template VM")

        fmt = "{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(vm_sz))

        if vm.is_running():
            s += " <-- The VM is running, please shut it down before " \
                 "proceeding with the backup!"
            there_are_running_vms = True

        print_callback(s)

    # Initialize backup flag on all VMs
    vms_for_backup_qid = [vm.qid for vm in vms_for_backup]
    for vm in qvm_collection.values():
        vm.backup_content = False
        if vm.qid == 0:
            # handle dom0 later
            continue

        if vm.qid in vms_for_backup_qid:
            vm.backup_content = True
            vm.backup_size = vm.get_disk_utilization()
            if hide_vm_names:
                vm.backup_path = 'vm%d' % vm.qid
            else:
                vm.backup_path = os.path.relpath(vm.dir_path,
                                                 system_path["qubes_base_dir"])

    # Dom0 user home
    if 0 in vms_for_backup_qid:
        local_user = grp.getgrnam('qubes').gr_mem[0]
        home_dir = pwd.getpwnam(local_user).pw_dir
        # The home dir should contain only user-owned files, so fix ownership
        # now to prevent permission problems - some root-owned files can be
        # left behind after 'sudo bash' and similar commands
        subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])

        home_sz = get_disk_usage(home_dir)
        home_to_backup = [
            {"path": home_dir, "size": home_sz, "subdir": 'dom0-home/'}]
        files_to_backup += home_to_backup

        vm = qvm_collection[0]
        vm.backup_content = True
        vm.backup_size = home_sz
        vm.backup_path = os.path.join('dom0-home', os.path.basename(home_dir))

        s = ""
        fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format('Dom0')

        fmt = "{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        s += fmt.format("User home")

        fmt = "{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(home_sz))

        print_callback(s)

    qvm_collection.save()
    # FIXME: should be done after the backup completes
    qvm_collection.unlock_db()

    total_backup_sz = 0
    for f in files_to_backup:
        total_backup_sz += f["size"]

    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    s = ""
    fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
    s += fmt.format("Total size:")
    fmt = "{{0:>{0}}} |".format(
        fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2][
            "width"] + 1)
    s += fmt.format(size_to_human(total_backup_sz))
    print_callback(s)

    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    vms_not_for_backup = [vm.name for vm in qvm_collection.values()
                          if not vm.backup_content]
    print_callback("VMs not selected for backup: %s" % " ".join(
        vms_not_for_backup))

    if there_are_running_vms:
        raise QubesException("Please shutdown all VMs before proceeding.")

    for fileinfo in files_to_backup:
        assert len(fileinfo["subdir"]) == 0 or fileinfo["subdir"][-1] == '/', \
            "'subdir' must end with a '/': %s" % unicode(fileinfo)

    return files_to_backup


class SendWorker(Process):
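    """Worker process that streams staged backup files to the target.

    Takes file names from a queue, wraps each one in a deliberately simple
    POSIX tar stream, and writes it to backup_stdout (a local file or a
    pipe to the backup VM), deleting the staged file afterwards.
    """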
    def __init__(self, queue, base_dir, backup_stdout):
        super(SendWorker, self).__init__()
        self.queue = queue
        self.base_dir = base_dir
        self.backup_stdout = backup_stdout

    def run(self):
        if BACKUP_DEBUG:
            print "Started sending thread"

        if BACKUP_DEBUG:
            print "Moving to temporary dir", self.base_dir
        os.chdir(self.base_dir)

        for filename in iter(self.queue.get, None):
            if filename == "FINISHED" or filename == "ERROR":
                break

            if BACKUP_DEBUG:
                print "Sending file", filename
            # The tar process used for sending data out needs to be as
            # simple and featureless as possible: it will not be verified
            # before untaring.
            tar_final_cmd = ["tar", "-cO", "--posix",
                             "-C", self.base_dir, filename]
            final_proc = subprocess.Popen(tar_final_cmd,
                                          stdin=subprocess.PIPE,
                                          stdout=self.backup_stdout)
            if final_proc.wait() >= 2:
                # handle only exit code 2 (tar fatal error) or
                # greater (call failed?)
                raise QubesException(
                    "ERROR: Failed to write the backup, out of disk space? "
                    "Check console output or ~/.xsession-errors for details.")

            # Delete the file as we don't need it anymore
            if BACKUP_DEBUG:
                print "Removing file", filename
            os.remove(filename)

        if BACKUP_DEBUG:
            print "Finished sending thread"


def prepare_backup_header(target_directory, passphrase, compressed=False,
                          encrypted=False,
                          hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
                          crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
                          compression_filter=None):
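    """Write the plaintext backup-header file and its HMAC.

    The header records the backup format version, HMAC and crypto
    algorithms, and the encryption/compression settings. Its HMAC is
    computed with openssl, keyed with the backup passphrase. Returns both
    file names (relative to target_directory).
    """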
    header_file_path = os.path.join(target_directory, HEADER_FILENAME)
    with open(header_file_path, "w") as f:
        f.write(str("%s=%s\n" % (BackupHeader.version,
                                 CURRENT_BACKUP_FORMAT_VERSION)))
        f.write(str("%s=%s\n" % (BackupHeader.hmac_algorithm, hmac_algorithm)))
        f.write(str("%s=%s\n" % (BackupHeader.crypto_algorithm,
                                 crypto_algorithm)))
        f.write(str("%s=%s\n" % (BackupHeader.encrypted, str(encrypted))))
        f.write(str("%s=%s\n" % (BackupHeader.compressed, str(compressed))))
        if compressed:
            f.write(str("%s=%s\n" % (BackupHeader.compression_filter,
                                     str(compression_filter))))

    hmac = subprocess.Popen(["openssl", "dgst",
                             "-" + hmac_algorithm, "-hmac", passphrase],
                            stdin=open(header_file_path, "r"),
                            stdout=open(header_file_path + ".hmac", "w"))
    if hmac.wait() != 0:
        raise QubesException("Failed to compute hmac of header file")
    return HEADER_FILENAME, HEADER_FILENAME + ".hmac"


def backup_do(base_backup_dir, files_to_backup, passphrase,
              progress_callback=None, encrypted=False, appvm=None,
              compressed=False, hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
              crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM):
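    """Perform the backup of the files prepared by backup_prepare().

    Streams each file through tar (optionally a compression filter and an
    openssl encryptor), splits the result into 100MB chunks, HMACs every
    chunk, and hands the chunks to a SendWorker that writes them to
    base_backup_dir - either a local path or, when appvm is given, a
    qubes.Backup service call in that VM.
    """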
    global running_backup_operation

    total_backup_sz = 0
    passphrase = passphrase.encode('utf-8')
    for f in files_to_backup:
        total_backup_sz += f["size"]

    if isinstance(compressed, str):
        compression_filter = compressed
    else:
        compression_filter = DEFAULT_COMPRESSION_FILTER

    running_backup_operation = BackupOperationInfo()
    vmproc = None
    tar_sparse = None
    if appvm is not None:
        # Prepare the backup target (Qubes service call)
        backup_target = "QUBESRPC qubes.Backup dom0"

        # If APPVM, STDOUT is a PIPE
        vmproc = appvm.run(command=backup_target, passio_popen=True,
                           passio_stderr=True)
        vmproc.stdin.write(base_backup_dir.
                           replace("\r", "").replace("\n", "") + "\n")
        backup_stdout = vmproc.stdin
        running_backup_operation.processes_to_kill_on_cancel.append(vmproc)
    else:
        # Prepare the backup target (local file)
        if os.path.isdir(base_backup_dir):
            backup_target = base_backup_dir + "/qubes-{0}". \
                format(time.strftime("%Y-%m-%dT%H%M%S"))
        else:
            backup_target = base_backup_dir

        # Check that the target directory exists
        if not os.path.exists(os.path.dirname(base_backup_dir)):
            raise QubesException(
                "ERROR: the backup directory for {0} does not exist".
                format(base_backup_dir))

        # If not APPVM, STDOUT is a local file
        backup_stdout = open(backup_target, 'wb')

    global blocks_backedup
    blocks_backedup = 0
    if callable(progress_callback):
        progress = blocks_backedup * 11 / total_backup_sz
        progress_callback(progress)

    backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/backup_")
    running_backup_operation.tmpdir_to_remove = backup_tmpdir

    # Tar with tape length does not deal well with stdout (it closes stdout
    # between two tapes), so we use named pipes instead
    if BACKUP_DEBUG:
        print "Working in", backup_tmpdir

    backup_pipe = os.path.join(backup_tmpdir, "backup_pipe")
    if BACKUP_DEBUG:
        print "Creating pipe in:", backup_pipe
    os.mkfifo(backup_pipe)

    if BACKUP_DEBUG:
        print "Will backup:", files_to_backup

    header_files = prepare_backup_header(backup_tmpdir, passphrase,
                                         compressed=bool(compressed),
                                         encrypted=encrypted,
                                         hmac_algorithm=hmac_algorithm,
                                         crypto_algorithm=crypto_algorithm,
                                         compression_filter=compression_filter)

    # Set up the worker that sends data chunks to the backup_target
    def compute_progress(new_size, total_backup_size):
        global blocks_backedup
        blocks_backedup += new_size
        if callable(progress_callback):
            this_progress = blocks_backedup / float(total_backup_size)
            progress_callback(int(round(this_progress * 100, 2)))

    to_send = Queue(10)
    send_proc = SendWorker(to_send, backup_tmpdir, backup_stdout)
    send_proc.start()

    for f in header_files:
        to_send.put(f)

    for filename in files_to_backup:
        if BACKUP_DEBUG:
            print "Backing up", filename

        backup_tempfile = os.path.join(backup_tmpdir,
                                       filename["subdir"],
                                       os.path.basename(filename["path"]))
        if BACKUP_DEBUG:
            print "Using temporary location:", backup_tempfile

        # Ensure the temporary directory exists
        if not os.path.isdir(os.path.dirname(backup_tempfile)):
            os.makedirs(os.path.dirname(backup_tempfile))

        # The first tar cmd can use any complex features we want, because the
        # files will be verified before untaring.
        # Prefix the path in the archive with filename["subdir"] to have it
        # verified during untar
        tar_cmdline = ["tar", "-Pc", '--sparse',
                       "-f", backup_pipe,
                       '-C', os.path.dirname(filename["path"]),
                       '--xform', 's:^%s:%s\\0:' % (
                           os.path.basename(filename["path"]),
                           filename["subdir"]),
                       os.path.basename(filename["path"])
                       ]
        if compressed:
            tar_cmdline.insert(-1,
                               "--use-compress-program=%s" % compression_filter)

        if BACKUP_DEBUG:
            print " ".join(tar_cmdline)

        # Tips: Popen(bufsize=0)
        # Pipe: tar-sparse | encryptor [| hmac] | tar | backup_target
        # Pipe: tar-sparse [| hmac] | tar | backup_target
        tar_sparse = subprocess.Popen(tar_cmdline, stdin=subprocess.PIPE,
                                      stderr=(open(os.devnull, 'w')
                                              if not BACKUP_DEBUG
                                              else None))
        running_backup_operation.processes_to_kill_on_cancel.append(tar_sparse)

        # Wait for the tar process to finish or for any error in the other
        # subprocesses
        i = 0
        run_error = "paused"
        encryptor = None
        if encrypted:
            # Start encryptor
            # If no cipher is provided, the data is forwarded unencrypted!
            encryptor = subprocess.Popen(["openssl", "enc",
                                          "-e", "-" + crypto_algorithm,
                                          "-pass", "pass:" + passphrase],
                                         stdin=open(backup_pipe, 'rb'),
                                         stdout=subprocess.PIPE)
            pipe = encryptor.stdout
        else:
            pipe = open(backup_pipe, 'rb')
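        # Each pass of this loop produces one chunk file of at most 100MB
        # (the size_limit below): the tar/encryptor stream is copied into
        # the chunk while being HMAC-ed, then the chunk and its .hmac are
        # queued for the sending process before the next chunk is started.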
        while run_error == "paused":

            # Start HMAC
            hmac = subprocess.Popen(["openssl", "dgst",
                                     "-" + hmac_algorithm, "-hmac", passphrase],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)

            # Prepare the next chunk
            chunkfile = backup_tempfile + "." + "%03d" % i
            i += 1
            chunkfile_p = open(chunkfile, 'wb')

            common_args = {
                'backup_target': chunkfile_p,
                'total_backup_sz': total_backup_sz,
                'hmac': hmac,
                'vmproc': vmproc,
                'addproc': tar_sparse,
                'progress_callback': compute_progress,
                'size_limit': 100 * 1024 * 1024,
            }
            run_error = wait_backup_feedback(
                in_stream=pipe, streamproc=encryptor,
                **common_args)
            chunkfile_p.close()

            if BACKUP_DEBUG:
                print "Wait_backup_feedback returned:", run_error

            if running_backup_operation.canceled:
                try:
                    tar_sparse.terminate()
                except:
                    pass
                try:
                    hmac.terminate()
                except:
                    pass
                tar_sparse.wait()
                hmac.wait()
                to_send.put("ERROR")
                send_proc.join()
                shutil.rmtree(backup_tmpdir)
                running_backup_operation = None
                raise BackupCanceledError("Backup canceled")

            if run_error and run_error != "size_limit":
                send_proc.terminate()
                if run_error == "VM" and vmproc:
                    raise QubesException(
                        "Failed to write the backup, VM output:\n" +
                        vmproc.stderr.read(MAX_STDERR_BYTES))
                else:
                    raise QubesException("Failed to perform backup: error in " +
                                         run_error)

            # Send the chunk to the backup target
            to_send.put(os.path.relpath(chunkfile, backup_tmpdir))

            # Close HMAC
            hmac.stdin.close()
            hmac.wait()
            if BACKUP_DEBUG:
                print "HMAC proc return code:", hmac.poll()

            # Write HMAC data next to the chunk file
            hmac_data = hmac.stdout.read()
            if BACKUP_DEBUG:
                print "Writing hmac to", chunkfile + ".hmac"
            hmac_file = open(chunkfile + ".hmac", 'w')
            hmac_file.write(hmac_data)
            hmac_file.flush()
            hmac_file.close()

            # Send the HMAC to the backup target
            to_send.put(os.path.relpath(chunkfile, backup_tmpdir) + ".hmac")

            if tar_sparse.poll() is None or run_error == "size_limit":
                run_error = "paused"
            else:
                running_backup_operation.processes_to_kill_on_cancel.remove(
                    tar_sparse)
                if BACKUP_DEBUG:
                    print "Finished tar sparse with exit code", tar_sparse \
                        .poll()
        pipe.close()

    to_send.put("FINISHED")
    send_proc.join()
    shutil.rmtree(backup_tmpdir)

    if running_backup_operation.canceled:
        running_backup_operation = None
        raise BackupCanceledError("Backup canceled")

    running_backup_operation = None

    if send_proc.exitcode != 0:
        raise QubesException(
            "Failed to send backup: error in the sending process")

    if vmproc:
        if BACKUP_DEBUG:
            print "VMProc1 proc return code:", vmproc.poll()
            if tar_sparse is not None:
                print "Sparse1 proc return code:", tar_sparse.poll()
        vmproc.stdin.close()

    # Save the date of the last backup
    qvm_collection = QubesVmCollection()
    qvm_collection.lock_db_for_writing()
    qvm_collection.load()

    for vm in qvm_collection.values():
        if vm.backup_content:
            vm.backup_timestamp = datetime.datetime.now()

    qvm_collection.save()
    qvm_collection.unlock_db()


'''
' Wait for a backup chunk to finish
' - Monitor all the processes (streamproc, hmac, vmproc, addproc) for errors
' - Copy stdout of streamproc to backup_target and to hmac stdin if available
' - Compute progress based on total_backup_sz and send progress to the
'   progress_callback function
' - Returns when:
'   - one of the monitored processes errors out (streamproc, hmac, vmproc,
'     addproc), along with the name of the process that failed
'   - all of the monitored processes except vmproc finished successfully
'     (vmproc termination is controlled by the python script)
'   - streamproc no longer delivers any data (returns with the error "")
'   - size_limit is provided and is about to be exceeded
'''


def wait_backup_feedback(progress_callback, in_stream, streamproc,
                         backup_target, total_backup_sz, hmac=None, vmproc=None,
                         addproc=None,
                         size_limit=None):
    buffer_size = 409600

    run_error = None
    run_count = 1
    bytes_copied = 0
    while run_count > 0 and run_error is None:

        if size_limit and bytes_copied + buffer_size > size_limit:
            return "size_limit"
        buf = in_stream.read(buffer_size)
        progress_callback(len(buf), total_backup_sz)
        bytes_copied += len(buf)

        run_count = 0
        if hmac:
            retcode = hmac.poll()
            if retcode is not None:
                if retcode != 0:
                    run_error = "hmac"
            else:
                run_count += 1

        if addproc:
            retcode = addproc.poll()
            if retcode is not None:
                if retcode != 0:
                    run_error = "addproc"
            else:
                run_count += 1

        if vmproc:
            retcode = vmproc.poll()
            if retcode is not None:
                if retcode != 0:
                    run_error = "VM"
                    if BACKUP_DEBUG:
                        print vmproc.stdout.read()
            else:
                # the VM process should run until the end
                pass

        if streamproc:
            retcode = streamproc.poll()
            if retcode is not None:
                if retcode != 0:
                    run_error = "streamproc"
                    break
                elif retcode == 0 and len(buf) <= 0:
                    return ""
            run_count += 1

        else:
            if len(buf) <= 0:
                return ""

        try:
            backup_target.write(buf)
        except IOError as e:
            if e.errno == errno.EPIPE:
                run_error = "target"
            else:
                raise

        if hmac:
            hmac.stdin.write(buf)

    return run_error


def verify_hmac(filename, hmacfile, passphrase, algorithm):
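    """Verify the HMAC of a single backup file.

    Recomputes the file's HMAC with openssl (keyed with the passphrase) and
    compares it against the one stored in hmacfile. Returns True and
    removes hmacfile on success; raises QubesException on any mismatch.
    """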
    if BACKUP_DEBUG:
        print "Verifying file " + filename

    if hmacfile != filename + ".hmac":
        raise QubesException(
            "ERROR: expected hmac for {}, but got {}".
            format(filename, hmacfile))

    hmac_proc = subprocess.Popen(["openssl", "dgst", "-" + algorithm,
                                  "-hmac", passphrase],
                                 stdin=open(filename, 'rb'),
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    hmac_stdout, hmac_stderr = hmac_proc.communicate()

    if len(hmac_stderr) > 0:
        raise QubesException(
            "ERROR: verify file {0}: {1}".format(filename, hmac_stderr))
    else:
        if BACKUP_DEBUG:
            print "Loading hmac for file " + filename
        hmac = load_hmac(open(hmacfile, 'r').read())

        if len(hmac) > 0 and load_hmac(hmac_stdout) == hmac:
            os.unlink(hmacfile)
            if BACKUP_DEBUG:
                print "File verification OK -> Sending file " + filename
            return True
        else:
            raise QubesException(
                "ERROR: invalid hmac for file {0}: {1}. "
                "Is the passphrase correct?".
                format(filename, load_hmac(hmac_stdout)))
    # Not reachable
    return False


class ExtractWorker2(Process):
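    """Worker process extracting format-version-2 backup archives.

    Consumes chunk file names from a queue, optionally decrypts or
    decompresses each chunk, and feeds the data through a named pipe into a
    single multi-volume 'tar2' process that performs the actual extraction
    (or just verification, with verify_only).
    """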
    def __init__(self, queue, base_dir, passphrase, encrypted, total_size,
                 print_callback, error_callback, progress_callback, vmproc=None,
                 compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
                 verify_only=False):
        super(ExtractWorker2, self).__init__()
        self.queue = queue
        self.base_dir = base_dir
        self.passphrase = passphrase
        self.encrypted = encrypted
        self.compressed = compressed
        self.crypto_algorithm = crypto_algorithm
        self.verify_only = verify_only
        self.total_size = total_size
        self.blocks_backedup = 0
        self.tar2_process = None
        self.tar2_current_file = None
        self.decompressor_process = None
        self.decryptor_process = None

        self.print_callback = print_callback
        self.error_callback = error_callback
        self.progress_callback = progress_callback

        self.vmproc = vmproc

        self.restore_pipe = os.path.join(self.base_dir, "restore_pipe")
        if BACKUP_DEBUG:
            print "Creating pipe in:", self.restore_pipe
        os.mkfifo(self.restore_pipe)

        self.stderr_encoding = sys.stderr.encoding or 'utf-8'

    def compute_progress(self, new_size, _):
        if self.progress_callback:
            self.blocks_backedup += new_size
            progress = self.blocks_backedup / float(self.total_size)
            progress = int(round(progress * 100, 2))
            self.progress_callback(progress)

    def collect_tar_output(self):
        if not self.tar2_process.stderr:
            return

        if self.tar2_process.poll() is None:
            try:
                new_lines = self.tar2_process.stderr \
                    .read(MAX_STDERR_BYTES).splitlines()
            except IOError as e:
                if e.errno == errno.EAGAIN:
                    return
                else:
                    raise
        else:
            new_lines = self.tar2_process.stderr.readlines()

        new_lines = map(lambda x: x.decode(self.stderr_encoding), new_lines)

        if not BACKUP_DEBUG:
            msg_re = re.compile(r".*#[0-9].*restore_pipe")
            new_lines = filter(lambda x: not msg_re.match(x), new_lines)

        self.tar2_stderr += new_lines

    def run(self):
        try:
            self.__run__()
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            # Clean up children
            for process in [self.decompressor_process,
                            self.decryptor_process,
                            self.tar2_process]:
                if process:
                    # FIXME: kill()?
                    try:
                        process.terminate()
                    except OSError:
                        pass
                    process.wait()
            self.error_callback("ERROR: " + unicode(e))
            raise e, None, exc_traceback

    def __run__(self):
        if BACKUP_DEBUG and callable(self.print_callback):
            self.print_callback("Started extracting thread")
            self.print_callback("Moving to dir " + self.base_dir)
        os.chdir(self.base_dir)

        filename = None

        for filename in iter(self.queue.get, None):
            if filename == "FINISHED" or filename == "ERROR":
                break

            if BACKUP_DEBUG and callable(self.print_callback):
                self.print_callback("Extracting file " + filename)

            if filename.endswith('.000'):
                # next file
                if self.tar2_process is not None:
                    if self.tar2_process.wait() != 0:
                        self.collect_tar_output()
                        self.error_callback(
                            "ERROR: unable to extract files for {0}, tar "
                            "output:\n  {1}".
                            format(self.tar2_current_file,
                                   "\n  ".join(self.tar2_stderr)))
                    else:
                        # Finished extracting the tar file
                        self.tar2_process = None
                        self.tar2_current_file = None

                tar2_cmdline = ['tar',
                                '-%sMk%sf' % ("t" if self.verify_only else "x",
                                              "v" if BACKUP_DEBUG else ""),
                                self.restore_pipe,
                                os.path.relpath(filename.rstrip('.000'))]
                if BACKUP_DEBUG and callable(self.print_callback):
                    self.print_callback("Running command " +
                                        unicode(tar2_cmdline))
                self.tar2_process = subprocess.Popen(tar2_cmdline,
                                                     stdin=subprocess.PIPE,
                                                     stderr=subprocess.PIPE)
                fcntl.fcntl(self.tar2_process.stderr.fileno(), fcntl.F_SETFL,
                            fcntl.fcntl(self.tar2_process.stderr.fileno(),
                                        fcntl.F_GETFL) | os.O_NONBLOCK)
                self.tar2_stderr = []
            elif not self.tar2_process:
                # Extraction of the current archive failed, skip to the next
                # archive
                if not BACKUP_DEBUG:
                    os.remove(filename)
                continue
            else:
                self.collect_tar_output()
                if BACKUP_DEBUG and callable(self.print_callback):
                    self.print_callback("Releasing next chunk")
                self.tar2_process.stdin.write("\n")
                self.tar2_process.stdin.flush()
            self.tar2_current_file = filename

            pipe = open(self.restore_pipe, 'wb')
            common_args = {
                'backup_target': pipe,
                'total_backup_sz': self.total_size,
                'hmac': None,
                'vmproc': self.vmproc,
                'addproc': self.tar2_process
            }
            if self.encrypted:
                # Start decryptor
                self.decryptor_process = subprocess.Popen(
                    ["openssl", "enc",
                     "-d",
                     "-" + self.crypto_algorithm,
                     "-pass",
                     "pass:" + self.passphrase] +
                    (["-z"] if self.compressed else []),
                    stdin=open(filename, 'rb'),
                    stdout=subprocess.PIPE)

                run_error = wait_backup_feedback(
                    progress_callback=self.compute_progress,
                    in_stream=self.decryptor_process.stdout,
                    streamproc=self.decryptor_process,
                    **common_args)
            elif self.compressed:
                self.decompressor_process = subprocess.Popen(
                    ["gzip", "-d"],
                    stdin=open(filename, 'rb'),
                    stdout=subprocess.PIPE)

                run_error = wait_backup_feedback(
                    progress_callback=self.compute_progress,
                    in_stream=self.decompressor_process.stdout,
                    streamproc=self.decompressor_process,
                    **common_args)
            else:
                run_error = wait_backup_feedback(
                    progress_callback=self.compute_progress,
                    in_stream=open(filename, "rb"), streamproc=None,
                    **common_args)

            try:
                pipe.close()
            except IOError as e:
                if e.errno == errno.EPIPE:
                    if BACKUP_DEBUG:
                        self.error_callback(
                            "Got EPIPE while closing pipe to "
                            "the inner tar process")
                    # ignore the error
                else:
                    raise
            if len(run_error):
                if run_error == "target":
                    self.collect_tar_output()
                    details = "\n".join(self.tar2_stderr)
                else:
                    details = "%s failed" % run_error
                self.tar2_process.terminate()
                self.tar2_process.wait()
                self.tar2_process = None
                self.error_callback("Error while processing '%s': %s " %
                                    (self.tar2_current_file, details))

            # Delete the file as we don't need it anymore
            if BACKUP_DEBUG and callable(self.print_callback):
                self.print_callback("Removing file " + filename)
            os.remove(filename)

        os.unlink(self.restore_pipe)

        if self.tar2_process is not None:
            if filename == "ERROR":
                self.tar2_process.terminate()
                self.tar2_process.wait()
            elif self.tar2_process.wait() != 0:
                self.collect_tar_output()
                raise QubesException(
                    "unable to extract files for {0}.{1} Tar command "
                    "output: {2}".format(
                        self.tar2_current_file,
                        (" Perhaps the backup is encrypted?"
                         if not self.encrypted else ""),
                        "\n".join(self.tar2_stderr)))
            else:
                # Finished extracting the tar file
                self.tar2_process = None

        if BACKUP_DEBUG and callable(self.print_callback):
            self.print_callback("Finished extracting thread")


class ExtractWorker3(ExtractWorker2):
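    """Extraction worker for format-version-3 backups.

    Unlike ExtractWorker2 it does not use a multi-volume named pipe: each
    chunk is written straight into the stdin of tar (or of the decryptor
    in front of it), and the compression filter recorded in the backup
    header is honored.
    """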
    def __init__(self, queue, base_dir, passphrase, encrypted, total_size,
                 print_callback, error_callback, progress_callback, vmproc=None,
                 compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
                 compression_filter=None, verify_only=False):
        super(ExtractWorker3, self).__init__(queue, base_dir, passphrase,
                                             encrypted, total_size,
                                             print_callback, error_callback,
                                             progress_callback, vmproc,
                                             compressed, crypto_algorithm,
                                             verify_only)
        self.compression_filter = compression_filter
        # the named pipe created by ExtractWorker2.__init__ is not used here
        os.unlink(self.restore_pipe)

    def __run__(self):
        if BACKUP_DEBUG and callable(self.print_callback):
            self.print_callback("Started extracting thread")
            self.print_callback("Moving to dir " + self.base_dir)
        os.chdir(self.base_dir)

        filename = None

        input_pipe = None
        for filename in iter(self.queue.get, None):
            if filename == "FINISHED" or filename == "ERROR":
                break

            if BACKUP_DEBUG and callable(self.print_callback):
                self.print_callback("Extracting file " + filename)

            if filename.endswith('.000'):
                # next file
                if self.tar2_process is not None:
                    input_pipe.close()
                    if self.tar2_process.wait() != 0:
                        self.collect_tar_output()
                        self.error_callback(
                            "ERROR: unable to extract files for {0}, tar "
                            "output:\n  {1}".
                            format(self.tar2_current_file,
                                   "\n  ".join(self.tar2_stderr)))
                    else:
                        # Finished extracting the tar file
                        self.tar2_process = None
                        self.tar2_current_file = None

                tar2_cmdline = ['tar',
                                '-%sk%s' % ("t" if self.verify_only else "x",
                                            "v" if BACKUP_DEBUG else ""),
                                os.path.relpath(filename.rstrip('.000'))]
                if self.compressed:
                    if self.compression_filter:
                        tar2_cmdline.insert(-1,
                                            "--use-compress-program=%s" %
                                            self.compression_filter)
                    else:
                        tar2_cmdline.insert(-1, "--use-compress-program=%s" %
                                            DEFAULT_COMPRESSION_FILTER)

                if BACKUP_DEBUG and callable(self.print_callback):
                    self.print_callback("Running command " +
                                        unicode(tar2_cmdline))
                if self.encrypted:
                    # Start decryptor
                    self.decryptor_process = subprocess.Popen(
                        ["openssl", "enc",
                         "-d",
                         "-" + self.crypto_algorithm,
                         "-pass",
                         "pass:" + self.passphrase],
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE)

                    self.tar2_process = subprocess.Popen(
                        tar2_cmdline,
                        stdin=self.decryptor_process.stdout,
                        stderr=subprocess.PIPE)
                    input_pipe = self.decryptor_process.stdin
                else:
                    self.tar2_process = subprocess.Popen(
                        tar2_cmdline,
                        stdin=subprocess.PIPE,
                        stderr=subprocess.PIPE)
                    input_pipe = self.tar2_process.stdin

                fcntl.fcntl(self.tar2_process.stderr.fileno(), fcntl.F_SETFL,
                            fcntl.fcntl(self.tar2_process.stderr.fileno(),
                                        fcntl.F_GETFL) | os.O_NONBLOCK)
                self.tar2_stderr = []
            elif not self.tar2_process:
                # Extraction of the current archive failed, skip to the next
                # archive
                if not BACKUP_DEBUG:
                    os.remove(filename)
                continue
            else:
                if BACKUP_DEBUG and callable(self.print_callback):
                    self.print_callback("Releasing next chunk")
            self.tar2_current_file = filename

            common_args = {
                'backup_target': input_pipe,
                'total_backup_sz': self.total_size,
                'hmac': None,
                'vmproc': self.vmproc,
                'addproc': self.tar2_process
            }

            run_error = wait_backup_feedback(
                progress_callback=self.compute_progress,
                in_stream=open(filename, "rb"), streamproc=None,
                **common_args)

            if len(run_error):
                if run_error == "target":
                    self.collect_tar_output()
                    details = "\n".join(self.tar2_stderr)
                else:
                    details = "%s failed" % run_error
                if self.decryptor_process:
                    self.decryptor_process.terminate()
                    self.decryptor_process.wait()
                    self.decryptor_process = None
                self.tar2_process.terminate()
                self.tar2_process.wait()
                self.tar2_process = None
                self.error_callback("Error while processing '%s': %s " %
                                    (self.tar2_current_file, details))

            # Delete the file as we don't need it anymore
            if BACKUP_DEBUG and callable(self.print_callback):
                self.print_callback("Removing file " + filename)
            os.remove(filename)

        if self.tar2_process is not None:
            input_pipe.close()
            if filename == "ERROR":
                if self.decryptor_process:
                    self.decryptor_process.terminate()
                    self.decryptor_process.wait()
                    self.decryptor_process = None
                self.tar2_process.terminate()
                self.tar2_process.wait()
            elif self.tar2_process.wait() != 0:
                self.collect_tar_output()
                raise QubesException(
                    "unable to extract files for {0}.{1} Tar command "
                    "output: {2}".format(
                        self.tar2_current_file,
                        (" Perhaps the backup is encrypted?"
                         if not self.encrypted else ""),
                        "\n".join(self.tar2_stderr)))
            else:
                # Finished extracting the tar file
                self.tar2_process = None

        if BACKUP_DEBUG and callable(self.print_callback):
            self.print_callback("Finished extracting thread")


def get_supported_hmac_algo(hmac_algorithm):
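    """Yield candidate HMAC algorithm names.

    Starts with the provided default (if any), then yields every
    message-digest algorithm the local openssl reports as supported.
    """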
    # Start with provided default
    if hmac_algorithm:
        yield hmac_algorithm
    proc = subprocess.Popen(['openssl', 'list-message-digest-algorithms'],
                            stdout=subprocess.PIPE)
    for algo in proc.stdout.readlines():
        if '=>' in algo:
            continue
        yield algo.strip()
    proc.wait()


def parse_backup_header(filename):
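    """Parse a backup-header file into a dict.

    Each line must be a single key=value pair; known boolean and integer
    options are coerced to their proper types, unknown keys are ignored.
    """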
header_data = {}
|
|
|
|
with open(filename, 'r') as f:
|
|
|
|
for line in f.readlines():
|
|
|
|
if line.count('=') != 1:
|
|
|
|
raise QubesException("Invalid backup header (line %s)" % line)
|
|
|
|
(key, value) = line.strip().split('=')
|
|
|
|
if not any([key == getattr(BackupHeader, attr) for attr in dir(
|
|
|
|
BackupHeader)]):
|
|
|
|
# Ignoring unknown option
|
|
|
|
continue
|
|
|
|
if key in BackupHeader.bool_options:
|
|
|
|
value = value.lower() in ["1", "true", "yes"]
|
2014-09-26 03:24:19 +02:00
|
|
|
elif key in BackupHeader.int_options:
|
|
|
|
value = int(value)
|
2014-01-15 03:53:45 +01:00
|
|
|
header_data[key] = value
|
|
|
|
return header_data
|
|
|
|
|
2013-11-25 05:41:13 +01:00
|
|
|
|
2015-05-03 14:45:01 +02:00
|
|
|
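# restore_vm_dirs() drives the first stage of a restore: it launches the outer
# untar (or qfile-dom0-unpacker when reading from an AppVM), verifies the HMAC
# of every received chunk, and feeds the verified chunks through a queue to an
# ExtractWorker process, which decrypts/decompresses and unpacks them under
# restore_tmpdir.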
def restore_vm_dirs(backup_source, restore_tmpdir, passphrase, vms_dirs, vms,
                    vms_size, print_callback=None, error_callback=None,
                    progress_callback=None, encrypted=False, appvm=None,
                    compressed=False, hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
                    crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
                    verify_only=False,
                    format_version=CURRENT_BACKUP_FORMAT_VERSION,
                    compression_filter=None):
    global running_backup_operation

    if callable(print_callback):
        if BACKUP_DEBUG:
            print_callback("Working in temporary dir: " + restore_tmpdir)
        print_callback(
            "Extracting data: " + size_to_human(vms_size) + " to restore")

    passphrase = passphrase.encode('utf-8')
    header_data = None
    vmproc = None
    if appvm is not None:
        # Prepare the backup target (Qubes service call)
        backup_target = "QUBESRPC qubes.Restore dom0"

        # If APPVM, STDOUT is a PIPE
        vmproc = appvm.run(command=backup_target, passio_popen=True,
                           passio_stderr=True)
        vmproc.stdin.write(
            backup_source.replace("\r", "").replace("\n", "") + "\n")

        # Send to tar2qfile the VMs that should be extracted
        vmproc.stdin.write(" ".join(vms_dirs) + "\n")
        if running_backup_operation:
            running_backup_operation.processes_to_kill_on_cancel.append(vmproc)

        backup_stdin = vmproc.stdout
        tar1_command = ['/usr/libexec/qubes/qfile-dom0-unpacker',
                        str(os.getuid()), restore_tmpdir, '-v']
    else:
        backup_stdin = open(backup_source, 'rb')

        tar1_command = ['tar',
                        '-ixvf', backup_source,
                        '-C', restore_tmpdir] + vms_dirs

    tar1_env = os.environ.copy()
    # TODO: add some safety margin?
    tar1_env['UPDATES_MAX_BYTES'] = str(vms_size)
    # Restoring only the header
    if vms_dirs and vms_dirs[0] == HEADER_FILENAME:
        # backup-header, backup-header.hmac, qubes-xml.000, qubes-xml.000.hmac
        tar1_env['UPDATES_MAX_FILES'] = '4'
    else:
        # Currently each VM consists of at most 7 archives (count
        # file_to_backup calls in backup_prepare()), but add some safety
        # margin for further extensions. Each archive is divided into 100MB
        # chunks and each file has its own hmac file. So assume the upper
        # limit is 2*(10*COUNT_OF_VMS+TOTAL_SIZE/100MB)
        tar1_env['UPDATES_MAX_FILES'] = str(2 * (10 * len(vms_dirs) +
                                                 int(vms_size /
                                                     (100 * 1024 * 1024))))
    if BACKUP_DEBUG and callable(print_callback):
        print_callback("Run command " + unicode(tar1_command))
    command = subprocess.Popen(
        tar1_command,
        stdin=backup_stdin,
        stdout=vmproc.stdin if vmproc else subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=tar1_env)
    if running_backup_operation:
        running_backup_operation.processes_to_kill_on_cancel.append(command)

    # qfile-dom0-unpacker outputs the file list on stderr (and has stdout
    # connected to the VM), while tar outputs the file list on stdout
    if appvm:
        filelist_pipe = command.stderr
        # let qfile-dom0-unpacker hold the only open FD to the write end of
        # the pipe, otherwise qrexec-client will not receive EOF when
        # qfile-dom0-unpacker terminates
        vmproc.stdin.close()
    else:
        filelist_pipe = command.stdout

    expect_tar_error = False

    to_extract = Queue()
    nextfile = None

    # If the backup header needs to be analyzed, do it now
    if vms_dirs and vms_dirs[0] == HEADER_FILENAME:
        filename = filelist_pipe.readline().strip()
        hmacfile = filelist_pipe.readline().strip()
        if not appvm:
            nextfile = filelist_pipe.readline().strip()

        if BACKUP_DEBUG and callable(print_callback):
            print_callback("Got backup header and hmac: %s, %s" % (filename,
                                                                   hmacfile))

        if not filename or filename == "EOF" or \
                not hmacfile or hmacfile == "EOF":
            if appvm:
                vmproc.wait()
                proc_error_msg = vmproc.stderr.read(MAX_STDERR_BYTES)
            else:
                command.wait()
                proc_error_msg = command.stderr.read(MAX_STDERR_BYTES)
            raise QubesException("Premature end of archive while receiving "
                                 "backup header. Process output:\n" +
                                 proc_error_msg)
        filename = os.path.join(restore_tmpdir, filename)
        hmacfile = os.path.join(restore_tmpdir, hmacfile)
        file_ok = False
        for hmac_algo in get_supported_hmac_algo(hmac_algorithm):
            try:
                if verify_hmac(filename, hmacfile, passphrase, hmac_algo):
                    file_ok = True
                    hmac_algorithm = hmac_algo
                    break
            except QubesException:
                # Ignore the exception here, try the next algo
                pass
        if not file_ok:
            raise QubesException("Corrupted backup header (hmac verification "
                                 "failed). Is the password correct?")
        if os.path.basename(filename) == HEADER_FILENAME:
            header_data = parse_backup_header(filename)
            if BackupHeader.version in header_data:
                format_version = header_data[BackupHeader.version]
            if BackupHeader.crypto_algorithm in header_data:
                crypto_algorithm = header_data[BackupHeader.crypto_algorithm]
            if BackupHeader.hmac_algorithm in header_data:
                hmac_algorithm = header_data[BackupHeader.hmac_algorithm]
            if BackupHeader.compressed in header_data:
                compressed = header_data[BackupHeader.compressed]
            if BackupHeader.encrypted in header_data:
                encrypted = header_data[BackupHeader.encrypted]
            if BackupHeader.compression_filter in header_data:
                compression_filter = header_data[
                    BackupHeader.compression_filter]
            os.unlink(filename)
        else:
            # if no header was found, create one with the guessed HMAC algo
            header_data = {BackupHeader.hmac_algorithm: hmac_algorithm}
            # If this isn't the backup header, pass it to ExtractWorker
            to_extract.put(filename)
            # When tar does not find an expected file in the archive, it
            # exits with code 2. This will happen here because we've
            # requested the backup-header file, but the archive does not
            # contain it. Ignore this particular error.
            if not appvm:
                expect_tar_error = True

    # Set up a worker to extract encrypted data chunks to the restore dirs.
    # Create the process here to pass it options extracted from the backup
    # header.
    extractor_params = {
        'queue': to_extract,
        'base_dir': restore_tmpdir,
        'passphrase': passphrase,
        'encrypted': encrypted,
        'compressed': compressed,
        'crypto_algorithm': crypto_algorithm,
        'verify_only': verify_only,
        'total_size': vms_size,
        'print_callback': print_callback,
        'error_callback': error_callback,
        'progress_callback': progress_callback,
    }
    if format_version == 2:
        extract_proc = ExtractWorker2(**extractor_params)
    elif format_version == 3:
        extractor_params['compression_filter'] = compression_filter
        extract_proc = ExtractWorker3(**extractor_params)
    else:
        raise NotImplementedError(
            "Backup format version %d not supported" % format_version)
    extract_proc.start()

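    # Main receive loop. The file list arrives as pairs of lines (a chunk
    # filename followed by its .hmac filename); an empty line or the literal
    # "EOF" marks the end of the stream. Verified chunks are queued for the
    # ExtractWorker; "FINISHED"/"ERROR" are sentinels telling the worker to
    # stop.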
    try:
        filename = None
        while True:
            if running_backup_operation and running_backup_operation.canceled:
                break
            if not extract_proc.is_alive():
                command.terminate()
                command.wait()
                expect_tar_error = True
                if vmproc:
                    vmproc.terminate()
                    vmproc.wait()
                    vmproc = None
                break
            if nextfile is not None:
                filename = nextfile
            else:
                filename = filelist_pipe.readline().strip()

            if BACKUP_DEBUG and callable(print_callback):
                print_callback("Getting new file: " + filename)

            if not filename or filename == "EOF":
                break

            hmacfile = filelist_pipe.readline().strip()

            if running_backup_operation and running_backup_operation.canceled:
                break
            # if reading the archive directly with tar, wait for the next
            # filename - tar prints a filename before processing it, so wait
            # for the next one to be sure that the whole file was extracted
            if not appvm:
                nextfile = filelist_pipe.readline().strip()

            if BACKUP_DEBUG and callable(print_callback):
                print_callback("Getting hmac: " + hmacfile)
            if not hmacfile or hmacfile == "EOF":
                # Premature end of archive - either tar1_command or vmproc
                # exited with an error
                break

            if not any(map(lambda x: filename.startswith(x), vms_dirs)):
                if BACKUP_DEBUG and callable(print_callback):
                    print_callback("Ignoring VM not selected for restore")
                os.unlink(os.path.join(restore_tmpdir, filename))
                os.unlink(os.path.join(restore_tmpdir, hmacfile))
                continue

            if verify_hmac(os.path.join(restore_tmpdir, filename),
                           os.path.join(restore_tmpdir, hmacfile),
                           passphrase, hmac_algorithm):
                to_extract.put(os.path.join(restore_tmpdir, filename))

        if running_backup_operation and running_backup_operation.canceled:
            raise BackupCanceledError("Restore canceled",
                                      tmpdir=restore_tmpdir)

        if command.wait() != 0 and not expect_tar_error:
            raise QubesException(
                "unable to read the qubes backup file {0} ({1}). "
                "Is it really a backup?".format(backup_source, command.wait()))
        if vmproc:
            if vmproc.wait() != 0:
                raise QubesException(
                    "unable to read the qubes backup {0} "
                    "because of a VM error: {1}".format(
                        backup_source, vmproc.stderr.read(MAX_STDERR_BYTES)))

        if filename and filename != "EOF":
            raise QubesException(
                "Premature end of archive, the last file was %s" % filename)
    except:
        to_extract.put("ERROR")
        extract_proc.join()
        raise
    else:
        to_extract.put("FINISHED")

    if BACKUP_DEBUG and callable(print_callback):
        print_callback("Waiting for the extraction process to finish...")
    extract_proc.join()
    if BACKUP_DEBUG and callable(print_callback):
        print_callback("Extraction process finished with code: " +
                       str(extract_proc.exitcode))
    if extract_proc.exitcode != 0:
        raise QubesException(
            "unable to extract the qubes backup. "
            "Check extracting process errors.")

    return header_data


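# backup_restore_set_defaults() fills in a default for every restore option
# the caller did not set, so the code below can assume all keys are present.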
def backup_restore_set_defaults(options):
    if 'use-default-netvm' not in options:
        options['use-default-netvm'] = False
    if 'use-none-netvm' not in options:
        options['use-none-netvm'] = False
    if 'use-default-template' not in options:
        options['use-default-template'] = False
    if 'dom0-home' not in options:
        options['dom0-home'] = True
    if 'replace-template' not in options:
        options['replace-template'] = []
    if 'ignore-username-mismatch' not in options:
        options['ignore-username-mismatch'] = False
    if 'verify-only' not in options:
        options['verify-only'] = False

    return options


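# load_hmac() extracts the hex digest from "openssl dgst"-style output; e.g.
# a line like "HMAC-SHA512(backup-header)= 61c5..." (illustrative) yields
# "61c5...".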
def load_hmac(hmac):
    hmac = hmac.strip().split("=")
    if len(hmac) > 1:
        hmac = hmac[1].strip()
    else:
        raise QubesException("ERROR: invalid hmac file content")

    return hmac


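# backup_detect_format_version() distinguishes format 1 (a plain directory
# containing qubes.xml) from 2+ (a single archive); versions 2 and 3 can only
# be told apart later, from the backup header.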
def backup_detect_format_version(backup_location):
    if os.path.exists(os.path.join(backup_location, 'qubes.xml')):
        return 1
    else:
        # this could also mean 3, but the two are not distinguishable until
        # the backup header is read
        return 2


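# backup_restore_header() extracts just the backup header and qubes.xml into
# a fresh temporary directory, so the restore can be planned before any VM
# data is transferred (format 1 backups are plain directories and need no
# extraction).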
def backup_restore_header(source, passphrase,
                          print_callback=print_stdout,
                          error_callback=print_stderr,
                          encrypted=False, appvm=None, compressed=False,
                          format_version=None,
                          hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
                          crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM):
    global running_backup_operation
    running_backup_operation = None

    restore_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/restore_")

    if format_version is None:
        format_version = backup_detect_format_version(source)

    if format_version == 1:
        return restore_tmpdir, os.path.join(source, 'qubes.xml'), None

    # tar2qfile matches only path beginnings, while tar matches the full path
    if appvm:
        extract_filter = [HEADER_FILENAME, 'qubes.xml.000']
    else:
        extract_filter = [HEADER_FILENAME, HEADER_FILENAME + '.hmac',
                          'qubes.xml.000', 'qubes.xml.000.hmac']

    header_data = restore_vm_dirs(source,
                                  restore_tmpdir,
                                  passphrase=passphrase,
                                  vms_dirs=extract_filter,
                                  vms=None,
                                  vms_size=HEADER_QUBES_XML_MAX_SIZE,
                                  format_version=format_version,
                                  hmac_algorithm=hmac_algorithm,
                                  crypto_algorithm=crypto_algorithm,
                                  print_callback=print_callback,
                                  error_callback=error_callback,
                                  progress_callback=None,
                                  encrypted=encrypted,
                                  compressed=compressed,
                                  appvm=appvm)

    return (restore_tmpdir, os.path.join(restore_tmpdir, "qubes.xml"),
            header_data)


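# restore_info_verify() annotates every VM entry in restore_info with flags
# like 'excluded', 'already-exists', 'missing-template' and 'missing-netvm',
# then derives 'good-to-go' from them. It can be called repeatedly - stale
# flags are removed before re-checking.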
def restore_info_verify(restore_info, host_collection):
    options = restore_info['$OPTIONS$']
    for vm in restore_info.keys():
        if vm in ['$OPTIONS$', 'dom0']:
            continue

        vm_info = restore_info[vm]

        vm_info.pop('excluded', None)
        if 'exclude' in options.keys():
            if vm in options['exclude']:
                vm_info['excluded'] = True

        vm_info.pop('already-exists', None)
        if not options['verify-only'] and \
                host_collection.get_vm_by_name(vm) is not None:
            vm_info['already-exists'] = True

        # check the template
        vm_info.pop('missing-template', None)
        if vm_info['template']:
            template_name = vm_info['template']
            host_template = host_collection.get_vm_by_name(template_name)
            if not host_template or not host_template.is_template():
                # Maybe the (custom) template is in the backup?
                if not (template_name in restore_info.keys() and
                        restore_info[template_name]['vm'].is_template()):
                    if options['use-default-template']:
                        if 'orig-template' not in vm_info.keys():
                            vm_info['orig-template'] = template_name
                        vm_info['template'] = host_collection \
                            .get_default_template().name
                    else:
                        vm_info['missing-template'] = True

        # check the netvm
        vm_info.pop('missing-netvm', None)
        if vm_info['netvm']:
            netvm_name = vm_info['netvm']

            netvm_on_host = host_collection.get_vm_by_name(netvm_name)

            # No netvm on the host?
            if not ((netvm_on_host is not None) and netvm_on_host.is_netvm()):

                # Maybe the (custom) netvm is in the backup?
                if not (netvm_name in restore_info.keys() and
                        restore_info[netvm_name]['vm'].is_netvm()):
                    if options['use-default-netvm']:
                        vm_info['netvm'] = host_collection \
                            .get_default_netvm().name
                        vm_info['vm'].uses_default_netvm = True
                    elif options['use-none-netvm']:
                        vm_info['netvm'] = None
                    else:
                        vm_info['missing-netvm'] = True

        vm_info['good-to-go'] = not any([(prop in vm_info.keys()) for
                                         prop in ['missing-netvm',
                                                  'missing-template',
                                                  'already-exists',
                                                  'excluded']])

    return restore_info


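# backup_restore_prepare() reads the backup header and qubes.xml, then builds
# the restore_info dict (one entry per VM, plus '$OPTIONS$' and optionally
# 'dom0') that backup_restore_do() consumes later.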
def backup_restore_prepare(backup_location, passphrase, options=None,
                           host_collection=None, encrypted=False, appvm=None,
                           compressed=False, print_callback=print_stdout,
                           error_callback=print_stderr,
                           format_version=None,
                           hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
                           crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM):
    if options is None:
        options = {}
    # Defaults
    backup_restore_set_defaults(options)
    # Options introduced in backup format 3+, which always has a header,
    # so there is no need for a fallback in the function parameters
    compression_filter = DEFAULT_COMPRESSION_FILTER

    # Private functions begin
    def is_vm_included_in_backup_v1(backup_dir, check_vm):
        if check_vm.qid == 0:
            return os.path.exists(os.path.join(backup_dir, 'dom0-home'))

        backup_vm_dir_path = check_vm.dir_path.replace(
            system_path["qubes_base_dir"], backup_dir)

        if os.path.exists(backup_vm_dir_path):
            return True
        else:
            return False

    def is_vm_included_in_backup_v2(_, check_vm):
        if check_vm.backup_content:
            return True
        else:
            return False

    def find_template_name(template, replaces):
        rx_replace = re.compile("(.*):(.*)")
        for r in replaces:
            m = rx_replace.match(r)
            if m.group(1) == template:
                return m.group(2)

        return template
    # Private functions end

    # Format versions:
    # 1 - Qubes R1, Qubes R2 beta1, beta2
    # 2 - Qubes R2 beta3+

    if format_version is None:
        format_version = backup_detect_format_version(backup_location)

    if format_version == 1:
        is_vm_included_in_backup = is_vm_included_in_backup_v1
    elif format_version in [2, 3]:
        is_vm_included_in_backup = is_vm_included_in_backup_v2
        if not appvm:
            if not os.path.isfile(backup_location):
                raise QubesException("Invalid backup location (not a file or "
                                     "directory with qubes.xml)"
                                     ": %s" % unicode(backup_location))
    else:
        raise QubesException(
            "Unknown backup format version: %s" % str(format_version))

    (restore_tmpdir, qubes_xml, header_data) = backup_restore_header(
        backup_location,
        passphrase,
        encrypted=encrypted,
        appvm=appvm,
        compressed=compressed,
        hmac_algorithm=hmac_algorithm,
        crypto_algorithm=crypto_algorithm,
        print_callback=print_callback,
        error_callback=error_callback,
        format_version=format_version)

    if header_data:
        if BackupHeader.version in header_data:
            format_version = header_data[BackupHeader.version]
        if BackupHeader.crypto_algorithm in header_data:
            crypto_algorithm = header_data[BackupHeader.crypto_algorithm]
        if BackupHeader.hmac_algorithm in header_data:
            hmac_algorithm = header_data[BackupHeader.hmac_algorithm]
        if BackupHeader.compressed in header_data:
            compressed = header_data[BackupHeader.compressed]
        if BackupHeader.encrypted in header_data:
            encrypted = header_data[BackupHeader.encrypted]
        if BackupHeader.compression_filter in header_data:
            compression_filter = header_data[BackupHeader.compression_filter]

    if BACKUP_DEBUG:
        print "Loading file", qubes_xml
    backup_collection = QubesVmCollection(store_filename=qubes_xml)
    backup_collection.lock_db_for_reading()
    backup_collection.load()

    if host_collection is None:
        host_collection = QubesVmCollection()
        host_collection.lock_db_for_reading()
        host_collection.load()
        host_collection.unlock_db()

    backup_vms_list = [vm for vm in backup_collection.values()]
    vms_to_restore = {}

    # ... and the actual data
    for vm in backup_vms_list:
        if vm.qid == 0:
            # Handle dom0 as a special case later
            continue
        if is_vm_included_in_backup(backup_location, vm):
            if BACKUP_DEBUG:
                print vm.name, "is included in backup"

            vms_to_restore[vm.name] = {}
            vms_to_restore[vm.name]['vm'] = vm

            if vm.template is None:
                vms_to_restore[vm.name]['template'] = None
            else:
                templatevm_name = find_template_name(vm.template.name, options[
                    'replace-template'])
                vms_to_restore[vm.name]['template'] = templatevm_name

            if vm.netvm is None:
                vms_to_restore[vm.name]['netvm'] = None
            else:
                netvm_name = vm.netvm.name
                vms_to_restore[vm.name]['netvm'] = netvm_name
                # Set to None to not confuse the QubesVm object from the
                # backup collection with the host collection (further in
                # clone_attrs). Set _netvm directly to suppress the setter
                # action, especially modifying the firewall
                vm._netvm = None

    # Store restore parameters
    options['location'] = backup_location
    options['restore_tmpdir'] = restore_tmpdir
    options['passphrase'] = passphrase
    options['encrypted'] = encrypted
    options['compressed'] = compressed
    options['compression_filter'] = compression_filter
    options['hmac_algorithm'] = hmac_algorithm
    options['crypto_algorithm'] = crypto_algorithm
    options['appvm'] = appvm
    options['format_version'] = format_version
    vms_to_restore['$OPTIONS$'] = options

    vms_to_restore = restore_info_verify(vms_to_restore, host_collection)

    # ...and dom0 home
    if options['dom0-home'] and \
            is_vm_included_in_backup(backup_location, backup_collection[0]):
        vm = backup_collection[0]
        vms_to_restore['dom0'] = {}
        if format_version == 1:
            vms_to_restore['dom0']['subdir'] = \
                os.listdir(os.path.join(backup_location, 'dom0-home'))[0]
            vms_to_restore['dom0']['size'] = 0  # unknown
        else:
            vms_to_restore['dom0']['subdir'] = vm.backup_path
            vms_to_restore['dom0']['size'] = vm.backup_size
        local_user = grp.getgrnam('qubes').gr_mem[0]

        dom0_home = vms_to_restore['dom0']['subdir']

        vms_to_restore['dom0']['username'] = os.path.basename(dom0_home)
        if vms_to_restore['dom0']['username'] != local_user:
            vms_to_restore['dom0']['username-mismatch'] = True
            if options['ignore-username-mismatch']:
                vms_to_restore['dom0']['ignore-username-mismatch'] = True
            else:
                vms_to_restore['dom0']['good-to-go'] = False

        if 'good-to-go' not in vms_to_restore['dom0']:
            vms_to_restore['dom0']['good-to-go'] = True

    # Not needed - all the data is stored in vms_to_restore
    if format_version >= 2:
        os.unlink(qubes_xml)
    return vms_to_restore


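# backup_restore_print_summary() renders restore_info as an ASCII table. Each
# column is described by a "func" expression evaluated with eval() against
# the current vm/vm_info, which is why the loops below assign a seemingly
# unused vm variable.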
def backup_restore_print_summary(restore_info, print_callback=print_stdout):
    fields = {
        "qid": {"func": "vm.qid"},

        "name": {"func": "('[' if vm.is_template() else '')\
                 + ('{' if vm.is_netvm() else '')\
                 + vm.name \
                 + (']' if vm.is_template() else '')\
                 + ('}' if vm.is_netvm() else '')"},

        "type": {"func": "'Tpl' if vm.is_template() else \
                 'HVM' if vm.type == 'HVM' else \
                 vm.type.replace('VM','')"},

        "updbl": {"func": "'Yes' if vm.updateable else ''"},

        "template": {"func": "'n/a' if vm.is_template() or vm.template is None else\
                     vm_info['template']"},

        "netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
                  ('*' if vm.uses_default_netvm else '') +\
                  vm_info['netvm'] if vm_info['netvm'] is not None else '-'"},

        "label": {"func": "vm.label.name"},
    }

    fields_to_display = ["name", "type", "template", "updbl", "netvm", "label"]

    # First calculate the maximum width of each field we want to display
    total_width = 0
    for f in fields_to_display:
        fields[f]["max_width"] = len(f)
        for vm_info in restore_info.values():
            if 'vm' in vm_info.keys():
                # noinspection PyUnusedLocal
                vm = vm_info['vm']
                l = len(unicode(eval(fields[f]["func"])))
                if l > fields[f]["max_width"]:
                    fields[f]["max_width"] = l
        total_width += fields[f]["max_width"]

    print_callback("")
    print_callback("The following VMs are included in the backup:")
    print_callback("")

    # Display the header
    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
        s += fmt.format('-')
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt = "{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
        s += fmt.format(f)
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt = "{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    for vm_info in restore_info.values():
        # Skip non-VM entries here
        if 'vm' not in vm_info:
            continue
        # noinspection PyUnusedLocal
        vm = vm_info['vm']
        s = ""
        for f in fields_to_display:
            fmt = "{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
            s += fmt.format(eval(fields[f]["func"]))

        if 'excluded' in vm_info and vm_info['excluded']:
            s += " <-- Excluded from restore"
        elif 'already-exists' in vm_info:
            s += " <-- A VM with the same name already exists on the host!"
        elif 'missing-template' in vm_info:
            s += " <-- No matching template on the host or in the backup found!"
        elif 'missing-netvm' in vm_info:
            s += " <-- No matching netvm on the host or in the backup found!"
        elif 'orig-template' in vm_info:
            s += " <-- Original template was '%s'" % (vm_info['orig-template'])

        print_callback(s)

    if 'dom0' in restore_info.keys():
        s = ""
        for f in fields_to_display:
            fmt = "{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
            if f == "name":
                s += fmt.format("Dom0")
            elif f == "type":
                s += fmt.format("Home")
            else:
                s += fmt.format("")
        if 'username-mismatch' in restore_info['dom0']:
            s += " <-- username mismatch between backup and dom0"
            if 'ignore-username-mismatch' in restore_info['dom0']:
                s += " (ignored)"

        print_callback(s)


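# backup_restore_do() performs the restore planned by backup_restore_prepare():
# it extracts the data, registers VMs in class load order (so templates come
# before VMs based on them), fixes up netvm settings, and finally restores the
# dom0 home directory.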
def backup_restore_do(restore_info,
                      host_collection=None, print_callback=print_stdout,
                      error_callback=print_stderr, progress_callback=None):
    global running_backup_operation

    # Private functions begin
    def restore_vm_dir_v1(backup_dir, src_dir, dst_dir):

        backup_src_dir = src_dir.replace(system_path["qubes_base_dir"],
                                         backup_dir)

        # We prefer to use Linux's cp, because it nicely handles sparse files
        cp_retcode = subprocess.call(["cp", "-rp", backup_src_dir, dst_dir])
        if cp_retcode != 0:
            raise QubesException(
                "*** Error while copying file {0} to {1}".format(backup_src_dir,
                                                                 dst_dir))
    # Private functions end

    options = restore_info['$OPTIONS$']
    backup_location = options['location']
    restore_tmpdir = options['restore_tmpdir']
    passphrase = options['passphrase']
    encrypted = options['encrypted']
    compressed = options['compressed']
    compression_filter = options['compression_filter']
    hmac_algorithm = options['hmac_algorithm']
    crypto_algorithm = options['crypto_algorithm']
    verify_only = options['verify-only']
    appvm = options['appvm']
    format_version = options['format_version']

    if format_version is None:
        format_version = backup_detect_format_version(backup_location)

    lock_obtained = False
    if host_collection is None:
        host_collection = QubesVmCollection()
        host_collection.lock_db_for_writing()
        host_collection.load()
        lock_obtained = True

    # Perform VM restoration in backup order
    vms_dirs = []
    vms_size = 0
    vms = {}
    for vm_info in restore_info.values():
        if 'vm' not in vm_info:
            continue
        if not vm_info['good-to-go']:
            continue
        vm = vm_info['vm']
        if format_version >= 2:
            vms_size += vm.backup_size
            vms_dirs.append(vm.backup_path)
        vms[vm.name] = vm

    running_backup_operation = BackupOperationInfo()

    if format_version >= 2:
        if 'dom0' in restore_info.keys() and restore_info['dom0']['good-to-go']:
            vms_dirs.append(os.path.dirname(restore_info['dom0']['subdir']))
            vms_size += restore_info['dom0']['size']

        try:
            restore_vm_dirs(backup_location,
                            restore_tmpdir,
                            passphrase=passphrase,
                            vms_dirs=vms_dirs,
                            vms=vms,
                            vms_size=vms_size,
                            format_version=format_version,
                            hmac_algorithm=hmac_algorithm,
                            crypto_algorithm=crypto_algorithm,
                            verify_only=verify_only,
                            print_callback=print_callback,
                            error_callback=error_callback,
                            progress_callback=progress_callback,
                            encrypted=encrypted,
                            compressed=compressed,
                            compression_filter=compression_filter,
                            appvm=appvm)
        except QubesException:
            if verify_only:
                raise
            else:
                if callable(print_callback):
                    print_callback(
                        "Some errors occurred during data extraction, "
                        "continuing anyway to restore at least some "
                        "VMs")
    else:
        if verify_only:
            if callable(print_callback):
                print_callback("WARNING: Backup verification not supported for "
                               "this backup format.")

    if verify_only:
        shutil.rmtree(restore_tmpdir)
        return

    # Add VMs in the right order
    for (vm_class_name, vm_class) in sorted(QubesVmClasses.items(),
                                            key=lambda _x: _x[1].load_order):
        if running_backup_operation.canceled:
            break
        for vm in vms.values():
            if running_backup_operation.canceled:
                # only break the loop to save qubes.xml with the already
                # restored VMs
                break
            if not vm.__class__ == vm_class:
                continue
            if callable(print_callback):
                print_callback("-> Restoring {type} {0}...".
                               format(vm.name, type=vm_class_name))
            retcode = subprocess.call(
                ["mkdir", "-p", os.path.dirname(vm.dir_path)])
            if retcode != 0:
                error_callback("*** Cannot create directory: {0}?!".format(
                    vm.dir_path))
                error_callback("Skipping...")
                continue

            template = None
            if vm.template is not None:
                template_name = restore_info[vm.name]['template']
                template = host_collection.get_vm_by_name(template_name)

            new_vm = None

            try:
                new_vm = host_collection.add_new_vm(vm_class_name, name=vm.name,
                                                    conf_file=vm.conf_file,
                                                    dir_path=vm.dir_path,
                                                    template=template,
                                                    installed_by_rpm=False)

                if format_version == 1:
                    restore_vm_dir_v1(backup_location,
                                      vm.dir_path,
                                      os.path.dirname(new_vm.dir_path))
                elif format_version >= 2:
                    shutil.move(os.path.join(restore_tmpdir, vm.backup_path),
                                new_vm.dir_path)

                new_vm.verify_files()
            except Exception as err:
                error_callback("ERROR: {0}".format(err))
                error_callback("*** Skipping VM: {0}".format(vm.name))
                if new_vm:
                    host_collection.pop(new_vm.qid)
                continue

            # FIXME: cannot check for the 'kernel' property, because it is
            # always defined - accessing it touches the non-existent '_kernel'
            if not isinstance(vm, QubesVmClasses['QubesHVm']):
                # TODO: add a setting for this?
                if vm.kernel and vm.kernel not in \
                        os.listdir(system_path['qubes_kernels_base_dir']):
                    if callable(print_callback):
                        print_callback("WARNING: Kernel %s not installed, "
                                       "using default one" % vm.kernel)
                    vm.uses_default_kernel = True
                    vm.kernel = host_collection.get_default_kernel()
            try:
                new_vm.clone_attrs(vm)
            except Exception as err:
                error_callback("ERROR: {0}".format(err))
                error_callback("*** Some VM properties will not be restored")

            try:
                new_vm.appmenus_create(verbose=callable(print_callback))
            except Exception as err:
                error_callback("ERROR during appmenu restore: {0}".format(err))
                error_callback(
                    "*** VM '{0}' will not have appmenus".format(vm.name))

    # Set network dependencies - only non-default netvm settings
    for vm in vms.values():
        host_vm = host_collection.get_vm_by_name(vm.name)
        if host_vm is None:
            # Failed/skipped VM
            continue

        if not vm.uses_default_netvm:
            if restore_info[vm.name]['netvm'] is not None:
                host_vm.netvm = host_collection.get_vm_by_name(
                    restore_info[vm.name]['netvm'])
            else:
                host_vm.netvm = None

    host_collection.save()
    if lock_obtained:
        host_collection.unlock_db()

    if running_backup_operation.canceled:
        if format_version >= 2:
            raise BackupCanceledError("Restore canceled",
                                      tmpdir=restore_tmpdir)
        else:
            raise BackupCanceledError("Restore canceled")

    # ... and dom0 home as the last step
    if 'dom0' in restore_info.keys() and restore_info['dom0']['good-to-go']:
        backup_path = restore_info['dom0']['subdir']
        local_user = grp.getgrnam('qubes').gr_mem[0]
        home_dir = pwd.getpwnam(local_user).pw_dir
        if format_version == 1:
            backup_dom0_home_dir = os.path.join(backup_location, backup_path)
        else:
            backup_dom0_home_dir = os.path.join(restore_tmpdir, backup_path)
        restore_home_backupdir = "home-pre-restore-{0}".format(
            time.strftime("%Y-%m-%d-%H%M%S"))

        if callable(print_callback):
            print_callback(
                "-> Restoring home of user '{0}'...".format(local_user))
            print_callback(
                "--> Existing files/dirs backed up in '{0}' dir".format(
                    restore_home_backupdir))
        os.mkdir(home_dir + '/' + restore_home_backupdir)
        for f in os.listdir(backup_dom0_home_dir):
            home_file = home_dir + '/' + f
            if os.path.exists(home_file):
                os.rename(home_file,
                          home_dir + '/' + restore_home_backupdir + '/' + f)
            if format_version == 1:
                subprocess.call(
                    ["cp", "-nrp", backup_dom0_home_dir + '/' + f, home_file])
            elif format_version >= 2:
                shutil.move(backup_dom0_home_dir + '/' + f, home_file)
        retcode = subprocess.call(['sudo', 'chown', '-R', local_user, home_dir])
        if retcode != 0:
            error_callback("*** Error while setting home directory owner")

    shutil.rmtree(restore_tmpdir)

# vim:sw=4:et: