backup.py

#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013-2017 Marek Marczykowski-Górecki
#                         <marmarek@invisiblethingslab.com>
# Copyright (C) 2013 Olivier Médoc <o_medoc@yahoo.fr>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
#
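'''Create backups of Qubes OS VMs (backup format 4).

Data of the selected VMs is archived with tar, optionally compressed, then
encrypted and integrity-protected with scrypt, and finally written to a local
file or streamed to a target VM.
'''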
from __future__ import unicode_literals
import asyncio
import datetime
import fcntl
import functools
import grp
import itertools
import logging
import os
import pathlib
import pwd
import re
import shutil
import stat
import string
import subprocess
import tempfile
import termios
import time

from .utils import size_to_human
import qubes
import qubes.core2migration
import qubes.storage
import qubes.storage.file
import qubes.vm.templatevm

QUEUE_ERROR = "ERROR"
QUEUE_FINISHED = "FINISHED"

HEADER_FILENAME = 'backup-header'
DEFAULT_CRYPTO_ALGORITHM = 'aes-256-cbc'
# 'scrypt' is not exactly an HMAC algorithm, but the tool we use to
# integrity-protect the data
DEFAULT_HMAC_ALGORITHM = 'scrypt'
DEFAULT_COMPRESSION_FILTER = 'gzip'
CURRENT_BACKUP_FORMAT_VERSION = '4'
# Maximum size of error message read from process stderr (including VM process)
MAX_STDERR_BYTES = 1024
# header + qubes.xml max size
HEADER_QUBES_XML_MAX_SIZE = 1024 * 1024
# hmac file max size - regardless of backup format version!
HMAC_MAX_SIZE = 4096

BLKSIZE = 512

_re_alphanum = re.compile(r'^[A-Za-z0-9-]*$')


class BackupCanceledError(qubes.exc.QubesException):
    def __init__(self, msg, tmpdir=None):
        super(BackupCanceledError, self).__init__(msg)
        self.tmpdir = tmpdir


class BackupHeader:
    '''Structure describing the backup-header file included as the first file
    in a backup archive
    '''
    # pylint: disable=too-few-public-methods
    header_keys = {
        'version': 'version',
        'encrypted': 'encrypted',
        'compressed': 'compressed',
        'compression-filter': 'compression_filter',
        'crypto-algorithm': 'crypto_algorithm',
        'hmac-algorithm': 'hmac_algorithm',
        'backup-id': 'backup_id'
    }
    bool_options = ['encrypted', 'compressed']
    int_options = ['version']

    def __init__(self,
            version=None,
            encrypted=None,
            compressed=None,
            compression_filter=None,
            hmac_algorithm=None,
            crypto_algorithm=None,
            backup_id=None):
        # repeat the list to help code completion...
        self.version = version
        self.encrypted = encrypted
        self.compressed = compressed
        # Options introduced in backup format 3+, which always has a header,
        # so no need for a fallback in the function parameters
        self.compression_filter = compression_filter
        self.hmac_algorithm = hmac_algorithm
        self.crypto_algorithm = crypto_algorithm
        self.backup_id = backup_id

    def save(self, filename):
        with open(filename, "w") as f_header:
            # make sure 'version' is the first key
            f_header.write('version={}\n'.format(self.version))
            for key, attr in self.header_keys.items():
                if key == 'version':
                    continue
                if getattr(self, attr) is None:
                    continue
                f_header.write("{!s}={!s}\n".format(key, getattr(self, attr)))


class SendWorker:
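    '''Worker streaming already-prepared files from *base_dir* to the backup
    target: each filename taken from *queue* is sent with a plain ``tar -cO``
    to *backup_stdout* and then removed from disk.
    '''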
    # pylint: disable=too-few-public-methods
    def __init__(self, queue, base_dir, backup_stdout):
        super(SendWorker, self).__init__()
        self.queue = queue
        self.base_dir = base_dir
        self.backup_stdout = backup_stdout
        self.log = logging.getLogger('qubes.backup')

    @asyncio.coroutine
    def run(self):
        self.log.debug("Started sending thread")

        while True:
            filename = yield from self.queue.get()
            if filename in (QUEUE_FINISHED, QUEUE_ERROR):
                break

            self.log.debug("Sending file {}".format(filename))
            # This tar, used for sending the data out, needs to be as simple
            # and featureless as possible. It will not be verified before
            # untarring.
            tar_final_cmd = ["tar", "-cO", "--posix",
                "-C", self.base_dir, filename]
            # pylint: disable=not-an-iterable
            final_proc = yield from asyncio.create_subprocess_exec(
                *tar_final_cmd,
                stdout=self.backup_stdout)
            retcode = yield from final_proc.wait()
            if retcode >= 2:
                # handle only exit code 2 (tar fatal error) or
                # greater (call failed?)
                raise qubes.exc.QubesException(
                    "ERROR: Failed to write the backup, out of disk space? "
                    "Check console output or ~/.xsession-errors for details.")

            # Delete the file as we don't need it anymore
            self.log.debug("Removing file {}".format(filename))
            os.remove(os.path.join(self.base_dir, filename))

        self.log.debug("Finished sending thread")


@asyncio.coroutine
def launch_proc_with_pty(args, stdin=None, stdout=None, stderr=None, echo=True):
    """Similar to pty.fork, but handles stdin/stdout according to the
    parameters instead of connecting them to the pty

    :return tuple (process, pty_master)
    """

    def set_ctty(ctty_fd, master_fd):
        os.setsid()
        os.close(master_fd)
        fcntl.ioctl(ctty_fd, termios.TIOCSCTTY, 0)
        if not echo:
            termios_p = termios.tcgetattr(ctty_fd)
            # termios_p.c_lflags
            termios_p[3] &= ~termios.ECHO
            termios.tcsetattr(ctty_fd, termios.TCSANOW, termios_p)
    (pty_master, pty_slave) = os.openpty()
    # pylint: disable=not-an-iterable
    p = yield from asyncio.create_subprocess_exec(*args,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        preexec_fn=lambda: set_ctty(pty_slave, pty_master))
    os.close(pty_slave)
    return p, open(pty_master, 'wb+', buffering=0)


@asyncio.coroutine
def launch_scrypt(action, input_name, output_name, passphrase):
    '''
    Launch the 'scrypt' process, pass the passphrase to it and return the
    process object.

    :param action: 'enc' or 'dec'
    :param input_name: input path or '-' for stdin
    :param output_name: output path or '-' for stdout
    :param passphrase: passphrase
    :type passphrase: bytes
    :return: asyncio.subprocess.Process object
    '''
    command_line = ['scrypt', action, input_name, output_name]
    (p, pty) = yield from launch_proc_with_pty(command_line,
        stdin=subprocess.PIPE if input_name == '-' else None,
        stdout=subprocess.PIPE if output_name == '-' else None,
        stderr=subprocess.PIPE,
        echo=False)
    if action == 'enc':
        prompts = (b'Please enter passphrase: ', b'Please confirm passphrase: ')
    else:
        prompts = (b'Please enter passphrase: ',)
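    # scrypt asks for the passphrase on its controlling terminal; read each
    # prompt from stderr and answer it on the pty master (terminal echo is
    # disabled above).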
    for prompt in prompts:
        actual_prompt = yield from p.stderr.read(len(prompt))
        if actual_prompt != prompt:
            raise qubes.exc.QubesException(
                'Unexpected prompt from scrypt: {}'.format(actual_prompt))
        pty.write(passphrase + b'\n')
        pty.flush()
    # save it here, so the garbage collector does not close it (which would
    # kill the child)
    p.pty = pty
    return p


class Backup:
    '''Backup operation manager. Usage:

    >>> app = qubes.Qubes()
    >>> # optional - you can use 'None' to use the default list (based on
    >>> # the vm.include_in_backups property)
    >>> vms = [app.domains[name] for name in ['my-vm1', 'my-vm2', 'my-vm3']]
    >>> exclude_vms = []
    >>> options = {
    >>>     'compressed': True,
    >>>     'passphrase': 'This is very weak backup passphrase',
    >>>     'target_vm': app.domains['sys-usb'],
    >>>     'target_dir': '/media/disk',
    >>> }
    >>> backup_op = Backup(app, vms, exclude_vms, **options)
    >>> print(backup_op.get_backup_summary())
    >>> asyncio.get_event_loop().run_until_complete(backup_op.backup_do())

    See attributes of this object for all available options.
    '''
    # pylint: disable=too-many-instance-attributes

    class FileToBackup:
        # pylint: disable=too-few-public-methods
        def __init__(self, file_path, subdir=None, name=None, size=None):
            if size is None:
                size = qubes.storage.file.get_disk_usage(file_path)

            if subdir is None:
                abs_file_dir = pathlib.Path(file_path).resolve().parent
                abs_base_dir = pathlib.Path(
                    qubes.config.system_path["qubes_base_dir"]).resolve()
                # this raises ValueError if abs_file_dir is not in abs_base_dir
                subdir = str(abs_file_dir.relative_to(abs_base_dir))
                if not subdir.endswith(os.path.sep):
                    subdir += os.path.sep

            #: real path to the file
            self.path = file_path
            #: size of the file
            self.size = size
            #: directory in the backup archive where the file should be placed
            self.subdir = subdir
            #: use this name in the archive (aka rename)
            self.name = os.path.basename(file_path)
            if name is not None:
                self.name = name

    class VMToBackup:
        # pylint: disable=too-few-public-methods
        def __init__(self, vm, files, subdir):
            self.vm = vm
            self.files = files
            self.subdir = subdir

        @property
        def size(self):
            return functools.reduce(lambda x, y: x + y.size, self.files, 0)

    def __init__(self, app, vms_list=None, exclude_list=None, **kwargs):
        """
        If vms_list = None, include all (sensible) VMs;
        exclude_list is always applied
        """
        super(Backup, self).__init__()

        #: size of a single backup chunk
        self.chunk_size = 100 * 1024 * 1024
        #: progress of the backup - bytes handled of the current VM
        self._current_vm_bytes = 0
        #: progress of the backup - bytes handled of finished VMs
        self._done_vms_bytes = 0
        #: total backup size (set by :py:meth:`get_files_to_backup`)
        self.total_backup_bytes = 0
        #: application object
        self.app = app
        #: directory for temporary files - set after creating the directory
        self.tmpdir = None

        # Backup settings - defaults
        #: should the backup be compressed?
        self.compressed = True
        #: what passphrase should be used to integrity protect (and encrypt)
        #: the backup; required
        self.passphrase = None
        #: custom compression filter; a program which processes stdin to stdout
        self.compression_filter = DEFAULT_COMPRESSION_FILTER
        #: VM to which the backup should be sent (if any)
        self.target_vm = None
        #: directory to save the backup in (either in dom0 or the target VM,
        #: depending on :py:attr:`target_vm`)
        self.target_dir = None
        #: callback for progress reporting. Will be called with one argument
        #: - progress in percent
        self.progress_callback = None
        self.last_progress_time = time.time()
        #: backup ID, needs to be unique (for a given user),
        #: not necessarily unpredictable; automatically generated
        self.backup_id = datetime.datetime.now().strftime(
            '%Y%m%dT%H%M%S-' + str(os.getpid()))

        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                raise AttributeError(key)

        self.log = logging.getLogger('qubes.backup')

        if exclude_list is None:
            exclude_list = []

        if vms_list is None:
            vms_list = [vm for vm in app.domains if vm.include_in_backups]

        # Apply exclude list
        self.vms_for_backup = [vm for vm in vms_list
                               if vm.name not in exclude_list]

        self._files_to_backup = self.get_files_to_backup()

    def __del__(self):
        if self.tmpdir and os.path.exists(self.tmpdir):
            shutil.rmtree(self.tmpdir)

    def get_files_to_backup(self):
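        '''Collect the files to back up for each selected VM.

        Returns a dict mapping qid to :py:class:`VMToBackup`; as a side
        effect, :py:attr:`total_backup_bytes` is updated.
        '''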
        files_to_backup = {}
        for vm in self.vms_for_backup:
            if vm.qid == 0:
                # handle dom0 later
                continue

            subdir = 'vm%d/' % vm.qid

            vm_files = []
            for name, volume in vm.volumes.items():
                if not volume.save_on_stop:
                    continue
                vm_files.append(self.FileToBackup(
                    volume.export(),
                    subdir,
                    name + '.img',
                    volume.usage))

            vm_files.extend(self.FileToBackup(i, subdir)
                for i in vm.fire_event('backup-get-files'))

            firewall_conf = os.path.join(vm.dir_path, vm.firewall_conf)
            if os.path.exists(firewall_conf):
                vm_files.append(self.FileToBackup(firewall_conf, subdir))

            if not vm_files:
                # subdir/ is needed in the tar file, otherwise restore
                # of a (Disp)VM without any backed up files is going
                # to fail. Adding a zero-sized file here happens to be
                # more straightforward than adding an empty directory.
                empty = self.FileToBackup("/var/run/qubes/empty", subdir)
                assert empty.size == 0
                vm_files.append(empty)

            files_to_backup[vm.qid] = self.VMToBackup(vm, vm_files, subdir)

        # Dom0 user home
        if 0 in [vm.qid for vm in self.vms_for_backup]:
            local_user = grp.getgrnam('qubes').gr_mem[0]
            home_dir = pwd.getpwnam(local_user).pw_dir
            # Home dir should have only user-owned files, so fix it now
            # to prevent permission problems - some root-owned files can
            # be left after 'sudo bash' and similar commands
            subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])

            home_to_backup = [
                self.FileToBackup(home_dir, 'dom0-home/')]
            vm_files = home_to_backup

            files_to_backup[0] = self.VMToBackup(self.app.domains[0],
                vm_files,
                os.path.join('dom0-home', os.path.basename(home_dir)))

        self.total_backup_bytes = functools.reduce(
            lambda x, y: x + y.size, files_to_backup.values(), 0)
        return files_to_backup

    def get_backup_summary(self):
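        '''Return a human-readable table listing the VMs selected for backup,
        their types and sizes, the total backup size, and the VMs that were
        not selected.
        '''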
        summary = ""

        fields_to_display = [
            {"name": "VM", "width": 16},
            {"name": "type", "width": 12},
            {"name": "size", "width": 12}
        ]

        # Display the header
        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"
        for field in fields_to_display:
            fmt = "{{0:>{0}}} |".format(field["width"] + 1)
            summary += fmt.format(field["name"])
        summary += "\n"
        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        files_to_backup = self._files_to_backup

        for qid, vm_info in files_to_backup.items():
            summary_line = ""
            fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
            summary_line += fmt.format(vm_info.vm.name)

            fmt = "{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
            if qid == 0:
                summary_line += fmt.format("User home")
            elif isinstance(vm_info.vm, qubes.vm.templatevm.TemplateVM):
                summary_line += fmt.format("Template VM")
            else:
                summary_line += fmt.format("VM" + (" + Sys" if
                    vm_info.vm.updateable else ""))

            vm_size = vm_info.size

            fmt = "{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
            summary_line += fmt.format(size_to_human(vm_size))

            if qid != 0 and vm_info.vm.is_running():
                summary_line += " <-- The VM is running, backup will contain " \
                                "its state from before its start!"

            summary += summary_line + "\n"

        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        fmt = "{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        summary += fmt.format("Total size:")
        fmt = "{{0:>{0}}} |".format(
            fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2][
                "width"] + 1)
        summary += fmt.format(size_to_human(self.total_backup_bytes))
        summary += "\n"

        for field in fields_to_display:
            fmt = "{{0:-^{0}}}-+".format(field["width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        vms_not_for_backup = [vm.name for vm in self.app.domains
                              if vm not in self.vms_for_backup]
        summary += "VMs not selected for backup:\n - " + "\n - ".join(
            sorted(vms_not_for_backup)) + "\n"

        return summary

    @asyncio.coroutine
    def _prepare_backup_header(self):
        header_file_path = os.path.join(self.tmpdir, HEADER_FILENAME)
        backup_header = BackupHeader(
            version=CURRENT_BACKUP_FORMAT_VERSION,
            hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
            encrypted=True,
            compressed=self.compressed,
            compression_filter=self.compression_filter,
            backup_id=self.backup_id,
        )
        backup_header.save(header_file_path)
        # Start encryption; scrypt will also handle integrity
        # protection
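        # The scrypt passphrase is prefixed with the in-archive file name
        # ('backup-header!'), so the integrity check is also bound to that
        # name and a renamed or substituted file will fail verification.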
        scrypt_passphrase = '{filename}!'.format(
            filename=HEADER_FILENAME).encode() + self.passphrase
        scrypt = yield from launch_scrypt(
            'enc', header_file_path, header_file_path + '.hmac',
            scrypt_passphrase)

        retcode = yield from scrypt.wait()
        if retcode:
            stderr = yield from scrypt.stderr.read()
            raise qubes.exc.QubesException(
                "Failed to compute hmac of header file: "
                + stderr.decode('ascii', errors='ignore'))
        return HEADER_FILENAME, HEADER_FILENAME + ".hmac"

    def _send_progress_update(self):
        if not self.total_backup_bytes:
            return
        if callable(self.progress_callback):
            if time.time() - self.last_progress_time >= 1:  # avoid flooding
                progress = (
                    100 * (self._done_vms_bytes + self._current_vm_bytes) /
                    self.total_backup_bytes)
                self.last_progress_time = time.time()
                # pylint: disable=not-callable
                self.progress_callback(progress)

    def _add_vm_progress(self, bytes_done):
        self._current_vm_bytes += bytes_done
        self._send_progress_update()

    @asyncio.coroutine
    def _split_and_send(self, input_stream, file_basename,
            output_queue):
        '''Split *input_stream* into parts of at most *chunk_size* bytes and
        send them to *output_queue*.

        :param input_stream: stream (asyncio reader stream) of data to split
        :param file_basename: basename (i.e. without part number and '.enc')
            of output files
        :param output_queue: asyncio.Queue instance to put produced files to
            - the queue will get only filenames of written chunks
        '''
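        # Each chunk is written to <file_basename>.NNN.enc by its own scrypt
        # process; the scrypt passphrase embeds the backup_id and the chunk's
        # archive-relative name, binding every chunk to this backup and to
        # its place in the archive.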
        # Wait for the compressor (tar) process to finish or for an
        # error in any of the other subprocesses
        i = 0
        run_error = "size_limit"
        scrypt = None
        while run_error == "size_limit":
            # Prepare the next chunk
            chunkfile = file_basename + ".%03d.enc" % i
            i += 1

            # Start encryption; scrypt will also handle integrity
            # protection
            scrypt_passphrase = \
                '{backup_id}!{filename}!'.format(
                    backup_id=self.backup_id,
                    filename=os.path.relpath(chunkfile[:-4],
                        self.tmpdir)).encode() + self.passphrase
            try:
                scrypt = yield from launch_scrypt(
                    "enc", "-", chunkfile, scrypt_passphrase)

                run_error = yield from handle_streams(
                    input_stream,
                    scrypt.stdin,
                    self.chunk_size,
                    self._add_vm_progress
                )

                self.log.debug(
                    "handle_streams returned: {}".format(run_error))
            except:
                if scrypt is not None:
                    scrypt.terminate()
                raise

            scrypt.stdin.close()
            yield from scrypt.wait()
            self.log.debug("scrypt return code: {}".format(
                scrypt.returncode))

            # Send the chunk to the backup target
            yield from output_queue.put(
                os.path.relpath(chunkfile, self.tmpdir))

    @asyncio.coroutine
    def _wrap_and_send_files(self, files_to_backup, output_queue):
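        '''Archive every file of every VM in *files_to_backup* with tar (or
        qubes.tarwriter for block devices and renamed files), optionally
        compress it, and pass the stream to :py:meth:`_split_and_send`;
        finally put QUEUE_FINISHED on *output_queue*.
        '''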
        for vm_info in files_to_backup:
            for file_info in vm_info.files:

                self.log.debug("Backing up {}".format(file_info))

                backup_tempfile = os.path.join(
                    self.tmpdir, file_info.subdir,
                    file_info.name)
                self.log.debug("Using temporary location: {}".format(
                    backup_tempfile))

                # Ensure the temporary directory exists
                if not os.path.isdir(os.path.dirname(backup_tempfile)):
                    os.makedirs(os.path.dirname(backup_tempfile))

                # The first tar command may use any complex features we want;
                # files will be verified before untarring this.
                # Prefix the path in the archive with file_info.subdir to have
                # it verified during untar
                tar_cmdline = (["tar", "-Pc", '--sparse',
                    '-C', os.path.dirname(file_info.path)] +
                    (['--dereference'] if
                        file_info.subdir != "dom0-home/" else []) +
                    ['--xform=s:^%s:%s\\0:' % (
                        os.path.basename(file_info.path),
                        file_info.subdir),
                        os.path.basename(file_info.path)
                    ])
                file_stat = os.stat(file_info.path)
                if stat.S_ISBLK(file_stat.st_mode) or \
                        file_info.name != os.path.basename(file_info.path):
                    # tar doesn't handle the content of block devices, so use
                    # our own tar writer; also use it when renaming a file
                    assert not stat.S_ISDIR(file_stat.st_mode), \
                        "Renaming directories not supported"
                    tar_cmdline = ['python3', '-m', 'qubes.tarwriter',
                        '--override-name=%s' % (
                            os.path.join(file_info.subdir, os.path.basename(
                                file_info.name))),
                        file_info.path]
                if self.compressed:
                    tar_cmdline.insert(-2,
                        "--use-compress-program=%s" % self.compression_filter)

                self.log.debug(" ".join(tar_cmdline))

                # Pipe: tar-sparse | scrypt | tar | backup_target
                # TODO: log handle stderr
                # pylint: disable=not-an-iterable
                tar_sparse = yield from asyncio.create_subprocess_exec(
                    *tar_cmdline, stdout=subprocess.PIPE)

                try:
                    yield from self._split_and_send(
                        tar_sparse.stdout,
                        backup_tempfile,
                        output_queue)
                except:
                    try:
                        tar_sparse.terminate()
                    except ProcessLookupError:
                        pass
                    raise

                yield from tar_sparse.wait()
                if tar_sparse.returncode:
                    raise qubes.exc.QubesException(
                        'Failed to archive {} file'.format(file_info.path))

            # This VM done, update progress
            self._done_vms_bytes += vm_info.size
            self._current_vm_bytes = 0
            self._send_progress_update()

        yield from output_queue.put(QUEUE_FINISHED)

    @staticmethod
    @asyncio.coroutine
    def _monitor_process(proc, error_message):
        try:
            yield from proc.wait()
        except:
            proc.terminate()
            raise

        if proc.returncode:
            if proc.stderr is not None:
                proc_stderr = (yield from proc.stderr.read())
                proc_stderr = proc_stderr.decode('ascii', errors='ignore')
                proc_stderr = ''.join(
                    c for c in proc_stderr if c in string.printable and
                    c not in '\r\n%{}')
                error_message += ': ' + proc_stderr
            raise qubes.exc.QubesException(error_message)

    @staticmethod
    @asyncio.coroutine
    def _cancel_on_error(future, previous_task):
        '''If a later element of the chain fails, cancel the previous one to
        avoid a deadlock.

        When an earlier element of the chain fails, it will be handled by
        :py:meth:`backup_do`.

        The chain is:
        :py:meth:`_wrap_and_send_files` -> :py:class:`SendWorker` -> vmproc
        '''
        try:
            yield from future
        except:  # pylint: disable=bare-except
            previous_task.cancel()

    @asyncio.coroutine
    def backup_do(self):
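        '''Perform the backup: write the backup header, qubes.xml and all
        selected VM files to :py:attr:`target_dir`, either locally or in
        :py:attr:`target_vm`, and update backup timestamps on success.
        '''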
        # pylint: disable=too-many-statements
        if self.passphrase is None:
            raise qubes.exc.QubesException("No passphrase set")
        if not isinstance(self.passphrase, bytes):
            self.passphrase = self.passphrase.encode('utf-8')
        qubes_xml = self.app.store
        self.tmpdir = tempfile.mkdtemp()
        shutil.copy(qubes_xml, os.path.join(self.tmpdir, 'qubes.xml'))
        qubes_xml = os.path.join(self.tmpdir, 'qubes.xml')
        backup_app = qubes.Qubes(qubes_xml, offline_mode=True)
        backup_app.events_enabled = False

        files_to_backup = self._files_to_backup
        # make sure backup_content isn't set initially
        for vm in backup_app.domains:
            vm.events_enabled = False
            vm.features['backup-content'] = False

        for qid, vm_info in files_to_backup.items():
            # VM is included in the backup
            backup_app.domains[qid].features['backup-content'] = True
            backup_app.domains[qid].features['backup-path'] = vm_info.subdir
            backup_app.domains[qid].features['backup-size'] = vm_info.size
        backup_app.save()
        del backup_app

        vmproc = None
        if self.target_vm is not None:
            # Prepare the backup target (Qubes service call)
            # If the target is an AppVM, STDOUT is a pipe
            read_fd, write_fd = os.pipe()
            vmproc = yield from self.target_vm.run_service('qubes.Backup',
                stdin=read_fd,
                stderr=subprocess.PIPE,
                stdout=subprocess.DEVNULL)
            os.close(read_fd)
            os.write(write_fd, (self.target_dir.
                replace("\r", "").replace("\n", "") + "\n").encode())
            backup_stdout = write_fd
        else:
            # Prepare the backup target (local file)
            if os.path.isdir(self.target_dir):
                backup_target = self.target_dir + "/qubes-{0}". \
                    format(time.strftime("%Y-%m-%dT%H%M%S"))
            else:
                backup_target = self.target_dir

                # Check that the target directory exists
                if not os.path.exists(os.path.dirname(self.target_dir)):
                    raise qubes.exc.QubesException(
                        "ERROR: the backup directory for {0} does not exist".
                        format(self.target_dir))

            # If not an AppVM, STDOUT is a local file
            backup_stdout = open(backup_target, 'wb')

        # Tar with tape length does not deal well with stdout
        # (it closes stdout between two tapes)
        # For this reason, we will use named pipes instead
        self.log.debug("Working in {}".format(self.tmpdir))

        self.log.debug("Will backup: {}".format(files_to_backup))

        header_files = yield from self._prepare_backup_header()

        # Setup worker to send encrypted data chunks to the backup_target
        to_send = asyncio.Queue(10)
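        # The queue is bounded, so at most 10 prepared chunk files wait in
        # tmpdir before SendWorker has streamed and deleted them - this gives
        # backpressure on the archiving side.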
        send_proc = SendWorker(to_send, self.tmpdir, backup_stdout)
        send_task = asyncio.ensure_future(send_proc.run())

        vmproc_task = None
        if vmproc is not None:
            vmproc_task = asyncio.ensure_future(
                self._monitor_process(vmproc,
                    'Writing backup to VM {} failed'.format(
                        self.target_vm.name)))
            asyncio.ensure_future(self._cancel_on_error(
                vmproc_task, send_task))

        for file_name in header_files:
            yield from to_send.put(file_name)

        qubes_xml_info = self.VMToBackup(
            None,
            [self.FileToBackup(qubes_xml, '')],
            ''
        )
        inner_archive_task = asyncio.ensure_future(
            self._wrap_and_send_files(
                itertools.chain([qubes_xml_info], files_to_backup.values()),
                to_send
            ))
        asyncio.ensure_future(
            self._cancel_on_error(send_task, inner_archive_task))

        try:
            try:
                yield from inner_archive_task
            except:
                yield from to_send.put(QUEUE_ERROR)
                # in fact we may be handling CancelledError, induced by an
                # exception in send_task or vmproc_task (and propagated by
                # the self._cancel_on_error call above); in such a case this
                # yield from will raise an exception, covering the
                # CancelledError - this is the intended behaviour
                if vmproc_task:
                    yield from vmproc_task
                yield from send_task
                raise

            yield from send_task

        finally:
            if isinstance(backup_stdout, int):
                os.close(backup_stdout)
            else:
                backup_stdout.close()
            try:
                if vmproc_task:
                    yield from vmproc_task
            finally:
                shutil.rmtree(self.tmpdir)

        # Save date of last backup, only when the backup succeeded
        for qid, vm_info in files_to_backup.items():
            if vm_info.vm:
                vm_info.vm.backup_timestamp = \
                    int(datetime.datetime.now().strftime('%s'))

        self.app.save()


@asyncio.coroutine
def handle_streams(stream_in, stream_out, size_limit=None,
        progress_callback=None):
    '''
    Copy *stream_in* to *stream_out*, copying at most `size_limit` bytes
    (if given).

    :param stream_in: StreamReader object to read data from
    :param stream_out: StreamWriter object to write data to
    :param size_limit: int maximum data amount to process
    :param progress_callback: callable function to report progress, will be
        given the copied data size (it should accumulate internally)
    :return: "size_limit" if the limit was reached, None otherwise (no error)
    '''
    buffer_size = 409600
    bytes_copied = 0
    while True:
        if size_limit:
            to_copy = min(buffer_size, size_limit - bytes_copied)
            if to_copy <= 0:
                return "size_limit"
        else:
            to_copy = buffer_size
        buf = yield from stream_in.read(to_copy)
        if not buf:
            # done
            break

        if callable(progress_callback):
            progress_callback(len(buf))
        stream_out.write(buf)
        bytes_copied += len(buf)
    return None

# vim:sw=4:et: