# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2017 Marek Marczykowski-Górecki
#                    <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.

'''Backup restore module'''
import errno
import fcntl
import functools
import getpass
import grp
import logging
import multiprocessing
from multiprocessing import Queue, Process
import os
import pwd
import re
import shutil
import subprocess
import sys
import tempfile
import termios
import time
import collections

import qubesadmin
import qubesadmin.vm
from qubesadmin.backup import BackupVM
from qubesadmin.backup.core2 import Core2Qubes
from qubesadmin.backup.core3 import Core3Qubes
from qubesadmin.devices import DeviceAssignment
from qubesadmin.exc import QubesException
from qubesadmin.utils import size_to_human
# must be picklable
QUEUE_FINISHED = "!!!FINISHED"
QUEUE_ERROR = "!!!ERROR"

HEADER_FILENAME = 'backup-header'
DEFAULT_CRYPTO_ALGORITHM = 'aes-256-cbc'
# 'scrypt' is not exactly an HMAC algorithm, but a tool we use to
# integrity-protect the data
DEFAULT_HMAC_ALGORITHM = 'scrypt'
DEFAULT_COMPRESSION_FILTER = 'gzip'
KNOWN_COMPRESSION_FILTERS = ('gzip', 'bzip2', 'xz')

# lazy loaded
KNOWN_CRYPTO_ALGORITHMS = []
# lazy loaded
KNOWN_HMAC_ALGORITHMS = []

# Maximum size of error message read from process stderr (including VM process)
MAX_STDERR_BYTES = 1024
# header + qubes.xml max size
HEADER_QUBES_XML_MAX_SIZE = 1024 * 1024
# hmac file max size - regardless of backup format version!
HMAC_MAX_SIZE = 4096

BLKSIZE = 512

_re_alphanum = re.compile(r'^[A-Za-z0-9-]*$')
_tar_msg_re = re.compile(r".*#[0-9].*restore_pipe")
_tar_file_size_re = re.compile(r"^[^ ]+ [^ ]+/[^ ]+ *([0-9]+) .*")


class BackupCanceledError(QubesException):
    '''Exception raised when backup/restore was cancelled'''
    def __init__(self, msg, tmpdir=None):
        super(BackupCanceledError, self).__init__(msg)
        self.tmpdir = tmpdir


def init_supported_hmac_and_crypto():
    """Collect supported hmac and crypto algorithms.

    This calls openssl to list actual supported algos.
    """
    if not KNOWN_HMAC_ALGORITHMS:
        KNOWN_HMAC_ALGORITHMS.extend(get_supported_hmac_algo())
    if not KNOWN_CRYPTO_ALGORITHMS:
        KNOWN_CRYPTO_ALGORITHMS.extend(get_supported_crypto_algo())


class BackupHeader(object):
    '''Structure describing backup-header file included as the first file in
    backup archive
    '''
    Header = collections.namedtuple('Header', ['field', 't', 'validator'])
    known_headers = {
        'version': Header(field='version', t=int,
                          validator=lambda x: 1 <= x <= 4),
        'encrypted': Header(field='encrypted', t=bool,
                            validator=lambda x: True),
        'compressed': Header(field='compressed', t=bool,
                             validator=lambda x: True),
        'compression-filter': Header(
            field='compression_filter',
            t=str,
            validator=lambda x: x in KNOWN_COMPRESSION_FILTERS),
        'crypto-algorithm': Header(
            field='crypto_algorithm',
            t=str,
            validator=lambda x: x.lower() in KNOWN_CRYPTO_ALGORITHMS),
        'hmac-algorithm': Header(
            field='hmac_algorithm',
            t=str,
            validator=lambda x: x.lower() in KNOWN_HMAC_ALGORITHMS),
        'backup-id': Header(
            field='backup_id',
            t=str,
            validator=lambda x: not x.startswith('-') and x != ''),
    }
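
    # Example backup-header contents (illustrative values for format
    # version 4; the backup-id below is made up):
    #
    #   version=4
    #   encrypted=True
    #   compressed=True
    #   compression-filter=gzip
    #   hmac-algorithm=scrypt
    #   backup-id=20161020T123455-1234
    #
    # Unknown keys are ignored by load(); duplicated keys are rejected.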

    def __init__(self,
                 header_data=None,
                 version=None,
                 encrypted=None,
                 compressed=None,
                 compression_filter=None,
                 hmac_algorithm=None,
                 crypto_algorithm=None,
                 backup_id=None):
        # repeat the list to help code completion...
        self.version = version
        self.encrypted = encrypted
        self.compressed = compressed
        # Options introduced in backup format 3+, which always have a header,
        # so no need for fallback in function parameter
        self.compression_filter = compression_filter
        self.hmac_algorithm = hmac_algorithm
        self.crypto_algorithm = crypto_algorithm
        self.backup_id = backup_id
        init_supported_hmac_and_crypto()
        if header_data is not None:
            self.load(header_data)

    def load(self, untrusted_header_text):
        """Parse backup header file.

        :param untrusted_header_text: header content
        :type untrusted_header_text: basestring

        .. warning::
            This function may be exposed to a not-yet-verified header,
            so it is security critical.
        """
        try:
            untrusted_header_text = untrusted_header_text.decode('ascii')
        except UnicodeDecodeError:
            raise QubesException(
                "Non-ASCII characters in backup header")
        seen = set()
        for untrusted_line in untrusted_header_text.splitlines():
            if untrusted_line.count('=') != 1:
                raise QubesException("Invalid backup header")
            key, value = untrusted_line.strip().split('=', 1)
            if not _re_alphanum.match(key):
                raise QubesException("Invalid backup header (key)")
            if key not in self.known_headers:
                # Ignoring unknown option
                continue
            header = self.known_headers[key]
            if key in seen:
                raise QubesException("Duplicated header line: {}".format(key))
            seen.add(key)
            if getattr(self, header.field, None) is not None:
                # ignore options already set (potentially forced values)
                continue
            if not _re_alphanum.match(value):
                raise QubesException("Invalid backup header (value)")
            if header.t is bool:
                value = value.lower() in ["1", "true", "yes"]
            elif header.t is int:
                value = int(value)
            elif header.t is str:
                pass
            else:
                raise QubesException("Unrecognized header type")
            if not header.validator(value):
                if key == 'compression-filter':
                    raise QubesException(
                        "Unusual compression filter '{f}' found. Use "
                        "--compression-filter={f} to use it anyway.".format(
                            f=value))
                raise QubesException("Invalid value for header: {}".format(key))
            setattr(self, header.field, value)

        self.validate()

    def validate(self):
        '''Validate header data, according to header version'''
        if self.version == 1:
            # header not really present
            pass
        elif self.version in [2, 3, 4]:
            expected_attrs = ['version', 'encrypted', 'compressed',
                              'hmac_algorithm']
            if self.encrypted and self.version < 4:
                expected_attrs += ['crypto_algorithm']
            if self.version >= 3 and self.compressed:
                expected_attrs += ['compression_filter']
            if self.version >= 4:
                expected_attrs += ['backup_id']
            for key in expected_attrs:
                if getattr(self, key) is None:
                    raise QubesException(
                        "Backup header lack '{}' info".format(key))
        else:
            raise QubesException(
                "Unsupported backup version {}".format(self.version))

    def save(self, filename):
        '''Save backup header into a file'''
        with open(filename, "w") as f_header:
            # make sure 'version' is the first key
            f_header.write('version={}\n'.format(self.version))
            for key, header in self.known_headers.items():
                if key == 'version':
                    continue
                attr = header.field
                if getattr(self, attr) is None:
                    continue
                f_header.write("{!s}={!s}\n".format(key, getattr(self, attr)))


def launch_proc_with_pty(args, stdin=None, stdout=None, stderr=None, echo=True):
    """Similar to pty.fork, but handle stdin/stdout according to parameters
    instead of connecting to the pty

    :return tuple (subprocess.Popen, pty_master)
    """
    def set_ctty(ctty_fd, master_fd):
        '''Set controlling terminal'''
        os.setsid()
        os.close(master_fd)
        fcntl.ioctl(ctty_fd, termios.TIOCSCTTY, 0)
        if not echo:
            termios_p = termios.tcgetattr(ctty_fd)
            # termios_p.c_lflags
            termios_p[3] &= ~termios.ECHO
            termios.tcsetattr(ctty_fd, termios.TCSANOW, termios_p)

    (pty_master, pty_slave) = os.openpty()
    # pylint: disable=subprocess-popen-preexec-fn
    p = subprocess.Popen(args, stdin=stdin, stdout=stdout,
                         stderr=stderr,
                         preexec_fn=lambda: set_ctty(pty_slave, pty_master))
    os.close(pty_slave)
    return p, open(pty_master, 'wb+', buffering=0)


def launch_scrypt(action, input_name, output_name, passphrase):
    '''
    Launch 'scrypt' process, pass passphrase to it and return
    subprocess.Popen object.

    :param action: 'enc' or 'dec'
    :param input_name: input path or '-' for stdin
    :param output_name: output path or '-' for stdout
    :param passphrase: passphrase
    :return: subprocess.Popen object
    '''
    command_line = ['scrypt', action, '-f', input_name, output_name]
    (p, pty) = launch_proc_with_pty(
        command_line,
        stdin=subprocess.PIPE if input_name == '-' else None,
        stdout=subprocess.PIPE if output_name == '-' else None,
        stderr=subprocess.PIPE,
        echo=False)
    if action == 'enc':
        prompts = (b'Please enter passphrase: ', b'Please confirm passphrase: ')
    else:
        prompts = (b'Please enter passphrase: ',)
    for prompt in prompts:
        actual_prompt = p.stderr.read(len(prompt))
        if actual_prompt != prompt:
            raise QubesException(
                'Unexpected prompt from scrypt: {}'.format(actual_prompt))
        pty.write(passphrase.encode('utf-8') + b'\n')
        pty.flush()
    # save it here, so garbage collector would not close it (which would kill
    # the child)
    p.pty = pty
    return p
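
# Illustrative use of launch_scrypt() (a sketch only, not part of the module
# API; the output path and passphrase below are made-up values):
#
#   p = launch_scrypt('enc', '-', '/tmp/backup.enc', 'some passphrase')
#   p.stdin.write(b'data to protect')
#   p.stdin.close()
#   p.wait()
#
# With input_name='-' the plaintext is fed through p.stdin, while the
# passphrase itself is delivered over the hidden pty opened above.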


def _fix_logging_lock_after_fork():
    """
    HACK:
    This runs in a child process; the parent might have held some lock
    while fork() was called (and it will be released only in the parent
    process). This specifically applies to the logging module and
    results in a deadlock (if one is unlucky). "Fix" this by
    reinitializing the lock on all registered logging handlers
    just after the fork() call, until fixed upstream:

    https://bugs.python.org/issue6721
    """
    if not hasattr(logging, '_handlerList'):
        return

    # pylint: disable=protected-access
    for handler_ref in logging._handlerList:
        handler = handler_ref()
        if handler is None:
            continue
        if handler.lock:
            handler.lock = type(handler.lock)()


class ExtractWorker3(Process):
    '''Process for handling inner tar layer of backup archive'''
    # pylint: disable=too-many-instance-attributes

    def __init__(self, queue, base_dir, passphrase, encrypted,
                 progress_callback, vmproc=None,
                 compressed=False, crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
                 compression_filter=None, verify_only=False, handlers=None):
        '''Start inner tar extraction worker

        The purpose of this class is to process files extracted from the outer
        archive layer and pass them to appropriate handlers. Input files are
        given through a queue. Insert :py:obj:`QUEUE_FINISHED` or
        :py:obj:`QUEUE_ERROR` to end data processing (either cleanly,
        or forcefully).

        Handlers are given as a map filename -> (data_func, size_func),
        where data_func is called with a file-like object to process,
        and size_func is called with the file size as argument. Note that
        data_func and size_func may be called simultaneously, in different
        processes.

        :param multiprocessing.Queue queue: a queue with filenames to
            process; those files need to be given as full paths, inside
            *base_dir*
        :param str base_dir: directory where all files to process live
        :param str passphrase: passphrase to decrypt the data
        :param bool encrypted: is encryption applied?
        :param callable progress_callback: report extraction progress
        :param subprocess.Popen vmproc: process extracting the outer layer,
            given here to monitor it for failures (when it exits with a
            non-zero exit code, inner layer processing is stopped)
        :param bool compressed: is the data compressed?
        :param str crypto_algorithm: encryption algorithm, either `scrypt` or an
            algorithm supported by openssl
        :param str compression_filter: compression program, `gzip` by default
        :param bool verify_only: only verify data integrity, do not extract
        :param dict handlers: handlers for actual data
        '''
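        # A (hypothetical) handlers map could look like this -- the file names
        # and callables below are made up for illustration only:
        #
        #   handlers = {
        #       'vm1/private.img': (import_private_img, report_private_size),
        #       'vm1/firewall.xml': (import_firewall_xml, None),
        #   }
        #
        # data_func receives a file-like object with the extracted content;
        # size_func (if not None) receives the file size in bytes and may run
        # in a separate process.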
        super(ExtractWorker3, self).__init__()
        #: queue with files to extract
        self.queue = queue
        #: paths on the queue are relative to this dir
        self.base_dir = base_dir
        #: passphrase to decrypt/authenticate data
        self.passphrase = passphrase
        #: handlers for files; it should be dict filename -> (data_function,
        #  size_function),
        #  where data_function will get file-like object as the only argument
        #  and might be called in a separate process (multiprocessing.Process),
        #  and size_function will get file size (when known) in bytes
        self.handlers = handlers
        #: is the backup encrypted?
        self.encrypted = encrypted
        #: is the backup compressed?
        self.compressed = compressed
        #: what crypto algorithm is used for encryption?
        self.crypto_algorithm = crypto_algorithm
        #: only verify integrity, don't extract anything
        self.verify_only = verify_only
        #: progress
        self.blocks_backedup = 0
        #: inner tar layer extraction (subprocess.Popen instance)
        self.tar2_process = None
        #: current inner tar archive name
        self.tar2_current_file = None
        #: cat process feeding tar2_process
        self.tar2_feeder = None
        #: decompressor subprocess.Popen instance
        self.decompressor_process = None
        #: decryptor subprocess.Popen instance
        self.decryptor_process = None
        #: data import multiprocessing.Process instance
        self.import_process = None
        #: callback reporting progress to UI
        self.progress_callback = progress_callback
        #: process (subprocess.Popen instance) feeding the data into
        #  extraction tool
        self.vmproc = vmproc

        self.log = logging.getLogger('qubesadmin.backup.extract')
        self.stderr_encoding = sys.stderr.encoding or 'utf-8'
        self.tar2_stderr = []
        self.compression_filter = compression_filter

    def collect_tar_output(self):
        '''Retrieve tar stderr and handle it appropriately

        Log errors, process file size if requested.
        This uses :py:attr:`tar2_process`.
        '''
        if not self.tar2_process.stderr:
            return

        if self.tar2_process.poll() is None:
            try:
                new_lines = self.tar2_process.stderr \
                    .read(MAX_STDERR_BYTES).splitlines()
            except IOError as e:
                if e.errno == errno.EAGAIN:
                    return
                raise
        else:
            new_lines = self.tar2_process.stderr.readlines()

        new_lines = [x.decode(self.stderr_encoding) for x in new_lines]

        debug_msg = [msg for msg in new_lines if _tar_msg_re.match(msg)]
        self.log.debug('tar2_stderr: %s', '\n'.join(debug_msg))
        new_lines = [msg for msg in new_lines if not _tar_msg_re.match(msg)]
        self.tar2_stderr += new_lines

    def run(self):
        try:
            _fix_logging_lock_after_fork()
            self.__run__()
        except Exception:
            # Cleanup children
            for process in [self.decompressor_process,
                            self.decryptor_process,
                            self.tar2_process]:
                if process:
                    try:
                        process.terminate()
                    except OSError:
                        pass
                    process.wait()
            self.log.exception('ERROR')
            raise

    def handle_dir(self, dirname):
        '''Relocate files in the given directory when it's already extracted

        :param dirname: directory path to handle (relative to backup root),
            without trailing slash
        '''
        for fname, (data_func, size_func) in self.handlers.items():
            if not fname.startswith(dirname + '/'):
                continue
            if not os.path.exists(fname):
                # for example firewall.xml
                continue
            if size_func is not None:
                size_func(os.path.getsize(fname))
            with open(fname, 'rb') as input_file:
                data_func(input_file)
            os.unlink(fname)
        shutil.rmtree(dirname)

    def cleanup_tar2(self, wait=True, terminate=False):
        '''Cleanup running :py:attr:`tar2_process`

        :param wait: wait for its termination, otherwise the method exits
            early if the process is still running
        :param terminate: terminate the process if still running
        '''
        if self.tar2_process is None:
            return
        if terminate:
            if self.import_process is not None:
                self.tar2_process.terminate()
                self.import_process.terminate()
        if wait:
            self.tar2_process.wait()
            if self.import_process is not None:
                self.import_process.join()
        elif self.tar2_process.poll() is None:
            return

        self.collect_tar_output()
        if self.tar2_process.stderr:
            self.tar2_process.stderr.close()
        if self.tar2_process.returncode != 0:
            self.log.error(
                "ERROR: unable to extract files for %s, tar "
                "output:\n  %s",
                self.tar2_current_file,
                "\n  ".join(self.tar2_stderr))
        else:
            # Finished extracting the tar file
            # if that was whole-directory archive, handle
            # relocated files now
            inner_name = self.tar2_current_file.rsplit('.', 1)[0] \
                .replace(self.base_dir + '/', '')
            if os.path.basename(inner_name) == '.':
                self.handle_dir(
                    os.path.dirname(inner_name))
        self.tar2_current_file = None
        self.tar2_process = None

    def _data_import_wrapper(self, close_fds, data_func, size_func,
                             tar2_process):
        '''Close not needed file descriptors, handle output size reported
        by tar (if needed) then call data_func(tar2_process.stdout).

        This is to prevent holding write end of a pipe in subprocess,
        preventing EOF transfer.
        '''
        for fd in close_fds:
            if fd in (tar2_process.stdout.fileno(),
                      tar2_process.stderr.fileno()):
                continue
            try:
                os.close(fd)
            except OSError:
                pass

        # retrieve file size from tar's stderr; warning: we do
        # not read data from tar's stdout at this point, it will
        # hang if it tries to output file content before
        # reporting its size on stderr first
        if size_func:
            # process lines on stderr until we get file size
            # search for first file size reported by tar -
            # this is used only when extracting single-file archive, so don't
            # bother with checking file name
            # Also, this needs to be called before anything is retrieved
            # from tar stderr, otherwise the process may deadlock waiting for
            # size (at this point nothing is retrieving data from tar stdout
            # yet, so it will hang on write() when the output pipe fill up).
            while True:
                line = tar2_process.stderr.readline()
                if not line:
                    self.log.warning('EOF from tar before got file size info')
                    break
                line = line.decode()
                if _tar_msg_re.match(line):
                    self.log.debug('tar2_stderr: %s', line)
                else:
                    match = _tar_file_size_re.match(line)
                    if match:
                        file_size = int(match.groups()[0])
                        size_func(file_size)
                        break
                    self.log.warning(
                        'unexpected tar output (no file size report): %s',
                        line)

        return data_func(tar2_process.stdout)

    def feed_tar2(self, filename, input_pipe):
        '''Feed data from *filename* to *input_pipe*

        Start a cat process to do that (do not block this process). Cat
        subprocess instance will be in :py:attr:`tar2_feeder`
        '''
        assert self.tar2_feeder is None
        self.tar2_feeder = subprocess.Popen(['cat', filename],
                                            stdout=input_pipe)

    def check_processes(self, processes):
        '''Check if any process failed.

        And if so, wait for other relevant processes to cleanup.
        '''
        run_error = None
        for name, proc in processes.items():
            if proc is None:
                continue
            if isinstance(proc, Process):
                if not proc.is_alive() and proc.exitcode != 0:
                    run_error = name
                    break
            elif proc.poll():
                run_error = name
                break

        if run_error:
            if run_error == "target":
                self.collect_tar_output()
                details = "\n".join(self.tar2_stderr)
            else:
                details = "%s failed" % run_error
            if self.decryptor_process:
                self.decryptor_process.terminate()
                self.decryptor_process.wait()
                self.decryptor_process = None
            self.log.error('Error while processing \'%s\': %s',
                           self.tar2_current_file, details)
            self.cleanup_tar2(wait=True, terminate=True)

    def __run__(self):
        self.log.debug("Started sending thread")
        self.log.debug("Moving to dir %s", self.base_dir)
        os.chdir(self.base_dir)

        filename = None
        input_pipe = None
        for filename in iter(self.queue.get, None):
            if filename in (QUEUE_FINISHED, QUEUE_ERROR):
                break
            assert isinstance(filename, str)

            self.log.debug("Extracting file %s", filename)

            if filename.endswith('.000'):
                # next file
                if self.tar2_process is not None:
                    input_pipe.close()
                    self.cleanup_tar2(wait=True, terminate=False)

                inner_name = filename[:-len('.000')].replace(
                    self.base_dir + '/', '')
                redirect_stdout = None
                if os.path.basename(inner_name) == '.':
                    if (inner_name in self.handlers or
                            any(x.startswith(os.path.dirname(inner_name) + '/')
                                for x in self.handlers)):
                        tar2_cmdline = ['tar',
                                        '-%s' % ("t" if self.verify_only else "x"),
                                        inner_name]
                    else:
                        # ignore this directory
                        tar2_cmdline = None
                elif os.path.dirname(inner_name) == "dom0-home":
                    tar2_cmdline = ['cat']
                    redirect_stdout = subprocess.PIPE
                elif inner_name in self.handlers:
                    tar2_cmdline = ['tar',
                                    '-%svvO' % ("t" if self.verify_only else "x"),
                                    inner_name]
                    redirect_stdout = subprocess.PIPE
                else:
                    # no handlers for this file, ignore it
                    tar2_cmdline = None

                if tar2_cmdline is None:
                    # ignore the file
                    os.remove(filename)
                    continue

                tar_compress_cmd = None
                if self.compressed:
                    if self.compression_filter:
                        tar_compress_cmd = self.compression_filter
                    else:
                        tar_compress_cmd = DEFAULT_COMPRESSION_FILTER
                    if os.path.dirname(inner_name) == "dom0-home":
                        # Replaces 'cat' for compressed dom0-home!
                        tar2_cmdline = [tar_compress_cmd, "-d"]
                    else:
                        tar2_cmdline.insert(-1, "--use-compress-program=%s " %
                                            tar_compress_cmd)

                self.log.debug("Running command %s", str(tar2_cmdline))
                if self.encrypted:
                    # Start decrypt
                    self.decryptor_process = subprocess.Popen(
                        ["openssl", "enc",
                         "-d",
                         "-" + self.crypto_algorithm,
                         "-pass",
                         "pass:" + self.passphrase],
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE)
                    self.tar2_process = subprocess.Popen(
                        tar2_cmdline,
                        stdin=self.decryptor_process.stdout,
                        stdout=redirect_stdout,
                        stderr=subprocess.PIPE)
                    self.decryptor_process.stdout.close()
                    input_pipe = self.decryptor_process.stdin
                else:
                    self.tar2_process = subprocess.Popen(
                        tar2_cmdline,
                        stdin=subprocess.PIPE,
                        stdout=redirect_stdout,
                        stderr=subprocess.PIPE)
                    input_pipe = self.tar2_process.stdin

                self.feed_tar2(filename, input_pipe)

                if inner_name in self.handlers:
                    assert redirect_stdout is subprocess.PIPE
                    data_func, size_func = self.handlers[inner_name]
                    self.import_process = multiprocessing.Process(
                        target=self._data_import_wrapper,
                        args=([input_pipe.fileno()],
                              data_func, size_func, self.tar2_process))
                    self.import_process.start()
                    self.tar2_process.stdout.close()

                self.tar2_stderr = []
            elif not self.tar2_process:
                # Extracting of the current archive failed, skip to the next
                # archive
                os.remove(filename)
                continue
            else:
                # os.path.splitext fails to handle 'something/..000'
                (basename, ext) = self.tar2_current_file.rsplit('.', 1)
                previous_chunk_number = int(ext)
                expected_filename = basename + '.%03d' % (
                    previous_chunk_number + 1)
                if expected_filename != filename:
                    self.cleanup_tar2(wait=True, terminate=True)
                    self.log.error(
                        'Unexpected file in archive: %s, expected %s',
                        filename, expected_filename)
                    os.remove(filename)
                    continue

                self.log.debug("Releasing next chunk")
                self.feed_tar2(filename, input_pipe)

            self.tar2_current_file = filename

            self.tar2_feeder.wait()
            # check if any process failed
            processes = {
                'target': self.tar2_feeder,
                'vmproc': self.vmproc,
                'addproc': self.tar2_process,
                'data_import': self.import_process,
                'decryptor': self.decryptor_process,
            }
            self.check_processes(processes)
            self.tar2_feeder = None

            if callable(self.progress_callback):
                self.progress_callback(os.path.getsize(filename))

            # Delete the file as we don't need it anymore
            self.log.debug('Removing file %s', filename)
            os.remove(filename)

        if self.tar2_process is not None:
            input_pipe.close()
            if filename == QUEUE_ERROR:
                if self.decryptor_process:
                    self.decryptor_process.terminate()
                    self.decryptor_process.wait()
                    self.decryptor_process = None
            self.cleanup_tar2(terminate=(filename == QUEUE_ERROR))

        self.log.debug('Finished extracting thread')
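
# Illustrative driving pattern for ExtractWorker3 (a sketch only; 'tmpdir',
# 'passphrase' and 'handlers' are placeholders -- _process_qubes_xml() below
# does the same thing for real):
#
#   queue = Queue()
#   worker = ExtractWorker3(queue=queue, base_dir=tmpdir,
#                           passphrase=passphrase, encrypted=False,
#                           progress_callback=None, handlers=handlers)
#   worker.start()
#   queue.put('qubes.xml.000')
#   queue.put(QUEUE_FINISHED)
#   worker.join()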


def get_supported_hmac_algo(hmac_algorithm=None):
    '''Generate a list of supported hmac algorithms

    :param hmac_algorithm: default algorithm, if given, it is placed as a
        first element
    '''
    # Start with provided default
    if hmac_algorithm:
        yield hmac_algorithm
    if hmac_algorithm != 'scrypt':
        yield 'scrypt'
    proc = subprocess.Popen(
        'openssl list-message-digest-algorithms || '
        'openssl list -digest-algorithms',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    try:
        for algo in proc.stdout.readlines():
            algo = algo.decode('ascii')
            if '=>' in algo:
                continue
            yield algo.strip().lower()
    finally:
        proc.terminate()
        proc.wait()
        proc.stdout.close()


def get_supported_crypto_algo(crypto_algorithm=None):
    '''Generate a list of supported crypto algorithms

    :param crypto_algorithm: default algorithm, if given, it is placed as a
        first element
    '''
    # Start with provided default
    if crypto_algorithm:
        yield crypto_algorithm
    if crypto_algorithm != 'scrypt':
        yield 'scrypt'
    proc = subprocess.Popen(
        'openssl list-cipher-algorithms || '
        'openssl list -cipher-algorithms',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    try:
        for algo in proc.stdout.readlines():
            algo = algo.decode('ascii')
            if '=>' in algo:
                continue
            yield algo.strip().lower()
    finally:
        proc.terminate()
        proc.wait()
        proc.stdout.close()


class BackupRestoreOptions(object):
    '''Options for restore operation'''
    # pylint: disable=too-few-public-methods
    def __init__(self):
        #: use default NetVM if the one referenced in backup does not exist
        #  on the host
        self.use_default_netvm = True
        #: set NetVM to "none" if the one referenced in backup does not exist
        #  on the host
        self.use_none_netvm = False
        #: set template to default if the one referenced in backup does not
        #  exist on the host
        self.use_default_template = True
        #: use default kernel if the one referenced in backup does not exist
        #  on the host
        self.use_default_kernel = True
        #: restore dom0 home
        self.dom0_home = True
        #: restore dom0 home even if username is different
        self.ignore_username_mismatch = False
        #: do not restore data, only verify backup integrity
        self.verify_only = False
        #: automatically rename VM during restore, when it would conflict
        #  with existing one
        self.rename_conflicting = True
        #: list of VM names to exclude
        self.exclude = []
        #: restore VMs into selected storage pool
        self.override_pool = None
        #: ignore size limit calculated from backup metadata
        self.ignore_size_limit = False


class BackupRestore(object):
    """Usage:

    >>> restore_op = BackupRestore(...)
    >>> # adjust restore_op.options here
    >>> restore_info = restore_op.get_restore_info()
    >>> # manipulate restore_info to select VMs to restore here
    >>> restore_op.restore_do(restore_info)
    """

    class VMToRestore(object):
        '''Information about a single VM to be restored'''
        # pylint: disable=too-few-public-methods
        #: VM excluded from restore by user
        EXCLUDED = object()
        #: VM with such name already exists on the host
        ALREADY_EXISTS = object()
        #: NetVM used by the VM does not exist on the host
        MISSING_NETVM = object()
        #: TemplateVM used by the VM does not exist on the host
        MISSING_TEMPLATE = object()
        #: Kernel used by the VM does not exist on the host
        MISSING_KERNEL = object()

        def __init__(self, vm):
            assert isinstance(vm, BackupVM)
            self.vm = vm
            self.name = vm.name
            self.subdir = vm.backup_path
            self.size = vm.size
            self.problems = set()
            self.template = vm.template
            if vm.properties.get('netvm', None):
                self.netvm = vm.properties['netvm']
            else:
                self.netvm = None
            self.orig_template = None
            self.restored_vm = None

        @property
        def good_to_go(self):
            '''Is the VM ready for restore?'''
            return len(self.problems) == 0

    class Dom0ToRestore(VMToRestore):
        '''Information about dom0 home to restore'''
        # pylint: disable=too-few-public-methods
        #: backup was performed on system with different dom0 username
        USERNAME_MISMATCH = object()

        def __init__(self, vm, subdir=None):
            super(BackupRestore.Dom0ToRestore, self).__init__(vm)
            if subdir:
                self.subdir = subdir
                self.username = os.path.basename(subdir)

    def __init__(self, app, backup_location, backup_vm, passphrase,
                 location_is_service=False, force_compression_filter=None):
        super(BackupRestore, self).__init__()

        #: qubes.Qubes instance
        self.app = app

        #: options how the backup should be restored
        self.options = BackupRestoreOptions()

        #: VM from which backup should be retrieved
        self.backup_vm = backup_vm
        if backup_vm and backup_vm.name == 'dom0':
            self.backup_vm = None

        #: backup path, inside VM pointed by :py:attr:`backup_vm`
        self.backup_location = backup_location

        #: use alternative qrexec service to retrieve backup data, instead of
        #: ``qubes.Restore`` with *backup_location* given on stdin
        self.location_is_service = location_is_service

        #: force using specific application for (de)compression, instead of
        #: the one named in the backup header
        self.force_compression_filter = force_compression_filter

        #: passphrase protecting backup integrity and optionally decryption
        self.passphrase = passphrase

        #: temporary directory used to extract the data before moving to the
        #  final location
        self.tmpdir = tempfile.mkdtemp(prefix="restore", dir="/var/tmp")

        #: list of processes (Popen objects) to kill on cancel
        self.processes_to_kill_on_cancel = []

        #: is the backup operation canceled
        self.canceled = False

        #: report restore progress, called with one argument - percents of
        #  data restored
        # FIXME: convert to float [0,1]
        self.progress_callback = None

        self.log = logging.getLogger('qubesadmin.backup')

        #: basic information about the backup
        self.header_data = self._retrieve_backup_header()

        #: VMs included in the backup
        self.backup_app = self._process_qubes_xml()

    def _start_retrieval_process(self, filelist, limit_count, limit_bytes):
        """Retrieve backup stream and extract it to :py:attr:`tmpdir`

        :param filelist: list of files to extract; listing a directory name
            will extract the whole directory; use an empty list to extract the
            whole archive
        :param limit_count: maximum number of files to extract
        :param limit_bytes: maximum size of extracted data
        :return: a tuple of (Popen object of started process, file-like
            object for reading extracted files list, file-like object for
            reading errors)
        """
        vmproc = None
        if self.backup_vm is not None:
            # If APPVM, STDOUT is a PIPE
            if self.location_is_service:
                vmproc = self.backup_vm.run_service(self.backup_location)
            else:
                vmproc = self.backup_vm.run_service('qubes.Restore')
                vmproc.stdin.write(
                    (self.backup_location.replace("\r", "").replace("\n", "")
                     + "\n").encode())
                vmproc.stdin.flush()
            # Send to tar2qfile the VMs that should be extracted
            vmproc.stdin.write((" ".join(filelist) + "\n").encode())
            vmproc.stdin.flush()
            self.processes_to_kill_on_cancel.append(vmproc)

            backup_stdin = vmproc.stdout
            if isinstance(self.app, qubesadmin.app.QubesRemote):
                qfile_unpacker_path = '/usr/lib/qubes/qfile-unpacker'
            else:
                qfile_unpacker_path = '/usr/libexec/qubes/qfile-dom0-unpacker'
            # keep at least 500M free for decryption of a previous chunk
            tar1_command = [qfile_unpacker_path,
                            str(os.getuid()), self.tmpdir, '-v',
                            '-w', str(500 * 1024 * 1024)]
        else:
            backup_stdin = open(self.backup_location, 'rb')

            tar1_command = ['tar',
                            '-ixv',
                            '--occurrence=1',
                            '-C', self.tmpdir] + filelist

        tar1_env = os.environ.copy()
        tar1_env['UPDATES_MAX_BYTES'] = str(limit_bytes)
        tar1_env['UPDATES_MAX_FILES'] = str(limit_count)
        self.log.debug("Run command %s", str(tar1_command))
        command = subprocess.Popen(
            tar1_command,
            stdin=backup_stdin,
            stdout=vmproc.stdin if vmproc else subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=tar1_env)
        backup_stdin.close()
        self.processes_to_kill_on_cancel.append(command)

        # qfile-dom0-unpacker outputs the file list on stderr (and has stdout
        # connected to the VM), while tar outputs the file list on stdout
        if self.backup_vm:
            filelist_pipe = command.stderr
            # let qfile-dom0-unpacker hold the only open FD to the write end of
            # the pipe, otherwise qrexec-client will not receive EOF when
            # qfile-dom0-unpacker terminates
            vmproc.stdin.close()
        else:
            filelist_pipe = command.stdout

        if self.backup_vm:
            error_pipe = vmproc.stderr
        else:
            error_pipe = command.stderr
        return command, filelist_pipe, error_pipe

    def _verify_hmac(self, filename, hmacfile, algorithm=None):
        '''Verify hmac of a file using given algorithm.

        If algorithm is not specified, use the one from backup header
        (:py:attr:`header_data`).

        Raise :py:exc:`QubesException` on failure, return :py:obj:`True` on
        success.

        'scrypt' algorithm is supported only for header file; hmac file is
        encrypted (and integrity protected) version of plain header.

        :param filename: path to file to be verified
        :param hmacfile: path to hmac file for *filename*
        :param algorithm: override algorithm
        '''
        def load_hmac(hmac_text):
            '''Parse hmac output by openssl.

            Return just hmac, without filename and other metadata.
            '''
            if any(ord(x) not in range(128) for x in hmac_text):
                raise QubesException(
                    "Invalid content of {}".format(hmacfile))
            hmac_text = hmac_text.strip().split("=")
            if len(hmac_text) > 1:
                hmac_text = hmac_text[1].strip()
            else:
                raise QubesException(
                    "ERROR: invalid hmac file content")
            return hmac_text

        if algorithm is None:
            algorithm = self.header_data.hmac_algorithm
        passphrase = self.passphrase.encode('utf-8')
        self.log.debug("Verifying file %s", filename)

        if os.stat(os.path.join(self.tmpdir, hmacfile)).st_size > \
                HMAC_MAX_SIZE:
            raise QubesException('HMAC file {} too large'.format(
                hmacfile))

        if hmacfile != filename + ".hmac":
            raise QubesException(
                "ERROR: expected hmac for {}, but got {}".
                format(filename, hmacfile))

        if algorithm == 'scrypt':
            # in case of 'scrypt' _verify_hmac is only used for backup header
            assert filename == HEADER_FILENAME
            self._verify_and_decrypt(hmacfile, HEADER_FILENAME + '.dec')
            f_name = os.path.join(self.tmpdir, filename)
            with open(f_name, 'rb') as f_one:
                with open(f_name + '.dec', 'rb') as f_two:
                    if f_one.read() != f_two.read():
                        raise QubesException(
                            'Invalid hmac on {}'.format(filename))
                    return True

        with open(os.path.join(self.tmpdir, filename), 'rb') as f_input:
            hmac_proc = subprocess.Popen(
                ["openssl", "dgst", "-" + algorithm, "-hmac", passphrase],
                stdin=f_input,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            hmac_stdout, hmac_stderr = hmac_proc.communicate()

        if hmac_stderr:
            raise QubesException(
                "ERROR: verify file {0}: {1}".format(filename, hmac_stderr))
        self.log.debug("Loading hmac for file %s", filename)
        try:
            with open(os.path.join(self.tmpdir, hmacfile), 'r',
                      encoding='ascii') as f_hmac:
                hmac = load_hmac(f_hmac.read())
        except UnicodeDecodeError as err:
            raise QubesException('Cannot load hmac file: ' + str(err))
        if hmac and load_hmac(hmac_stdout.decode('ascii')) == hmac:
            os.unlink(os.path.join(self.tmpdir, hmacfile))
            self.log.debug(
                "File verification OK -> Sending file %s", filename)
            return True
        raise QubesException(
            "ERROR: invalid hmac for file {0}: {1}. "
            "Is the passphrase correct?".
            format(filename, load_hmac(hmac_stdout.decode('ascii'))))

    def _verify_and_decrypt(self, filename, output=None):
        '''Handle scrypt-wrapped file

        Decrypt the file, and verify its integrity - both tasks handled by
        'scrypt' tool. Filename (without extension) is also validated.

        :param filename: Input file name (relative to :py:attr:`tmpdir`),
            needs to have `.enc` or `.hmac` extension
        :param output: Output file name (relative to :py:attr:`tmpdir`),
            use :py:obj:`None` to use *filename* without extension
        :return: *filename* without extension
        '''
        assert filename.endswith('.enc') or filename.endswith('.hmac')
        fullname = os.path.join(self.tmpdir, filename)
        (origname, _) = os.path.splitext(filename)
        if output:
            fulloutput = os.path.join(self.tmpdir, output)
        else:
            fulloutput = os.path.join(self.tmpdir, origname)
        if origname == HEADER_FILENAME:
            passphrase = u'{filename}!{passphrase}'.format(
                filename=origname,
                passphrase=self.passphrase)
        else:
            passphrase = u'{backup_id}!{filename}!{passphrase}'.format(
                backup_id=self.header_data.backup_id,
                filename=origname,
                passphrase=self.passphrase)
        try:
            p = launch_scrypt('dec', fullname, fulloutput, passphrase)
        except OSError as err:
            raise QubesException('failed to decrypt {}: {!s}'.format(
                fullname, err))
        (_, stderr) = p.communicate()
        if hasattr(p, 'pty'):
            p.pty.close()
        if p.returncode != 0:
            os.unlink(fulloutput)
            raise QubesException('failed to decrypt {}: {}'.format(
                fullname, stderr))
        # encrypted file is no longer needed
        os.unlink(fullname)
        return origname
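
    # Note on the scrypt passphrase built in _verify_and_decrypt() above: for
    # the backup header it is '<filename>!<passphrase>', for every other file
    # it is '<backup_id>!<filename>!<passphrase>' (filename taken without the
    # .enc/.hmac extension). For example, a hypothetical chunk
    # 'vm1/private.img.000.enc' in a backup with id '20161020T123455-1234'
    # would use '20161020T123455-1234!vm1/private.img.000!<passphrase>'.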

    def _retrieve_backup_header_files(self, files, allow_none=False):
        '''Retrieve backup header.

        Start retrieval process (possibly involving network access from
        another VM). Returns a collection of retrieved file paths.
        '''
        (retrieve_proc, filelist_pipe, error_pipe) = \
            self._start_retrieval_process(
                files, len(files), 1024 * 1024)
        filelist = filelist_pipe.read()
        filelist_pipe.close()
        retrieve_proc_returncode = retrieve_proc.wait()
        if retrieve_proc in self.processes_to_kill_on_cancel:
            self.processes_to_kill_on_cancel.remove(retrieve_proc)
        extract_stderr = error_pipe.read(MAX_STDERR_BYTES)
        error_pipe.close()

        # wait for other processes (if any)
        for proc in self.processes_to_kill_on_cancel:
            if proc.wait() != 0:
                raise QubesException(
                    "Backup header retrieval failed (exit code {})".format(
                        proc.wait())
                )

        if retrieve_proc_returncode != 0:
            if not filelist and 'Not found in archive' in extract_stderr:
                if allow_none:
                    return None
                raise QubesException(
                    "unable to read the qubes backup file {0} ({1}): {2}".
                    format(
                        self.backup_location,
                        retrieve_proc.wait(),
                        extract_stderr
                    ))
        actual_files = filelist.decode('ascii').splitlines()
        if sorted(actual_files) != sorted(files):
            raise QubesException(
                'unexpected files in archive: got {!r}, expected {!r}'.format(
                    actual_files, files
                ))
        for fname in files:
            if not os.path.exists(os.path.join(self.tmpdir, fname)):
                if allow_none:
                    return None
                raise QubesException(
                    'Unable to retrieve file {} from backup {}: {}'.format(
                        fname, self.backup_location, extract_stderr
                    )
                )
        return files

    def _retrieve_backup_header(self):
        """Retrieve backup header and qubes.xml. Only the backup header is
        analyzed, qubes.xml is left as-is
        (not even verified/decrypted/uncompressed)

        :return header_data
        :rtype :py:class:`BackupHeader`
        """
        if not self.backup_vm and os.path.exists(
                os.path.join(self.backup_location, 'qubes.xml')):
            # backup format version 1 doesn't have header
            header_data = BackupHeader()
            header_data.version = 1
            return header_data

        header_files = self._retrieve_backup_header_files(
            ['backup-header', 'backup-header.hmac'], allow_none=True)

        if not header_files:
            # R2-Beta3 didn't have backup header, so if none is found,
            # assume it's version=2 and use values present at that time
            header_data = BackupHeader(
                version=2,
                # set this value explicitly, because it is what format_version
                # 2 used
                hmac_algorithm='SHA1',
                crypto_algorithm='aes-256-cbc',
                # TODO: set encrypted to something...
            )
        else:
            filename = HEADER_FILENAME
            hmacfile = HEADER_FILENAME + '.hmac'
            self.log.debug("Got backup header and hmac: %s, %s",
                           filename, hmacfile)

            file_ok = False
            hmac_algorithm = DEFAULT_HMAC_ALGORITHM
            for hmac_algo in get_supported_hmac_algo(hmac_algorithm):
                try:
                    if self._verify_hmac(filename, hmacfile, hmac_algo):
                        file_ok = True
                        break
                except QubesException as err:
                    self.log.debug(
                        'Failed to verify %s using %s: %r',
                        hmacfile, hmac_algo, err)
                    # Ignore exception here, try the next algo
            if not file_ok:
                raise QubesException(
                    "Corrupted backup header (hmac verification "
                    "failed). Is the password correct?")
            filename = os.path.join(self.tmpdir, filename)
            with open(filename, 'rb') as f_header:
                header_data = BackupHeader(
                    f_header.read(),
                    compression_filter=self.force_compression_filter)
            os.unlink(filename)

        return header_data

    def _start_inner_extraction_worker(self, queue, handlers):
        """Start a worker process, extracting the inner layer of the backup
        archive, and extract files to :py:attr:`tmpdir`.
        End the data by pushing QUEUE_FINISHED or QUEUE_ERROR to the queue.

        :param queue :py:class:`Queue` object to handle files from
        """
        # Setup worker to extract encrypted data chunks to the restore dirs
        # Create the process here to pass it options extracted from
        # backup header
        extractor_params = {
            'queue': queue,
            'base_dir': self.tmpdir,
            'passphrase': self.passphrase,
            'encrypted': self.header_data.encrypted,
            'compressed': self.header_data.compressed,
            'crypto_algorithm': self.header_data.crypto_algorithm,
            'verify_only': self.options.verify_only,
            'progress_callback': self.progress_callback,
            'handlers': handlers,
        }
        self.log.debug(
            'Starting extraction worker in %s, file handlers map: %s',
            self.tmpdir, repr(handlers))
        format_version = self.header_data.version
        if format_version in [3, 4]:
            extractor_params['compression_filter'] = \
                self.header_data.compression_filter
            if format_version == 4:
                # encryption already handled
                extractor_params['encrypted'] = False
            extract_proc = ExtractWorker3(**extractor_params)
        else:
            raise NotImplementedError(
                "Backup format version %d not supported" % format_version)
        extract_proc.start()
        return extract_proc

    @staticmethod
    def _save_qubes_xml(path, stream):
        '''Handler for qubes.xml.000 content - just save the data to a file'''
        with open(path, 'wb') as f_qubesxml:
            f_qubesxml.write(stream.read())

    def _process_qubes_xml(self):
        """Verify, unpack and load qubes.xml. Possibly convert its format if
        necessary. It expects that :py:attr:`header_data` is already populated,
        and :py:meth:`retrieve_backup_header` was called.
        """
        if self.header_data.version == 1:
            raise NotImplementedError('Backup format version 1 not supported')
        if self.header_data.version in [2, 3]:
            self._retrieve_backup_header_files(
                ['qubes.xml.000', 'qubes.xml.000.hmac'])
            self._verify_hmac("qubes.xml.000", "qubes.xml.000.hmac")
        else:
            self._retrieve_backup_header_files(['qubes.xml.000.enc'])
            self._verify_and_decrypt('qubes.xml.000.enc')

        queue = Queue()
        queue.put("qubes.xml.000")
        queue.put(QUEUE_FINISHED)

        qubes_xml_path = os.path.join(self.tmpdir, 'qubes-restored.xml')
        handlers = {
            'qubes.xml': (
                functools.partial(self._save_qubes_xml, qubes_xml_path),
                None)
        }
        extract_proc = self._start_inner_extraction_worker(queue, handlers)
        extract_proc.join()
        if extract_proc.exitcode != 0:
            raise QubesException(
                "unable to extract the qubes backup. "
                "Check extracting process errors.")

        if self.header_data.version in [2, 3]:
            backup_app = Core2Qubes(qubes_xml_path)
        elif self.header_data.version in [4]:
            backup_app = Core3Qubes(qubes_xml_path)
        else:
            raise QubesException(
                'Unsupported qubes.xml format version: {}'.format(
                    self.header_data.version))
        # Not needed anymore - all the data stored in backup_app
        os.unlink(qubes_xml_path)
        return backup_app

    def _restore_vm_data(self, vms_dirs, vms_size, handlers):
        '''Restore data of VMs

        :param vms_dirs: list of directories to extract (skip others)
        :param vms_size: expected size (abort if the source stream exceeds
            this value)
        :param handlers: handlers for restored files - see
            :py:class:`ExtractWorker3` for details
        '''
        # Currently each VM consists of at most 7 archives (count
        # file_to_backup calls in backup_prepare()), but add some safety
        # margin for further extensions. Each archive is divided into 100MB
        # chunks. Additionally each file has its own hmac file. So assume an
        # upper limit of 2*(10*COUNT_OF_VMS+TOTAL_SIZE/100MB)
        limit_count = str(2 * (10 * len(vms_dirs) +
                               int(vms_size / (100 * 1024 * 1024))))
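        # Worked example (illustrative numbers): restoring 3 VM directories
        # totalling 10 GiB gives
        #   2 * (10 * 3 + int(10 * 1024**3 / (100 * 1024**2)))
        #   = 2 * (30 + 102) = 264 files allowed,
        # a generous margin over the real count of chunks plus hmac files.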

        if self.options.ignore_size_limit:
            limit_count = '0'
            vms_size = 0
        self.log.debug("Working in temporary dir: %s", self.tmpdir)
        self.log.info("Extracting data: %s to restore", size_to_human(vms_size))

        # retrieve backup from the backup stream (either VM, or dom0 file)
        (retrieve_proc, filelist_pipe, error_pipe) = \
            self._start_retrieval_process(
                vms_dirs, limit_count, vms_size)

        to_extract = Queue()

        # extract data retrieved by retrieve_proc
        extract_proc = self._start_inner_extraction_worker(
            to_extract, handlers)

        try:
            filename = None
            hmacfile = None
            nextfile = None
            while True:
                if self.canceled:
                    break
                if not extract_proc.is_alive():
                    retrieve_proc.terminate()
                    retrieve_proc.wait()
                    if retrieve_proc in self.processes_to_kill_on_cancel:
                        self.processes_to_kill_on_cancel.remove(retrieve_proc)
                    # wait for other processes (if any)
                    for proc in self.processes_to_kill_on_cancel:
                        proc.wait()
                    break
                if nextfile is not None:
                    filename = nextfile
                else:
                    filename = filelist_pipe.readline().decode('ascii').strip()

                self.log.debug("Getting new file: %s", filename)

                if not filename or filename == "EOF":
                    break

                # if reading the archive directly with tar, wait for the next
                # filename - tar prints a filename before processing it, so
                # waiting for the next one ensures the whole file was extracted
                if not self.backup_vm:
                    nextfile = filelist_pipe.readline().decode('ascii').strip()

                if self.header_data.version in [2, 3]:
                    if not self.backup_vm:
                        hmacfile = nextfile
                        nextfile = filelist_pipe.readline().\
                            decode('ascii').strip()
                    else:
                        hmacfile = filelist_pipe.readline().\
                            decode('ascii').strip()

                    if self.canceled:
                        break

                    self.log.debug("Getting hmac: %s", hmacfile)
                    if not hmacfile or hmacfile == "EOF":
                        # Premature end of archive, either tar1_command or
                        # vmproc exited with an error
                        break
                else:  # self.header_data.version == 4
                    if not filename.endswith('.enc'):
                        raise qubesadmin.exc.QubesException(
                            'Invalid file extension found in archive: {}'.
                            format(filename))

                if not any(filename.startswith(x) for x in vms_dirs):
                    self.log.debug("Ignoring VM not selected for restore")
                    os.unlink(os.path.join(self.tmpdir, filename))
                    if hmacfile:
                        os.unlink(os.path.join(self.tmpdir, hmacfile))
                    continue

                if self.header_data.version in [2, 3]:
                    self._verify_hmac(filename, hmacfile)
                else:
                    # _verify_and_decrypt will write output to a file with the
                    # '.enc' extension cut off. This is safe because:
                    # - the `scrypt` tool will overwrite its output, so if the
                    #   file was already there (received from the VM), it will
                    #   be removed
                    # - incoming archive extraction will refuse to overwrite an
                    #   existing file, so if `scrypt` already created one,
                    #   it cannot be manipulated by the VM
                    # - when the file is retrieved from the VM, it appears in
                    #   its final form - once it's visible, the VM no longer
                    #   has influence over its content
                    #
                    # This all means that if the file was correctly verified
                    # and decrypted, we will surely access the right file
                    filename = self._verify_and_decrypt(filename)

                if not self.options.verify_only:
                    to_extract.put(os.path.join(self.tmpdir, filename))
                else:
                    os.unlink(os.path.join(self.tmpdir, filename))

            if self.canceled:
                raise BackupCanceledError("Restore canceled",
                                          tmpdir=self.tmpdir)

            if retrieve_proc.wait() != 0:
                if retrieve_proc.returncode == errno.EDQUOT:
                    raise QubesException(
                        'retrieved backup size exceeds expected size, if you '
                        'believe this is ok, use --ignore-size-limit option')
                raise QubesException(
                    "unable to read the qubes backup file {} ({}): {}"
                    .format(self.backup_location,
                            retrieve_proc.returncode,
                            error_pipe.read(MAX_STDERR_BYTES)))
            # wait for other processes (if any)
            for proc in self.processes_to_kill_on_cancel:
                proc.wait()
                if proc.returncode != 0:
                    raise QubesException(
                        "Backup completed, "
                        "but the VM sending it reported an error (exit code {})"
                        .format(proc.returncode))

            if filename and filename != "EOF":
                raise QubesException(
                    "Premature end of archive, the last file was %s" % filename)
        except:
            to_extract.put(QUEUE_ERROR)
            extract_proc.join()
            raise
        else:
            to_extract.put(QUEUE_FINISHED)
        finally:
            error_pipe.close()
            filelist_pipe.close()

        self.log.debug("Waiting for the extraction process to finish...")
        extract_proc.join()
        self.log.debug("Extraction process finished with code: %s",
                       extract_proc.exitcode)
        if extract_proc.exitcode != 0:
            raise QubesException(
                "unable to extract the qubes backup. "
                "Check extracting process errors.")

    def new_name_for_conflicting_vm(self, orig_name, restore_info):
        '''Generate new name for conflicting VM

        Add a number suffix, until the name is unique. If no unique name can
        be found using this strategy, return :py:obj:`None`
        '''
        number = 1
        if len(orig_name) > 29:
            orig_name = orig_name[0:29]
        new_name = orig_name
        while (new_name in restore_info.keys() or
               new_name in [x.name for x in restore_info.values()] or
               new_name in self.app.domains):
            new_name = str('{}{}'.format(orig_name, number))
            number += 1
            if number == 100:
                # give up
                return None
        return new_name
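
    # Illustrative example: with a VM named "work" already present on the
    # host, new_name_for_conflicting_vm('work', restore_info) tries "work1",
    # "work2", ... and returns the first unused name, or None after 99
    # numbered attempts.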

    def restore_info_verify(self, restore_info):
        '''Verify restore info - validate VM dependencies, name conflicts
        etc.
        '''
        for vm in restore_info.keys():
            if vm in ['dom0']:
                continue

            vm_info = restore_info[vm]
            assert isinstance(vm_info, self.VMToRestore)

            vm_info.problems.clear()
            if vm in self.options.exclude:
                vm_info.problems.add(self.VMToRestore.EXCLUDED)

            if not self.options.verify_only and \
                    vm_info.name in self.app.domains:
                if self.options.rename_conflicting:
                    new_name = self.new_name_for_conflicting_vm(
                        vm, restore_info
                    )
                    if new_name is not None:
                        vm_info.name = new_name
                    else:
                        vm_info.problems.add(self.VMToRestore.ALREADY_EXISTS)
                else:
                    vm_info.problems.add(self.VMToRestore.ALREADY_EXISTS)

            # check template
            if vm_info.template:
                present_on_host = False
                if vm_info.template in self.app.domains:
                    host_tpl = self.app.domains[vm_info.template]
                    if vm_info.vm.klass == 'DispVM':
                        present_on_host = (
                            getattr(host_tpl, 'template_for_dispvms', False))
                    else:
                        present_on_host = host_tpl.klass == 'TemplateVM'

                present_in_backup = False
                if vm_info.template in restore_info:
                    bak_tpl = restore_info[vm_info.template]
                    if bak_tpl.good_to_go:
                        if vm_info.vm.klass == 'DispVM':
                            present_in_backup = (
                                bak_tpl.vm.properties.get(
                                    'template_for_dispvms', False))
                        else:
                            present_in_backup = (
                                bak_tpl.vm.klass == 'TemplateVM')

                self.log.debug(
                    "vm=%s template=%s on_host=%s in_backup=%s",
                    vm_info.name, vm_info.template,
                    present_on_host, present_in_backup)

                if not present_on_host and not present_in_backup:
                    if vm_info.vm.klass == 'DispVM':
                        default_template = self.app.default_dispvm
                    else:
                        default_template = self.app.default_template

                    if (self.options.use_default_template
                            and default_template is not None):
                        if vm_info.orig_template is None:
                            vm_info.orig_template = vm_info.template
                        vm_info.template = default_template.name
                        self.log.debug(
                            "vm=%s orig_template=%s -> default_template=%s",
                            vm_info.name, vm_info.orig_template,
                            default_template.name)
                    else:
                        vm_info.problems.add(self.VMToRestore.MISSING_TEMPLATE)

            # check netvm
            if vm_info.vm.properties.get('netvm', None) is not None:
                netvm_name = vm_info.netvm

                try:
                    netvm_on_host = self.app.domains[netvm_name]
                except KeyError:
                    netvm_on_host = None

                present_on_host = (netvm_on_host is not None
                                   and netvm_on_host.provides_network)
                present_in_backup = (netvm_name in restore_info.keys() and
                                     restore_info[netvm_name].good_to_go and
                                     restore_info[netvm_name].vm.properties.get(
                                         'provides_network', False))
                if not present_on_host and not present_in_backup:
                    if self.options.use_default_netvm:
                        del vm_info.vm.properties['netvm']
                    elif self.options.use_none_netvm:
                        vm_info.netvm = None
                    else:
                        vm_info.problems.add(self.VMToRestore.MISSING_NETVM)

        return restore_info

    def get_restore_info(self):
        '''Get restore info

        Return information about what is included in the backup.
        This dictionary can be adjusted to select which VMs should be
        restored.
        '''
        # Format versions:
        # 1 - Qubes R1, Qubes R2 beta1, beta2
        # 2 - Qubes R2 beta3+
        # 3 - Qubes R2+
        # 4 - Qubes R4+

        vms_to_restore = {}

        for vm in self.backup_app.domains.values():
            if vm.klass == 'AdminVM':
                # Handle dom0 as special case later
                continue
            if vm.included_in_backup:
                self.log.debug("%s is included in backup", vm.name)

                vms_to_restore[vm.name] = self.VMToRestore(vm)

                if vm.template is not None:
                    templatevm_name = vm.template
                    vms_to_restore[vm.name].template = templatevm_name

        vms_to_restore = self.restore_info_verify(vms_to_restore)

        # ...and dom0 home
        if self.options.dom0_home and \
                self.backup_app.domains['dom0'].included_in_backup:
            vm = self.backup_app.domains['dom0']
            vms_to_restore['dom0'] = self.Dom0ToRestore(vm,
                self.backup_app.domains['dom0'].backup_path)
            try:
                local_user = grp.getgrnam('qubes').gr_mem[0]
            except KeyError:
                # if no qubes group is present, assume username matches
                local_user = vms_to_restore['dom0'].username
            if vms_to_restore['dom0'].username != local_user:
                if not self.options.ignore_username_mismatch:
                    vms_to_restore['dom0'].problems.add(
                        self.Dom0ToRestore.USERNAME_MISMATCH)

        return vms_to_restore
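
    # Illustrative shape of the returned mapping (names and repr are
    # hypothetical):
    #   {'work': <VMToRestore name=work template=fedora-36>,
    #    'sys-net': <VMToRestore name=sys-net>,
    #    'dom0': <Dom0ToRestore username=user>}
    # Callers may adjust entries (rename, exclude) before calling restore_do().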

    @staticmethod
    def get_restore_summary(restore_info):
        '''Return an ASCII-formatted table with restore info summary'''
        fields = {
            "name": {'func': lambda vm: vm.name},
            "type": {'func': lambda vm: vm.klass},
            "template": {'func': lambda vm:
                'n/a' if vm.template is None else vm.template},
            "netvm": {'func': lambda vm:
                '(default)' if 'netvm' not in vm.properties else
                '-' if vm.properties['netvm'] is None else
                vm.properties['netvm']},
            "label": {'func': lambda vm: vm.label},
        }

        fields_to_display = ['name', 'type', 'template',
                             'netvm', 'label']

        # First calculate the maximum width of each field we want to display
        total_width = 0
        for field in fields_to_display:
            fields[field]['max_width'] = len(field)
            for vm_info in restore_info.values():
                if vm_info.vm:
                    # noinspection PyUnusedLocal
                    field_len = len(str(fields[field]["func"](vm_info.vm)))
                    if field_len > fields[field]['max_width']:
                        fields[field]['max_width'] = field_len
            total_width += fields[field]['max_width']

        summary = ""
        summary += "The following VMs are included in the backup:\n"
        summary += "\n"

        # Display the header
        for field in fields_to_display:
            # noinspection PyTypeChecker
            fmt = "{{0:-^{0}}}-+".format(fields[field]["max_width"] + 1)
            summary += fmt.format('-')
        summary += "\n"
        for field in fields_to_display:
            # noinspection PyTypeChecker
            fmt = "{{0:>{0}}} |".format(fields[field]["max_width"] + 1)
            summary += fmt.format(field)
        summary += "\n"
        for field in fields_to_display:
            # noinspection PyTypeChecker
            fmt = "{{0:-^{0}}}-+".format(fields[field]["max_width"] + 1)
            summary += fmt.format('-')
        summary += "\n"

        for vm_info in restore_info.values():
            assert isinstance(vm_info, BackupRestore.VMToRestore)
            # Skip non-VM here
            if not vm_info.vm:
                continue
            # noinspection PyUnusedLocal
            summary_line = ""
            for field in fields_to_display:
                # noinspection PyTypeChecker
                fmt = "{{0:>{0}}} |".format(fields[field]["max_width"] + 1)
                summary_line += fmt.format(fields[field]["func"](vm_info.vm))

            if BackupRestore.VMToRestore.EXCLUDED in vm_info.problems:
                summary_line += " <-- Excluded from restore"
            elif BackupRestore.VMToRestore.ALREADY_EXISTS in vm_info.problems:
                summary_line += \
                    " <-- A VM with the same name already exists on the host!"
            elif BackupRestore.VMToRestore.MISSING_TEMPLATE in \
                    vm_info.problems:
                summary_line += " <-- No matching template on the host " \
                                "or in the backup found!"
            elif BackupRestore.VMToRestore.MISSING_NETVM in \
                    vm_info.problems:
                summary_line += " <-- No matching netvm on the host " \
                                "or in the backup found!"
            elif vm_info.name == "dom0" and \
                    BackupRestore.Dom0ToRestore.USERNAME_MISMATCH in \
                    restore_info['dom0'].problems:
                summary_line += " <-- username in backup and dom0 mismatch"
            else:
                if vm_info.template != vm_info.vm.template:
                    summary_line += " <-- Template change to '{}'".format(
                        vm_info.template)
                if vm_info.name != vm_info.vm.name:
                    summary_line += " <-- Will be renamed to '{}'".format(
                        vm_info.name)

            summary += summary_line + "\n"

        return summary
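
    # Rough illustration of the summary output (column widths adjust to the
    # longest value in each column; names are hypothetical):
    #
    #   ---------+-------+-----------+-----------+--------+
    #       name |  type |  template |     netvm |  label |
    #   ---------+-------+-----------+-----------+--------+
    #       work | AppVM | fedora-36 | (default) |   blue |
    #   personal | AppVM | fedora-36 |   sys-net | yellow | <-- Excluded from restore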

    @staticmethod
    def _templates_first(vms):
        '''Sort templates before other VM types'''
        def key_function(instance):
            '''Key function for :py:func:`sorted`'''
            if isinstance(instance, BackupVM):
                if instance.klass == 'TemplateVM':
                    return 0
                if instance.properties.get('template_for_dispvms', False):
                    return 1
                return 2
            if hasattr(instance, 'vm'):
                return key_function(instance.vm)
            return 9
        return sorted(vms, key=key_function)
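
    # Illustrative ordering: for a backup containing work (AppVM),
    # fedora-36 (TemplateVM) and dvm-tpl (AppVM with
    # template_for_dispvms=True), _templates_first yields
    # [fedora-36, dvm-tpl, work], so every template is created before the
    # VMs that may depend on it.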

    def _handle_dom0(self, stream):
        '''Extract dom0 home'''
        try:
            local_user = grp.getgrnam('qubes').gr_mem[0]
            home_dir = pwd.getpwnam(local_user).pw_dir
        except KeyError:
            home_dir = os.path.expanduser('~')
            local_user = getpass.getuser()
        restore_home_backupdir = "home-restore-{0}".format(
            time.strftime("%Y-%m-%d-%H%M%S"))

        self.log.info("Restoring home of user '%s' to '%s' directory...",
                      local_user, restore_home_backupdir)
        os.mkdir(os.path.join(home_dir, restore_home_backupdir))
        tar3_cmdline = ['tar', '-C',
                        os.path.join(home_dir, restore_home_backupdir), '-x']
        retcode = subprocess.call(tar3_cmdline, stdin=stream)
        if retcode != 0:
            raise QubesException("Inner tar error for dom0-home")
        retcode = subprocess.call(
            ['sudo', 'chown', '-R', local_user,
             os.path.join(home_dir, restore_home_backupdir)])
        if retcode != 0:
            self.log.error("*** Error while setting restore directory owner")

    def _handle_appmenus_list(self, vm, stream):
        '''Handle whitelisted-appmenus.list file'''
        try:
            subprocess.check_call(
                ['qvm-appmenus', '--set-whitelist=-', vm.name],
                stdin=stream)
        except (subprocess.CalledProcessError, FileNotFoundError):
            self.log.error('Failed to set application list for %s', vm.name)

    def _handle_volume_data(self, vm, volume, stream):
        '''Wrap volume data import with logging'''
        try:
            volume.import_data(stream)
        except Exception as err:  # pylint: disable=broad-except
            self.log.error('Failed to restore volume %s of VM %s: %s',
                           volume.name, vm.name, err)

    def _handle_volume_size(self, vm, volume, size):
        '''Wrap volume resize with logging'''
        try:
            if volume.size < size:
                volume.resize(size)
        except Exception as err:  # pylint: disable=broad-except
            self.log.error('Failed to resize volume %s of VM %s to %d: %s',
                           volume.name, vm.name, size, err)

    def restore_do(self, restore_info):
        '''
        High level workflow:

        1. Create VM objects in the host collection (qubes.xml)
        2. Create them on disk (vm.create_on_disk)
        3. Restore VM data, overriding/converting VM files
        4. Apply possible fixups and save qubes.xml

        :param restore_info:
        :return:
        '''
        if self.header_data.version == 1:
            raise NotImplementedError('Backup format version 1 not supported')

        restore_info = self.restore_info_verify(restore_info)

        self._restore_vms_metadata(restore_info)

        # Perform VM restoration in backup order
        vms_dirs = []
        handlers = {}
        vms_size = 0
        for vm_info in self._templates_first(restore_info.values()):
            vm = vm_info.restored_vm
            if vm and vm_info.subdir:
                if isinstance(vm_info, self.Dom0ToRestore) and \
                        vm_info.good_to_go:
                    vms_dirs.append(os.path.dirname(vm_info.subdir))
                    vms_size += int(vm_info.size)
                    if self.options.verify_only:
                        continue
                    handlers[vm_info.subdir] = (self._handle_dom0, None)
                else:
                    vms_size += int(vm_info.size)
                    vms_dirs.append(vm_info.subdir)

                    if self.options.verify_only:
                        continue

                    for name, volume in vm.volumes.items():
                        if not volume.save_on_stop:
                            continue
                        data_func = functools.partial(
                            self._handle_volume_data, vm, volume)
                        size_func = functools.partial(
                            self._handle_volume_size, vm, volume)
                        img_path = os.path.join(vm_info.subdir, name + '.img')
                        handlers[img_path] = (data_func, size_func)
                    handlers[os.path.join(vm_info.subdir, 'firewall.xml')] = (
                        functools.partial(vm_info.vm.handle_firewall_xml, vm),
                        None)
                    handlers[os.path.join(vm_info.subdir,
                                          'whitelisted-appmenus.list')] = (
                        functools.partial(self._handle_appmenus_list, vm),
                        None)
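
        # Illustrative example of the handlers map built above (the subdir
        # names are hypothetical):
        #   handlers['vm1/private.img'] = (data_func, size_func)
        #   handlers['vm1/firewall.xml'] = (handle_firewall_xml, None)
        #   handlers['vm1/whitelisted-appmenus.list'] = (appmenus_func, None)
        # The extraction worker calls the first element of each pair with the
        # restored file's content stream, and the second (if not None) with
        # the file size.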
        try:
            self._restore_vm_data(vms_dirs=vms_dirs, vms_size=vms_size,
                                  handlers=handlers)
        except QubesException as err:
            if self.options.verify_only:
                raise
            self.log.error('Error extracting data: %s', str(err))
        finally:
            if self.log.getEffectiveLevel() > logging.DEBUG:
                shutil.rmtree(self.tmpdir)

        if self.canceled:
            raise BackupCanceledError("Restore canceled",
                                      tmpdir=self.tmpdir)

        self.log.info("-> Done.")
        if not self.options.verify_only:
            self.log.info("-> Please install updates for all the restored "
                          "templates.")

    def _restore_property(self, vm, prop, value):
        '''Restore a single VM property, logging exceptions'''
        try:
            setattr(vm, prop, value)
        except Exception as err:  # pylint: disable=broad-except
            self.log.error('Error setting %s.%s to %s: %s',
                           vm.name, prop, value, err)

    def _restore_vms_metadata(self, restore_info):
        '''Restore VM metadata

        Create VMs, set their properties etc.
        '''
        vms = {}
        for vm_info in restore_info.values():
            assert isinstance(vm_info, self.VMToRestore)
            if not vm_info.vm:
                continue
            if not vm_info.good_to_go:
                continue
            vm = vm_info.vm
            vms[vm.name] = vm

        # First load templates, then other VMs
        for vm in self._templates_first(vms.values()):
            if self.canceled:
                return
            if self.options.verify_only:
                self.log.info("-> Verifying %s...", vm.name)
            else:
                self.log.info("-> Restoring %s...", vm.name)
            kwargs = {}
            if vm.template:
                template = restore_info[vm.name].template
                # handle potentially renamed template
                if template in restore_info \
                        and restore_info[template].good_to_go:
                    template = restore_info[template].name
                kwargs['template'] = template

            new_vm = None
            vm_name = restore_info[vm.name].name

            if self.options.verify_only or vm.name == 'dom0':
                # can't create the VM, but we still need its backup info
                new_vm = self.backup_app.domains[vm_name]
            else:
                try:
                    # only create the VMs at first; setting properties later
                    # may require other VMs to already exist
                    new_vm = self.app.add_new_vm(
                        vm.klass,
                        name=vm_name,
                        label=vm.label,
                        pool=self.options.override_pool,
                        **kwargs)
                except Exception as err:  # pylint: disable=broad-except
                    self.log.error('Error restoring VM %s, skipping: %s',
                                   vm.name, err)
                    if new_vm:
                        del self.app.domains[new_vm.name]
                    continue

            # restore this property early to be ready for dependent DispVMs
            prop = 'template_for_dispvms'
            value = vm.properties.get(prop, None)
            if value is not None:
                self._restore_property(new_vm, prop, value)

            restore_info[vm.name].restored_vm = new_vm

        for vm in vms.values():
            if self.canceled:
                return

            new_vm = restore_info[vm.name].restored_vm
            if not new_vm:
                # skipped/failed
                continue

            for prop, value in vm.properties.items():
                # can't reset the first; already handled the second
                if prop in ['dispid', 'template_for_dispvms']:
                    continue
                # exclude VM references - handled manually according to
                # restore options
                if prop in ['template', 'netvm', 'default_dispvm']:
                    continue
                # exclude as this only applies before restoring
                if prop in ['installed_by_rpm']:
                    continue
                self._restore_property(new_vm, prop, value)

            for feature, value in vm.features.items():
                try:
                    new_vm.features[feature] = value
                except Exception as err:  # pylint: disable=broad-except
                    self.log.error('Error setting %s.features[%s] to %s: %s',
                                   vm.name, feature, value, err)

            for tag in vm.tags:
                try:
                    new_vm.tags.add(tag)
                except Exception as err:  # pylint: disable=broad-except
                    if tag not in new_vm.tags:
                        self.log.error('Error adding tag %s to %s: %s',
                                       tag, vm.name, err)

            for bus in vm.devices:
                for backend_domain, ident in vm.devices[bus]:
                    options = vm.devices[bus][(backend_domain, ident)]
                    assignment = DeviceAssignment(
                        backend_domain=backend_domain,
                        ident=ident,
                        options=options,
                        persistent=True)
                    try:
                        if not self.options.verify_only:
                            new_vm.devices[bus].attach(assignment)
                    except Exception as err:  # pylint: disable=broad-except
                        self.log.error(
                            'Error attaching device %s:%s to %s: %s',
                            bus, ident, vm.name, err)

        # Set VM dependencies - only non-default settings
        for vm in vms.values():
            vm_info = restore_info[vm.name]
            vm_name = vm_info.name
            try:
                host_vm = self.app.domains[vm_name]
            except KeyError:
                # Failed/skipped VM
                continue

            if 'netvm' in vm.properties:
                if vm_info.netvm in restore_info:
                    value = restore_info[vm_info.netvm].name
                else:
                    value = vm_info.netvm

                try:
                    host_vm.netvm = value
                except Exception as err:  # pylint: disable=broad-except
                    self.log.error('Error setting %s.%s to %s: %s',
                                   vm.name, 'netvm', value, err)

            if 'default_dispvm' in vm.properties:
                if vm.properties['default_dispvm'] in restore_info:
                    value = restore_info[
                        vm.properties['default_dispvm']].name
                else:
                    value = vm.properties['default_dispvm']

                try:
                    host_vm.default_dispvm = value
                except Exception as err:  # pylint: disable=broad-except
                    self.log.error('Error setting %s.%s to %s: %s',
                                   vm.name, 'default_dispvm', value, err)