#!/usr/bin/python
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2011 Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#

from qubes import QubesVm, QubesException, QubesVmCollection
from qubes import xs, xl_ctx, qubes_guid_path, qubes_clipd_path, qrexec_client_path
from qubes import qubes_store_filename, qubes_base_dir
from qubes import qubes_servicevms_dir, qubes_templates_dir, qubes_appvms_dir
import sys
import os
import subprocess
import re
import time
import grp, pwd
from datetime import datetime
from qmemman_client import QMemmanClient

import xen.lowlevel.xc
import xen.lowlevel.xl
import xen.lowlevel.xs

def mbytes_to_kmg(size):
    if size > 1024:
        return "%d GiB" % (size/1024)
    else:
        return "%d MiB" % size

def kbytes_to_kmg(size):
    if size > 1024:
        return mbytes_to_kmg(size/1024)
    else:
        return "%d KiB" % size

def bytes_to_kmg(size):
    if size > 1024:
        return kbytes_to_kmg(size/1024)
    else:
        return "%d B" % size

def size_to_human (size):
    """Human readable size, with 1/10 precision"""
    if size < 1024:
        return str (size)
    elif size < 1024*1024:
        return str(round(size/1024.0,1)) + ' KiB'
    elif size < 1024*1024*1024:
        return str(round(size/(1024.0*1024),1)) + ' MiB'
    else:
        return str(round(size/(1024.0*1024*1024),1)) + ' GiB'

def parse_size(size):
    units = [ ('K', 1024), ('KB', 1024),
        ('M', 1024*1024), ('MB', 1024*1024),
        ('G', 1024*1024*1024), ('GB', 1024*1024*1024),
        ]

    size = size.strip().upper()
    if size.isdigit():
        return int(size)

    for unit, multiplier in units:
        if size.endswith(unit):
            size = size[:-len(unit)].strip()
            return int(size)*multiplier

    raise QubesException("Invalid size: {0}.".format(size))

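# Illustrative usage of the size helpers above (not part of the original
# module; the values simply follow the conversion rules defined in
# parse_size() and size_to_human()):
#   parse_size("512M")       -> 536870912
#   parse_size("2GB")        -> 2147483648
#   size_to_human(536870912) -> '512.0 MiB'
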
def print_stdout(text):
    print (text)

def print_stderr(text):
    print >> sys.stderr, (text)

###### Block devices ########

def block_devid_to_name(devid):
    major = devid / 256
    minor = devid % 256

    dev_class = ""
    if major == 202:
        dev_class = "xvd"
    elif major == 8:
        dev_class = "sd"
    else:
        raise QubesException("Unknown device class %d" % major)

    if minor % 16 == 0:
        return "%s%c" % (dev_class, ord('a')+minor/16)
    else:
        return "%s%c%d" % (dev_class, ord('a')+minor/16, minor%16)

def block_name_to_majorminor(name):
    # check if it is already devid
    if isinstance(name, int):
        return (name / 256, name % 256)
    if name.isdigit():
        return (int(name) / 256, int(name) % 256)

    major = 0
    minor = 0
    dXpY_style = False
    disk = True

    if name.startswith("xvd"):
        major = 202
    elif name.startswith("sd"):
        major = 8
    elif name.startswith("mmcblk"):
        dXpY_style = True
        major = 179
    elif name.startswith("scd"):
        disk = False
        major = 11
    elif name.startswith("sr"):
        disk = False
        major = 11
    elif name.startswith("loop"):
        disk = False
        major = 7
    elif name.startswith("md"):
        disk = False
        major = 9
    else:
        # Unknown device
        return (0, 0)

    if not dXpY_style:
        name_match = re.match(r"^([a-z]+)([a-z])([0-9]*)$", name)
    else:
        name_match = re.match(r"^([a-z]+)([0-9]*)(?:p([0-9]+))?$", name)
    if not name_match:
        raise QubesException("Invalid device name: %s" % name)

    if disk:
        if dXpY_style:
            minor = int(name_match.group(2))*8
        else:
            minor = (ord(name_match.group(2))-ord('a')) * 16
    else:
        minor = 0
    if name_match.group(3):
        minor += int(name_match.group(3))

    return (major, minor)

def block_name_to_devid(name):
    # check if it is already devid
    if isinstance(name, int):
        return name
    if name.isdigit():
        return int(name)

    (major, minor) = block_name_to_majorminor(name)
    return major << 8 | minor

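# Illustrative round trip through the helpers above (not from the original
# source; the numbers follow the major/minor arithmetic used in this module):
#   block_devid_to_name(51728)       -> 'xvdb'   (major 202, minor 16)
#   block_name_to_devid('xvdb')      -> 51728
#   block_name_to_majorminor('sda3') -> (8, 3)
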
def block_find_unused_frontend(vm = None):
    assert vm is not None
    assert vm.is_running()

    vbd_list = xs.ls('', '/local/domain/%d/device/vbd' % vm.xid)
    # xvd* devices
    major = 202
    # prefer xvdi
    for minor in range(8*16,254,16)+range(0,8*16,16):
        if vbd_list is None or str(major << 8 | minor) not in vbd_list:
            return block_devid_to_name(major << 8 | minor)
    return None

def block_list(vm = None, system_disks = False):
    device_re = re.compile(r"^[a-z0-9]{1,12}$")
    # FIXME: any better idea of desc_re?
    desc_re = re.compile(r"^.{1,255}$")
    mode_re = re.compile(r"^[rw]$")

    xs_trans = xs.transaction_start()

    vm_list = []
    if vm is not None:
        if not vm.is_running():
            xs.transaction_end(xs_trans)
            return []
        else:
            vm_list = [ str(vm.xid) ]
    else:
        vm_list = xs.ls(xs_trans, '/local/domain')

    devices_list = {}
    for xid in vm_list:
        vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
        vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-block-devices' % xid)
        if vm_devices is None:
            continue
        for device in vm_devices:
            # Sanitize device name
            if not device_re.match(device):
                print >> sys.stderr, "Invalid device name in VM '%s'" % vm_name
                continue

            device_size = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/size' % (xid, device))
            device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/desc' % (xid, device))
            device_mode = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/mode' % (xid, device))

            if device_size is None or device_desc is None or device_mode is None:
                print >> sys.stderr, "Missing field in %s device parameters" % device
                continue
            if not device_size.isdigit():
                print >> sys.stderr, "Invalid %s device size in VM '%s'" % (device, vm_name)
                continue
            if not desc_re.match(device_desc):
                print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
                continue
            if not mode_re.match(device_mode):
                print >> sys.stderr, "Invalid %s device mode in VM '%s'" % (device, vm_name)
                continue
            # Check if we know major number for this device; attach will work without this, but detach and check_attached don't
            if block_name_to_majorminor(device) == (0, 0):
                print >> sys.stderr, "Unsupported device %s:%s" % (vm_name, device)
                continue

            if not system_disks:
                if xid == '0' and device_desc.startswith(qubes_base_dir):
                    continue

            visible_name = "%s:%s" % (vm_name, device)
            devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
                "vm": vm_name, "device":device, "size":int(device_size),
                "desc":device_desc, "mode":device_mode}

    xs.transaction_end(xs_trans)
    return devices_list

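# Illustrative use of block_list() (not part of the original source); keys
# are "<backend VM name>:<device>" strings:
#   for name, dev in block_list().items():
#       print "%s  %s  mode=%s" % (name, size_to_human(dev["size"]), dev["mode"])
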
def block_check_attached(backend_vm, device, backend_xid = None):
    if backend_xid is None:
        backend_xid = backend_vm.xid
    xs_trans = xs.transaction_start()
    vm_list = xs.ls(xs_trans, '/local/domain/%d/backend/vbd' % backend_xid)
    if vm_list is None:
        xs.transaction_end(xs_trans)
        return None
    device_majorminor = None
    try:
        device_majorminor = block_name_to_majorminor(device)
    except:
        # Unknown devices will be compared directly - perhaps it is a filename?
        pass
    for vm_xid in vm_list:
        for devid in xs.ls(xs_trans, '/local/domain/%d/backend/vbd/%s' % (backend_xid, vm_xid)):
            (tmp_major, tmp_minor) = (0, 0)
            phys_device = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/physical-device' % (backend_xid, vm_xid, devid))
            dev_params = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/params' % (backend_xid, vm_xid, devid))
            if phys_device and phys_device.find(':'):
                (tmp_major, tmp_minor) = phys_device.split(":")
                tmp_major = int(tmp_major, 16)
                tmp_minor = int(tmp_minor, 16)
            else:
                # perhaps not ready yet - check params
                if not dev_params:
                    # Skip not-phy devices
                    continue
                elif not dev_params.startswith('/dev/'):
                    # will compare params directly
                    pass
                else:
                    (tmp_major, tmp_minor) = block_name_to_majorminor(dev_params.lstrip('/dev/'))

            if (device_majorminor and (tmp_major, tmp_minor) == device_majorminor) or \
               (device_majorminor is None and dev_params == device):
                vm_name = xl_ctx.domid_to_name(int(vm_xid))
                frontend = block_devid_to_name(int(devid))
                xs.transaction_end(xs_trans)
                return {"xid":int(vm_xid), "frontend": frontend, "devid": int(devid), "vm": vm_name}
    xs.transaction_end(xs_trans)
    return None

def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=False, wait=True):
    device_attach_check(vm, backend_vm, device, frontend)
    do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait)

def device_attach_check(vm, backend_vm, device, frontend):
    """ Checks all the parameters, dies on errors """
    if not vm.is_running():
        raise QubesException("VM %s not running" % vm.name)

    if not backend_vm.is_running():
        raise QubesException("VM %s not running" % backend_vm.name)

def do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait):
    if frontend is None:
        frontend = block_find_unused_frontend(vm)
        if frontend is None:
            raise QubesException("No unused frontend found")
    else:
        # Check if any device attached at this frontend
        if xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm.xid, block_name_to_devid(frontend))) == '4':
            raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))

    # Check if this device is attached to some domain
    attached_vm = block_check_attached(backend_vm, device)
    if attached_vm:
        if auto_detach:
            block_detach(None, attached_vm['devid'], vm_xid=attached_vm['xid'])
        else:
            raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))

    if device.startswith('/'):
        backend_dev = 'script:file:' + device
    else:
        backend_dev = 'phy:/dev/' + device

    xl_cmd = [ '/usr/sbin/xl', 'block-attach', vm.name, backend_dev, frontend, mode, str(backend_vm.xid) ]
    subprocess.check_call(xl_cmd)
    if wait:
        be_path = '/local/domain/%d/backend/vbd/%d/%d' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend))
        # There is no way to use xenstore watch with a timeout, so must check in a loop
        interval = 0.100
        # 5sec timeout
        timeout = 5/interval
        while timeout > 0:
            be_state = xs.read('', be_path + '/state')
            hotplug_state = xs.read('', be_path + '/hotplug-status')
            if be_state is None:
                raise QubesException("Backend device disappeared, something weird happened")
            elif int(be_state) == 4:
                # Ok
                return
            elif int(be_state) > 4:
                # Error
                error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
                if error is not None:
                    raise QubesException("Error while connecting block device: " + error)
                else:
                    raise QubesException("Unknown error while connecting block device")
            elif hotplug_state == 'error':
                hotplug_error = xs.read('', be_path + '/hotplug-error')
                if hotplug_error:
                    raise QubesException("Error while connecting block device: " + hotplug_error)
                else:
                    raise QubesException("Unknown hotplug error while connecting block device")
            time.sleep(interval)
            timeout -= interval
        raise QubesException("Timeout while waiting for block device connection")

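# Illustrative attach/detach flow (not from the original source; assumes
# 'qvmc' is a loaded QubesVmCollection and that both VMs are running):
#   backend = qvmc.get_vm_by_name("usbvm")
#   target  = qvmc.get_vm_by_name("work")
#   block_attach(target, backend, "sdb1", mode="r")   # picks a free xvd* frontend
#   block_detach(target, "xvdi")
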
def block_detach(vm, frontend = "xvdi", vm_xid = None):
    # Get XID if not provided already
    if vm_xid is None:
        if not vm.is_running():
            raise QubesException("VM %s not running" % vm.name)
        # FIXME: potential race
        vm_xid = vm.xid

    # Check if this device is really connected
    if not xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm_xid, block_name_to_devid(frontend))) == '4':
        # Do nothing - device already detached
        return

    xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), str(frontend)]
    subprocess.check_call(xl_cmd)

def block_detach_all(vm, vm_xid = None):
    """ Detach all non-system devices"""
    # Get XID if not provided already
    if vm_xid is None:
        if not vm.is_running():
            raise QubesException("VM %s not running" % vm.name)
        # FIXME: potential race
        vm_xid = vm.xid

    xs_trans = xs.transaction_start()
    devices = xs.ls(xs_trans, '/local/domain/%d/device/vbd' % vm_xid)
    if devices is None:
        xs.transaction_end(xs_trans)
        return
    devices_to_detach = []
    for devid in devices:
        # check if this is system disk
        be_path = xs.read(xs_trans, '/local/domain/%d/device/vbd/%s/backend' % (vm_xid, devid))
        assert be_path is not None
        be_params = xs.read(xs_trans, be_path + '/params')
        if be_path.startswith('/local/domain/0/') and be_params is not None and be_params.startswith(qubes_base_dir):
            # system disk
            continue
        devices_to_detach.append(devid)
    xs.transaction_end(xs_trans)
    for devid in devices_to_detach:
        xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), devid]
        subprocess.check_call(xl_cmd)

####### USB devices ######

usb_ver_re = re.compile(r"^(1|2)$")
usb_device_re = re.compile(r"^[0-9]+-[0-9]+(_[0-9]+)?$")
usb_port_re = re.compile(r"^$|^[0-9]+-[0-9]+(\.[0-9]+)?$")

def usb_setup(backend_vm_xid, vm_xid, devid, usb_ver):
    """
    Attach frontend to the backend.
    backend_vm_xid - id of the backend domain
    vm_xid - id of the frontend domain
    devid  - id of the pvusb controller
    """
    num_ports = 8
    trans = xs.transaction_start()

    be_path = "/local/domain/%d/backend/vusb/%d/%d" % (backend_vm_xid, vm_xid, devid)
    fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, devid)

    be_perm = [{'dom': backend_vm_xid}, {'dom': vm_xid, 'read': True} ]
    fe_perm = [{'dom': vm_xid}, {'dom': backend_vm_xid, 'read': True} ]

    # Create directories and set permissions
    xs.write(trans, be_path, "")
    xs.set_permissions(trans, be_path, be_perm)

    xs.write(trans, fe_path, "")
    xs.set_permissions(trans, fe_path, fe_perm)

    # Write backend information into the location that frontend looks for
    xs.write(trans, "%s/backend-id" % fe_path, str(backend_vm_xid))
    xs.write(trans, "%s/backend" % fe_path, be_path)

    # Write frontend information into the location that backend looks for
    xs.write(trans, "%s/frontend-id" % be_path, str(vm_xid))
    xs.write(trans, "%s/frontend" % be_path, fe_path)

    # Write USB Spec version field.
    xs.write(trans, "%s/usb-ver" % be_path, usb_ver)

    # Write virtual root hub field.
    xs.write(trans, "%s/num-ports" % be_path, str(num_ports))
    for port in range(1, num_ports+1):
        # Set all port to disconnected state
        xs.write(trans, "%s/port/%d" % (be_path, port), "")

    # Set state to XenbusStateInitialising
    xs.write(trans, "%s/state" % fe_path, "1")
    xs.write(trans, "%s/state" % be_path, "1")
    xs.write(trans, "%s/online" % be_path, "1")

    xs.transaction_end(trans)

def usb_decode_device_from_xs(xs_encoded_device):
    """ recover actual device name (xenstore doesn't allow dot in key names, so it was translated to underscore) """
    return xs_encoded_device.replace('_', '.')

def usb_encode_device_for_xs(device):
    """ encode actual device name (xenstore doesn't allow dot in key names, so translated it into underscore) """
    return device.replace('.', '_')

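# Illustrative mapping (not from the original source): a port such as "2-1.3"
# is stored in xenstore as "2-1_3" and recovered on read:
#   usb_encode_device_for_xs("2-1.3")  -> "2-1_3"
#   usb_decode_device_from_xs("2-1_3") -> "2-1.3"
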
def usb_list():
    """
    Returns a dictionary of USB devices (for PVUSB backends running in all VM).
    The dictionary is keyed by 'name' (see below), each element is a dictionary itself:
    vm   = name of the backend domain
    xid  = xid of the backend domain
    device = <frontend device number>-<frontend port number>
    name = <name of backend domain>:<frontend device number>-<frontend port number>
    desc = description
    """
    # FIXME: any better idea of desc_re?
    desc_re = re.compile(r"^.{1,255}$")

    devices_list = {}

    xs_trans = xs.transaction_start()
    vm_list = xs.ls(xs_trans, '/local/domain')

    for xid in vm_list:
        vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
        vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-usb-devices' % xid)
        if vm_devices is None:
            continue
        # when listing devices in xenstore we get encoded names
        for xs_encoded_device in vm_devices:
            # Sanitize device id
            if not usb_device_re.match(xs_encoded_device):
                print >> sys.stderr, "Invalid device id in backend VM '%s'" % vm_name
                continue
            device = usb_decode_device_from_xs(xs_encoded_device)
            device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/desc' % (xid, xs_encoded_device))
            if not desc_re.match(device_desc):
                print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
                continue
            visible_name = "%s:%s" % (vm_name, device)
            # grab version
            usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (xid, xs_encoded_device))
            if usb_ver is None or not usb_ver_re.match(usb_ver):
                print >> sys.stderr, "Invalid %s device USB version in VM '%s'" % (device, vm_name)
                continue
            devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
                "vm": vm_name, "device":device,
                "desc":device_desc,
                "usb_ver":usb_ver}

    xs.transaction_end(xs_trans)
    return devices_list

def usb_check_attached(xs_trans, backend_vm, device):
    """
    Checks if the given device in the given backend attached to any frontend.
    Parameters:
    backend_vm - xid of the backend domain
    device - device name in the backend domain
    Returns None or a dictionary:
    vm - the name of the frontend domain
    xid - xid of the frontend domain
    frontend - frontend device number FIXME
    devid - frontend port number FIXME
    """
    # sample xs content: /local/domain/0/backend/vusb/4/0/port/1 = "7-5"
    attached_dev = None
    vms = xs.ls(xs_trans, '/local/domain/%d/backend/vusb' % backend_vm)
    if vms is None:
        return None
    for vm in vms:
        if not vm.isdigit():
            print >> sys.stderr, "Invalid VM id"
            continue
        frontend_devs = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s' % (backend_vm, vm))
        if frontend_devs is None:
            continue
        for frontend_dev in frontend_devs:
            if not frontend_dev.isdigit():
                print >> sys.stderr, "Invalid frontend in VM %s" % vm
                continue
            ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port' % (backend_vm, vm, frontend_dev))
            if ports is None:
                continue
            for port in ports:
                # FIXME: refactor, see similar loop in usb_find_unused_frontend(), use usb_list() instead?
                if not port.isdigit():
                    print >> sys.stderr, "Invalid port in VM %s frontend %s" % (vm, frontend_dev)
                    continue
                dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port/%s' % (backend_vm, vm, frontend_dev, port))
                if dev == "":
                    continue
                # Sanitize device id
                if not usb_port_re.match(dev):
                    print >> sys.stderr, "Invalid device id in backend VM %d @ %s/%s/port/%s" % \
                        (backend_vm, vm, frontend_dev, port)
                    continue
                if dev == device:
                    frontend = "%s-%s" % (frontend_dev, port)
                    vm_name = xl_ctx.domid_to_name(int(vm))
                    if vm_name is None:
                        # FIXME: should we wipe references to frontends running on nonexistent VMs?
                        continue
                    attached_dev = {"xid":int(vm), "frontend": frontend, "devid": device, "vm": vm_name}
                    break
    return attached_dev

#def usb_check_frontend_busy(vm, front_dev, port):
#    devport = frontend.split("-")
#    if len(devport) != 2:
#        raise QubesException("Malformed frontend syntax, must be in device-port format")
#    # FIXME:
#    # return xs.read('', '/local/domain/%d/device/vusb/%d/state' % (vm.xid, frontend)) == '4'
#    return False

def usb_find_unused_frontend(xs_trans, backend_vm_xid, vm_xid, usb_ver):
    """
    Find an unused frontend/port to link the given backend with the given frontend.
    Creates new frontend if needed.
    Returns frontend specification in <device>-<port> format.
    """

    # This variable holds an index of last frontend scanned by the loop below.
    # If nothing found, this value will be used to derive the index of a new frontend.
    last_frontend_dev = -1

    frontend_devs = xs.ls(xs_trans, "/local/domain/%d/device/vusb" % vm_xid)
    if frontend_devs is not None:
        for frontend_dev in frontend_devs:
            if not frontend_dev.isdigit():
                print >> sys.stderr, "Invalid frontend_dev in VM %d" % vm_xid
                continue
            frontend_dev = int(frontend_dev)
            fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, frontend_dev)
            if xs.read(xs_trans, "%s/backend-id" % fe_path) == str(backend_vm_xid):
                if xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/usb-ver' % (backend_vm_xid, vm_xid, frontend_dev)) != usb_ver:
                    last_frontend_dev = frontend_dev
                    continue
                # here: found an existing frontend already connected to right backend using an appropriate USB version
                ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/port' % (backend_vm_xid, vm_xid, frontend_dev))
                if ports is None:
                    print >> sys.stderr, "No ports in VM %d frontend_dev %d?" % (vm_xid, frontend_dev)
                    last_frontend_dev = frontend_dev
                    continue
                for port in ports:
                    # FIXME: refactor, see similar loop in usb_check_attached(), use usb_list() instead?
                    if not port.isdigit():
                        print >> sys.stderr, "Invalid port in VM %d frontend_dev %d" % (vm_xid, frontend_dev)
                        continue
                    port = int(port)
                    dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/port/%d' % (backend_vm_xid, vm_xid, frontend_dev, port))
                    # Sanitize device id
                    if not usb_port_re.match(dev):
                        print >> sys.stderr, "Invalid device id in backend VM %d @ %d/%d/port/%d" % \
                            (backend_vm_xid, vm_xid, frontend_dev, port)
                        continue
                    if dev == "":
                        return '%d-%d' % (frontend_dev, port)
                last_frontend_dev = frontend_dev

    # create a new frontend_dev and link it to the backend
    frontend_dev = last_frontend_dev + 1
    usb_setup(backend_vm_xid, vm_xid, frontend_dev, usb_ver)
    return '%d-%d' % (frontend_dev, 1)

def usb_attach(vm, backend_vm, device, frontend=None, auto_detach=False, wait=True):
    device_attach_check(vm, backend_vm, device, frontend)

    xs_trans = xs.transaction_start()

    xs_encoded_device = usb_encode_device_for_xs(device)
    usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (backend_vm.xid, xs_encoded_device))
    if usb_ver is None or not usb_ver_re.match(usb_ver):
        xs.transaction_end(xs_trans)
        raise QubesException("Invalid %s device USB version in VM '%s'" % (device, backend_vm.name))

    if frontend is None:
        frontend = usb_find_unused_frontend(xs_trans, backend_vm.xid, vm.xid, usb_ver)
    else:
        # Check if any device attached at this frontend
        #if usb_check_frontend_busy(vm, frontend):
        #    raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
        xs.transaction_end(xs_trans)
        raise NotImplementedError("Explicit USB frontend specification is not implemented yet")

    # Check if this device is attached to some domain
    attached_vm = usb_check_attached(xs_trans, backend_vm.xid, device)
    xs.transaction_end(xs_trans)

    if attached_vm:
        if auto_detach:
            usb_detach(backend_vm, attached_vm)
        else:
            raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))

    # Run helper script
    xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-attach.py', str(vm.xid), device, frontend, str(backend_vm.xid) ]
    subprocess.check_call(xl_cmd)

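# Illustrative usb_attach() call (not from the original source; assumes a
# loaded QubesVmCollection 'qvmc' with both VMs running, and that the backend
# exports a device named "2-1"):
#   usb_attach(qvmc.get_vm_by_name("work"), qvmc.get_vm_by_name("usbvm"), "2-1")
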
def usb_detach(backend_vm, attachment):
    xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-detach.py', str(attachment['xid']), attachment['devid'], attachment['frontend'], str(backend_vm.xid) ]
    subprocess.check_call(xl_cmd)

def usb_detach_all(vm):
    raise NotImplementedError("Detaching all devices from a given VM is not implemented yet")

####### QubesWatch ######

def only_in_first_list(l1, l2):
    ret = []
    for i in l1:
        if not i in l2:
            ret.append(i)
    return ret

class QubesWatch(object):
    class WatchType(object):
        def __init__(self, fn, param):
            self.fn = fn
            self.param = param

    def __init__(self):
        self.xs = xen.lowlevel.xs.xs()
        self.watch_tokens_block = {}
        self.watch_tokens_vbd = {}
        self.block_callback = None
        self.domain_callback = None
        self.xs.watch('@introduceDomain', QubesWatch.WatchType(self.domain_list_changed, None))
        self.xs.watch('@releaseDomain', QubesWatch.WatchType(self.domain_list_changed, None))

    def setup_block_watch(self, callback):
        old_block_callback = self.block_callback
        self.block_callback = callback
        if old_block_callback is not None and callback is None:
            # remove watches
            self.update_watches_vbd([])
            self.update_watches_block([])
        else:
            # possibly add watches
            self.domain_list_changed(None)

    def setup_domain_watch(self, callback):
        self.domain_callback = callback

    def get_block_key(self, xid):
        return '/local/domain/%s/qubes-block-devices' % xid

    def get_vbd_key(self, xid):
        return '/local/domain/%s/device/vbd' % xid

    def update_watches_block(self, xid_list):
        for i in only_in_first_list(xid_list, self.watch_tokens_block.keys()):
            # new domain has been created
            watch = QubesWatch.WatchType(self.block_callback, i)
            self.watch_tokens_block[i] = watch
            self.xs.watch(self.get_block_key(i), watch)
        for i in only_in_first_list(self.watch_tokens_block.keys(), xid_list):
            # domain destroyed
            self.xs.unwatch(self.get_block_key(i), self.watch_tokens_block[i])
            self.watch_tokens_block.pop(i)

    def update_watches_vbd(self, xid_list):
        for i in only_in_first_list(xid_list, self.watch_tokens_vbd.keys()):
            # new domain has been created
            watch = QubesWatch.WatchType(self.block_callback, i)
            self.watch_tokens_vbd[i] = watch
            self.xs.watch(self.get_vbd_key(i), watch)
        for i in only_in_first_list(self.watch_tokens_vbd.keys(), xid_list):
            # domain destroyed
            self.xs.unwatch(self.get_vbd_key(i), self.watch_tokens_vbd[i])
            self.watch_tokens_vbd.pop(i)

    def domain_list_changed(self, param):
        curr = self.xs.ls('', '/local/domain')
        if curr == None:
            return
        if self.domain_callback:
            self.domain_callback()
        if self.block_callback:
            self.update_watches_block(curr)
            self.update_watches_vbd(curr)

    def watch_single(self):
        result = self.xs.read_watch()
        token = result[1]
        token.fn(token.param)

    def watch_loop(self):
        while True:
            self.watch_single()

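# Illustrative QubesWatch usage (not part of the original source): register a
# callback for block-device changes and process xenstore events forever; the
# callback receives the xid of the domain whose entries changed:
#   def on_block_change(xid):
#       print "block devices changed in domain", xid
#   watcher = QubesWatch()
#   watcher.setup_block_watch(on_block_change)
#   watcher.watch_loop()
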
######## Backups #########

def get_disk_usage(file_or_dir):
    if not os.path.exists(file_or_dir):
        return 0

    p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir],
            stdout=subprocess.PIPE)
    result = p.communicate()
    m = re.match(r"^(\d+)\s.*", result[0])
    sz = int(m.group(1)) if m is not None else 0
    return sz

def file_to_backup (file_path, sz = None):
    if sz is None:
        sz = os.path.getsize (qubes_store_filename)

    abs_file_path = os.path.abspath (file_path)
    abs_base_dir = os.path.abspath (qubes_base_dir) + '/'
    abs_file_dir = os.path.dirname (abs_file_path) + '/'
    (nothing, dir, subdir) = abs_file_dir.partition (abs_base_dir)
    assert nothing == ""
    assert dir == abs_base_dir
    return [ { "path" : file_path, "size": sz, "subdir": subdir} ]

def backup_prepare(base_backup_dir, vms_list = None, exclude_list = [], print_callback = print_stdout):
    """If vms = None, include all (sensible) VMs; exclude_list is always applied"""

    if not os.path.exists (base_backup_dir):
        raise QubesException("The target directory doesn't exist!")

    files_to_backup = file_to_backup (qubes_store_filename)

    if exclude_list is None:
        exclude_list = []

    if vms_list is None:
        qvm_collection = QubesVmCollection()
        qvm_collection.lock_db_for_reading()
        qvm_collection.load()
        # FIXME: should be after backup completed
        qvm_collection.unlock_db()

        all_vms = [vm for vm in qvm_collection.values()]
        appvms_to_backup = [vm for vm in all_vms if vm.is_appvm() and not vm.internal]
        netvms_to_backup = [vm for vm in all_vms if vm.is_netvm() and not vm.qid == 0]
        template_vms_worth_backingup = [vm for vm in all_vms if (vm.is_template() and not vm.installed_by_rpm)]

        vms_list = appvms_to_backup + netvms_to_backup + template_vms_worth_backingup

    vms_for_backup = vms_list
    # Apply exclude list
    if exclude_list:
        vms_for_backup = [vm for vm in vms_list if vm.name not in exclude_list]

    no_vms = len (vms_for_backup)

    there_are_running_vms = False

    fields_to_display = [
        { "name": "VM", "width": 16},
        { "name": "type","width": 12 },
        { "name": "size", "width": 12}
        ]

    # Display the header
    s = ""
    for f in fields_to_display:
        fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt="{{0:>{0}}} |".format(f["width"] + 1)
        s += fmt.format(f["name"])
    print_callback(s)
    s = ""
    for f in fields_to_display:
        fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    for vm in vms_for_backup:
        if vm.is_template():
            # handle templates later
            continue

        vm_sz = 0
        if vm.private_img is not None:
            vm_sz = vm.get_disk_usage (vm.private_img)
            files_to_backup += file_to_backup(vm.private_img, vm_sz )

        if vm.is_appvm():
            files_to_backup += file_to_backup(vm.icon_path)
        if vm.updateable:
            if os.path.exists(vm.dir_path + "/apps.templates"):
                # template
                files_to_backup += file_to_backup(vm.dir_path + "/apps.templates")
            else:
                # standaloneVM
                files_to_backup += file_to_backup(vm.dir_path + "/apps")

            if os.path.exists(vm.dir_path + "/kernels"):
                files_to_backup += file_to_backup(vm.dir_path + "/kernels")
        if os.path.exists (vm.firewall_conf):
            files_to_backup += file_to_backup(vm.firewall_conf)
        if os.path.exists(vm.dir_path + '/whitelisted-appmenus.list'):
            files_to_backup += file_to_backup(vm.dir_path + '/whitelisted-appmenus.list')

        if vm.updateable:
            sz = vm.get_disk_usage(vm.root_img)
            files_to_backup += file_to_backup(vm.root_img, sz)
            vm_sz += sz

        s = ""
        fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format(vm.name)

        fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        if vm.is_netvm():
            s += fmt.format("NetVM" + (" + Sys" if vm.updateable else ""))
        else:
            s += fmt.format("AppVM" + (" + Sys" if vm.updateable else ""))

        fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(vm_sz))

        if vm.is_running():
            s += " <-- The VM is running, please shut it down before proceeding with the backup!"
            there_are_running_vms = True

        print_callback(s)

    for vm in vms_for_backup:
        if not vm.is_template():
            # already handled
            continue
        vm_sz = vm.get_disk_utilization()
        files_to_backup += file_to_backup (vm.dir_path, vm_sz)

        s = ""
        fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format(vm.name)

        fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        s += fmt.format("Template VM")

        fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(vm_sz))

        if vm.is_running():
            s += " <-- The VM is running, please shut it down before proceeding with the backup!"
            there_are_running_vms = True

        print_callback(s)

    # Dom0 user home
    if not 'dom0' in exclude_list:
        local_user = grp.getgrnam('qubes').gr_mem[0]
        home_dir = pwd.getpwnam(local_user).pw_dir
        # Home dir should have only user-owned files, so fix it now to prevent
        # permissions problems - some root-owned files can be left after
        # 'sudo bash' and similar commands
        subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])

        home_sz = get_disk_usage(home_dir)
        home_to_backup = [ { "path" : home_dir, "size": home_sz, "subdir": 'dom0-home'} ]
        files_to_backup += home_to_backup

        s = ""
        fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
        s += fmt.format('Dom0')

        fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
        s += fmt.format("User home")

        fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
        s += fmt.format(size_to_human(home_sz))

        print_callback(s)

    total_backup_sz = 0
    for file in files_to_backup:
        total_backup_sz += file["size"]

    s = ""
    for f in fields_to_display:
        fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    s = ""
    fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
    s += fmt.format("Total size:")
    fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2]["width"] + 1)
    s += fmt.format(size_to_human(total_backup_sz))
    print_callback(s)

    s = ""
    for f in fields_to_display:
        fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
        s += fmt.format('-')
    print_callback(s)

    stat = os.statvfs(base_backup_dir)
    backup_fs_free_sz = stat.f_bsize * stat.f_bavail
    print_callback("")
    if (total_backup_sz > backup_fs_free_sz):
        raise QubesException("Not enough space available on the backup filesystem!")

    if (there_are_running_vms):
        raise QubesException("Please shutdown all VMs before proceeding.")

    print_callback("-> Available space: {0}".format(size_to_human(backup_fs_free_sz)))

    return files_to_backup

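# Illustrative backup flow combining backup_prepare() above and backup_do()
# below (not part of the original source; '/mnt/backup' is a hypothetical
# target directory and the progress callback is only an example):
#   files = backup_prepare("/mnt/backup", exclude_list=['personal'])
#   backup_do("/mnt/backup", files,
#             progress_callback=lambda p: sys.stdout.write("\r%d%%" % p))
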
def backup_do(base_backup_dir, files_to_backup, progress_callback = None):

    total_backup_sz = 0
    for file in files_to_backup:
        total_backup_sz += file["size"]

    backup_dir = base_backup_dir + "/qubes-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
    if os.path.exists (backup_dir):
        raise QubesException("ERROR: the path {0} already exists?!".format(backup_dir))

    os.mkdir (backup_dir)

    if not os.path.exists (backup_dir):
        raise QubesException("Strange: couldn't create backup dir: {0}?!".format(backup_dir))

    bytes_backedup = 0
    for file in files_to_backup:
        # We prefer to use Linux's cp, because it nicely handles sparse files
        progress = bytes_backedup * 100 / total_backup_sz
        progress_callback(progress)
        dest_dir = backup_dir + '/' + file["subdir"]
        if file["subdir"] != "":
            retcode = subprocess.call (["mkdir", "-p", dest_dir])
            if retcode != 0:
                raise QubesException("Cannot create directory: {0}?!".format(dest_dir))

        retcode = subprocess.call (["cp", "-rp", file["path"], dest_dir])
        if retcode != 0:
            raise QubesException("Error while copying file {0} to {1}".format(file["path"], dest_dir))

        bytes_backedup += file["size"]
        progress = bytes_backedup * 100 / total_backup_sz
        progress_callback(progress)

def backup_restore_set_defaults(options):
    if 'use-default-netvm' not in options:
        options['use-default-netvm'] = False
    if 'use-none-netvm' not in options:
        options['use-none-netvm'] = False
    if 'use-default-template' not in options:
        options['use-default-template'] = False
    if 'dom0-home' not in options:
        options['dom0-home'] = True
    if 'replace-template' not in options:
        options['replace-template'] = []

    return options

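# Illustrative effect of backup_restore_set_defaults() (not part of the
# original source): missing keys are filled in, existing ones are kept:
#   backup_restore_set_defaults({'dom0-home': False})
#   -> {'dom0-home': False, 'use-default-netvm': False, 'use-none-netvm': False,
#       'use-default-template': False, 'replace-template': []}
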
def backup_restore_prepare(backup_dir, options = {}, host_collection = None):
    # Defaults
    backup_restore_set_defaults(options)

    #### Private functions begin
    def is_vm_included_in_backup (backup_dir, vm):
        if vm.qid == 0:
            # Dom0 is not included, obviously
            return False

        backup_vm_dir_path = vm.dir_path.replace (qubes_base_dir, backup_dir)

        if os.path.exists (backup_vm_dir_path):
            return True
        else:
            return False

    def find_template_name(template, replaces):
        rx_replace = re.compile("(.*):(.*)")
        for r in replaces:
            m = rx_replace.match(r)
            if m.group(1) == template:
                return m.group(2)

        return template
    #### Private functions end

    if not os.path.exists (backup_dir):
        raise QubesException("The backup directory doesn't exist!")

    backup_collection = QubesVmCollection(store_filename = backup_dir + "/qubes.xml")
    backup_collection.lock_db_for_reading()
    backup_collection.load()

    if host_collection is None:
        host_collection = QubesVmCollection()
        host_collection.lock_db_for_reading()
        host_collection.load()
        host_collection.unlock_db()

    backup_vms_list = [vm for vm in backup_collection.values()]
    host_vms_list = [vm for vm in host_collection.values()]
    vms_to_restore = {}

    there_are_conflicting_vms = False
    there_are_missing_templates = False
    there_are_missing_netvms = False
    dom0_username_mismatch = False
    restore_home = False
    # ... and the actual data
    for vm in backup_vms_list:
        if is_vm_included_in_backup (backup_dir, vm):

            vms_to_restore[vm.name] = {}
            vms_to_restore[vm.name]['vm'] = vm
            if 'exclude' in options.keys():
                vms_to_restore[vm.name]['excluded'] = vm.name in options['exclude']
                vms_to_restore[vm.name]['good-to-go'] = False

            if host_collection.get_vm_by_name (vm.name) is not None:
                vms_to_restore[vm.name]['already-exists'] = True
                vms_to_restore[vm.name]['good-to-go'] = False

            if vm.template is None:
                vms_to_restore[vm.name]['template'] = None
            else:
                templatevm_name = find_template_name(vm.template.name, options['replace-template'])
                vms_to_restore[vm.name]['template'] = templatevm_name
                template_vm_on_host = host_collection.get_vm_by_name (templatevm_name)

                # No template on the host?
                if not ((template_vm_on_host is not None) and template_vm_on_host.is_template()):
                    # Maybe the (custom) template is in the backup?
                    template_vm_on_backup = backup_collection.get_vm_by_name (templatevm_name)
                    if template_vm_on_backup is None or not \
                        (is_vm_included_in_backup(backup_dir, template_vm_on_backup) and \
                         template_vm_on_backup.is_template()):
                        if options['use-default-template']:
                            vms_to_restore[vm.name]['orig-template'] = templatevm_name
                            vms_to_restore[vm.name]['template'] = host_collection.get_default_template().name
                        else:
                            vms_to_restore[vm.name]['missing-template'] = True
                            vms_to_restore[vm.name]['good-to-go'] = False

            if vm.netvm is None:
                vms_to_restore[vm.name]['netvm'] = None
            else:
                netvm_name = vm.netvm.name
                vms_to_restore[vm.name]['netvm'] = netvm_name
                # Set to None to not confuse QubesVm object from backup
                # collection with host collection (further in clone_attrs). Set
                # directly _netvm to suppress setter action, especially
                # modifying firewall
                vm._netvm = None

                netvm_on_host = host_collection.get_vm_by_name (netvm_name)

                # No netvm on the host?
                if not ((netvm_on_host is not None) and netvm_on_host.is_netvm()):

                    # Maybe the (custom) netvm is in the backup?
                    netvm_on_backup = backup_collection.get_vm_by_name (netvm_name)
                    if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm() and is_vm_included_in_backup(backup_dir, netvm_on_backup)):
                        if options['use-default-netvm']:
                            vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm().name
                            vm.uses_default_netvm = True
                        elif options['use-none-netvm']:
                            vms_to_restore[vm.name]['netvm'] = None
                        else:
                            vms_to_restore[vm.name]['missing-netvm'] = True
                            vms_to_restore[vm.name]['good-to-go'] = False

            if 'good-to-go' not in vms_to_restore[vm.name].keys():
                vms_to_restore[vm.name]['good-to-go'] = True

    # ...and dom0 home
    if options['dom0-home'] and os.path.exists(backup_dir + '/dom0-home'):
        vms_to_restore['dom0'] = {}
        local_user = grp.getgrnam('qubes').gr_mem[0]

        dom0_homes = os.listdir(backup_dir + '/dom0-home')
        if len(dom0_homes) > 1:
            raise QubesException("More than one dom0 homedir in backup")

        vms_to_restore['dom0']['username'] = dom0_homes[0]
        if dom0_homes[0] != local_user:
            vms_to_restore['dom0']['username-mismatch'] = True
            if not options['ignore-dom0-username-mismatch']:
                vms_to_restore['dom0']['good-to-go'] = False

        if 'good-to-go' not in vms_to_restore['dom0']:
            vms_to_restore['dom0']['good-to-go'] = True

    return vms_to_restore

def backup_restore_print_summary(restore_info, print_callback = print_stdout):
|
|
|
|
fields = {
|
|
|
|
"qid": {"func": "vm.qid"},
|
|
|
|
|
|
|
|
"name": {"func": "('[' if vm.is_template() else '')\
|
|
|
|
+ ('{' if vm.is_netvm() else '')\
|
|
|
|
+ vm.name \
|
|
|
|
+ (']' if vm.is_template() else '')\
|
|
|
|
+ ('}' if vm.is_netvm() else '')"},
|
|
|
|
|
|
|
|
"type": {"func": "'Tpl' if vm.is_template() else \
|
2012-04-20 02:55:11 +02:00
|
|
|
'HVM' if vm.type == 'HVM' else \
|
2012-04-20 00:11:06 +02:00
|
|
|
vm.type.replace('VM','')"},
|
2012-02-10 10:56:03 +01:00
|
|
|
|
2012-03-09 11:28:06 +01:00
|
|
|
"updbl" : {"func": "'Yes' if vm.updateable else ''"},
|
2012-02-10 10:56:03 +01:00
|
|
|
|
2012-03-09 11:01:20 +01:00
|
|
|
"template": {"func": "'n/a' if vm.is_template() or vm.template is None else\
|
2012-02-10 10:56:03 +01:00
|
|
|
vm_info['template']"},
|
|
|
|
|
2012-04-20 02:56:09 +02:00
|
|
|
"netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
|
2012-02-10 10:56:03 +01:00
|
|
|
('*' if vm.uses_default_netvm else '') +\
|
2012-07-21 00:12:18 +02:00
|
|
|
vm_info['netvm'] if vm_info['netvm'] is not None else '-'"},
|
2012-02-10 10:56:03 +01:00
|
|
|
|
|
|
|
"label" : {"func" : "vm.label.name"},
|
|
|
|
}
|
|
|
|
|
|
|
|
fields_to_display = ["name", "type", "template", "updbl", "netvm", "label" ]
|
|
|
|
|
|
|
|
# First calculate the maximum width of each field we want to display
|
|
|
|
total_width = 0;
|
|
|
|
for f in fields_to_display:
|
|
|
|
fields[f]["max_width"] = len(f)
|
|
|
|
for vm_info in restore_info.values():
|
|
|
|
if 'vm' in vm_info.keys():
|
|
|
|
vm = vm_info['vm']
|
|
|
|
l = len(str(eval(fields[f]["func"])))
|
|
|
|
if l > fields[f]["max_width"]:
|
|
|
|
fields[f]["max_width"] = l
|
|
|
|
total_width += fields[f]["max_width"]
|
|
|
|
|
|
|
|
print_callback("")
|
|
|
|
print_callback("The following VMs are included in the backup:")
|
|
|
|
print_callback("")
|
|
|
|
|
|
|
|
# Display the header
|
|
|
|
s = ""
|
|
|
|
for f in fields_to_display:
|
|
|
|
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
|
|
|
|
s += fmt.format('-')
|
|
|
|
print_callback(s)
|
|
|
|
s = ""
|
|
|
|
for f in fields_to_display:
|
|
|
|
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
|
|
|
|
s += fmt.format(f)
|
|
|
|
print_callback(s)
|
|
|
|
s = ""
|
|
|
|
for f in fields_to_display:
|
|
|
|
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
|
|
|
|
s += fmt.format('-')
|
|
|
|
print_callback(s)
|
|
|
|
|
|
|
|
    for vm_info in restore_info.values():
        # Skip non-VM entries (e.g. the 'dom0' home entry)
        if 'vm' not in vm_info:
            continue
        vm = vm_info['vm']
        s = ""
        for f in fields_to_display:
            fmt = "{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
            s += fmt.format(eval(fields[f]["func"]))

        if 'excluded' in vm_info and vm_info['excluded']:
            s += " <-- Excluded from restore"
        elif 'already-exists' in vm_info:
            s += " <-- A VM with the same name already exists on the host!"
        elif 'missing-template' in vm_info:
            s += " <-- No matching template found on the host or in the backup!"
        elif 'missing-netvm' in vm_info:
            s += " <-- No matching netvm found on the host or in the backup!"
        elif 'orig-template' in vm_info:
            s += " <-- Original template was '%s'" % (vm_info['orig-template'])

        print_callback(s)

    if 'dom0' in restore_info.keys():
        s = ""
        for f in fields_to_display:
            fmt = "{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
            if f == "name":
                s += fmt.format("Dom0")
            elif f == "type":
                s += fmt.format("Home")
            else:
                s += fmt.format("")
        if 'username-mismatch' in restore_info['dom0']:
            s += " <-- username in the backup does not match the dom0 username"

        print_callback(s)

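# Added usage sketch (illustrative only; the actual caller and the name of the
# preparation routine are not shown in this file excerpt): a typical restore flow
# first builds restore_info from the backup, prints the summary, then restores:
#
#   restore_info = <prepare restore info for backup_dir and options>
#   backup_restore_print_summary(restore_info)
#   backup_restore_do(backup_dir, restore_info)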
def backup_restore_do(backup_dir, restore_info, host_collection = None, print_callback = print_stdout, error_callback = print_stderr):

    ### Private functions begin
    def restore_vm_dir(backup_dir, src_dir, dst_dir):

        backup_src_dir = src_dir.replace(qubes_base_dir, backup_dir)

        # We prefer to use Linux's cp, because it nicely handles sparse files
        retcode = subprocess.call(["cp", "-rp", backup_src_dir, dst_dir])
        if retcode != 0:
            raise QubesException("*** Error while copying file {0} to {1}".format(backup_src_dir, dst_dir))
    ### Private functions end

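    # Added note: when the caller passes its own host_collection it is expected to
    # already hold the VM-collection write lock; otherwise the lock is taken here
    # and released (tracked via lock_obtained) after the collection is saved.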
    lock_obtained = False
    if host_collection is None:
        host_collection = QubesVmCollection()
        host_collection.lock_db_for_writing()
        host_collection.load()
        lock_obtained = True

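    # Added note on ordering and error handling: VMs are restored in dependency
    # order (templates, then NetVMs/ProxyVMs, then AppVMs/HVMs, then non-default
    # netvm assignments, and finally the dom0 home). Each loop below follows the
    # same pattern: copy the VM directory, register the VM in host_collection,
    # and pop it back out of the collection if any later step fails.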
    # Add templates...
    for vm_info in restore_info.values():
        if not vm_info['good-to-go']:
            continue
        if 'vm' not in vm_info:
            continue
        vm = vm_info['vm']
        if not vm.is_template():
            continue
        print_callback("-> Restoring Template VM {0}...".format(vm.name))
        retcode = subprocess.call(["mkdir", "-p", vm.dir_path])
        if retcode != 0:
            error_callback("*** Cannot create directory: {0}?!".format(vm.dir_path))
            error_callback("Skipping...")
            continue

        new_vm = None

        try:
            restore_vm_dir(backup_dir, vm.dir_path, qubes_templates_dir)
            new_vm = host_collection.add_new_templatevm(vm.name,
                                           conf_file=vm.conf_file,
                                           dir_path=vm.dir_path,
                                           installed_by_rpm=False)

            new_vm.verify_files()
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Skipping VM: {0}".format(vm.name))
            if new_vm:
                host_collection.pop(new_vm.qid)
            continue

        try:
            new_vm.clone_attrs(vm)
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Some VM property will not be restored")

        try:
            new_vm.create_appmenus(verbose=True)
        except Exception as err:
            error_callback("ERROR during appmenu restore: {0}".format(err))
            error_callback("*** VM '{0}' will not have appmenus".format(vm.name))

    # ... then NetVMs...
    for vm_info in restore_info.values():
        if not vm_info['good-to-go']:
            continue
        if 'vm' not in vm_info:
            continue
        vm = vm_info['vm']
        if not vm.is_netvm():
            continue

        print_callback("-> Restoring {0} {1}...".format(vm.type, vm.name))
        retcode = subprocess.call(["mkdir", "-p", vm.dir_path])
        if retcode != 0:
            error_callback("*** Cannot create directory: {0}?!".format(vm.dir_path))
            error_callback("Skipping...")
            continue

        template = None
        if vm.template is not None:
            template_name = vm_info['template']
            template = host_collection.get_vm_by_name(template_name)

        new_vm = None
        try:
            restore_vm_dir(backup_dir, vm.dir_path, qubes_servicevms_dir)

            if vm.type == "NetVM":
                new_vm = host_collection.add_new_netvm(vm.name, template,
                                              conf_file=vm.conf_file,
                                              dir_path=vm.dir_path,
                                              label=vm.label)
            elif vm.type == "ProxyVM":
                new_vm = host_collection.add_new_proxyvm(vm.name, template,
                                              conf_file=vm.conf_file,
                                              dir_path=vm.dir_path,
                                              label=vm.label)
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Skipping VM: {0}".format(vm.name))
            if new_vm:
                host_collection.pop(new_vm.qid)
            continue

        try:
            new_vm.clone_attrs(vm)
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Some VM property will not be restored")

        try:
            new_vm.verify_files()
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Skipping VM: {0}".format(vm.name))
            host_collection.pop(new_vm.qid)
            continue

    # ... then appvms...
    for vm_info in restore_info.values():
        if not vm_info['good-to-go']:
            continue
        if 'vm' not in vm_info:
            continue
        vm = vm_info['vm']
        if not vm.is_appvm():
            continue

        print_callback("-> Restoring AppVM {0}...".format(vm.name))
        retcode = subprocess.call(["mkdir", "-p", vm.dir_path])
        if retcode != 0:
            error_callback("*** Cannot create directory: {0}?!".format(vm.dir_path))
            error_callback("Skipping...")
            continue

        template = None
        if vm.template is not None:
            template_name = vm_info['template']
            template = host_collection.get_vm_by_name(template_name)

        new_vm = None
        try:
            restore_vm_dir(backup_dir, vm.dir_path, qubes_appvms_dir)
            if vm.type == "HVM":
                new_vm = host_collection.add_new_hvm(vm.name,
                                          label=vm.label)
            else:
                new_vm = host_collection.add_new_appvm(vm.name, template,
                                          conf_file=vm.conf_file,
                                          dir_path=vm.dir_path,
                                          label=vm.label)
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Skipping VM: {0}".format(vm.name))
            if new_vm:
                host_collection.pop(new_vm.qid)
            continue

        try:
            new_vm.clone_attrs(vm)
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Some VM property will not be restored")

        try:
            new_vm.create_appmenus(verbose=True)
        except Exception as err:
            error_callback("ERROR during appmenu restore: {0}".format(err))
            error_callback("*** VM '{0}' will not have appmenus".format(vm.name))

        try:
            new_vm.verify_files()
        except Exception as err:
            error_callback("ERROR: {0}".format(err))
            error_callback("*** Skipping VM: {0}".format(vm.name))
            host_collection.pop(new_vm.qid)
            continue

    # Set network dependencies - only non-default netvm setting
    for vm_info in restore_info.values():
        if not vm_info['good-to-go']:
            continue
        if 'vm' not in vm_info:
            continue
        vm = vm_info['vm']
        host_vm = host_collection.get_vm_by_name(vm.name)
        if host_vm is None:
            # Failed/skipped VM
            continue

        if not vm.uses_default_netvm:
            host_vm.netvm = host_collection.get_vm_by_name(vm_info['netvm']) if vm_info['netvm'] is not None else None

    host_collection.save()
    if lock_obtained:
        host_collection.unlock_db()

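    # Added note: the dom0 home directory is restored only after the collection has
    # been saved and (if taken here) unlocked, since it does not modify any VM
    # entries; any existing files are moved into a timestamped backup directory
    # before the backed-up copies are put in place.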
    # ... and dom0 home as the last step
    if 'dom0' in restore_info.keys() and restore_info['dom0']['good-to-go']:
        backup_info = restore_info['dom0']
        local_user = grp.getgrnam('qubes').gr_mem[0]
        home_dir = pwd.getpwnam(local_user).pw_dir
        backup_dom0_home_dir = backup_dir + '/dom0-home/' + backup_info['username']
        restore_home_backupdir = "home-pre-restore-{0}".format(time.strftime("%Y-%m-%d-%H%M%S"))

        print_callback("-> Restoring home of user '{0}'...".format(local_user))
        print_callback("--> Existing files/dirs backed up in '{0}' dir".format(restore_home_backupdir))
        os.mkdir(home_dir + '/' + restore_home_backupdir)
        for f in os.listdir(backup_dom0_home_dir):
            home_file = home_dir + '/' + f
            if os.path.exists(home_file):
                # Move the existing file/dir aside before restoring the backed-up one
                os.rename(home_file, home_dir + '/' + restore_home_backupdir + '/' + f)
            retcode = subprocess.call(["cp", "-nrp", backup_dom0_home_dir + '/' + f, home_file])
            if retcode != 0:
                error_callback("*** Error while copying file {0} to {1}".format(backup_dom0_home_dir + '/' + f, home_file))

# vim:sw=4:et: