Merge remote-tracking branch 'marmarek/master' into core3-devel

The following list is bollocks. There were many, many more.

Conflicts:
    core-modules/003QubesTemplateVm.py
    core-modules/005QubesNetVm.py
    core/qubes.py
    core/storage/__init__.py
    core/storage/xen.py
    doc/qvm-tools/qvm-pci.rst
    doc/qvm-tools/qvm-prefs.rst
    qubes/tools/qmemmand.py
    qvm-tools/qvm-create
    qvm-tools/qvm-prefs
    qvm-tools/qvm-start
    tests/__init__.py
    vm-config/xen-vm-template-hvm.xml

This commit took 2 days (26-27.01.2016) and put our friendship to test.
    --Wojtek and Marek
Wojtek Porczyk 2016-03-03 01:05:23 +01:00
commit 9eafa57539
70 changed files with 4548 additions and 994 deletions

View File

@ -59,9 +59,10 @@ endif
python setup.py install -O1 --skip-build --root $(DESTDIR)
# $(MAKE) install -C tests
$(MAKE) install -C relaxng
mkdir -p $(DESTDIR)/etc/qubes
cp etc/storage.conf $(DESTDIR)/etc/qubes/
ifeq ($(BACKEND_VMM),xen)
# Currently supported only on xen
mkdir -p $(DESTDIR)/etc/qubes
cp etc/qmemman.conf $(DESTDIR)/etc/qubes/
endif
$(MAKE) install -C dispvm

View File

@ -23,6 +23,8 @@
#
import datetime
import base64
import hashlib
import logging
import lxml.etree
import os
@ -34,15 +36,17 @@ import sys
import time
import uuid
import xml.parsers.expat
import signal
from qubes import qmemman
from qubes import qmemman_algo
import libvirt
import warnings
from qubes.qubes import dry_run,vmm
from qubes.qubes import register_qubes_vm_class
from qubes.qubes import QubesVmCollection,QubesException,QubesHost,QubesVmLabels
from qubes.qubes import defaults,system_path,vm_files,qubes_max_qid
from qubes.storage import get_pool
qmemman_present = False
try:
from qubes.qmemman_client import QMemmanClient
@ -105,6 +109,7 @@ class QubesVm(object):
"name": { "order": 1 },
"uuid": { "order": 0, "eval": 'uuid.UUID(value) if value else None' },
"dir_path": { "default": None, "order": 2 },
"pool_name": { "default":"default" },
"conf_file": {
"func": lambda value: self.absolute_path(value, self.name +
".conf"),
@ -145,6 +150,7 @@ class QubesVm(object):
"order": 31,
"func": lambda value: value if not self.uses_default_kernelopts\
else defaults["kernelopts_pcidevs"] if len(self.pcidevs)>0 \
else self.template.kernelopts if self.template \
else defaults["kernelopts"] },
"mac": { "attr": "_mac", "default": None },
"include_in_backups": {
@ -193,10 +199,10 @@ class QubesVm(object):
'kernelopts', 'services', 'installed_by_rpm',\
'uses_default_netvm', 'include_in_backups', 'debug',\
'qrexec_timeout', 'autostart', 'uses_default_dispvm_netvm',
'backup_content', 'backup_size', 'backup_path' ]:
'backup_content', 'backup_size', 'backup_path', 'pool_name' ]:
attrs[prop]['save'] = lambda prop=prop: str(getattr(self, prop))
# Simple paths
for prop in ['conf_file']:
for prop in ['conf_file', 'firewall_conf']:
attrs[prop]['save'] = \
lambda prop=prop: self.relative_path(getattr(self, prop))
attrs[prop]['save_skip'] = \
@ -334,12 +340,23 @@ class QubesVm(object):
if len(self.pcidevs) > 0:
self.services['meminfo-writer'] = False
if 'xml_element' not in kwargs:
# New VM, disable updates check if requested for new VMs
if os.path.exists(qubes.qubesutils.UPDATES_DEFAULT_VM_DISABLE_FLAG):
self.services['qubes-update-check'] = False
# Initialize VM image storage class
self.storage = defaults["storage_class"](self)
self.storage = get_pool(self.pool_name, self).getStorage()
self.dir_path = self.storage.vmdir
self.icon_path = os.path.join(self.storage.vmdir, 'icon.png')
self.conf_file = os.path.join(self.storage.vmdir, self.name + '.conf')
if hasattr(self, 'kernels_dir'):
self.storage.modules_img = os.path.join(self.kernels_dir,
modules_path = os.path.join(self.kernels_dir,
"modules.img")
self.storage.modules_img_rw = self.kernel is None
if os.path.exists(modules_path):
self.storage.modules_img = modules_path
self.storage.modules_img_rw = self.kernel is None
# Some additional checks for template based VM
if self.template is not None:
@ -369,8 +386,13 @@ class QubesVm(object):
def absolute_path(self, arg, default):
if arg is not None and os.path.isabs(arg):
return arg
else:
elif self.dir_path is not None:
return os.path.join(self.dir_path, (arg if arg is not None else default))
else:
# cannot provide any meaningful value without dir_path; this is
# only to import some older format of `qubes.xml` (for example
# during migration from older release)
return None
def _absolute_path_gen(self, default):
return lambda value: self.absolute_path(value, default)
@ -502,13 +524,14 @@ class QubesVm(object):
if not os.path.exists(os.path.join(system_path[
'qubes_kernels_base_dir'], new_value)):
raise QubesException("Kernel '%s' not installed" % new_value)
for f in ('vmlinuz', 'modules.img'):
for f in ('vmlinuz', 'initramfs'):
if not os.path.exists(os.path.join(
system_path['qubes_kernels_base_dir'], new_value, f)):
raise QubesException(
"Kernel '%s' not properly installed: missing %s "
"file" % (new_value, f))
self._kernel = new_value
self.uses_default_kernel = False
@property
def updateable(self):
@ -543,9 +566,16 @@ class QubesVm(object):
return False
if len(name) > 31:
return False
if name == 'lost+found':
# avoid conflict when /var/lib/qubes/appvms is mounted on
# separate partition
return False
return re.match(r"^[a-zA-Z][a-zA-Z0-9_.-]*$", name) is not None
def pre_rename(self, new_name):
if self.autostart:
subprocess.check_call(['sudo', 'systemctl', '-q', 'disable',
'qubes-vm@{}.service'.format(self.name)])
# fire hooks
for hook in self.hooks_pre_rename:
hook(self, new_name)
@ -583,11 +613,17 @@ class QubesVm(object):
self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath)
if hasattr(self, 'kernels_dir') and self.kernels_dir is not None:
self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath)
if self.firewall_conf is not None:
self.firewall_conf = self.firewall_conf.replace(old_dirpath,
new_dirpath)
self._update_libvirt_domain()
self.post_rename(old_name)
def post_rename(self, old_name):
if self.autostart:
# force setter to be called again
self.autostart = self.autostart
# fire hooks
for hook in self.hooks_post_rename:
hook(self, old_name)
@ -684,9 +720,11 @@ class QubesVm(object):
try:
return self.libvirt_domain.ID()
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return -1
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
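The e.err[0] -> e.get_error_code() switch repeats throughout this file. A minimal sketch of the new pattern, outside any Qubes code (the VM name is made up; get_error_code() is the documented accessor in the libvirt Python bindings, whereas e.err[0] poked into the raw virError tuple):

import libvirt

conn = libvirt.open('xen:///')
try:
    xid = conn.lookupByName('some-vm').ID()
except libvirt.libvirtError as e:
    # ask the exception for its error code instead of unpacking e.err
    if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
        xid = -1    # domain gone: report "not running", as the diff does
    else:
        raise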
@ -696,7 +734,17 @@ class QubesVm(object):
def _update_libvirt_domain(self):
domain_config = self.create_config_file()
self._libvirt_domain = vmm.libvirt_conn.defineXML(domain_config)
try:
self._libvirt_domain = vmm.libvirt_conn.defineXML(domain_config)
except libvirt.libvirtError as e:
# shouldn't this be in QubesHVm implementation?
if e.get_error_code() == libvirt.VIR_ERR_OS_TYPE and \
e.get_str2() == 'hvm':
raise QubesException("HVM domains not supported on this "
"machine. Check BIOS settings for "
"VT-x/AMD-V extensions.")
else:
raise e
self.uuid = uuid.UUID(bytes=self._libvirt_domain.UUID())
@property
@ -726,9 +774,14 @@ class QubesVm(object):
return 0
return self.libvirt_domain.info()[1]
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
# libxl_domain_info failed - domain no longer exists
elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
return 0
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
def get_cputime(self):
@ -740,9 +793,14 @@ class QubesVm(object):
return 0
return self.libvirt_domain.info()[4]
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
# libxl_domain_info failed - domain no longer exists
elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
return 0
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
def get_mem_static_max(self):
@ -752,7 +810,7 @@ class QubesVm(object):
try:
return self.libvirt_domain.maxMemory()
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
else:
raise
@ -781,9 +839,11 @@ class QubesVm(object):
else:
return 0
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
def get_disk_utilization_root_img(self):
@ -820,7 +880,7 @@ class QubesVm(object):
else:
return 'Halted'
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return "Halted"
else:
raise
@ -848,15 +908,19 @@ class QubesVm(object):
return True
def is_running(self):
if vmm.offline_mode:
return False
try:
if self.libvirt_domain.isActive():
return True
else:
return False
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return False
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
def is_paused(self):
@ -866,9 +930,11 @@ class QubesVm(object):
else:
return False
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return False
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
def get_start_time(self):
@ -974,8 +1040,9 @@ class QubesVm(object):
return None
if tz_info.st_nlink > 1:
p = subprocess.Popen(['find', '/usr/share/zoneinfo',
'-inum', str(tz_info.st_ino)],
stdout=subprocess.PIPE)
'-inum', str(tz_info.st_ino),
'-print', '-quit'],
stdout=subprocess.PIPE)
tz_path = p.communicate()[0].strip()
return tz_path.replace('/usr/share/zoneinfo/', '')
return None
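For reference, a pure-Python equivalent of the find -inum trick above (a sketch; the diff itself only adds -print -quit, so find stops at the first match instead of listing every hardlink):

import os

def timezone_from_localtime(base='/usr/share/zoneinfo'):
    tz = os.stat('/etc/localtime')
    if tz.st_nlink <= 1:
        return None                               # not hardlinked into zoneinfo
    for root, _dirs, files in os.walk(base):
        for name in files:
            path = os.path.join(root, name)
            if os.stat(path).st_ino == tz.st_ino:   # same inode, same file
                return os.path.relpath(path, base)
    return None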
@ -1008,6 +1075,10 @@ class QubesVm(object):
self.qdb.write("/name", self.name)
self.qdb.write("/qubes-vm-type", self.type)
self.qdb.write("/qubes-vm-updateable", str(self.updateable))
self.qdb.write("/qubes-vm-persistence",
"full" if self.updateable else "rw-only")
self.qdb.write("/qubes-base-template",
self.template.name if self.template else '')
if self.is_netvm():
self.qdb.write("/qubes-netvm-gateway", self.gateway)
@ -1036,6 +1107,8 @@ class QubesVm(object):
self.qdb.write("/qubes-debug-mode", str(int(self.debug)))
self.provide_random_seed_to_vm()
# TODO: Currently the whole qmemman is quite Xen-specific, so stay with
# xenstore for it until decided otherwise
if qmemman_present:
@ -1046,6 +1119,14 @@ class QubesVm(object):
for hook in self.hooks_create_qubesdb_entries:
hook(self)
def provide_random_seed_to_vm(self):
f = open('/dev/urandom', 'r')
s = f.read(64)
if len(s) != 64:
raise IOError("failed to read seed from /dev/urandom")
f.close()
self.qdb.write("/qubes-random-seed", base64.b64encode(hashlib.sha512(s).digest()))
def _format_net_dev(self, ip, mac, backend):
template = " <interface type='ethernet'>\n" \
" <mac address='{mac}'/>\n" \
@ -1096,8 +1177,10 @@ class QubesVm(object):
args['dns2'] = self.secondary_dns
args['netmask'] = self.netmask
args['netdev'] = self._format_net_dev(self.ip, self.mac, self.netvm.name)
args['disable_network1'] = '';
args['disable_network2'] = '';
args['network_begin'] = ''
args['network_end'] = ''
args['no_network_begin'] = '<!--'
args['no_network_end'] = '-->'
else:
args['ip'] = ''
args['mac'] = ''
@ -1106,8 +1189,10 @@ class QubesVm(object):
args['dns2'] = ''
args['netmask'] = ''
args['netdev'] = ''
args['disable_network1'] = '<!--';
args['disable_network2'] = '-->';
args['network_begin'] = '<!--'
args['network_end'] = '-->'
args['no_network_begin'] = ''
args['no_network_end'] = ''
args.update(self.storage.get_config_params())
if hasattr(self, 'kernelopts'):
args['kernelopts'] = self.kernelopts
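The four renamed marker args wrap two alternative blocks in the libvirt XML template: whichever pair is set to '<!--'/'-->' comments its block out. A toy rendering (the template text here is made up, not the real vm-template):

template = ("{network_begin}<interface type='ethernet'/>{network_end}\n"
            "{no_network_begin}<nonet/>{no_network_end}")
args = {'network_begin': '', 'network_end': '',
        'no_network_begin': '<!--', 'no_network_end': '-->'}
print(template.format(**args))
# <interface type='ethernet'/>
# <!--<nonet/>-->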
@ -1192,16 +1277,20 @@ class QubesVm(object):
shutil.copy(self.label.icon_path, self.icon_path)
# Make sure that we have UUID allocated
self._update_libvirt_domain()
if not vmm.offline_mode:
self._update_libvirt_domain()
else:
self.uuid = uuid.uuid4()
# fire hooks
for hook in self.hooks_create_on_disk:
hook(self, verbose, source_template=source_template)
def get_clone_attrs(self):
attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \
'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \
'_mac', 'pcidevs', 'include_in_backups', '_label', 'default_user']
attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm',
'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts',
'services', 'vcpus', '_mac', 'pcidevs', 'include_in_backups',
'_label', 'default_user', 'qrexec_timeout']
# fire hooks
for hook in self.hooks_get_clone_attrs:
@ -1246,6 +1335,9 @@ class QubesVm(object):
print >> sys.stderr, "--> Copying icon: {0} -> {1}".format(src_vm.icon_path, self.icon_path)
shutil.copy(src_vm.icon_path, self.icon_path)
if src_vm.has_firewall():
self.write_firewall_conf(src_vm.get_firewall_conf())
# Make sure that we have UUID allocated
self._update_libvirt_domain()
@ -1287,10 +1379,12 @@ class QubesVm(object):
try:
self.libvirt_domain.undefine()
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
# already undefined
pass
else:
print >>sys.stderr, "libvirt error code: {!r}".format(
e.get_error_code())
raise
self.storage.remove_from_disk()
@ -1527,7 +1621,7 @@ class QubesVm(object):
call_kwargs = {}
if ignore_stderr or not passio:
null = open("/dev/null", "rw")
null = open("/dev/null", "w+")
call_kwargs['stderr'] = null
if not passio:
call_kwargs['stdin'] = null
@ -1552,16 +1646,22 @@ class QubesVm(object):
return retcode
def run_service(self, service, source="dom0", user=None,
passio_popen = False, input=None):
if input and passio_popen:
raise ValueError("'input' and 'passio_popen' cannot be used "
"together")
if input:
passio_popen=False, input=None, localcmd=None, gui=False,
wait=True):
if bool(input) + bool(passio_popen) + bool(localcmd) > 1:
raise ValueError("'input', 'passio_popen', 'localcmd' cannot be "
"used together")
if localcmd:
return self.run("QUBESRPC %s %s" % (service, source),
localcmd="echo %s" % input, user=user, wait=True)
localcmd=localcmd, user=user, wait=wait, gui=gui)
elif input:
return self.run("QUBESRPC %s %s" % (service, source),
localcmd="echo %s" % input, user=user, wait=wait,
gui=gui)
else:
return self.run("QUBESRPC %s %s" % (service, source),
passio_popen=passio_popen, user=user, wait=True)
passio_popen=passio_popen, user=user, wait=wait,
gui=gui)
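Hypothetical call sites for the extended signature (the service name is made up; per the check above, input, passio_popen and localcmd are mutually exclusive):

vm.run_service('test.Echo', input='ping')                  # echo a string into the service
vm.run_service('test.Echo', localcmd='cat /etc/hostname')  # arbitrary local command as the dom0 end
p = vm.run_service('test.Echo', passio_popen=True, gui=False)
out, _ = p.communicate('ping\n')                           # drive the pipe directly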
def attach_network(self, verbose = False, wait = True, netvm = None):
self.log.debug('attach_network(netvm={!r})'.format(netvm))
@ -1646,7 +1746,10 @@ class QubesVm(object):
if verbose:
print >> sys.stderr, "--> Sending monitor layout..."
monitor_layout = qubes.monitorlayoutnotify.get_monitor_layout()
qubes.monitorlayoutnotify.notify_vm(self, monitor_layout)
# Notify VM only if we've got a monitor_layout which is not empty
# or else we break proper VM resolution set by gui-agent
if len(monitor_layout) > 0:
qubes.monitorlayoutnotify.notify_vm(self, monitor_layout)
except ImportError as e:
print >>sys.stderr, "ERROR: %s" % e
@ -1671,14 +1774,56 @@ class QubesVm(object):
def start_qubesdb(self):
self.log.debug('start_qubesdb()')
pidfile = '/var/run/qubes/qubesdb.{}.pid'.format(self.name)
try:
if os.path.exists(pidfile):
old_qubesdb_pid = open(pidfile, 'r').read()
try:
os.kill(int(old_qubesdb_pid), signal.SIGTERM)
except OSError:
raise QubesException(
"Failed to kill old QubesDB instance (PID {}). "
"Terminate it manually and retry. "
"If that isn't QubesDB process, "
"remove the pidfile: {}".format(old_qubesdb_pid,
pidfile))
timeout = 25
while os.path.exists(pidfile) and timeout:
time.sleep(0.2)
timeout -= 1
except IOError: # ENOENT (pidfile)
pass
# force connection to a new daemon
self._qdb_connection = None
retcode = subprocess.call ([
system_path["qubesdb_daemon_path"],
str(self.xid),
self.name])
if retcode != 0:
self.force_shutdown()
raise OSError("ERROR: Cannot execute qubesdb-daemon!")
def request_memory(self, mem_required = None):
# Overhead of per-VM/per-vcpu Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
# see https://wiki.openstack.org/wiki/XenServer/Overhead
# add an extra MB because Nova rounds up to MBs
MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
if mem_required is None:
mem_required = int(self.memory) * 1024 * 1024
if qmemman_present:
qmemman_client = QMemmanClient()
try:
mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE + self.vcpus * MEM_OVERHEAD_PER_VCPU
got_memory = qmemman_client.request_memory(mem_required_with_overhead)
except IOError as e:
raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
if not got_memory:
qmemman_client.close()
raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
return qmemman_client
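A back-of-envelope check of the overhead constants above (plain arithmetic, not part of the diff): for a VM with memory=400 and vcpus=2,

MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024        # 4 MiB
MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2      # 1.5 MiB
total = 400 * 1024 * 1024 + MEM_OVERHEAD_BASE + 2 * MEM_OVERHEAD_PER_VCPU
# total == 426770432 bytes == 407 MiB, i.e. 400 + 4 + 2 * 1.5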
def start(self, verbose = False, preparing_dvm = False, start_guid = True,
notify_function = None, mem_required = None):
self.log.debug('start('
@ -1706,24 +1851,14 @@ class QubesVm(object):
self._update_libvirt_domain()
if mem_required is None:
mem_required = int(self.memory) * 1024 * 1024
if qmemman_present:
qmemman_client = QMemmanClient()
try:
got_memory = qmemman_client.request_memory(mem_required)
except IOError as e:
raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
if not got_memory:
qmemman_client.close()
raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
qmemman_client = self.request_memory(mem_required)
# Bind pci devices to pciback driver
for pci in self.pcidevs:
try:
nd = vmm.libvirt_conn.nodeDeviceLookupByName('pci_0000_' + pci.replace(':','_').replace('.','_'))
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_NO_NODE_DEVICE:
if e.get_error_code() == libvirt.VIR_ERR_NO_NODE_DEVICE:
raise QubesException(
"PCI device {} does not exist (domain {})".
format(pci, self.name))
@ -1732,39 +1867,43 @@ class QubesVm(object):
try:
nd.dettach()
except libvirt.libvirtError as e:
if e.err[0] == libvirt.VIR_ERR_INTERNAL_ERROR:
# allready detached
if e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
# already detached
pass
else:
raise
self.libvirt_domain.createWithFlags(libvirt.VIR_DOMAIN_START_PAUSED)
if verbose:
print >> sys.stderr, "--> Starting Qubes DB..."
self.start_qubesdb()
try:
if verbose:
print >> sys.stderr, "--> Starting Qubes DB..."
self.start_qubesdb()
xid = self.xid
self.log.debug('xid={}'.format(xid))
xid = self.xid
self.log.debug('xid={}'.format(xid))
if preparing_dvm:
self.services['qubes-dvm'] = True
if verbose:
print >> sys.stderr, "--> Setting Qubes DB info for the VM..."
self.create_qubesdb_entries()
if preparing_dvm:
self.services['qubes-dvm'] = True
if verbose:
print >> sys.stderr, "--> Setting Qubes DB info for the VM..."
self.create_qubesdb_entries()
if verbose:
print >> sys.stderr, "--> Updating firewall rules..."
netvm = self.netvm
while netvm is not None:
if netvm.is_proxyvm() and netvm.is_running():
netvm.write_iptables_qubesdb_entry()
netvm = netvm.netvm
if verbose:
print >> sys.stderr, "--> Updating firewall rules..."
netvm = self.netvm
while netvm is not None:
if netvm.is_proxyvm() and netvm.is_running():
netvm.write_iptables_qubesdb_entry()
netvm = netvm.netvm
# fire hooks
for hook in self.hooks_start:
hook(self, verbose = verbose, preparing_dvm = preparing_dvm,
start_guid = start_guid, notify_function = notify_function)
# fire hooks
for hook in self.hooks_start:
hook(self, verbose = verbose, preparing_dvm = preparing_dvm,
start_guid = start_guid, notify_function = notify_function)
except:
self.force_shutdown()
raise
if verbose:
print >> sys.stderr, "--> Starting the VM..."
@ -1828,6 +1967,15 @@ class QubesVm(object):
if not self.is_running():
raise QubesException ("VM already stopped!")
# try to gracefully detach PCI devices before shutdown, to mitigate
# timeouts on forcible detach at domain destroy; if that fails, too bad
try:
for pcidev in self.pcidevs:
self.libvirt_domain.detachDevice(self._format_pci_dev(pcidev))
except libvirt.libvirtError as e:
print >>sys.stderr, "WARNING: {}, continuing VM shutdown " \
"anyway".format(str(e))
self.libvirt_domain.shutdown()
def force_shutdown(self, xid = None):

View File

@ -0,0 +1,77 @@
#!/usr/bin/python2
# -*- encoding: utf8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from qubes.qubes import (
register_qubes_vm_class,
QubesException,
QubesVm,
)
from time import sleep
class QubesResizableVm(QubesVm):
def resize_root_img(self, size, allow_start=False):
if self.template:
raise QubesException("Cannot resize root.img of template-based VM"
". Resize the root.img of the template "
"instead.")
if self.is_running():
raise QubesException("Cannot resize root.img of running VM")
if size < self.get_root_img_sz():
raise QubesException(
"For your own safety shringing of root.img is disabled. If "
"you really know what you are doing, use 'truncate' manually.")
f_root = open(self.root_img, "a+b")
f_root.truncate(size)
f_root.close()
class QubesResizableVmWithResize2fs(QubesResizableVm):
def resize_root_img(self, size, allow_start=False):
super(QubesResizableVmWithResize2fs, self).\
resize_root_img(size, allow_start=allow_start)
if not allow_start:
raise QubesException("VM start required to complete the "
"operation, but not allowed. Either run the "
"operation again allowing VM start this "
"time, or run resize2fs in the VM manually.")
self.start(start_guid=False)
self.run("resize2fs /dev/mapper/dmroot", user="root", wait=True,
gui=False)
self.shutdown()
while self.is_running():
sleep(1)
register_qubes_vm_class(QubesResizableVm)
register_qubes_vm_class(QubesResizableVmWithResize2fs)
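A hedged usage sketch of the new classes (VM name and target size are made up; AppVMs pick this up via the QubesAppVm base-class change later in this commit):

vm = qvm_collection.get_vm_by_name('work')             # an AppVM
vm.resize_root_img(20 * 1024 ** 3, allow_start=True)   # grow, boot, resize2fs, shutdown
# without allow_start=True the image is grown but a QubesException asks you
# to either allow the start or run resize2fs inside the VM manually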

View File

@ -58,6 +58,8 @@ class QubesProxyVm(QubesNetVm):
def _set_netvm(self, new_netvm):
old_netvm = self.netvm
super(QubesProxyVm, self)._set_netvm(new_netvm)
if vmm.offline_mode:
return
if self.netvm is not None:
self.netvm.add_external_ip_permission(self.get_xid())
self.write_netvm_domid_entry()

View File

@ -24,9 +24,15 @@
import os.path
from qubes.qubes import QubesVm,QubesVmLabel,register_qubes_vm_class,system_path
from qubes.qubes import (
register_qubes_vm_class,
system_path,
QubesResizableVmWithResize2fs,
QubesVmLabel,
)
class QubesAppVm(QubesVm):
class QubesAppVm(QubesResizableVmWithResize2fs):
"""
A class that represents an AppVM. A child of QubesVm.
"""

View File

@ -97,6 +97,11 @@ class QubesDisposableVm(QubesVm):
disp_template = kwargs['disp_template']
kwargs['template'] = disp_template.template
kwargs['dir_path'] = disp_template.dir_path
kwargs['kernel'] = disp_template.kernel
kwargs['uses_default_kernel'] = disp_template.uses_default_kernel
kwargs['kernelopts'] = disp_template.kernelopts
kwargs['uses_default_kernelopts'] = \
disp_template.uses_default_kernelopts
super(QubesDisposableVm, self).__init__(**kwargs)
assert self.template is not None, "Missing template for DisposableVM!"
@ -151,6 +156,7 @@ class QubesDisposableVm(QubesVm):
def create_qubesdb_entries(self):
super(QubesDisposableVm, self).create_qubesdb_entries()
self.qdb.write("/qubes-vm-persistence", "none")
self.qdb.write('/qubes-restore-complete', '1')
def start(self, verbose = False, **kwargs):
@ -162,8 +168,13 @@ class QubesDisposableVm(QubesVm):
if self.get_power_state() != "Halted":
raise QubesException ("VM is already running!")
# skip netvm state checking - calling VM have the same netvm, so it
# must be already running
if self.netvm is not None:
if self.netvm.qid != 0:
if not self.netvm.is_running():
if verbose:
print >> sys.stderr, "--> Starting NetVM {0}...".\
format(self.netvm.name)
self.netvm.start(verbose=verbose, **kwargs)
if verbose:
print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type)
@ -172,17 +183,7 @@ class QubesDisposableVm(QubesVm):
# refresh config file
domain_config = self.create_config_file()
if qmemman_present:
mem_required = int(self.memory) * 1024 * 1024
print >>sys.stderr, "time=%s, getting %d memory" % (str(time.time()), mem_required)
qmemman_client = QMemmanClient()
try:
got_memory = qmemman_client.request_memory(mem_required)
except IOError as e:
raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
if not got_memory:
qmemman_client.close()
raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
qmemman_client = self.request_memory()
# dispvm cannot have PCI devices
assert (len(self.pcidevs) == 0), "DispVM cannot have PCI devices"
@ -236,5 +237,9 @@ class QubesDisposableVm(QubesVm):
return self.xid
def remove_from_disk(self):
# nothing to remove
pass
# register classes
register_qubes_vm_class(QubesDisposableVm)

View File

@ -26,16 +26,20 @@ import os
import os.path
import signal
import subprocess
import stat
import sys
import re
import shutil
import stat
from xml.etree import ElementTree
from qubes.qubes import QubesVm,register_qubes_vm_class,vmm,dry_run
from qubes.qubes import system_path,defaults
from qubes.qubes import QubesException
from qubes.qubes import (
dry_run,
defaults,
register_qubes_vm_class,
system_path,
vmm,
QubesException,
QubesResizableVm,
)
system_path["config_template_hvm"] = '/usr/share/qubes/vm-template-hvm.xml'
@ -44,7 +48,7 @@ defaults["hvm_private_img_size"] = 2*1024*1024*1024
defaults["hvm_memory"] = 512
class QubesHVm(QubesVm):
class QubesHVm(QubesResizableVm):
"""
A class that represents an HVM. A child of QubesVm.
"""
@ -96,8 +100,6 @@ class QubesHVm(QubesVm):
(not 'xml_element' in kwargs or kwargs['xml_element'].get('guiagent_installed') is None):
self.services['meminfo-writer'] = False
self.storage.volatile_img = None
@property
def type(self):
return "HVM"
@ -233,35 +235,7 @@ class QubesHVm(QubesVm):
if self.is_running():
raise NotImplementedError("Online resize of HVM's private.img not implemented, shutdown the VM first")
f_private = open (self.private_img, "a+b")
f_private.truncate (size)
f_private.close ()
def resize_root_img(self, size):
if self.template:
raise QubesException("Cannot resize root.img of template-based VM"
". Resize the root.img of the template "
"instead.")
if self.is_running():
raise QubesException("Cannot resize root.img of running HVM")
if size < self.get_root_img_sz():
raise QubesException(
"For your own safety shringing of root.img is disabled. If "
"you really know what you are doing, use 'truncate' manually.")
f_root = open (self.root_img, "a+b")
f_root.truncate (size)
f_root.close ()
def get_rootdev(self, source_template=None):
if self.template:
return "'script:snapshot:{template_root}:{volatile},xvda,w',".format(
template_root=self.template.root_img,
volatile=self.volatile_img)
else:
return "'script:file:{root_img},xvda,w',".format(root_img=self.root_img)
self.storage.resize_private_img(size)
def get_config_params(self):
@ -272,8 +246,8 @@ class QubesHVm(QubesVm):
params['volatiledev'] = ''
if self.timezone.lower() == 'localtime':
params['time_basis'] = 'localtime'
params['timeoffset'] = '0'
params['time_basis'] = 'localtime'
params['timeoffset'] = '0'
elif self.timezone.isdigit():
params['time_basis'] = 'UTC'
params['timeoffset'] = self.timezone
@ -295,34 +269,6 @@ class QubesHVm(QubesVm):
return True
def reset_volatile_storage(self, **kwargs):
assert not self.is_running(), "Attempt to clean volatile image of running VM!"
source_template = kwargs.get("source_template", self.template)
if source_template is None:
# Nothing to do on non-template based VM
return
if os.path.exists (self.volatile_img):
if self.debug:
if os.path.getmtime(self.template.root_img) > os.path.getmtime(self.volatile_img):
if kwargs.get("verbose", False):
print >>sys.stderr, "--> WARNING: template have changed, resetting root.img"
else:
if kwargs.get("verbose", False):
print >>sys.stderr, "--> Debug mode: not resetting root.img"
print >>sys.stderr, "--> Debug mode: if you want to force root.img reset, either update template VM, or remove volatile.img file"
return
os.remove (self.volatile_img)
f_volatile = open (self.volatile_img, "w")
f_root = open (self.template.root_img, "r")
f_root.seek(0, os.SEEK_END)
f_volatile.truncate (f_root.tell()) # make empty sparse file of the same size as root.img
f_volatile.close ()
f_root.close()
@property
def vif(self):
if self.xid < 0:
@ -367,12 +313,16 @@ class QubesHVm(QubesVm):
return -1
def start(self, *args, **kwargs):
# make it available to storage.prepare_for_vm_startup, which is
# called before actually building VM libvirt configuration
self.storage.drive = self.drive
if self.template and self.template.is_running():
raise QubesException("Cannot start the HVM while its template is running")
try:
if 'mem_required' not in kwargs:
# Reserve 32MB for stubdomain
kwargs['mem_required'] = (self.memory + 32) * 1024 * 1024
# Reserve 44MB for stubdomain
kwargs['mem_required'] = (self.memory + 44) * 1024 * 1024
return super(QubesHVm, self).start(*args, **kwargs)
except QubesException as e:
capabilities = vmm.libvirt_conn.getCapabilities()
@ -400,25 +350,28 @@ class QubesHVm(QubesVm):
if (retcode != 0) :
raise QubesException("Cannot start qubes-guid!")
def start_guid(self, verbose = True, notify_function = None,
before_qrexec=False, **kwargs):
# If user force the guiagent, start_guid will mimic a standard QubesVM
if not before_qrexec and self.guiagent_installed:
kwargs['extra_guid_args'] = kwargs.get('extra_guid_args', []) + \
['-Q']
super(QubesHVm, self).start_guid(verbose, notify_function, **kwargs)
stubdom_guid_pidfile = '/var/run/qubes/guid-running.%d' % self.stubdom_xid
if os.path.exists(stubdom_guid_pidfile) and not self.debug:
try:
stubdom_guid_pid = int(open(stubdom_guid_pidfile, 'r').read())
os.kill(stubdom_guid_pid, signal.SIGTERM)
except Exception as ex:
print >> sys.stderr, "WARNING: Failed to kill stubdom gui daemon: %s" % str(ex)
elif before_qrexec and (not self.guiagent_installed or self.debug):
def start_guid(self, verbose=True, notify_function=None,
before_qrexec=False, **kwargs):
if not before_qrexec:
return
if not self.guiagent_installed or self.debug:
if verbose:
print >> sys.stderr, "--> Starting Qubes GUId (full screen)..."
self.start_stubdom_guid(verbose=verbose)
kwargs['extra_guid_args'] = kwargs.get('extra_guid_args', []) + \
['-Q', '-n']
stubdom_guid_pidfile = \
'/var/run/qubes/guid-running.%d' % self.stubdom_xid
if not self.debug and os.path.exists(stubdom_guid_pidfile):
# Terminate stubdom guid once "real" gui agent connects
stubdom_guid_pid = int(open(stubdom_guid_pidfile, 'r').read())
kwargs['extra_guid_args'] += ['-K', str(stubdom_guid_pid)]
super(QubesHVm, self).start_guid(verbose, notify_function, **kwargs)
def start_qrexec_daemon(self, **kwargs):
if not self.qrexec_installed:
if kwargs.get('verbose', False):
@ -463,7 +416,6 @@ class QubesHVm(QubesVm):
guid_pid = open(guid_pidfile).read().strip()
os.kill(int(guid_pid), 15)
def suspend(self):
if dry_run:
return

View File

@ -29,7 +29,7 @@ import stat
import sys
import re
from qubes.qubes import QubesHVm,register_qubes_vm_class,dry_run
from qubes.qubes import QubesHVm,register_qubes_vm_class,dry_run,vmm
from qubes.qubes import QubesException,QubesVmCollection
from qubes.qubes import system_path,defaults
@ -70,6 +70,7 @@ class QubesTemplateHVm(QubesHVm):
def is_appvm(self):
return False
@property
def rootcow_img(self):
return self.storage.rootcow_img
@ -95,7 +96,15 @@ class QubesTemplateHVm(QubesHVm):
def commit_changes (self, verbose = False):
self.log.debug('commit_changes()')
# nothing to do as long as root-cow.img is unused
pass
if not vmm.offline_mode:
assert not self.is_running(), "Attempt to commit changes on running Template VM!"
if verbose:
print >> sys.stderr, "--> Commiting template updates... COW: {0}...".format (self.rootcow_img)
if dry_run:
return
self.storage.commit_template_changes()
register_qubes_vm_class(QubesTemplateHVm)

View File

@ -400,6 +400,10 @@ class SendWorker(Process):
stdin=subprocess.PIPE,
stdout=self.backup_stdout)
if final_proc.wait() >= 2:
if self.queue.full():
# if the queue is already full, remove an entry to wake up the
# main thread, so it will be able to notice the error
self.queue.get()
# handle only exit code 2 (tar fatal error) or
# greater (call failed?)
raise QubesException(
@ -448,6 +452,17 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM):
global running_backup_operation
def queue_put_with_check(proc, vmproc, queue, element):
if queue.full():
if not proc.is_alive():
if vmproc:
message = ("Failed to write the backup, VM output:\n" +
vmproc.stderr.read())
else:
message = "Failed to write the backup. Out of disk space?"
raise QubesException(message)
queue.put(element)
total_backup_sz = 0
passphrase = passphrase.encode('utf-8')
for f in files_to_backup:
@ -552,14 +567,16 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
# be verified before untaring this.
# Prefix the path in archive with filename["subdir"] to have it
# verified during untar
tar_cmdline = ["tar", "-Pc", '--sparse',
tar_cmdline = (["tar", "-Pc", '--sparse',
"-f", backup_pipe,
'-C', os.path.dirname(filename["path"]),
'--xform', 's:^%s:%s\\0:' % (
'-C', os.path.dirname(filename["path"])] +
(['--dereference'] if filename["subdir"] != "dom0-home/"
else []) +
['--xform', 's:^%s:%s\\0:' % (
os.path.basename(filename["path"]),
filename["subdir"]),
os.path.basename(filename["path"])
]
])
if compressed:
tar_cmdline.insert(-1,
"--use-compress-program=%s" % compression_filter)
@ -649,7 +666,9 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
run_error)
# Send the chunk to the backup target
to_send.put(os.path.relpath(chunkfile, backup_tmpdir))
queue_put_with_check(
send_proc, vmproc, to_send,
os.path.relpath(chunkfile, backup_tmpdir))
# Close HMAC
hmac.stdin.close()
@ -667,7 +686,9 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
hmac_file.close()
# Send the HMAC to the backup target
to_send.put(os.path.relpath(chunkfile, backup_tmpdir) + ".hmac")
queue_put_with_check(
send_proc, vmproc, to_send,
os.path.relpath(chunkfile, backup_tmpdir) + ".hmac")
if tar_sparse.poll() is None or run_error == "size_limit":
run_error = "paused"
@ -679,7 +700,7 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
.poll()
pipe.close()
to_send.put("FINISHED")
queue_put_with_check(send_proc, vmproc, to_send, "FINISHED")
send_proc.join()
shutil.rmtree(backup_tmpdir)
@ -1552,6 +1573,8 @@ def backup_restore_set_defaults(options):
options['ignore-username-mismatch'] = False
if 'verify-only' not in options:
options['verify-only'] = False
if 'rename-conflicting' not in options:
options['rename-conflicting'] = False
return options
@ -1619,6 +1642,22 @@ def backup_restore_header(source, passphrase,
return (restore_tmpdir, os.path.join(restore_tmpdir, "qubes.xml"),
header_data)
def generate_new_name_for_conflicting_vm(orig_name, host_collection,
restore_info):
number = 1
if len(orig_name) > 29:
orig_name = orig_name[0:29]
new_name = orig_name
while (new_name in restore_info.keys() or
new_name in map(lambda x: x.get('rename_to', None),
restore_info.values()) or
host_collection.get_vm_by_name(new_name)):
new_name = str('{}{}'.format(orig_name, number))
number += 1
if number == 100:
# give up
return None
return new_name
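For example, restoring a backup that contains a VM named 'work' onto a host that already has one (names made up):

generate_new_name_for_conflicting_vm('work', host_collection, restore_info)
# -> 'work1' ('work2' if that is taken, and so on); None after 99 attempts,
# which makes the caller fall back to marking the VM 'already-exists'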
def restore_info_verify(restore_info, host_collection):
options = restore_info['$OPTIONS$']
@ -1636,7 +1675,16 @@ def restore_info_verify(restore_info, host_collection):
vm_info.pop('already-exists', None)
if not options['verify-only'] and \
host_collection.get_vm_by_name(vm) is not None:
vm_info['already-exists'] = True
if options['rename-conflicting']:
new_name = generate_new_name_for_conflicting_vm(
vm, host_collection, restore_info
)
if new_name is not None:
vm_info['rename-to'] = new_name
else:
vm_info['already-exists'] = True
else:
vm_info['already-exists'] = True
# check template
vm_info.pop('missing-template', None)
@ -1683,6 +1731,22 @@ def restore_info_verify(restore_info, host_collection):
'already-exists',
'excluded']])
# update references to renamed VMs:
for vm in restore_info.keys():
if vm in ['$OPTIONS$', 'dom0']:
continue
vm_info = restore_info[vm]
template_name = vm_info['template']
if (template_name in restore_info and
restore_info[template_name]['good-to-go'] and
'rename-to' in restore_info[template_name]):
vm_info['template'] = restore_info[template_name]['rename-to']
netvm_name = vm_info['netvm']
if (netvm_name in restore_info and
restore_info[netvm_name]['good-to-go'] and
'rename-to' in restore_info[netvm_name]):
vm_info['netvm'] = restore_info[netvm_name]['rename-to']
return restore_info
@ -1706,6 +1770,10 @@ def backup_restore_prepare(backup_location, passphrase, options=None,
if check_vm.qid == 0:
return os.path.exists(os.path.join(backup_dir, 'dom0-home'))
# DisposableVM
if check_vm.dir_path is None:
return False
backup_vm_dir_path = check_vm.dir_path.replace(
system_path["qubes_base_dir"], backup_dir)
@ -1951,8 +2019,11 @@ def backup_restore_print_summary(restore_info, print_callback=print_stdout):
s += " <-- No matching template on the host or in the backup found!"
elif 'missing-netvm' in vm_info:
s += " <-- No matching netvm on the host or in the backup found!"
elif 'orig-template' in vm_info:
s += " <-- Original template was '%s'" % (vm_info['orig-template'])
else:
if 'orig-template' in vm_info:
s += " <-- Original template was '%s'" % (vm_info['orig-template'])
if 'rename-to' in vm_info:
s += " <-- Will be renamed to '%s'" % vm_info['rename-to']
print_callback(s)
@ -1987,7 +2058,7 @@ def backup_restore_do(restore_info,
backup_dir)
# We prefer to use Linux's cp, because it nicely handles sparse files
cp_retcode = subprocess.call(["cp", "-rp", backup_src_dir, dst_dir])
cp_retcode = subprocess.call(["cp", "-rp", "--reflink=auto", backup_src_dir, dst_dir])
if cp_retcode != 0:
raise QubesException(
"*** Error while copying file {0} to {1}".format(backup_src_dir,
@ -2106,13 +2177,29 @@ def backup_restore_do(restore_info,
template = host_collection.get_vm_by_name(template_name)
new_vm = None
vm_name = vm.name
if 'rename-to' in restore_info[vm.name]:
vm_name = restore_info[vm.name]['rename-to']
try:
new_vm = host_collection.add_new_vm(vm_class_name, name=vm.name,
conf_file=vm.conf_file,
dir_path=vm.dir_path,
new_vm = host_collection.add_new_vm(vm_class_name, name=vm_name,
template=template,
installed_by_rpm=False)
if os.path.exists(new_vm.dir_path):
move_to_path = tempfile.mkdtemp('', os.path.basename(
new_vm.dir_path), os.path.dirname(new_vm.dir_path))
try:
os.rename(new_vm.dir_path, move_to_path)
error_callback(
"*** Directory {} already exists! It has "
"been moved to {}".format(new_vm.dir_path,
move_to_path))
except OSError:
error_callback(
"*** Directory {} already exists and "
"cannot be moved!".format(new_vm.dir_path))
error_callback("Skipping...")
continue
if format_version == 1:
restore_vm_dir_v1(backup_location,
@ -2156,7 +2243,11 @@ def backup_restore_do(restore_info,
# Set network dependencies - only non-default netvm setting
for vm in vms.values():
host_vm = host_collection.get_vm_by_name(vm.name)
vm_name = vm.name
if 'rename-to' in restore_info[vm.name]:
vm_name = restore_info[vm.name]['rename-to']
host_vm = host_collection.get_vm_by_name(vm_name)
if host_vm is None:
# Failed/skipped VM
continue
@ -2205,7 +2296,7 @@ def backup_restore_do(restore_info,
home_dir + '/' + restore_home_backupdir + '/' + f)
if format_version == 1:
subprocess.call(
["cp", "-nrp", backup_dom0_home_dir + '/' + f, home_file])
["cp", "-nrp", "--reflink=auto", backup_dom0_home_dir + '/' + f, home_file])
elif format_version >= 2:
shutil.move(backup_dom0_home_dir + '/' + f, home_file)
retcode = subprocess.call(['sudo', 'chown', '-R', local_user, home_dir])

View File

@ -65,7 +65,7 @@ defaults = {
'libvirt_uri': 'xen:///',
'memory': 400,
'kernelopts': "nopat",
'kernelopts_pcidevs': "nopat iommu=soft swiotlb=4096",
'kernelopts_pcidevs': "nopat iommu=soft swiotlb=8192",
'dom0_update_check_interval': 6*3600,

View File

@ -29,7 +29,7 @@ from lxml import etree
from lxml.etree import ElementTree, SubElement, Element
from qubes.qubes import QubesException
from qubes.qubes import vmm
from qubes.qubes import vmm,defaults
from qubes.qubes import system_path,vm_files
import sys
import os
@ -297,6 +297,8 @@ def block_check_attached(qvmc, device):
if vm.qid == 0:
# Connecting devices to dom0 not supported
continue
if not vm.is_running():
continue
try:
libvirt_domain = vm.libvirt_domain
if libvirt_domain:
@ -313,9 +315,8 @@ def block_check_attached(qvmc, device):
disks = parsed_xml.xpath("//domain/devices/disk")
for disk in disks:
backend_name = 'dom0'
# FIXME: move <domain/> into <source/>
if disk.find('domain') is not None:
backend_name = disk.find('domain').get('name')
if disk.find('backenddomain') is not None:
backend_name = disk.find('backenddomain').get('name')
source = disk.find('source')
if disk.get('type') == 'file':
path = source.get('file')
@ -696,11 +697,16 @@ class QubesWatch(object):
self.block_callback = None
self.meminfo_callback = None
self.domain_callback = None
vmm.libvirt_conn.domainEventRegisterAny(
libvirt.virEventRegisterDefaultImpl()
# open new libvirt connection because above
# virEventRegisterDefaultImpl is in practice effective only for new
# connections
self.libvirt_conn = libvirt.open(defaults['libvirt_uri'])
self.libvirt_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._domain_list_changed, None)
vmm.libvirt_conn.domainEventRegisterAny(
self.libvirt_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED,
self._device_removed, None)
@ -709,10 +715,10 @@ class QubesWatch(object):
try:
if vm.isActive():
self._register_watches(vm)
except libvirt.libvirtError:
except libvirt.libvirtError as e:
# this will happen if we lose a race with another tool,
# which can just remove the domain
if vmm.libvirt_conn.virConnGetLastError()[0] == libvirt.VIR_ERR_NO_DOMAIN:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
pass
raise
# and for dom0
@ -749,29 +755,41 @@ class QubesWatch(object):
name = libvirt_domain.name()
if name in self._qdb:
return
if not libvirt_domain.isActive():
return
# open separate connection to Qubes DB:
# 1. to not confuse pull() with responses to real commands sent from
# other threads (like read, write etc) with watch events
# 2. to not think whether QubesDB is thread-safe (it isn't)
while libvirt_domain.isActive() and name not in self._qdb:
try:
self._qdb[name] = QubesDB(name)
except Error as e:
if e.args[0] != 2:
raise
time.sleep(0.5)
if name not in self._qdb:
# domain no longer active
try:
self._qdb[name] = QubesDB(name)
except Error as e:
if e.args[0] != 2:
raise
libvirt.virEventAddTimeout(500, self._retry_register_watches,
libvirt_domain)
return
else:
name = "dom0"
self._qdb[name] = QubesDB(name)
self._qdb[name].watch('/qubes-block-devices')
try:
self._qdb[name].watch('/qubes-block-devices')
except Error as e:
if e.args[0] == 102: # Connection reset by peer
# QubesDB daemon not running - most likely we've connected to
# stale daemon which just exited; retry later
libvirt.virEventAddTimeout(500, self._retry_register_watches,
libvirt_domain)
return
self._qdb_events[name] = libvirt.virEventAddHandle(
self._qdb[name].watch_fd(),
libvirt.VIR_EVENT_HANDLE_READABLE,
self._qdb_handler, name)
def _retry_register_watches(self, timer, libvirt_domain):
libvirt.virEventRemoveTimeout(timer)
self._register_watches(libvirt_domain)
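These watches and retry timers depend on a running libvirt event loop, which nothing in this file spins up; a consumer presumably does something like the following (a sketch, thread handling simplified):

import threading
import libvirt

def libvirt_event_loop():
    while True:
        libvirt.virEventRunDefaultImpl()   # dispatch FD handles and the retry timers

watch = QubesWatch()                       # registers the default event impl first
loop = threading.Thread(target=libvirt_event_loop)
loop.daemon = True
loop.start()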
def _unregister_watches(self, libvirt_domain):
name = libvirt_domain.name()
if name in self._qdb_events:
@ -782,7 +800,9 @@ class QubesWatch(object):
del(self._qdb[name])
def _domain_list_changed(self, conn, domain, event, reason, param):
if event == libvirt.VIR_DOMAIN_EVENT_STARTED:
# use VIR_DOMAIN_EVENT_RESUMED instead of VIR_DOMAIN_EVENT_STARTED to
# make sure that qubesdb daemon is already running
if event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
self._register_watches(domain)
elif event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
self._unregister_watches(domain)
@ -802,9 +822,24 @@ class QubesWatch(object):
##### updates check #####
#
# XXX this whole section is a new global property
# TODO make event handlers
#
UPDATES_DOM0_DISABLE_FLAG='/var/lib/qubes/updates/disable-updates'
UPDATES_DEFAULT_VM_DISABLE_FLAG=\
'/var/lib/qubes/updates/vm-default-disable-updates'
def updates_vms_toggle(qvm_collection, value):
# Flag for new VMs
if value:
if os.path.exists(UPDATES_DEFAULT_VM_DISABLE_FLAG):
os.unlink(UPDATES_DEFAULT_VM_DISABLE_FLAG)
else:
open(UPDATES_DEFAULT_VM_DISABLE_FLAG, "w").close()
# Change for existing VMs
for vm in qvm_collection.values():
if vm.qid == 0:
continue
@ -834,5 +869,16 @@ def updates_dom0_toggle(qvm_collection, value):
def updates_dom0_status(qvm_collection):
return not os.path.exists(UPDATES_DOM0_DISABLE_FLAG)
def updates_vms_status(qvm_collection):
# default value:
status = not os.path.exists(UPDATES_DEFAULT_VM_DISABLE_FLAG)
# check if all the VMs use the default value
for vm in qvm_collection.values():
if vm.qid == 0:
continue
if vm.services.get('qubes-update-check', True) != status:
# "mixed"
return None
return status
# vim:sw=4:et:
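Taken together, the flag file and the per-VM service entries give three observable states; a hedged summary of how the helpers above combine (collection name is made up):

updates_vms_toggle(qvm_collection, False)    # creates vm-default-disable-updates and
                                             # presumably clears qubes-update-check per VM
print(updates_vms_status(qvm_collection))    # False - every VM agrees with the default
# None would mean "mixed": some VM was toggled individually afterwards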

View File

@ -1,45 +0,0 @@
#!/usr/bin/python2
from __future__ import absolute_import
import _winreg
import os
import sys
from qubes.storage.wni import QubesWniVmStorage
DEFAULT_INSTALLDIR = 'c:\\program files\\Invisible Things Lab\\Qubes WNI'
DEFAULT_STOREDIR = 'c:\\qubes'
def apply(system_path, vm_files, defaults):
system_path['qubes_base_dir'] = DEFAULT_STOREDIR
installdir = DEFAULT_INSTALLDIR
try:
reg_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
"Software\\Invisible Things Lab\\Qubes WNI")
installdir = _winreg.QueryValueEx(reg_key, "InstallDir")[0]
system_path['qubes_base_dir'] = \
_winreg.QueryValueEx(reg_key, "StoreDir")[0]
except WindowsError as e:
print >>sys.stderr, \
"WARNING: invalid installation: missing registry entries (%s)" \
% str(e)
system_path['config_template_pv'] = \
os.path.join(installdir, 'vm-template.xml')
system_path['config_template_hvm'] = \
os.path.join(installdir, 'vm-template-hvm.xml')
system_path['qubes_icon_dir'] = os.path.join(installdir, 'icons')
system_path['qubesdb_daemon_path'] = \
os.path.join(installdir, 'bin\\qubesdb-daemon.exe')
system_path['qrexec_daemon_path'] = \
os.path.join(installdir, 'bin\\qrexec-daemon.exe')
system_path['qrexec_client_path'] = \
os.path.join(installdir, 'bin\\qrexec-client.exe')
system_path['qrexec_policy_dir'] = \
os.path.join(installdir, 'qubes-rpc\\policy')
# Specific to WNI - normally VM have this file
system_path['qrexec_agent_path'] = \
os.path.join(installdir, 'bin\\qrexec-agent.exe')
defaults['libvirt_uri'] = 'wni:///'
defaults['storage_class'] = QubesWniVmStorage

View File

@ -2,7 +2,10 @@
from __future__ import absolute_import
from qubes.storage.xen import QubesXenVmStorage
from qubes.storage.xen import XenStorage, XenPool
def apply(system_path, vm_files, defaults):
defaults['storage_class'] = QubesXenVmStorage
defaults['storage_class'] = XenStorage
defaults['pool_drivers'] = {'xen': XenPool}
defaults['pool_config'] = {'dir_path': '/var/lib/qubes/'}
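get_pool itself is not part of this diff; a rough sketch of how the registry set up above is presumably consumed (hypothetical code — the real helper lives in qubes/storage/__init__.py):

def get_pool(name, vm):
    # hypothetical: resolve the pool's driver (e.g. from the storage.conf
    # installed by the Makefile change above), defaulting to 'xen'
    from qubes.qubes import defaults
    pool_class = defaults['pool_drivers']['xen']        # -> XenPool
    return pool_class(vm, **defaults['pool_config'])    # dir_path='/var/lib/qubes/'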

View File

@ -1,5 +1,6 @@
OS ?= Linux
SYSCONFDIR ?= /etc
PYTHON_QUBESPATH = $(PYTHON_SITEPATH)/qubes
all:
@ -13,6 +14,8 @@ endif
mkdir -p $(DESTDIR)$(PYTHON_QUBESPATH)/storage
cp __init__.py $(DESTDIR)$(PYTHON_QUBESPATH)/storage
cp __init__.py[co] $(DESTDIR)$(PYTHON_QUBESPATH)/storage
mkdir -p $(DESTDIR)$(SYSCONFDIR)/qubes
cp storage.conf $(DESTDIR)$(SYSCONFDIR)/qubes/
ifneq ($(BACKEND_VMM),)
if [ -r $(BACKEND_VMM).py ]; then \
cp $(BACKEND_VMM).py $(DESTDIR)$(PYTHON_QUBESPATH)/storage && \

View File

@ -1,138 +0,0 @@
#!/usr/bin/python2
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2013 Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
from __future__ import absolute_import
import sys
import os
import os.path
import win32api
import win32net
import win32netcon
import win32security
import win32profile
import pywintypes
import random
from qubes.storage import QubesVmStorage
from qubes.qubes import QubesException,system_path
class QubesWniVmStorage(QubesVmStorage):
"""
Class for VM storage of WNI VMs.
"""
def __init__(self, *args, **kwargs):
super(QubesWniVmStorage, self).__init__(*args, **kwargs)
# Use the user profile as "private.img"
self.home_root = win32profile.GetProfilesDirectory()
# FIXME: the assignment below may not always be correct,
# but GetUserProfileDirectory needs a user token...
self.private_img = os.path.join(self.home_root, self._get_username())
# Pass paths for WNI libvirt driver
os.putenv("WNI_DRIVER_QUBESDB_PATH", system_path['qubesdb_daemon_path'])
os.putenv("WNI_DRIVER_QREXEC_AGENT_PATH", system_path['qrexec_agent_path'])
def _get_username(self, vmname = None):
if vmname is None:
vmname = self.vm.name
return "qubes-vm-%s" % vmname
def _get_random_password(self, vmname = None):
if vmname is None:
vmname = self.vm.name
return '%x' % random.SystemRandom().getrandombits(256)
def get_config_params(self):
return {}
def create_on_disk_private_img(self, verbose, source_template = None):
# FIXME: this may not always be correct
home_dir = os.path.join(self.home_root, self._get_username())
# Create user data in information level 1 (PyUSER_INFO_1) format.
user_data = {}
user_data['name'] = self._get_username()
user_data['full_name'] = self._get_username()
# libvirt driver doesn't need to know the password anymore
user_data['password'] = self._get_random_password()
user_data['flags'] = (
win32netcon.UF_NORMAL_ACCOUNT |
win32netcon.UF_SCRIPT |
win32netcon.UF_DONT_EXPIRE_PASSWD |
win32netcon.UF_PASSWD_CANT_CHANGE
)
user_data['priv'] = win32netcon.USER_PRIV_USER
user_data['home_dir'] = home_dir
user_data['max_storage'] = win32netcon.USER_MAXSTORAGE_UNLIMITED
# TODO: catch possible exception
win32net.NetUserAdd(None, 1, user_data)
def create_on_disk_root_img(self, verbose, source_template = None):
pass
def remove_from_disk(self):
try:
sid = win32security.LookupAccountName(None, self._get_username())[0]
string_sid = win32security.ConvertSidToStringSid(sid)
win32profile.DeleteProfile(string_sid)
win32net.NetUserDel(None, self._get_username())
except pywintypes.error, details:
if details[0] == 2221:
# "The user name cannot be found."
raise IOError("User %s doesn't exist" % self._get_username())
else:
raise
super(QubesWniVmStorage, self).remove_from_disk()
def rename(self, old_name, new_name):
super(QubesWniVmStorage, self).rename(old_name, new_name)
user_data = {}
user_data['name'] = self._get_username(new_name)
win32net.NetUserSetInfo(None,
self._get_username(old_name), 0, user_data)
#TODO: rename user profile
def verify_files(self):
if not os.path.exists (self.vmdir):
raise QubesException (
"VM directory doesn't exist: {0}".\
format(self.vmdir))
try:
# TemplateVm in WNI is quite virtual, so do not require the user
if not self.vm.is_template():
win32net.NetUserGetInfo(None, self._get_username(), 0)
except pywintypes.error, details:
if details[0] == 2221:
# "The user name cannot be found."
raise QubesException("User %s doesn't exist" % self._get_username())
else:
raise
def reset_volatile_storage(self, verbose = False, source_template = None):
pass
def prepare_for_vm_startup(self, verbose = False):
if self.vm.is_template():
raise QubesException("Starting TemplateVM is not supported")

View File

@ -28,13 +28,12 @@ import sys
import shutil
import time
from qubes.qubes import QubesVmCollection
from qubes.qubes import QubesVmCollection, QubesException
from qubes.qubes import QubesDispVmLabels
from qubes.notify import tray_notify, tray_notify_error, tray_notify_init
current_savefile = '/var/run/qubes/current-savefile'
current_dvm_conf = '/var/run/qubes/current-dvm.conf'
current_savefile_vmdir = '/var/lib/qubes/dvmdata/vmdir'
@ -80,10 +79,13 @@ class QfileDaemonDvm:
label=label)
print >>sys.stderr, "time=%s, VM created" % (str(time.time()))
# By default inherit firewall rules from calling VM
disp_firewall_conf = '/var/run/qubes/%s-firewall.xml' % dispvm.name
dispvm.firewall_conf = disp_firewall_conf
if os.path.exists(vm.firewall_conf):
disp_firewall_conf = '/var/run/qubes/%s-firewall.xml' % dispvm.name
shutil.copy(vm.firewall_conf, disp_firewall_conf)
dispvm.firewall_conf = disp_firewall_conf
elif vm.qid == 0 and os.path.exists(vm_disptempl.firewall_conf):
# for DispVM called from dom0, copy rules from the DispVM template
shutil.copy(vm_disptempl.firewall_conf, disp_firewall_conf)
if len(sys.argv) > 5 and len(sys.argv[5]) > 0:
assert os.path.exists(sys.argv[5]), "Invalid firewall.conf location"
dispvm.firewall_conf = sys.argv[5]
@ -99,7 +101,11 @@ class QfileDaemonDvm:
qvm_collection.unlock_db()
return None
print >>sys.stderr, "time=%s, VM starting" % (str(time.time()))
dispvm.start()
try:
dispvm.start()
except (MemoryError, QubesException) as e:
tray_notify_error(str(e))
raise
if vm.qid != 0:
# if need to enable/disable netvm, do it while DispVM is alive
if (dispvm.netvm is None) != (vm.dispvm_netvm is None):
@ -176,7 +182,11 @@ def main():
subprocess.call(['/usr/lib/qubes/qrexec-client', '-d', dispvm.name,
user+':exec /usr/lib/qubes/qubes-rpc-multiplexer ' +
exec_index + " " + src_vmname])
dispvm.force_shutdown()
try:
dispvm.force_shutdown()
except QubesException:
# VM already destroyed
pass
qfile.remove_disposable_from_qdb(dispvm.name)
main()
main()

View File

@ -70,5 +70,12 @@ else
fi
ln -snf $VMDIR /var/lib/qubes/dvmdata/vmdir
cd $VMDIR
bsdtar -cSf saved-cows.tar volatile.img
fstype=`df --output=fstype $VMDIR | tail -n 1`
if [ "$fstype" = "tmpfs" ]; then
# bsdtar doesn't work on tmpfs because FS_IOC_FIEMAP ioctl isn't supported
# there
tar -cSf saved-cows.tar volatile.img
else
bsdtar -cSf saved-cows.tar volatile.img
fi
echo "DVM savefile created successfully."

View File

@ -6,10 +6,8 @@ printf "\x00\x00\x00\x00" > /var/run/qubes/dispVM.seq
chown root:qubes /var/run/qubes/dispVM.seq
chmod 660 /var/run/qubes/dispVM.seq
DEFAULT=/var/lib/qubes/dvmdata/default-savefile
DEFAULT_CONFIG=/var/lib/qubes/dvmdata/default-dvm.conf
# set up DispVM files only when they exist
if [ -r $DEFAULT ]; then
ln -s $DEFAULT_CONFIG /var/run/qubes/current-dvm.conf
if [ -f /var/lib/qubes/dvmdata/dont-use-shm ] ; then
ln -s $DEFAULT /var/run/qubes/current-savefile
else

View File

@ -25,6 +25,12 @@ Options
Add a PCI device to specified VM
.. option:: --add-class, -C
Add all devices of given class:
net - network interfaces
usb - USB controllers
.. option:: --delete, -d
Remove a PCI device from specified VM

View File

@ -55,12 +55,11 @@ include_in_backups
Accepted values: ``True``, ``False``
Control whether this VM will be included in backups by default (for now
works only in qubes-manager). You can always manually select or deselect
any VM for backup.
works only in qubes-manager). You can always manually select or
deselect any VM for backup.
pcidevs
PCI devices assigned to the VM. Should be edited using
:manpage:`qvm-pci(1)` tool.
PCI devices assigned to the VM. Should be edited using qvm-pci tool.
pci_strictreset
Accepted values: ``True``, ``False``
@ -86,23 +85,19 @@ netvm
default NetVM (managed by qubes-prefs). Setting to ``none`` will disable
networking in this VM.
.. note::
When setting to ``none``, firewall will be set to block all traffic -
it will be used by DispVM started from this VM. Setting back to some
NetVM will *NOT* restore previous firewall settings.
dispvm_netvm
Accepted values: netvm name, ``default``, ``none``
Which NetVM should be used for Disposable VMs started by this one. ``default`` is to use the same NetVM as the VM itself.
Which NetVM should be used for Disposable VMs started by this one.
``default`` is to use the same NetVM as the VM itself.
maxmem
Accepted values: memory size in MB
Maximum memory size available for this VM. Dynamic memory management (aka
qmemman) will not be able to balloon over this limit. For VMs with qmemman
disabled, this will be overridden by *memory* property (at VM startup).
qmemman) will not be able to balloon over this limit. For VMs with
qmemman disabled, this will be overridden by *memory* property (at VM
startup).
memory
Accepted values: memory size in MB
@@ -115,12 +110,11 @@ kernel
Accepted values: kernel version, ``default``, ``none``
Kernel version to use (only for PV VMs). Available kernel versions will be
listed when no value given (they are in
:file:`/var/lib/qubes/vm-kernels`). Setting to ``default`` will follow
system-global default kernel (managed via qubes-prefs). Setting to ``none``
will use "kernels" subdir in VM directory - this allows having VM-specific
kernel; also this is the only case when :file:`/lib/modules` is writable from
within VM.
listed when no value given (they are in /var/lib/qubes/vm-kernels).
Setting to ``default`` will follow system-global default kernel (managed
via qubes-prefs). Setting to ``none`` will use "kernels" subdir in
VM directory - this allows having VM-specific kernel; also this is the only
case when /lib/modules is writable from within VM.
template
Accepted values: TemplateVM name
@@ -139,8 +133,10 @@ kernelopts
VM kernel parameters (available only for PV VMs). This can be used to
work around some hardware-specific problems (eg for NetVM). Setting to
``default`` will use some reasonable defaults (currently different for VMs
with PCI devices and without). Some helpful options (for debugging
purposes): ``earlyprintk=xen``, ``init=/bin/bash``
with PCI devices and without). For VMs without PCI devices the
``default`` option means to inherit this value from the VM template (if any).
Some helpful options (for debugging purposes): ``earlyprintk=xen``,
``init=/bin/bash``
name
Accepted values: alphanumerical name
@@ -148,12 +144,12 @@ name
Name of the VM. Can be only changed when VM isn't running.
drive
Accepted values: [hd:\|cdrom:][backend-vm:]\ *path*
Accepted values: [hd:\|cdrom:][backend-vm:]path
Additional drive for the VM (available only for HVMs). This can be used to
attach installation image. ``path`` can be file or physical device (eg.
:file:`/dev/sr0`). The same syntax can be used in :option:`qvm-start
--drive` - to attach drive only temporarily.
:file:`/dev/sr0`). The same syntax can be used in
:option:`qvm-start --drive` - to attach drive only temporarily.
mac
Accepted values: MAC address, ``auto``
@@ -161,7 +157,6 @@ mac
Can be used to force a specific MAC address of the virtual ethernet card in
the VM. Setting to ``auto`` will use an automatically-generated MAC - based
on the VM id. Especially useful when some licensing depends on a static MAC
address.
For template-based HVM ``auto`` mode means to clone template MAC.
default_user
@@ -181,8 +176,8 @@ debug
:file:`root.img` (actually :file:`volatile.img`) before each VM startup, so
changes made to the root filesystem stay intact. To force a reset of
:file:`root.img` when debug mode is enabled, either change something in the
template (simple start+stop will do, even touch its root.img is enough), or
remove VM's :file:`volatile.img` (check the path with
template (simple start+stop will do, even touch its :file:`root.img` is
enough), or remove VM's :file:`volatile.img` (check the path with
:manpage:`qvm-prefs(1)`).
qrexec_installed
@@ -199,7 +194,7 @@ guiagent_installed
Accepted values: ``True``, ``False``
This HVM has the GUI agent installed. This option disables full screen GUI
virtualization and enables per-window seamless GUI mode. This option will
virtualization and enables per-window seemless GUI mode. This option will
be automatically turned on during Qubes Windows Tools installation, but if
you install qubes gui agent in some other OS, you need to turn this option
on manually. You can turn this option off to troubleshoot some early HVM OS
@@ -208,7 +203,7 @@ guiagent_installed
startup).
.. note::
when Windows GUI agent is installed in the VM, SVGA device (used for
full screen video) is disabled, so even if you disable this option, you
will not get functional full desktop access (on normal VM startup). Use


@@ -92,7 +92,7 @@ cups
Enable CUPS service. The user can disable cups in VMs which do not need
printing to speed up booting.
cron
crond
Default: disabled
Enable CRON service.

etc/storage.conf (new file)

@@ -0,0 +1,12 @@
[default] ; poolname
driver=xen ; the default xen storage
; class = qubes.storage.xen.XenStorage ; class always overwrites the driver
;
; To use your own storage adapter, you just need to specify the module path and
; class name
; [pool-b]
; class = foo.bar.MyStorage
;
; [test-dummy]
; driver=dummy
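
The pool helpers added below in qubes/storage/__init__.py (add_pool,
pool_exists, get_pool) consume this file. A minimal usage sketch, assuming an
existing QubesVm instance `vm` and treating `foo.bar.MyStorage` from the
comment above as a hypothetical class:

from qubes.storage import add_pool, get_pool, pool_exists

# register the hypothetical pool from the commented example above;
# 'class' takes precedence over 'driver' when both are present
if not pool_exists('pool-b'):
    add_pool('pool-b', **{'class': 'foo.bar.MyStorage'})

# instantiate storage for a VM from the default pool; get_storage() is
# the accessor defined on XenPool later in this commit
storage = get_pool('default', vm).get_storage()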


@@ -6,6 +6,10 @@ fi
set -e
if ! echo $PATH | grep -q sbin; then
PATH=$PATH:/sbin:/usr/sbin
fi
FILENAME=$1
ROOT_SIZE=$2
SWAP_SIZE=$[ 1024 ]
@@ -22,13 +26,3 @@ fi
TOTAL_SIZE=$[ $ROOT_SIZE + $SWAP_SIZE + 512 ]
truncate -s ${TOTAL_SIZE}M "$FILENAME"
sfdisk --no-reread -u M "$FILENAME" > /dev/null 2> /dev/null <<EOF
0,${SWAP_SIZE},S
,${ROOT_SIZE},L
EOF
loopdev=`losetup -f --show --partscan "$FILENAME"`
udevadm settle
mkswap -f ${loopdev}p1 > /dev/null
losetup -d ${loopdev} || :
chown --reference `dirname "$FILENAME"` "$FILENAME"


@@ -8,7 +8,7 @@ xenstore-write domid 0
DOM0_MAXMEM=`/usr/sbin/xl info | grep total_memory | awk '{ print $3 }'`
xenstore-write /local/domain/0/memory/static-max $[ $DOM0_MAXMEM * 1024 ]
xl sched-credit -d 0 -w 512
xl sched-credit -d 0 -w 2000
cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml
/usr/lib/qubes/cleanup-dispvms


@@ -1,6 +1,6 @@
[Unit]
Description=Qubes Dom0 startup setup
After=qubes-db-dom0.service libvirtd.service
After=qubes-db-dom0.service libvirtd.service xenconsoled.service
# Cover legacy init.d script
[Service]
@@ -9,6 +9,9 @@ StandardOutput=syslog
RemainAfterExit=yes
ExecStart=/usr/lib/qubes/startup-misc.sh
ExecStop=/usr/bin/qvm-shutdown -q --all --wait
# QubesDB daemons stop after 60s timeout in worst case; speed it up, since no
# VMs are running now
ExecStop=-/usr/bin/killall qubesdb-daemon
[Install]
WantedBy=multi-user.target


@@ -1,7 +1,7 @@
[Unit]
Description=Qubes NetVM startup
After=qubes-core.service qubes-qmemman.service libvirtd.service
Before=plymouth-quit.service
Before=systemd-user-sessions.service
[Service]
Type=oneshot


@@ -1,12 +1,12 @@
[Unit]
Description=Start Qubes VM %i
After=qubes-netvm.service
Before=plymouth-quit.service
Before=systemd-user-sessions.service
[Service]
Type=oneshot
Environment=DISPLAY=:0
ExecStart=/usr/bin/qvm-start --no-guid %i
ExecStart=/usr/bin/qvm-start --no-guid --skip-if-running %i
Group=qubes
RemainAfterExit=yes


@@ -5,7 +5,6 @@ import re
import sys
import subprocess
from qubes.qubes import QubesVmCollection,QubesException,QubesHVm
from qubes.qubes import xs
def main():
@@ -15,6 +14,8 @@ def main():
print >> sys.stderr, 'This script must be called as qrexec service!'
exit(1)
prev_qrexec_installed = False
source_vm = None
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
try:
@@ -66,6 +67,7 @@ def main():
else:
user = None
prev_qrexec_installed = source_vm.qrexec_installed
# Let the tools enable *or disable* each particular component
source_vm.qrexec_installed = qrexec > 0
source_vm.guiagent_installed = gui > 0
@@ -75,15 +77,18 @@
qvm_collection.save()
retcode = subprocess.call(['qvm-sync-appmenus', '--force-rpc'])
if retcode == 0 and hasattr(source_vm, 'appmenus_recreate'):
# TODO: call the same for child VMs? This isn't done for Linux VMs,
# so probably should be ignored for Windows also
source_vm.appmenus_recreate()
except Exception as e:
print >> sys.stderr, e.message
exit(1)
finally:
qvm_collection.unlock_db()
if not prev_qrexec_installed and source_vm.qrexec_installed:
retcode = subprocess.call(['qvm-sync-appmenus', '--force-rpc'])
if retcode == 0 and hasattr(source_vm, 'appmenus_recreate'):
# TODO: call the same for child VMs? This isn't done for Linux VMs,
# so probably should be ignored for Windows also
source_vm.appmenus_recreate()
main()


@@ -44,7 +44,7 @@ def main():
source_vm = qvm_collection.get_vm_by_name(source)
if source_vm is None:
print >> sys.stderr, 'Domain ' + source + ' does not exists (?!)'
print >> sys.stderr, 'Domain ' + source + ' does not exist (?!)'
exit(1)
os.umask(0002)
@@ -61,7 +61,10 @@ def main():
update_f = open(source_vm.dir_path + '/' + vm_files["updates_stat_file"], "w")
update_f.write(update_count)
update_f.close()
os.chown(source_vm.dir_path + '/' + vm_files["updates_stat_file"], -1, qubes_gid)
try:
os.chown(source_vm.dir_path + '/' + vm_files["updates_stat_file"], -1, qubes_gid)
except OSError:
pass
elif source_vm.template is not None:
# Hint about updates availability in template
# If template is running - it will notify about updates itself
@@ -79,7 +82,10 @@ def main():
update_f = open(stat_file, "w")
update_f.write(update_count)
update_f.close()
os.chown(stat_file, -1, qubes_gid)
try:
os.chown(stat_file, -1, qubes_gid)
except OSError:
pass
else:
print >> sys.stderr, 'Ignoring notification of no updates'


@@ -1142,6 +1142,11 @@ class Qubes(PropertyHolder):
default_kernel = property('default_kernel', load_stage=3,
doc='Which kernel to use when not overridden in VM')
# TODO #1637 #892
check_updates_vm = property('check_updates_vm',
type=bool, setter=property.bool,
doc='check for updates inside qubes')
def __init__(self, store=None, load=True, **kwargs):
#: logger instance for logging global messages


@@ -22,6 +22,12 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# THIS FILE SHOULD BE CONFIGURED PER PRODUCT
# or better, once first custom product arrives,
# make a real /etc/qubes/master.conf or whatever
#
'''Constants which can be configured in one place'''
qubes_base_dir = "/var/lib/qubes"
@@ -69,14 +75,16 @@ defaults = {
'libvirt_uri': 'xen:///',
'memory': 400,
'kernelopts': "nopat",
'kernelopts_pcidevs': "nopat iommu=soft swiotlb=4096",
'kernelopts_pcidevs': "nopat iommu=soft swiotlb=8192",
'dom0_update_check_interval': 6*3600,
'private_img_size': 2*1024*1024*1024,
'root_img_size': 10*1024*1024*1024,
'storage_class': 'qubes.storage.xen.XenVMStorage',
'storage_class': 'qubes.storage.xen.XenStorage',
'pool_drivers': {'xen': 'qubes.storage.xen.XenPool'},
'pool_config': {'dir_path': '/var/lib/qubes'},
# how long (in sec) to wait for VMs to shutdown,
# before killing them (when used qvm-run with --wait option),


@@ -42,7 +42,9 @@ slow_memset_react_msg="VM didn't give back all requested memory"
class DomainState:
def __init__(self, id):
self.meminfo = None #dictionary of memory info read from client
self.memory_actual = None #the current memory size
self.memory_current = 0 # the current memory size
self.memory_actual = None # the current memory allocation (what VM
# is using or can use at any time)
self.memory_maximum = None #the maximum memory size
self.mem_used = None #used memory, computed based on meminfo
self.id = id #domain id
@@ -50,6 +52,9 @@
self.no_progress = False #no react to memset
self.slow_memset_react = False #slow react to memset (after few tries still above target)
def __repr__(self):
return self.__dict__.__repr__()
class SystemState(object):
def __init__(self):
self.log = logging.getLogger('qmemman.systemstate')
@@ -61,30 +66,62 @@ class SystemState(object):
self.BALOON_DELAY = 0.1
self.XEN_FREE_MEM_LEFT = 50*1024*1024
self.XEN_FREE_MEM_MIN = 25*1024*1024
self.ALL_PHYS_MEM = self.xc.physinfo()['total_memory']*1024
# Overhead of per-page Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
# see https://wiki.openstack.org/wiki/XenServer/Overhead
# we divide total and free physical memory by this to get "assignable" memory
self.MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
self.ALL_PHYS_MEM = int(self.xc.physinfo()['total_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
def add_domain(self, id):
self.log.debug('add_domain(id={!r})'.format(id))
self.domdict[id] = DomainState(id)
# TODO: move to DomainState.__init__
target_str = self.xs.read('', '/local/domain/' + id + '/memory/target')
if target_str:
self.domdict[id].last_target = int(target_str) * 1024
def del_domain(self, id):
self.log.debug('del_domain(id={!r})'.format(id))
self.domdict.pop(id)
def get_free_xen_memory(self):
return self.xc.physinfo()['free_memory']*1024
# hosts = self.xend_session.session.xenapi.host.get_all()
# host_record = self.xend_session.session.xenapi.host.get_record(hosts[0])
# host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
# ret = host_metrics_record["memory_free"]
# return long(ret)
xen_free = int(self.xc.physinfo()['free_memory']*1024 *
self.MEM_OVERHEAD_FACTOR)
# now check for domains which have more memory assigned than actually
# used - do not count it as "free", because the domain is free to use it
# at any time
# assumption: self.refresh_memactual was called before
# (so domdict[id].memory_actual is up to date)
assigned_but_unused = reduce(
lambda acc, dom: acc + max(0, dom.last_target-dom.memory_current),
self.domdict.values(),
0
)
# If, at any time, Xen has less memory than XEN_FREE_MEM_MIN,
# it is a failure of qmemman. Collect as much data as possible to
# debug it
if xen_free < self.XEN_FREE_MEM_MIN:
self.log.error("Xen free = {!r} below acceptable value! "
"assigned_but_unused={!r}, domdict={!r}".format(
xen_free, assigned_but_unused, self.domdict))
elif xen_free < assigned_but_unused+self.XEN_FREE_MEM_MIN:
self.log.error("Xen free = {!r} too small for satisfy assignments! "
"assigned_but_unused={!r}, domdict={!r}".format(
xen_free, assigned_but_unused, self.domdict))
return xen_free - assigned_but_unused
#refresh information on memory assigned to all domains
def refresh_memactual(self):
for domain in self.xc.domain_getinfo():
id = str(domain['domid'])
if self.domdict.has_key(id):
self.domdict[id].memory_actual = domain['mem_kb']*1024
# real memory usage
self.domdict[id].memory_current = domain['mem_kb']*1024
# what VM is using or can use
self.domdict[id].memory_actual = max(
self.domdict[id].memory_current,
self.domdict[id].last_target
)
self.domdict[id].memory_maximum = self.xs.read('', '/local/domain/%s/memory/static-max' % str(id))
if self.domdict[id].memory_maximum:
self.domdict[id].memory_maximum = int(self.domdict[id].memory_maximum)*1024
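
For illustration, a minimal standalone sketch of the "assigned but unused"
accounting above, with made-up numbers (sizes in bytes, Python 2 as in the
rest of the tree):

MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
XEN_FREE_MEM_MIN = 25 * 1024 * 1024

class Dom(object):
    def __init__(self, last_target, memory_current):
        self.last_target = last_target        # what qmemman asked the domain to use
        self.memory_current = memory_current  # what the domain actually uses now

domdict = {
    '1': Dom(last_target=800 << 20, memory_current=600 << 20),  # 200 MiB assigned but unused
    '2': Dom(last_target=400 << 20, memory_current=450 << 20),  # over target, counts as 0
}

xen_free = int((1 << 30) * MEM_OVERHEAD_FACTOR)  # 1 GiB reported free, scaled down

assigned_but_unused = reduce(
    lambda acc, dom: acc + max(0, dom.last_target - dom.memory_current),
    domdict.values(), 0)

usable = xen_free - assigned_but_unused  # what the balancer may actually hand out
assert usable < xen_free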
@@ -151,21 +188,38 @@ class SystemState(object):
#perform memory ballooning, across all domains, to add "memsize" to Xen free memory
def do_balloon(self, memsize):
self.log.info('do_balloon(memsize={!r})'.format(memsize))
MAX_TRIES = 20
CHECK_PERIOD_S = 3
CHECK_MB_S = 100
niter = 0
prev_memory_actual = None
for i in self.domdict.keys():
self.domdict[i].no_progress = False
#: number of loop iterations for CHECK_PERIOD_S seconds
check_period = max(1, int((CHECK_PERIOD_S + 0.0) / self.BALOON_DELAY))
#: number of free memory bytes expected to get during CHECK_PERIOD_S
#: seconds
check_delta = CHECK_PERIOD_S * CHECK_MB_S * 1024 * 1024
#: helper array for holding free memory size, CHECK_PERIOD_S seconds
#: ago, at every loop iteration
xenfree_ring = [0] * check_period
while True:
self.log.debug('niter={:2d}/{:2d}'.format(niter, MAX_TRIES))
self.log.debug('niter={:2d}'.format(niter))
self.refresh_memactual()
xenfree = self.get_free_xen_memory()
self.log.info('xenfree={!r}'.format(xenfree))
if xenfree >= memsize + self.XEN_FREE_MEM_MIN:
self.inhibit_balloon_up()
return True
# fail the request if, over the past CHECK_PERIOD_S seconds,
# we got less than CHECK_MB_S MB/s on average
ring_slot = niter % check_period
if niter >= check_period and xenfree < xenfree_ring[ring_slot] + check_delta:
return False
xenfree_ring[ring_slot] = xenfree
if prev_memory_actual is not None:
for i in prev_memory_actual.keys():
if prev_memory_actual[i] == self.domdict[i].memory_actual:
@@ -174,7 +228,7 @@ class SystemState(object):
self.log.info('domain {} stuck at {}'.format(i, self.domdict[i].memory_actual))
memset_reqs = qubes.qmemman.algo.balloon(memsize + self.XEN_FREE_MEM_LEFT - xenfree, self.domdict)
self.log.info('memset_reqs={!r}'.format(memset_reqs))
if niter > MAX_TRIES or len(memset_reqs) == 0:
if len(memset_reqs) == 0:
return False
prev_memory_actual = {}
for i in memset_reqs:
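
The ring-buffer progress check above can be isolated into a short standalone
sketch (constants copied from SystemState, including the BALOON_DELAY
spelling; progressing() returns False once less than CHECK_MB_S MB/s was
freed over the last CHECK_PERIOD_S seconds):

BALOON_DELAY = 0.1
CHECK_PERIOD_S = 3
CHECK_MB_S = 100

check_period = max(1, int((CHECK_PERIOD_S + 0.0) / BALOON_DELAY))  # 30 iterations
check_delta = CHECK_PERIOD_S * CHECK_MB_S * 1024 * 1024            # 300 MiB

xenfree_ring = [0] * check_period
niter = 0

def progressing(xenfree):
    """Mirror of the in-loop check: True while ballooning makes progress."""
    global niter
    ring_slot = niter % check_period
    ok = niter < check_period or xenfree >= xenfree_ring[ring_slot] + check_delta
    xenfree_ring[ring_slot] = xenfree
    niter += 1
    return ok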
@@ -266,11 +320,11 @@ class SystemState(object):
self.log.debug('do_balance dom={!r} sleeping ntries={}'.format(
dom, ntries))
time.sleep(self.BALOON_DELAY)
self.refresh_memactual()
ntries -= 1
if ntries <= 0:
# Waiting hasn't helped; find which domain got stuck and
# abort balance (after distributing what we have)
self.refresh_memactual()
for rq2 in memset_reqs:
dom2, mem2 = rq2
if dom2 == dom:

qubes/storage/README.md (new file)

@@ -0,0 +1,5 @@
# WNI File storage
Before v3.1 there existed a draft WNI storage implementation. You can find it
in the git history (it was in the /core/storage directory, now gone).


@@ -39,14 +39,19 @@ import qubes.exc
import qubes.utils
BLKSIZE = 512
CONFIG_FILE = '/etc/qubes/storage.conf'
class VMStorage(object):
class Storage(object):
'''Class for handling VM virtual disks.
This is the base class for all other implementations, mostly with Xen on Linux
in mind.
''' # pylint: disable=abstract-class-little-used
root_img = None
private_img = None
volatile_img = None
def __init__(self, vm, private_img_size=None, root_img_size=None):
#: Domain for which we manage storage
@@ -66,29 +71,54 @@ class VMStorage(object):
self.drive = None
@property
def private_img(self):
'''Path to the private image'''
return self.abspath(qubes.config.vm_files['private_img'])
def get_config_params(self):
args = {}
args['rootdev'] = self.root_dev_config()
args['privatedev'] = self.private_dev_config()
args['volatiledev'] = self.volatile_dev_config()
args['otherdevs'] = self.other_dev_config()
args['kerneldir'] = self.kernels_dir
return args
@property
def root_img(self):
'''Path to the root image'''
return self.vm.template.root_img if hasattr(self.vm, 'template') \
else self.abspath(qubes.config.vm_files['root_img'])
def root_dev_config(self):
raise NotImplementedError()
def private_dev_config(self):
raise NotImplementedError()
@property
def rootcow_img(self):
'''Path to the root COW image'''
return self.abspath(qubes.config.vm_files['rootcow_img'])
def volatile_dev_config(self):
raise NotImplementedError()
def other_dev_config(self):
if self.modules_img is not None:
return self.format_disk_dev(self.modules_img, None,
self.modules_dev, rw=self.modules_img_rw)
elif self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
if drive_type == 'hd':
drive_type = 'disk'
@property
def volatile_img(self):
'''Path to the volatile image'''
return self.abspath(qubes.config.vm_files['volatile_img'])
rw = (drive_type == 'disk')
if drive_domain.lower() == "dom0":
drive_domain = None
return self.format_disk_dev(drive_path,
None,
self.modules_dev,
rw=rw,
type=drive_type,
domain=drive_domain)
else:
return ''
def format_disk_dev(self, path, script, vdev, rw=True, type='disk',
domain=None):
raise NotImplementedError()
@property
@@ -111,7 +141,13 @@ class VMStorage(object):
Depending on domain, this may be global or inside domain's dir.
'''
return os.path.join(self.kernels_dir, 'modules.img')
modules_path = os.path.join(self.kernels_dir, 'modules.img')
if os.path.exists(modules_path):
return modules_path
else:
return None
@property
@@ -142,7 +178,7 @@ class VMStorage(object):
# We prefer to use Linux's cp, because it nicely handles sparse files
try:
subprocess.check_call(['cp', source, destination])
subprocess.check_call(['cp', '--reflink=auto', source, destination])
except subprocess.CalledProcessError:
raise IOError('Error while copying {!r} to {!r}'.format(
source, destination))
@@ -208,7 +244,7 @@ class VMStorage(object):
.. note::
The arguments are in different order than in :program:`cp` utility.
.. versionchanged:: 3.0
.. versionchanged:: 4.0
This is now dummy method that just passes everything to
:py:func:`os.rename`.
@@ -256,7 +292,7 @@ class VMStorage(object):
# For StandaloneVM create it only if it does not already exist
# (eg after backup-restore)
if hasattr(self.vm, 'volatile_img') \
if hasattr(self, 'volatile_img') \
and not os.path.exists(self.vm.volatile_img):
self.vm.log.info(
'Creating volatile image: {0}'.format(self.volatile_img))
@@ -317,13 +353,198 @@ def get_disk_usage(path):
return ret
def get_storage(vm):
'''Factory yielding storage class instances for domains.
#def get_storage(vm):
# '''Factory yielding storage class instances for domains.
#
# :raises ImportError: when storage class specified in config cannot be found
# :raises KeyError: when storage class specified in config cannot be found
# '''
# pkg, cls = qubes.config.defaults['storage_class'].strip().rsplit('.', 1)
#
# # this may raise ImportError or KeyError, that's okay
# return importlib.import_module(pkg).__dict__[cls](vm)
:raises ImportError: when storage class specified in config cannot be found
:raises KeyError: when storage class specified in config cannot be found
'''
pkg, cls = qubes.config.defaults['storage_class'].strip().rsplit('.', 1)
# this may raise ImportError or KeyError, that's okay
return importlib.import_module(pkg).__dict__[cls](vm)
def dump(o):
""" Returns a string represention of the given object
Args:
o (object): anything that response to `__module__` and `__class__`
Given the class :class:`qubes.storage.QubesVmStorage` it returns
'qubes.storage.QubesVmStorage' as string
"""
return o.__module__ + '.' + o.__class__.__name__
def load(string):
""" Given a dotted full module string representation of a class it loads it
Args:
string (str) i.e. 'qubes.storage.xen.QubesXenVmStorage'
Returns:
type
See also:
:func:`qubes.storage.dump`
"""
if not type(string) is str:
# This is a hack which allows giving a real class to a vm instead of a
# string as string_class parameter.
return string
components = string.split(".")
module_path = ".".join(components[:-1])
klass = components[-1]
module = __import__(module_path, fromlist=[klass])
return getattr(module, klass)
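
A short usage sketch for dump()/load(), assuming they are called from code
that can import this tree (XenStorage is defined in qubes/storage/xen.py
below):

from qubes.storage import load
from qubes.storage.xen import XenStorage

assert load('qubes.storage.xen.XenStorage') is XenStorage
# real classes are passed through unchanged, per the hack noted above
assert load(XenStorage) is XenStorage
# dump() is the inverse for instances: dump(some_xen_storage) returns the
# string 'qubes.storage.xen.XenStorage'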
def get_pool(name, vm):
""" Instantiates the storage for the specified vm """
config = _get_storage_config_parser()
klass = _get_pool_klass(name, config)
keys = [k for k in config.options(name) if k != 'driver' and k != 'class']
values = [config.get(name, o) for o in keys]
config_kwargs = dict(zip(keys, values))
if name == 'default':
kwargs = defaults['pool_config'].copy()
kwargs.update(config_kwargs)
else:
kwargs = config_kwargs
return klass(vm, **kwargs)
def pool_exists(name):
""" Check if the specified pool exists """
try:
_get_pool_klass(name)
return True
except StoragePoolException:
return False
def add_pool(name, **kwargs):
""" Add a storage pool to config."""
config = _get_storage_config_parser()
config.add_section(name)
for key, value in kwargs.iteritems():
config.set(name, key, value)
_write_config(config)
def remove_pool(name):
""" Remove a storage pool from config file. """
config = _get_storage_config_parser()
config.remove_section(name)
_write_config(config)
def _write_config(config):
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)
def _get_storage_config_parser():
""" Instantiates a `ConfigParaser` for specified storage config file.
Returns:
RawConfigParser
"""
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
return config
def _get_pool_klass(name, config=None):
""" Returns the storage klass for the specified pool.
Args:
name: The pool name.
config: If ``config`` is not specified
`_get_storage_config_parser()` is called.
Returns:
type: A class inheriting from `QubesVmStorage`
"""
if config is None:
config = _get_storage_config_parser()
if not config.has_section(name):
raise StoragePoolException('Unknown storage pool ' + name)
if config.has_option(name, 'class'):
klass = load(config.get(name, 'class'))
elif config.has_option(name, 'driver'):
pool_driver = config.get(name, 'driver')
klass = load(defaults['pool_drivers'][pool_driver])
else:
raise StoragePoolException('Unknown storage pool driver ' + name)
return klass
class StoragePoolException(QubesException):
pass
class Pool(object):
def __init__(self, vm, dir_path):
assert vm is not None
assert dir_path is not None
self.vm = vm
self.dir_path = dir_path
self.create_dir_if_not_exists(self.dir_path)
self.vmdir = self.vmdir_path(vm, self.dir_path)
appvms_path = os.path.join(self.dir_path, 'appvms')
self.create_dir_if_not_exists(appvms_path)
servicevms_path = os.path.join(self.dir_path, 'servicevms')
self.create_dir_if_not_exists(servicevms_path)
vm_templates_path = os.path.join(self.dir_path, 'vm-templates')
self.create_dir_if_not_exists(vm_templates_path)
def vmdir_path(self, vm, pool_dir):
""" Returns the path to vmdir depending on the type of the VM.
The default QubesOS file storage saves the vm images in three
different directories depending on the ``QubesVM`` type:
* ``appvms`` for ``QubesAppVm`` or ``QubesHvm``
* ``vm-templates`` for ``QubesTemplateVm`` or ``QubesTemplateHvm``
* ``servicevms`` for any subclass of ``QubesNetVm``
Args:
vm: a QubesVM
pool_dir: the root directory of the pool
Returns:
string (str) absolute path to the directory where the vm files
are stored
"""
if vm.is_appvm():
subdir = 'appvms'
elif vm.is_template():
subdir = 'vm-templates'
elif vm.is_netvm():
subdir = 'servicevms'
elif vm.is_disposablevm():
subdir = 'appvms'
return os.path.join(pool_dir, subdir, vm.template.name + '-dvm')
else:
raise QubesException(vm.type() + ' unknown vm type')
return os.path.join(pool_dir, subdir, vm.name)
def create_dir_if_not_exists(self, path):
""" Check if a directory exists in if not create it.
This method does not create any parent directories.
"""
if not os.path.exists(path):
os.mkdir(path)
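
Putting vmdir_path() together with the stock /var/lib/qubes pool root from
defaults['pool_config'] gives the following on-disk layout (a sketch with
made-up VM names):

import os

POOL_DIR = '/var/lib/qubes'  # default pool root
for subdir, name in [
        ('appvms', 'work'),              # QubesAppVm / QubesHvm
        ('vm-templates', 'fedora-23'),   # QubesTemplateVm / QubesTemplateHvm
        ('servicevms', 'sys-net'),       # QubesNetVm subclasses
        ('appvms', 'fedora-23-dvm')]:    # DisposableVM: <template name>-dvm
    print os.path.join(POOL_DIR, subdir, name)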


@@ -39,7 +39,7 @@ import qubes.storage
import qubes.vm.templatevm
class XenVMStorage(qubes.storage.VMStorage):
class XenStorage(qubes.storage.Storage):
'''Class for VM storage of Xen VMs.
'''
@@ -48,10 +48,53 @@ class XenVMStorage(qubes.storage.VMStorage):
volatile_dev = 'xvdc'
modules_dev = 'xvdd'
def __init__(self, vm, vmdir, **kwargs):
""" Instantiate the storage.
Args:
vm: a QubesVM
vmdir: the directory holding this VM's images
"""
assert vm is not None
assert vmdir is not None
super(XenStorage, self).__init__(vm, **kwargs)
self.vmdir = vmdir
@property
def private_img(self):
'''Path to the private image'''
return self.abspath(qubes.config.vm_files['private_img'])
@property
def root_img(self):
'''Path to the root image'''
return self.vm.template.root_img if hasattr(self.vm, 'template') \
else self.abspath(qubes.config.vm_files['root_img'])
@property
def rootcow_img(self):
'''Path to the root COW image'''
if isinstance(self.vm, qubes.vm.templatevm.TemplateVM):
return self.abspath(qubes.config.vm_files['rootcow_img'])
return None
@property
def volatile_img(self):
'''Path to the volatile image'''
return self.abspath(qubes.config.vm_files['volatile_img'])
# pylint: disable=redefined-builtin
@staticmethod
def _format_disk_dev(path, vdev, script=None, rw=True, type='disk',
def format_disk_dev(path, vdev, script=None, rw=True, type='disk',
domain=None):
if path is None:
return ''
@@ -76,52 +119,50 @@ class XenVMStorage(qubes.storage.VMStorage):
return lxml.etree.tostring(element)
def _get_rootdev(self):
def root_dev_config(self):
if isinstance(self.vm, qubes.vm.templatevm.TemplateVM):
return self._format_disk_dev(
'{}:{}'.format(self.root_img, self.vm.rootcow_img),
return self.format_disk_dev(
'{root}:{rootcow}'.format(
root=self.root_img,
rootcow=self.rootcow_img),
self.root_dev,
script='block-origin')
elif self.vm.hvm and hasattr(self.vm, 'template'):
# HVM template-based VM - only one device-mapper layer, in dom0
# (root+volatile)
# HVM detection based on the 'kernel' property is a massive hack,
# but follows from the assumption that a VM needs a Qubes-specific kernel
# (actually initramfs) to assemble the second layer of device-mapper
return self.format_disk_dev(
'{root}:{volatile}'.format(
root=self.vm.template.storage.root_img,
volatile=self.volatile_img),
self.root_dev,
script='block-snapshot')
elif hasattr(self.vm, 'template'):
return self._format_disk_dev(
'{}:{}'.format(self.root_img, self.vm.template.rootcow_img),
# any other template-based VM - two device-mapper layers: one
# in dom0 (here) from root+root-cow, and another one from
# this+volatile.img
return self.format_disk_dev(
'{root}:{template_rootcow}'.format(
root=self.root_img,
template_rootcow=self.vm.template.storage.rootcow_img),
self.root_dev,
script='block-snapshot',
rw=False)
else:
return self._format_disk_dev(self.root_img, self.root_dev)
# standalone qube
return self.format_disk_dev(self.root_img, self.root_dev)
def get_config_params(self):
args = {}
args['rootdev'] = self._get_rootdev()
args['privatedev'] = self._format_disk_dev(self.private_img,
self.private_dev)
args['volatiledev'] = self._format_disk_dev(self.volatile_img,
self.volatile_dev)
def private_dev_config(self):
return self.format_disk_dev(self.private_img, self.private_dev)
args['kerneldir'] = self.kernels_dir
if self.modules_img is not None:
args['otherdevs'] = self._format_disk_dev(self.modules_img,
self.modules_dev, rw=self.modules_img_rw)
elif self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
if drive_domain.lower() == "dom0":
drive_domain = None
args['otherdevs'] = self._format_disk_dev(drive_path,
self.modules_dev,
rw=(drive_type == "disk"),
type=drive_type,
domain=drive_domain)
else:
args['otherdevs'] = ''
return args
def volatile_dev_config(self):
return self.format_disk_dev(self.volatile_img, self.volatile_dev)
def create_on_disk_private_img(self, source_template=None):
@@ -185,3 +226,70 @@ class XenVMStorage(qubes.storage.VMStorage):
f_cow.close()
f_root.close()
os.umask(old_umask)
def reset_volatile_storage(self, source_template=None):
if source_template is None:
source_template = self.vm.template
if source_template is not None:
# template-based VM with only one device-mapper layer -
# volatile.img used as upper layer on root.img, no root-cow.img
# intermediate layer
# XXX marmarek says this is either always true or always false;
# rootcow_img got smashed in 35cb82 (#1573)
# this may have remained after the HVM check
# this probably should have happened anyway
if not source_template.storage.rootcow_img:
if os.path.exists(self.volatile_img):
if self.vm.debug:
if os.path.getmtime(source_template.storage.root_img) \
> os.path.getmtime(self.volatile_img):
self.vm.log.warning(
'Template has changed, resetting root.img')
else:
self.vm.log.warning(
'Debug mode: not resetting root.img; if you'
' want to force root.img reset, either'
' update template VM, or remove volatile.img'
' file.')
return
os.remove(self.volatile_img)
# FIXME stat on f_root; with open() ...
f_volatile = open(self.volatile_img, "w")
f_root = open(source_template.storage.root_img, "r")
f_root.seek(0, os.SEEK_END)
f_volatile.truncate(f_root.tell()) # make empty sparse file of the same size as root.img
f_volatile.close()
f_root.close()
return # XXX why is that? super() does not run
super(XenStorage, self).reset_volatile_storage(
source_template=source_template)
def prepare_for_vm_startup(self):
super(XenStorage, self).prepare_for_vm_startup()
if self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
if drive_domain.lower() != "dom0":
# XXX "VM '{}' holding '{}' does not exists".format(
drive_vm = self.vm.app.domains[drive_domain]
if not drive_vm.is_running():
raise qubes.exc.QubesVMNotRunningError(drive_vm,
'VM {!r} holding {!r} isn\'t running'.format(
drive_domain, drive_path))
if self.rootcow_img and not os.path.exists(self.rootcow_img):
self.commit_template_changes()
class XenPool(qubes.storage.Pool):
def get_storage(self):
""" Returns an instantiated ``XenStorage``. """
return XenStorage(self.vm, vmdir=self.vmdir)


@@ -238,7 +238,7 @@ class QubesTestCase(unittest.TestCase):
:param xml2: second element
:type xml1: :py:class:`lxml.etree._Element`
:type xml2: :py:class:`lxml.etree._Element`
'''
''' # pylint: disable=invalid-name
self.assertEqual(xml1.tag, xml2.tag)
self.assertEqual(xml1.text, xml2.text)


@@ -26,6 +26,8 @@
import multiprocessing
import os
import shutil
import subprocess
import tempfile
import unittest
import time
@@ -34,6 +36,8 @@ from qubes.qubes import QubesVmCollection, QubesException, system_path
import qubes
import qubes.vm.qubesvm
import qubes.tests
from qubes.qubes import QubesVmLabels
class TC_00_Basic(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
def setUp(self):
@@ -81,6 +85,12 @@ class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
newname = self.make_vm_name('newname')
self.assertEqual(self.vm.name, self.vmname)
self.vm.write_firewall_conf({'allow': False, 'allowDns': False})
self.vm.autostart = True
self.addCleanup(os.system,
'sudo systemctl -q disable qubes-vm@{}.service || :'.
format(self.vmname))
pre_rename_firewall = self.vm.get_firewall_conf()
#TODO: change to setting property when implemented
self.vm.set_name(newname)
@@ -106,6 +116,16 @@ class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
self.assertFalse(os.path.exists(
os.path.join(os.getenv("HOME"), ".local/share/applications",
self.vmname + "-firefox.desktop")))
self.assertEquals(pre_rename_firewall, self.vm.get_firewall_conf())
with self.assertNotRaises((QubesException, OSError)):
self.vm.write_firewall_conf({'allow': False})
self.assertTrue(self.vm.autostart)
self.assertTrue(os.path.exists(
'/etc/systemd/system/multi-user.target.wants/'
'qubes-vm@{}.service'.format(newname)))
self.assertFalse(os.path.exists(
'/etc/systemd/system/multi-user.target.wants/'
'qubes-vm@{}.service'.format(self.vmname)))
def test_010_netvm(self):
if self.qc.get_default_netvm() is None:
@@ -166,6 +186,664 @@ class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
with self.assertRaises(ValueError):
self.vm.dispvm_netvm = self.vm
def test_030_clone(self):
testvm1 = self.qc.add_new_vm(
"QubesAppVm",
name=self.make_vm_name("vm"),
template=self.qc.get_default_template())
testvm1.create_on_disk(verbose=False)
testvm2 = self.qc.add_new_vm(testvm1.__class__.__name__,
name=self.make_vm_name("clone"),
template=testvm1.template,
)
testvm2.clone_attrs(src_vm=testvm1)
testvm2.clone_disk_files(src_vm=testvm1, verbose=False)
# qubes.xml reload
self.save_and_reload_db()
testvm1 = self.qc[testvm1.qid]
testvm2 = self.qc[testvm2.qid]
self.assertEquals(testvm1.label, testvm2.label)
self.assertEquals(testvm1.netvm, testvm2.netvm)
self.assertEquals(testvm1.uses_default_netvm,
testvm2.uses_default_netvm)
self.assertEquals(testvm1.kernel, testvm2.kernel)
self.assertEquals(testvm1.kernelopts, testvm2.kernelopts)
self.assertEquals(testvm1.uses_default_kernel,
testvm2.uses_default_kernel)
self.assertEquals(testvm1.uses_default_kernelopts,
testvm2.uses_default_kernelopts)
self.assertEquals(testvm1.memory, testvm2.memory)
self.assertEquals(testvm1.maxmem, testvm2.maxmem)
self.assertEquals(testvm1.pcidevs, testvm2.pcidevs)
self.assertEquals(testvm1.include_in_backups,
testvm2.include_in_backups)
self.assertEquals(testvm1.default_user, testvm2.default_user)
self.assertEquals(testvm1.services, testvm2.services)
self.assertEquals(testvm1.get_firewall_conf(),
testvm2.get_firewall_conf())
# now some non-default values
testvm1.netvm = None
testvm1.uses_default_netvm = False
testvm1.label = QubesVmLabels['orange']
testvm1.memory = 512
firewall = testvm1.get_firewall_conf()
firewall['allowDns'] = False
firewall['allowYumProxy'] = False
firewall['rules'] = [{'address': '1.2.3.4',
'netmask': 24,
'proto': 'tcp',
'portBegin': 22,
'portEnd': 22,
}]
testvm1.write_firewall_conf(firewall)
testvm3 = self.qc.add_new_vm(testvm1.__class__.__name__,
name=self.make_vm_name("clone2"),
template=testvm1.template,
)
testvm3.clone_attrs(src_vm=testvm1)
testvm3.clone_disk_files(src_vm=testvm1, verbose=False)
# qubes.xml reload
self.save_and_reload_db()
testvm1 = self.qc[testvm1.qid]
testvm3 = self.qc[testvm3.qid]
self.assertEquals(testvm1.label, testvm3.label)
self.assertEquals(testvm1.netvm, testvm3.netvm)
self.assertEquals(testvm1.uses_default_netvm,
testvm3.uses_default_netvm)
self.assertEquals(testvm1.kernel, testvm3.kernel)
self.assertEquals(testvm1.kernelopts, testvm3.kernelopts)
self.assertEquals(testvm1.uses_default_kernel,
testvm3.uses_default_kernel)
self.assertEquals(testvm1.uses_default_kernelopts,
testvm3.uses_default_kernelopts)
self.assertEquals(testvm1.memory, testvm3.memory)
self.assertEquals(testvm1.maxmem, testvm3.maxmem)
self.assertEquals(testvm1.pcidevs, testvm3.pcidevs)
self.assertEquals(testvm1.include_in_backups,
testvm3.include_in_backups)
self.assertEquals(testvm1.default_user, testvm3.default_user)
self.assertEquals(testvm1.services, testvm3.services)
self.assertEquals(testvm1.get_firewall_conf(),
testvm3.get_firewall_conf())
class TC_02_QvmPrefs(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
def setup_appvm(self):
self.testvm = self.qc.add_new_vm(
"QubesAppVm",
name=self.make_vm_name("vm"),
template=self.qc.get_default_template())
self.testvm.create_on_disk(verbose=False)
self.save_and_reload_db()
self.qc.unlock_db()
def setup_hvm(self):
self.testvm = self.qc.add_new_vm(
"QubesHVm",
name=self.make_vm_name("hvm"))
self.testvm.create_on_disk(verbose=False)
self.save_and_reload_db()
self.qc.unlock_db()
def pref_set(self, name, value, valid=True):
p = subprocess.Popen(
['qvm-prefs', '-s', '--', self.testvm.name, name, value],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(stdout, stderr) = p.communicate()
if valid:
self.assertEquals(p.returncode, 0,
"qvm-prefs -s .. '{}' '{}' failed: {}{}".format(
name, value, stdout, stderr
))
else:
self.assertNotEquals(p.returncode, 0,
"qvm-prefs should reject value '{}' for "
"property '{}'".format(value, name))
def pref_get(self, name):
p = subprocess.Popen(['qvm-prefs', '-g', self.testvm.name, name],
stdout=subprocess.PIPE)
(stdout, _) = p.communicate()
self.assertEquals(p.returncode, 0)
return stdout.strip()
bool_test_values = [
('true', 'True', True),
('False', 'False', True),
('0', 'False', True),
('1', 'True', True),
('invalid', '', False)
]
def execute_tests(self, name, values):
"""
Helper function, which executes tests for given property.
:param values: list of tuples (value, expected, valid),
where 'value' is what should be set and 'expected' is what
qvm-prefs should return as the property value, and 'valid' marks valid and
invalid values - if it's False, qvm-prefs should reject the value
:return: None
"""
for (value, expected, valid) in values:
self.pref_set(name, value, valid)
if valid:
self.assertEquals(self.pref_get(name), expected)
def test_000_kernel(self):
self.setup_appvm()
default_kernel = self.qc.get_default_kernel()
self.execute_tests('kernel', [
('default', default_kernel, True),
(default_kernel, default_kernel, True),
('invalid', '', False),
])
def test_001_include_in_backups(self):
self.setup_appvm()
self.execute_tests('include_in_backups', self.bool_test_values)
def test_002_qrexec_timeout(self):
self.setup_appvm()
self.execute_tests('qrexec_timeout', [
('60', '60', True),
('0', '0', True),
('-10', '', False),
('invalid', '', False)
])
def test_003_internal(self):
self.setup_appvm()
self.execute_tests('include_in_backups', self.bool_test_values)
def test_004_label(self):
self.setup_appvm()
self.execute_tests('label', [
('red', 'red', True),
('blue', 'blue', True),
('amber', '', False),
])
def test_005_kernelopts(self):
self.setup_appvm()
self.execute_tests('kernelopts', [
('option', 'option', True),
('default', 'nopat', True),
('', '', True),
])
def test_006_template(self):
templates = [tpl for tpl in self.qc.values() if tpl.is_template()]
if not templates:
self.skip("No templates installed")
some_template = templates[0].name
self.setup_appvm()
self.execute_tests('template', [
(some_template, some_template, True),
('invalid', '', False),
])
def test_007_memory(self):
self.setup_appvm()
qh = qubes.qubes.QubesHost()
memory_total = qh.memory_total
self.execute_tests('memory', [
('300', '300', True),
('1500', '1500', True),
# TODO:
#('500M', '500', True),
#(str(self.testvm.maxmem+500), '', False),
(str(2*memory_total), '', False),
])
def test_008_maxmem(self):
self.setup_appvm()
qh = qubes.qubes.QubesHost()
memory_total = qh.memory_total
self.execute_tests('memory', [
('300', '300', True),
('1500', '1500', True),
# TODO:
#('500M', '500', True),
#(str(self.testvm.memory-50), '', False),
(str(2*memory_total), '', False),
])
def test_009_autostart(self):
self.setup_appvm()
self.execute_tests('autostart', self.bool_test_values)
def test_010_pci_strictreset(self):
self.setup_appvm()
self.execute_tests('pci_strictreset', self.bool_test_values)
def test_011_dispvm_netvm(self):
self.setup_appvm()
default_netvm = self.qc.get_default_netvm().name
netvms = [tpl for tpl in self.qc.values() if tpl.is_netvm()]
if not netvms:
self.skip("No netvms installed")
some_netvm = netvms[0].name
if some_netvm == default_netvm:
if len(netvms) <= 1:
self.skip("At least two NetVM/ProxyVM required")
some_netvm = netvms[1].name
self.execute_tests('dispvm_netvm', [
(some_netvm, some_netvm, True),
(default_netvm, default_netvm, True),
('default', default_netvm, True),
('none', '', True),
(self.testvm.name, '', False),
('invalid', '', False)
])
def test_012_mac(self):
self.setup_appvm()
default_mac = self.testvm.mac
self.execute_tests('mac', [
('00:11:22:33:44:55', '00:11:22:33:44:55', True),
('auto', default_mac, True),
# TODO:
#('00:11:22:33:44:55:66', '', False),
('invalid', '', False),
])
def test_013_default_user(self):
self.setup_appvm()
self.execute_tests('default_user', [
('someuser', self.testvm.template.default_user, True)
# TODO: tests for standalone VMs
])
def test_014_pcidevs(self):
self.setup_appvm()
self.execute_tests('pcidevs', [
('[]', '[]', True),
('[ "00:00.0" ]', "['00:00.0']", True),
('invalid', '', False),
('[invalid]', '', False),
# TODO:
#('["12:12.0"]', '', False)
])
def test_015_name(self):
self.setup_appvm()
self.execute_tests('name', [
('invalid!@#name', '', False),
# TODO: duplicate name test - would fail for now...
])
newname = self.make_vm_name('newname')
self.pref_set('name', newname, True)
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
self.testvm = self.qc.get_vm_by_name(newname)
self.assertEquals(self.pref_get('name'), newname)
def test_016_vcpus(self):
self.setup_appvm()
self.execute_tests('vcpus', [
('1', '1', True),
('100', '', False),
('-1', '', False),
('invalid', '', False),
])
def test_017_debug(self):
self.setup_appvm()
self.execute_tests('debug', [
('on', 'True', True),
('off', 'False', True),
('true', 'True', True),
('0', 'False', True),
('invalid', '', False)
])
def test_018_netvm(self):
self.setup_appvm()
default_netvm = self.qc.get_default_netvm().name
netvms = [tpl for tpl in self.qc.values() if tpl.is_netvm()]
if not netvms:
self.skip("No netvms installed")
some_netvm = netvms[0].name
if some_netvm == default_netvm:
if len(netvms) <= 1:
self.skip("At least two NetVM/ProxyVM required")
some_netvm = netvms[1].name
self.execute_tests('netvm', [
(some_netvm, some_netvm, True),
(default_netvm, default_netvm, True),
('default', default_netvm, True),
('none', '', True),
(self.testvm.name, '', False),
('invalid', '', False)
])
def test_019_guiagent_installed(self):
self.setup_hvm()
self.execute_tests('guiagent_installed', self.bool_test_values)
def test_020_qrexec_installed(self):
self.setup_hvm()
self.execute_tests('qrexec_installed', self.bool_test_values)
def test_021_seamless_gui_mode(self):
self.setup_hvm()
# should reject seamless mode without gui agent
self.execute_tests('seamless_gui_mode', [
('True', '', False),
('False', 'False', True),
])
self.execute_tests('guiagent_installed', [('True', 'True', True)])
self.execute_tests('seamless_gui_mode', self.bool_test_values)
def test_022_drive(self):
self.setup_hvm()
self.execute_tests('drive', [
('hd:dom0:/tmp/drive.img', 'hd:dom0:/tmp/drive.img', True),
('hd:/tmp/drive.img', 'hd:dom0:/tmp/drive.img', True),
('cdrom:dom0:/tmp/drive.img', 'cdrom:dom0:/tmp/drive.img', True),
('cdrom:/tmp/drive.img', 'cdrom:dom0:/tmp/drive.img', True),
('/tmp/drive.img', 'cdrom:dom0:/tmp/drive.img', True),
('hd:drive.img', '', False),
('drive.img', '', False),
])
def test_023_timezone(self):
self.setup_hvm()
self.execute_tests('timezone', [
('localtime', 'localtime', True),
('0', '0', True),
('3600', '3600', True),
('-7200', '-7200', True),
('invalid', '', False),
])
def test_024_pv_reject_hvm_props(self):
self.setup_appvm()
self.execute_tests('guiagent_installed', [('False', '', False)])
self.execute_tests('qrexec_installed', [('False', '', False)])
self.execute_tests('drive', [('/tmp/drive.img', '', False)])
self.execute_tests('timezone', [('localtime', '', False)])
def test_025_hvm_reject_pv_props(self):
self.setup_hvm()
self.execute_tests('kernel', [('default', '', False)])
self.execute_tests('kernelopts', [('default', '', False)])
class TC_03_QvmRevertTemplateChanges(qubes.tests.SystemTestsMixin,
qubes.tests.QubesTestCase):
def setup_pv_template(self):
self.test_template = self.qc.add_new_vm(
"QubesTemplateVm",
name=self.make_vm_name("pv-clone"),
)
self.test_template.clone_attrs(src_vm=self.qc.get_default_template())
self.test_template.clone_disk_files(
src_vm=self.qc.get_default_template(),
verbose=False)
self.save_and_reload_db()
self.qc.unlock_db()
def setup_hvm_template(self):
self.test_template = self.qc.add_new_vm(
"QubesTemplateHVm",
name=self.make_vm_name("hvm"),
)
self.test_template.create_on_disk(verbose=False)
self.save_and_reload_db()
self.qc.unlock_db()
def get_rootimg_checksum(self):
p = subprocess.Popen(['sha1sum', self.test_template.root_img],
stdout=subprocess.PIPE)
return p.communicate()[0]
def _do_test(self):
checksum_before = self.get_rootimg_checksum()
self.test_template.start(verbose=False)
self.shutdown_and_wait(self.test_template)
checksum_changed = self.get_rootimg_checksum()
if checksum_before == checksum_changed:
self.log.warning("template not modified, test result will be "
"unreliable")
with self.assertNotRaises(subprocess.CalledProcessError):
subprocess.check_call(['sudo', 'qvm-revert-template-changes',
'--force', self.test_template.name])
checksum_after = self.get_rootimg_checksum()
self.assertEquals(checksum_before, checksum_after)
def test_000_revert_pv(self):
"""
Test qvm-revert-template-changes for PV template
"""
self.setup_pv_template()
self._do_test()
def test_000_revert_hvm(self):
"""
Test qvm-revert-template-changes for HVM template
"""
# TODO: have some system there, so the root.img will get modified
self.setup_hvm_template()
self._do_test()
class TC_04_DispVM(qubes.tests.SystemTestsMixin,
qubes.tests.QubesTestCase):
@staticmethod
def get_dispvm_template_name():
vmdir = os.readlink('/var/lib/qubes/dvmdata/vmdir')
return os.path.basename(vmdir)
def test_000_firewall_propagation(self):
"""
Check firewall propagation VM->DispVM, when the VM has some firewall rules
"""
# FIXME: currently qubes.xml doesn't contain this information...
dispvm_template_name = self.get_dispvm_template_name()
dispvm_template = self.qc.get_vm_by_name(dispvm_template_name)
testvm1 = self.qc.add_new_vm("QubesAppVm",
name=self.make_vm_name('vm1'),
template=self.qc.get_default_template())
testvm1.create_on_disk(verbose=False)
firewall = testvm1.get_firewall_conf()
firewall['allowDns'] = False
firewall['allowYumProxy'] = False
firewall['rules'] = [{'address': '1.2.3.4',
'netmask': 24,
'proto': 'tcp',
'portBegin': 22,
'portEnd': 22,
}]
testvm1.write_firewall_conf(firewall)
self.qc.save()
self.qc.unlock_db()
testvm1.start()
p = testvm1.run("qvm-run --dispvm 'qubesdb-read /name; echo ERROR;"
" read x'",
passio_popen=True)
dispvm_name = p.stdout.readline().strip()
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
dispvm = self.qc.get_vm_by_name(dispvm_name)
self.assertIsNotNone(dispvm, "DispVM {} not found in qubes.xml".format(
dispvm_name))
# check if firewall was propagated to the DispVM
self.assertEquals(testvm1.get_firewall_conf(),
dispvm.get_firewall_conf())
# and only there (#1608)
self.assertNotEquals(dispvm_template.get_firewall_conf(),
dispvm.get_firewall_conf())
# then modify some rule
firewall = dispvm.get_firewall_conf()
firewall['rules'] = [{'address': '4.3.2.1',
'netmask': 24,
'proto': 'tcp',
'portBegin': 22,
'portEnd': 22,
}]
dispvm.write_firewall_conf(firewall)
# and check again if wasn't saved anywhere else (#1608)
self.assertNotEquals(dispvm_template.get_firewall_conf(),
dispvm.get_firewall_conf())
self.assertNotEquals(testvm1.get_firewall_conf(),
dispvm.get_firewall_conf())
p.stdin.write('\n')
p.wait()
def test_001_firewall_propagation(self):
"""
Check firewall propagation VM->DispVM, when the VM has no firewall rules
"""
testvm1 = self.qc.add_new_vm("QubesAppVm",
name=self.make_vm_name('vm1'),
template=self.qc.get_default_template())
testvm1.create_on_disk(verbose=False)
self.qc.save()
self.qc.unlock_db()
# FIXME: currently qubes.xml doesn't contain this information...
dispvm_template_name = self.get_dispvm_template_name()
dispvm_template = self.qc.get_vm_by_name(dispvm_template_name)
original_firewall = None
if os.path.exists(dispvm_template.firewall_conf):
original_firewall = tempfile.TemporaryFile()
with open(dispvm_template.firewall_conf) as f:
original_firewall.write(f.read())
try:
firewall = dispvm_template.get_firewall_conf()
firewall['allowDns'] = False
firewall['allowYumProxy'] = False
firewall['rules'] = [{'address': '1.2.3.4',
'netmask': 24,
'proto': 'tcp',
'portBegin': 22,
'portEnd': 22,
}]
dispvm_template.write_firewall_conf(firewall)
testvm1.start()
p = testvm1.run("qvm-run --dispvm 'qubesdb-read /name; echo ERROR;"
" read x'",
passio_popen=True)
dispvm_name = p.stdout.readline().strip()
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
dispvm = self.qc.get_vm_by_name(dispvm_name)
self.assertIsNotNone(dispvm, "DispVM {} not found in qubes.xml".format(
dispvm_name))
# check if firewall was propagated to the DispVM from the right VM
self.assertEquals(testvm1.get_firewall_conf(),
dispvm.get_firewall_conf())
# and only there (#1608)
self.assertNotEquals(dispvm_template.get_firewall_conf(),
dispvm.get_firewall_conf())
# then modify some rule
firewall = dispvm.get_firewall_conf()
firewall['rules'] = [{'address': '4.3.2.1',
'netmask': 24,
'proto': 'tcp',
'portBegin': 22,
'portEnd': 22,
}]
dispvm.write_firewall_conf(firewall)
# and check again if wasn't saved anywhere else (#1608)
self.assertNotEquals(dispvm_template.get_firewall_conf(),
dispvm.get_firewall_conf())
self.assertNotEquals(testvm1.get_firewall_conf(),
dispvm.get_firewall_conf())
p.stdin.write('\n')
p.wait()
finally:
if original_firewall:
original_firewall.seek(0)
with open(dispvm_template.firewall_conf, 'w') as f:
f.write(original_firewall.read())
original_firewall.close()
else:
os.unlink(dispvm_template.firewall_conf)
def test_002_cleanup(self):
self.qc.unlock_db()
p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
'qubes.VMShell', 'dom0', 'DEFAULT'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
(stdout, _) = p.communicate(input="echo test; qubesdb-read /name; "
"echo ERROR\n")
self.assertEquals(p.returncode, 0)
lines = stdout.splitlines()
self.assertEqual(lines[0], "test")
dispvm_name = lines[1]
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
dispvm = self.qc.get_vm_by_name(dispvm_name)
self.assertIsNone(dispvm, "DispVM {} still exists in qubes.xml".format(
dispvm_name))
def test_003_cleanup_destroyed(self):
"""
Check if DispVM is properly removed even if it terminated itself (#1660)
:return:
"""
self.qc.unlock_db()
p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
'qubes.VMShell', 'dom0', 'DEFAULT'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
p.stdin.write("qubesdb-read /name\n")
p.stdin.write("echo ERROR\n")
p.stdin.write("poweroff\n")
# do not close p.stdin on purpose - wait for automatic disconnect when
# the domain is destroyed
timeout = 30
while timeout > 0:
if p.poll() is not None:  # returncode 0 is falsy, so compare against None
break
time.sleep(1)
timeout -= 1
# includes check for None - timeout
self.assertEquals(p.returncode, 0)
lines = p.stdout.read().splitlines()
dispvm_name = lines[0]
self.assertNotEquals(dispvm_name, "ERROR")
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
dispvm = self.qc.get_vm_by_name(dispvm_name)
self.assertIsNone(dispvm, "DispVM {} still exists in qubes.xml".format(
dispvm_name))
# vim: ts=4 sw=4 et


@@ -42,6 +42,15 @@ LOG_PATH = '/var/log/qubes/qmemman.log'
system_state = qubes.qmemman.SystemState()
global_lock = thread.allocate_lock()
# If XS_Watcher handles a meminfo event before @introduceDomain, it will
# use an incomplete domain list for that and may redistribute memory
# allocated to some VM, but not yet used (see #1389).
# To fix that, system_state should be updated (domain list refreshed)
# before processing other changes, every time some process has requested
# memory for a new VM, before releasing the lock. XS_Watcher will then
# check this flag before processing any other event.
force_refresh_domain_list = False
def only_in_first_list(l1, l2):
ret = []
@@ -65,41 +74,65 @@ class XS_Watcher(object):
self.log.debug('XS_Watcher()')
self.handle = xen.lowlevel.xs.xs()
self.handle.watch('@introduceDomain', WatchType(XS_Watcher.domain_list_changed, None))
self.handle.watch('@releaseDomain', WatchType(XS_Watcher.domain_list_changed, None))
self.handle.watch('@introduceDomain', WatchType(
XS_Watcher.domain_list_changed, False))
self.handle.watch('@releaseDomain', WatchType(
XS_Watcher.domain_list_changed, False))
self.watch_token_dict = {}
def domain_list_changed(self, refresh_only=False):
"""
Check if any domain was created/destroyed. If it was, update
appropriate list. Then redistribute memory.
def domain_list_changed(self, param):
self.log.debug('domain_list_changed(param={!r})'.format(param))
:param refresh_only: If True, only refresh the domain list, do not
redistribute memory. In this mode, caller must already hold
global_lock.
"""
self.log.debug('domain_list_changed(only_refresh={!r})'.format(
refresh_only))
curr = self.handle.ls('', '/local/domain')
self.log.debug('curr={!r}'.format(curr))
got_lock = False
if not refresh_only:
self.log.debug('acquiring global_lock')
global_lock.acquire()
got_lock = True
self.log.debug('global_lock acquired')
try:
curr = self.handle.ls('', '/local/domain')
if curr is None:
return
if curr == None:
return
# check if the domain is really there; it may happen that some empty
# directories are left in xenstore
curr = filter(
lambda x:
self.handle.read('',
'/local/domain/{}/domid'.format(x)
) is not None,
curr
)
self.log.debug('curr={!r}'.format(curr))
self.log.debug('acquiring global_lock')
global_lock.acquire()
self.log.debug('global_lock acquired')
for i in only_in_first_list(curr, self.watch_token_dict.keys()):
# new domain has been created
watch = WatchType(XS_Watcher.meminfo_changed, i)
self.watch_token_dict[i] = watch
self.handle.watch(get_domain_meminfo_key(i), watch)
system_state.add_domain(i)
for i in only_in_first_list(curr, self.watch_token_dict.keys()):
#new domain has been created
watch = WatchType(XS_Watcher.meminfo_changed, i)
self.watch_token_dict[i] = watch
self.handle.watch(get_domain_meminfo_key(i), watch)
system_state.add_domain(i)
for i in only_in_first_list(self.watch_token_dict.keys(), curr):
# domain destroyed
self.handle.unwatch(get_domain_meminfo_key(i), self.watch_token_dict[i])
self.watch_token_dict.pop(i)
system_state.del_domain(i)
finally:
if got_lock:
global_lock.release()
self.log.debug('global_lock released')
for i in only_in_first_list(self.watch_token_dict.keys(), curr):
#domain destroyed
self.handle.unwatch(get_domain_meminfo_key(i), self.watch_token_dict[i])
self.watch_token_dict.pop(i)
system_state.del_domain(i)
global_lock.release()
self.log.debug('global_lock released')
system_state.do_balance()
if not refresh_only:
system_state.do_balance()
def meminfo_changed(self, domain_id):
@@ -112,6 +145,8 @@ class XS_Watcher(object):
self.log.debug('acquiring global_lock')
global_lock.acquire()
self.log.debug('global_lock acquired')
if force_refresh_domain_list:
self.domain_list_changed(refresh_only=True)
system_state.refresh_meminfo(domain_id, untrusted_meminfo_key)
@@ -148,6 +183,8 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler):
if len(self.data) == 0:
self.log.info('EOF')
if got_lock:
global force_refresh_domain_list
force_refresh_domain_list = True
global_lock.release()
self.log.debug('global_lock released')
return


@@ -46,6 +46,10 @@ parser.add_argument('--property', '--prop', '-p',
action=qubes.tools.PropertyAction,
help='set domain\'s property, like "internal", "memory" or "vcpus"')
parser.add_argument('--pool-name', '--pool', '-P',
action=qubes.tools.SinglePropertyAction,
help='specify the storage pool to use')
parser.add_argument('--template', '-t',
action=qubes.tools.SinglePropertyAction,
help='specify the TemplateVM to use')

View File

@ -89,6 +89,10 @@ parser.add_argument('--no-start-guid',
action='store_false', dest='start_guid', default=True,
help='do not start the gui daemon (ignored)')
parser.add_argument('--skip-if-running',
action='store_true', default=False,
help='Do not fail if the qube is already running')
#parser.add_option ("--tray", action="store_true", dest="tray", default=False,
# help="Use tray notifications instead of stdout" )
@ -108,6 +112,9 @@ def main(args=None):
vm = args.vm
if args.skip_if_running and vm.is_running():
return
if args.drive is not None:
if 'drive' not in (prop.__name__ for prop in vm.property_list()):
parser.error(

View File

@ -57,8 +57,8 @@ def get_timezone():
return None
if tz_info.st_nlink > 1:
p = subprocess.Popen(['find', '/usr/share/zoneinfo',
    '-inum', str(tz_info.st_ino), '-print', '-quit'],
    stdout=subprocess.PIPE)
tz_path = p.communicate()[0].strip()
return tz_path.replace('/usr/share/zoneinfo/', '')
return None
@ -104,3 +104,9 @@ def parse_size(size):
return int(size)*multiplier
raise qubes.exc.QubesException("Invalid size: {0}.".format(size))
def urandom(size):
rand = os.urandom(size)
if rand is None:
raise IOError('failed to read urandom')
return hashlib.sha512(rand).digest()
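# (A note on this helper, per our reading: the size argument controls how
# much kernel entropy is drawn, but the return value is always the 64-byte
# SHA-512 digest of it, so raw os.urandom output is never handed out
# directly.)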

View File

@ -341,8 +341,10 @@ class BaseVM(qubes.PropertyHolder):
args['netmask'] = self.netmask
args['netdev'] = lxml.etree.tostring(
self.lvxml_net_dev(self.ip, self.mac, self.netvm))
args['network_begin'] = ''
args['network_end'] = ''
args['no_network_begin'] = '<!--'
args['no_network_end'] = '-->'
else:
args['ip'] = ''
args['mac'] = ''
@ -351,8 +353,10 @@ class BaseVM(qubes.PropertyHolder):
args['dns2'] = ''
args['netmask'] = ''
args['netdev'] = ''
args['network_begin'] = '<!--'
args['network_end'] = '-->'
args['no_network_begin'] = ''
args['no_network_end'] = ''
args.update(self.storage.get_config_params())
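# Illustrative fragment of the libvirt template these markers are meant
# for (assumed shape, not verbatim from vm-config/*.xml); exactly one of
# the two sections ends up wrapped in XML comments in the rendered config:
#
#   {network_begin}
#     <interface type="ethernet"> ... </interface>
#   {network_end}
#   {no_network_begin}
#     ... alternative, network-less settings ...
#   {no_network_end}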

View File

@ -254,6 +254,7 @@ class NetVMMixin(object):
@qubes.events.handler('property-set:netvm')
def on_property_set_netvm(self, event, name, new_netvm, old_netvm=None):
# pylint: disable=unused-argument
# TODO offline_mode
if self.is_running() and new_netvm is not None \
and not new_netvm.is_running():
raise qubes.exc.QubesVMNotStartedError(new_netvm,

View File

@ -75,6 +75,9 @@ def _setter_name(self, prop, value):
if len(value) > 31:
raise ValueError('{} value must be shorter than 32 characters'.format(
prop.__name__))
# this regexp does not contain '+'; if it had it, we should specifically
# disallow 'lost+found' #1440
if re.match(r"^[a-zA-Z][a-zA-Z0-9_-]*$", value) is None:
raise ValueError('{} value contains illegal characters'.format(
prop.__name__))
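# For instance (illustrative): 'work-web2' is accepted, while '2ndvm'
# (leading digit), 'my.vm' (dot) and 'lost+found' ('+') are all rejected
# by the regexp above.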
@ -101,7 +104,7 @@ def _setter_kernel(self, prop, value):
if not os.path.exists(dirname):
raise qubes.exc.QubesPropertyValueError(self, prop, value,
'Kernel {!r} not installed'.format(value))
for filename in ('vmlinuz', 'initramfs'):
if not os.path.exists(os.path.join(dirname, filename)):
raise qubes.exc.QubesPropertyValueError(
'Kernel {!r} not properly installed: missing {!r} file'.format(
@ -119,10 +122,6 @@ def _setter_label(self, prop, value):
return self.app.get_label(value)
def _default_conf_file(self, name=None):
return (name or self.name) + '.conf'
class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
'''Base functionality of Qubes VM shared between all VMs.'''
@ -162,11 +161,6 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
ls_width=36,
doc='UUID from libvirt.')
conf_file = qubes.property('conf_file', type=str,
default=_default_conf_file,
saver=(lambda self, prop, value: self.relative_path(value)),
doc='XXX libvirt config file?')
# XXX this should be part of qubes.xml
firewall_conf = qubes.property('firewall_conf', type=str,
default='firewall.xml')
@ -196,6 +190,13 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
ls_width=2,
doc='FIXME')
pool_name = qubes.property('pool_name',
default='default',
doc='storage pool for this qube devices')
dir_path = property((lambda self: self.storage.vmdir),
doc='Root directory for files related to this domain')
# XXX swallowed uses_default_kernel
# XXX not applicable to HVM?
kernel = qubes.property('kernel', type=str,
@ -209,6 +210,7 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
kernelopts = qubes.property('kernelopts', type=str, load_stage=4,
default=(lambda self: qubes.config.defaults['kernelopts_pcidevs'] \
if len(self.devices['pci']) > 0 \
else self.template.kernelopts if hasattr(self, 'template') \
else qubes.config.defaults['kernelopts']),
ls_width=30,
doc='Kernel command line passed to domain.')
@ -291,7 +293,15 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
if self.libvirt_domain is None:
return -1
try:
    return self.libvirt_domain.ID()
except libvirt.libvirtError as e:
    if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
        return -1
    else:
        self.log.exception('libvirt error code: {!r}'.format(
            e.get_error_code()))
        raise
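# (VIR_ERR_NO_DOMAIN here covers the race where the domain disappears
# between the None check above and the ID() call; it is reported the same
# way as a domain that was never running.)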
@property
@ -327,16 +337,6 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
return self._qdb_connection
def _get_dir_path(self, name=None):
return os.path.join(
qubes.config.system_path['qubes_base_dir'],
self.dir_path_prefix,
name if name is not None else self.name)
dir_path = property(_get_dir_path,
doc='Root directory for files related to this domain')
# XXX this should go to AppVM?
@property
def private_img(self):
@ -367,14 +367,13 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
@property
def icon_path(self):
    return os.path.join(self.dir_path, 'icon.png')

@property
def conf_file(self):
    return os.path.join(self.dir_path, self.name + '.conf')
# XXX I don't know what to do with these; probably should be isinstance(...)
@ -444,8 +443,15 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
# Always set if meminfo-writer should be active or not
self.services['meminfo-writer'] = True
if xml is None:
# new qube, disable updates check if requested for new qubes
# TODO: when features (#1637) are done, migrate to plugin
if not self.app.check_updates_vm:
self.services['qubes-update-check'] = False
# Initialize VM image storage class
self.storage = qubes.storage.get_pool(
    self.pool_name, self).get_storage(self)
# fire hooks
if xml is None:
@ -483,11 +489,16 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
@qubes.events.handler('property-pre-set:name')
def on_property_pre_set_name(self, event, name, newvalue, oldvalue=None):
# pylint: disable=unused-argument
# TODO not self.is_stopped() would be more appropriate
if self.is_running():
raise qubes.exc.QubesVMNotHaltedError(
'Cannot change name of running domain {!r}'.format(oldvalue))
if self.autostart:
subprocess.check_call(['sudo', 'systemctl', '-q', 'disable',
'qubes-vm@{}.service'.format(oldvalue)])
@qubes.events.handler('property-set:name')
def on_property_set_name(self, event, name, new_name, old_name=None):
@ -517,6 +528,9 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
self._update_libvirt_domain()
if self.autostart:
self.autostart = self.autostart
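# (Assigning autostart to itself is deliberate, as far as we can tell:
# it re-fires the property setter after the rename above, so the
# qubes-vm@<name>.service systemd unit gets re-registered under the new
# name.)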
@qubes.events.handler('property-pre-set:autostart')
def on_property_pre_set_autostart(self, event, prop, name, value,
@ -607,71 +621,72 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
self.storage.prepare_for_vm_startup()
self._update_libvirt_domain()
qmemman_client = self.request_memory(mem_required)
# Bind pci devices to pciback driver
for pci in self.devices['pci']:
try:
    node = self.app.vmm.libvirt_conn.nodeDeviceLookupByName(
        'pci_0000_' + pci.replace(':', '_').replace('.', '_'))
except libvirt.libvirtError as e:
    if e.get_error_code() == libvirt.VIR_ERR_NO_NODE_DEVICE:
        raise qubes.exc.QubesException(
            'PCI device {!r} does not exist (domain {!r})'.format(
                pci, self.name))
    # unexpected error -- re-raise so it is not silently swallowed
    raise
try:
node.dettach()
except libvirt.libvirtError as e:
    if e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
        # already detached
        pass
    else:
        raise
self.libvirt_domain.createWithFlags(libvirt.VIR_DOMAIN_START_PAUSED)
try:
    if preparing_dvm:
        self.services['qubes-dvm'] = True

    self.log.info('Setting Qubes DB info for the VM')
    self.start_qubesdb()
    self.create_qdb_entries()

    self.log.info('Updating firewall rules')
    for vm in self.app.domains:
        if vm.is_proxyvm() and vm.is_running():
            vm.write_iptables_xenstore_entry()

    self.log.warning('Activating the {} VM'.format(self.name))
    self.libvirt_domain.resume()

    # close() is not really needed, because the descriptor is close-on-exec
    # anyway, the reason to postpone close() is that possibly xl is not done
    # constructing the domain after its main process exits
    # so we close() when we know the domain is up
    # the successful unpause is some indicator of it
    if qmemman_client:
        qmemman_client.close()

    # if self._start_guid_first and start_guid and not preparing_dvm \
    #         and os.path.exists('/var/run/shm.id'):
    #     self.start_guid()

    if not preparing_dvm:
        self.start_qrexec_daemon()

    if start_guid and not preparing_dvm \
            and os.path.exists('/var/run/shm.id'):
        self.start_guid()

    self.fire_event('domain-started',
        preparing_dvm=preparing_dvm, start_guid=start_guid)

except:  # pylint: disable=bare-except
    self.force_shutdown()
    raise
def shutdown(self, force=False):
@ -685,6 +700,19 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
raise qubes.exc.QubesVMNotStartedError(self)
self.fire_event_pre('pre-domain-shutdown', force=force)
# try to gracefully detach PCI devices before shutdown, to mitigate
# timeouts on forcible detach at domain destroy; if that fails, too bad
for pci in self.devices['pci']:
try:
self.libvirt_domain.detachDevice(self.lvxml_pci_dev(pci))
except libvirt.libvirtError as e:
self.log.warning(
'error while gracefully detaching PCI device ({!r}) during'
' shutdown of {!r}; error code: {!r}; continuing'
' anyway'.format(pci, self.name, e.get_error_code()),
exc_info=1)
self.libvirt_domain.shutdown()
@ -850,7 +878,8 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
def run_service(self, service, source=None, user=None,
        passio_popen=False, input=None, localcmd=None, gui=False,
        wait=True):
'''Run service on this VM
**passio_popen**, **input** and **localcmd** are mutually exclusive.
@ -862,21 +891,51 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
:param str input: string passed as input to service
''' # pylint: disable=redefined-builtin
if len([i for i in (input, passio_popen, localcmd) if i]) > 1:
    raise ValueError(
        'input, passio_popen and localcmd cannot be used together')
if input:
localcmd = 'printf %s {}'.format(pipes.quote(input))
source = 'dom0' if source is None else self.app.domains[source].name
return self.run('QUBESRPC {} {}'.format(service, source),
    localcmd=localcmd, passio_popen=passio_popen, user=user, wait=wait,
    gui=gui)
def request_memory(self, mem_required=None):
    if not qmemman_present:
        return

    # overhead of per-qube/per-vcpu Xen structures,
    # taken from OpenStack nova/virt/xenapi/driver.py
    # see https://wiki.openstack.org/wiki/XenServer/Overhead
    # add an extra MB because Nova rounds up to MBs
    MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
    MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2

    if mem_required is None:
        mem_required = int(self.memory) * 1024 * 1024

    qmemman_client = qubes.qmemman.client.QMemmanClient()
    try:
        mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE \
            + self.vcpus * MEM_OVERHEAD_PER_VCPU
        got_memory = qmemman_client.request_memory(
            mem_required_with_overhead)

    except IOError as e:
        raise IOError('Failed to connect to qmemman: {!s}'.format(e))

    if not got_memory:
        qmemman_client.close()
        raise qubes.exc.QubesMemoryError(self)

    return qmemman_client
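# Worked example (illustrative numbers): for a qube with memory=400 and
# vcpus=2, mem_required defaults to 400 MiB, and the amount requested from
# qmemman is 400 MiB + 4 MiB base + 2 * 1.5 MiB per-vcpu overhead = 407 MiB.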
def start_guid(self, extra_guid_args=None):
'''Launch gui daemon.
@ -900,22 +959,15 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
guid_cmd += ['-v', '-v']
# elif not verbose:
else:
    guid_cmd += ['-q']
retcode = subprocess.call(guid_cmd)
if retcode != 0:
raise qubes.exc.QubesVMError(self,
'Cannot start qubes-guid for domain {!r}'.format(self.name))
self.notify_monitor_layout()
self.wait_for_session()
@ -946,13 +998,13 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
self.log.info('Starting Qubes DB')
# FIXME #1694 #1241
retcode = subprocess.call([
qubes.config.system_path["qubesdb_daemon_path"],
str(self.xid),
self.name])
if retcode != 0:
self.force_shutdown()
raise qubes.exc.QubesException('Cannot execute qubesdb-daemon')
def wait_for_session(self):
@ -970,6 +1022,19 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
p.communicate(input=self.default_user)
# TODO event, extension
def notify_monitor_layout(self):
monitor_layout = qubes.monitor_layout.get_monitor_layout()
# notify qube only if we've got a non-empty monitor_layout or else we
# break proper qube resolution set by gui-agent
if not monitor_layout:
return
self.log.info('Sending monitor layout')
qubes.monitor_layout.notify_vm(self, monitor_layout)
# TODO move to storage
def create_on_disk(self, source_template=None):
'''Create files needed for VM.
@ -1007,6 +1072,7 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
self.fire_event('domain-created-on-disk', source_template)
# TODO move to storage
def resize_private_img(self, size):
'''Resize private image.'''
@ -1031,6 +1097,47 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
raise qubes.exc.QubesException('resize2fs failed')
# TODO move to storage
def resize_root_img(self, size, allow_start=False):
if hasattr(self, 'template'):
raise qubes.exc.QubesVMError(self,
'Cannot resize root.img of template based qube. Resize the'
' root.img of the template instead.')
# TODO self.is_halted
if self.is_running():
raise qubes.exc.QubesVMNotHaltedError(self,
'Cannot resize root.img of a running qube')
if size < self.get_root_img_sz():
raise qubes.exc.QubesValueError(
'For your own safety, shrinking of root.img is disabled. If you'
' really know what you are doing, use `truncate` manually.')
with open(self.root_img, 'a+b') as fd:
fd.truncate(size)
if False: #self.hvm:
return
if not allow_start:
raise qubes.exc.QubesException(
'The qube has to be started to complete the operation, but is'
' required not to start. Either run the operation again allowing'
' starting of the qube this time, or run resize2fs in the qube'
' manually.')
self.start(start_guid=False)
# TODO run_service #1695
self.run('resize2fs /dev/mapper/dmroot', user='root',
wait=True, gui=False)
self.shutdown()
while self.is_running(): #1696
time.sleep(1)
def remove_from_disk(self):
'''Remove domain remnants from disk.'''
self.fire_event('domain-removed-from-disk')
@ -1180,28 +1287,34 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
libvirt_domain = self.libvirt_domain
if libvirt_domain is None:
return "NA"
try:
    if libvirt_domain.isActive():
        if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
            return "Paused"
        elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_CRASHED:
            return "Crashed"
        elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTDOWN:
            return "Halting"
        elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
            return "Dying"
        elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PMSUSPENDED:
            return "Suspended"
        else:
            if not self.is_fully_usable():
                return "Transient"
            else:
                return "Running"
    else:
        return 'Halted'
except libvirt.libvirtError as e:
    if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
        return 'Halted'
    else:
        raise

assert False
def is_running(self):
@ -1212,6 +1325,7 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
:rtype: bool
'''
# TODO context manager #1693
return self.libvirt_domain and self.libvirt_domain.isActive()
@ -1283,10 +1397,26 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
if self.libvirt_domain is None:
return 0
try:
    if not self.libvirt_domain.isActive():
        return 0
    return self.libvirt_domain.info()[1]
except libvirt.libvirtError as e:
    if e.get_error_code() in (
            # qube no longer exists
            libvirt.VIR_ERR_NO_DOMAIN,
            # libxl_domain_info failed (race condition from isActive)
            libvirt.VIR_ERR_INTERNAL_ERROR,
            ):
        return 0
    else:
        self.log.exception(
            'libvirt error code: {!r}'.format(e.get_error_code()))
        raise
def get_mem_static_max(self):
@ -1299,23 +1429,65 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
if self.libvirt_domain is None:
return 0
try:
    return self.libvirt_domain.maxMemory()
except libvirt.libvirtError as e:
    if e.get_error_code() in (
            # qube no longer exists
            libvirt.VIR_ERR_NO_DOMAIN,
            # libxl_domain_info failed (race condition from isActive)
            libvirt.VIR_ERR_INTERNAL_ERROR,
            ):
        return 0
    else:
        self.log.exception(
            'libvirt error code: {!r}'.format(e.get_error_code()))
        raise
def get_cputime(self):
    '''Get total CPU time burned by this domain since start.

    :returns: CPU time usage [FIXME unit].
    :rtype: FIXME
    '''

    if self.libvirt_domain is None:
        return 0

    try:
        if not self.libvirt_domain.isActive():
            return 0

        # this does not work, because libvirt
        # return self.libvirt_domain.getCPUStats(
        #     libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)[0]['cpu_time']/10**9

        return self.libvirt_domain.info()[4]

    except libvirt.libvirtError as e:
        if e.get_error_code() in (
                # qube no longer exists
                libvirt.VIR_ERR_NO_DOMAIN,
                # libxl_domain_info failed (race condition from isActive)
                libvirt.VIR_ERR_INTERNAL_ERROR,
                ):
            return 0
        else:
            self.log.exception(
                'libvirt error code: {!r}'.format(e.get_error_code()))
            raise
# XXX shouldn't this go only to vms that have root image?
@ -1502,7 +1674,15 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
self.qdb.write('/name', self.name)
self.qdb.write('/type', self.__class__.__name__)
self.qdb.write('/updateable', str(self.updateable))
self.qdb.write('/persistence', 'full' if self.updateable else 'rw-only')
self.qdb.write('/debug', str(int(self.debug)))
try:
self.qdb.write('/template', self.template.name)
except AttributeError:
self.qdb.write('/template', '')
self.qdb.write('/random-seed',
base64.b64encode(qubes.utils.urandom(64)))
if self.provides_network:
self.qdb.write('/network-provider/gateway', self.gateway)
@ -1544,17 +1724,15 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
def _update_libvirt_domain(self):
'''Re-initialise :py:attr:`libvirt_domain`.'''
domain_config = self.create_config_file()
    try:
        self._libvirt_domain = self.app.vmm.libvirt_conn.defineXML(
            domain_config)
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_OS_TYPE \
                and e.get_str2() == 'hvm':
            raise qubes.exc.QubesVMError(self,
                'HVM qubes are not supported on this machine. '
                'Check BIOS settings for VT-x/AMD-V extensions.')
        else:
            raise

View File

@ -28,6 +28,8 @@ from optparse import OptionParser
import subprocess
import os
import sys
from qubes.qubes import vmm
def handle_vm(vms, label, new_value = None):
functions = { # label: [ getter, setter ],
@ -116,6 +118,8 @@ def main():
else:
options.do_set = True
vmm.offline_mode = True
if options.do_set:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()

View File

@ -20,27 +20,37 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
from optparse import OptionParser
import optparse
import os
import sys
from qubes.qubes import QubesVmCollection
from qubes.qubesutils import updates_vms_toggle,updates_dom0_toggle,\
    updates_dom0_status,updates_vms_status
from qubes.qubes import vmm
def main():
    usage = "%prog enable|disable|status\n"\
            " Enable or disable globally checking for updates (both dom0 and VM)"
    parser = OptionParser(usage)
    parser.add_option("--offline-mode", dest="offline_mode",
        action="store_true", default=False,
        help=optparse.SUPPRESS_HELP)
    (options, args) = parser.parse_args()

    if len(args) < 1:
        parser.error("You must provide an action")
    action = args[0]
    if action not in ['enable', 'disable', 'status']:
        parser.error("Invalid action")

    if options.offline_mode:
        vmm.offline_mode = True
qvm_collection = QubesVmCollection()
if action == 'status':
@ -56,9 +66,16 @@ def main():
updates_vms_toggle(qvm_collection, False)
else:
if updates_dom0_status(qvm_collection):
print "enabled"
print "dom0: enabled"
else:
print "disabled"
print "dom0: disabled"
status_vms = updates_vms_status(qvm_collection)
if status_vms is None:
print "vms: mixed"
elif status_vms:
print "vms: enabled"
else:
print "vms: disabled"
if action != 'status':
qvm_collection.save()
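# Example `status` output after these changes (illustrative):
#   dom0: enabled
#   vms: mixed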

View File

@ -63,6 +63,10 @@ def main():
"list-message-digest-algorithms'")
parser.add_option ("-z", "--compress", action="store_true", dest="compress", default=False,
help="Compress the backup")
parser.add_option ("-Z", "--compress-filter", action="store",
dest="compress_filter", default=False,
help="Compress the backup using specified filter "
"program (default: gzip)")
parser.add_option ("--debug", action="store_true", dest="debug",
default=False, help="Enable (a lot of) debug output")
@ -181,7 +185,7 @@ def main():
backup_do(base_backup_dir, files_to_backup, passphrase,
progress_callback=print_progress,
encrypted=options.encrypt,
compressed=options.compress,
compressed=options.compress_filter or options.compress,
appvm=appvm, **kwargs)
except QubesException as e:
print >>sys.stderr, "ERROR: %s" % str(e)

View File

@ -54,6 +54,10 @@ def main():
parser.add_option ("--skip-conflicting", action="store_true", dest="skip_conflicting", default=False,
help="Do not restore VMs that are already present on the host")
parser.add_option ("--rename-conflicting", action="store_true",
dest="rename_conflicting", default=False,
help="Restore VMs that are already present on the host under different name")
parser.add_option ("--force-root", action="store_true", dest="force_root", default=False,
help="Force to run, even with root privileges")
@ -193,6 +197,9 @@ def main():
else:
print >> sys.stderr, "Remove VMs with conflicting names from the host before proceeding."
print >> sys.stderr, "... or use --skip-conflicting to restore only those VMs that do not exist on the host."
print >> sys.stderr, "... or use --rename-conflicting to " \
"restore those VMs under modified " \
"name (with number at the end)"
exit (1)
print "The above VMs will be copied and added to your system."

View File

@ -17,37 +17,43 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.

import os
import sys
from optparse import OptionParser

from qubes.qubes import QubesVmCollection
from qubes.qubes import QubesAppVm, QubesTemplateVm, QubesHVm
from qubes.qubes import QubesException
def main():
usage = "usage: %prog [options] <src-name> <new-name>\n"\
"Clones an existing VM by copying all its disk files"
    parser = OptionParser(usage)
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
        default=True)
    parser.add_option("-p", "--path", dest="dir_path",
        help="Specify path to the template directory")
    parser.add_option("--force-root", action="store_true", dest="force_root",
        default=False,
        help="Force to run, even with root privileges")
    parser.add_option("-P", "--pool", dest="pool_name",
        help="Specify into which storage pool to clone")

    (options, args) = parser.parse_args()
    if (len(args) != 2):
        parser.error(
            "You must specify at least the src and dst TemplateVM names!")
srcname = args[0]
dstname = args[1]
if hasattr(os, "geteuid") and os.geteuid() == 0:
if not options.force_root:
print >> sys.stderr, "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
print >> sys.stderr, "*** Running this tool as root is" + \
" strongly discouraged, this will lead you in permissions" + \
"problems."
print >> sys.stderr, "Retry as unprivileged user."
print >> sys.stderr, "... or use --force-root to continue anyway."
exit(1)
@ -57,12 +63,21 @@ def main():
qvm_collection.load()
src_vm = qvm_collection.get_vm_by_name(srcname)
    if src_vm is None:
        print >> sys.stderr, \
            "ERROR: A VM with the name '{0}' does not exist in the system." \
            .format(srcname)
exit(1)
if options.pool_name is None:
pool_name = src_vm.pool_name
else:
pool_name = options.pool_name
if qvm_collection.get_vm_by_name(dstname) is not None:
print >> sys.stderr, "ERROR: A VM with the name '{0}' already exists in the system.".format(dstname)
print >> sys.stderr, \
"ERROR: A VM with the name '{0}' already exists in the system." \
.format(dstname)
exit(1)
if src_vm.is_disposablevm():
@ -70,19 +85,21 @@ def main():
exit(1)
dst_vm = qvm_collection.add_new_vm(src_vm.__class__.__name__,
                                       name=dstname, template=src_vm.template,
                                       pool_name=pool_name,
                                       dir_path=options.dir_path,
                                       installed_by_rpm=False)
try:
        dst_vm.clone_attrs(src_vm)
        dst_vm.clone_disk_files(src_vm=src_vm, verbose=options.verbose)
    except (IOError, OSError) as err:
        print >> sys.stderr, "ERROR: {0}".format(err)
        qvm_collection.pop(dst_vm.qid)
        dst_vm.remove_from_disk()
        exit(1)
qvm_collection.save()
qvm_collection.unlock_db()
main()

View File

@ -46,16 +46,12 @@ if ! /usr/lib/qubes/qubes-prepare-saved-domain.sh \
exit 1
fi
DEFAULT=/var/lib/qubes/dvmdata/default-savefile
CURRENT=/var/run/qubes/current-savefile
SHMDIR=/dev/shm/qubes
SHMCOPY=$SHMDIR/current-savefile
rm -f $ROOT $DEFAULT $CURRENT
ln -s "/var/lib/qubes/appvms/$DVMTMPL/dvm-savefile" $DEFAULT
ln -s "/var/lib/qubes/vm-templates/$TEMPLATENAME/root.img" $ROOT
if [ -f /var/lib/qubes/dvmdata/dont-use-shm ] ; then
ln -s $DEFAULT $CURRENT
else

View File

@ -34,6 +34,10 @@ def main():
usage = "usage: %prog <vm-name> <size>"
parser = OptionParser (usage)
parser.add_option("--allow-start", action="store_true",
dest="allow_start", default=False,
help="Allow VM to be started to complete the operation")
(options, args) = parser.parse_args ()
if (len (args) != 2):
parser.error ("You must specify VM name and new size!")
@ -57,7 +61,7 @@ def main():
exit(1)
try:
        vm.resize_root_img(size_bytes, allow_start=options.allow_start)
except (IOError, OSError, QubesException) as err:
print >> sys.stderr, "ERROR: {0}".format(err)
exit (1)

View File

@ -26,6 +26,27 @@ from optparse import OptionParser
import subprocess
import os
import sys
from qubes.qubes import vmm
import re
def find_devices_of_class(klass):
p = subprocess.Popen(["/sbin/lspci", "-mm", "-n"], stdout=subprocess.PIPE)
result = p.communicate()
retcode = p.returncode
if retcode != 0:
print "ERROR when executing lspci!"
raise IOError
rx_netdev = re.compile(r"^([0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f]) \"" +
klass)
for dev in str(result[0]).splitlines():
match = rx_netdev.match(dev)
if match is not None:
dev_bdf = match.group(1)
assert dev_bdf is not None
yield dev_bdf
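# Sample `lspci -mm -n` line this regexp is meant to match (illustrative
# values):  00:19.0 "0200" "8086" "153a" -r04 "17aa" "220e"
# With klass="02" (network controllers) the yielded BDF is '00:19.0'.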
def main():
usage = "usage: %prog -l [options] <vm-name>\n"\
@ -37,6 +58,12 @@ def main():
parser.add_option ("-l", "--list", action="store_true", dest="do_list", default=False)
parser.add_option ("-a", "--add", action="store_true", dest="do_add", default=False)
parser.add_option ("-d", "--delete", action="store_true", dest="do_delete", default=False)
parser.add_option("-C", "--add-class", action="store_true",
dest="do_add_class", default=False,
help="Add all devices of given class (net, usb)")
parser.add_option ("--offline-mode", dest="offline_mode",
action="store_true", default=False,
help="Offline mode")
(options, args) = parser.parse_args ()
if (len (args) < 1):
@ -44,11 +71,15 @@ def main():
vmname = args[0]
    if options.do_list + options.do_add + options.do_delete + \
            options.do_add_class > 1:
        print >> sys.stderr, "Only one of -l -a -d -C is allowed!"
        exit(1)
    if options.offline_mode:
        vmm.offline_mode = True

    if options.do_add or options.do_delete or options.do_add_class:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
@ -73,6 +104,26 @@ def main():
qvm_collection.save()
qvm_collection.unlock_db()
elif options.do_add_class:
if len(args) < 2:
print >> sys.stderr, "You must specify the PCI device class to add"
exit(1)
klass = args[1]
if klass == 'net':
devs = find_devices_of_class("02")
elif klass == 'usb':
devs = find_devices_of_class("0c03")
else:
print >> sys.stderr, "Supported classes: net, usb"
exit(1)
for dev in devs:
vm.pci_add(dev)
qvm_collection.save()
qvm_collection.unlock_db()
elif options.do_delete:
if len (args) < 2:
print >> sys.stderr, "You must specify the PCI device to delete"

View File

@ -75,7 +75,7 @@ def main():
if vm.installed_by_rpm and not options.remove_from_db_only:
if options.verbose:
print >> sys.stderr, "This VM has been installed by RPM, use rpm -e <pkg name> to remove it!"
print >> sys.stderr, "This VM has been installed by RPM, use yum remove <pkg name> to remove it!"
exit (1)
try:

View File

@ -36,6 +36,10 @@ def main():
help="Force operation, even if may damage other VMs (eg shutdown of NetVM)")
parser.add_option ("--wait", action="store_true", dest="wait_for_shutdown", default=False,
help="Wait for the VM(s) to shutdown")
parser.add_option("--wait-time", action="store", dest="wait_time",
default=defaults["shutdown_counter_max"],
help="Timout after which VM will be killed when --wait "
"is used")
parser.add_option ("--all", action="store_true", dest="shutdown_all", default=False,
help="Shutdown all running VMs")
parser.add_option ("--exclude", action="append", dest="exclude_list",
@ -107,7 +111,7 @@ def main():
continue
else:
halting_vms.append(vm)
if shutdown_counter > defaults["shutdown_counter_max"]:
if shutdown_counter > int(options.wait_time):
# kill the VM
if options.verbose:
print >> sys.stderr, "Killing the (apparently hanging) VM '{0}'...".format(vm.name)

View File

@ -102,8 +102,10 @@ def main():
if verbose:
print >> sys.stderr, '--> Syncing dom0 clock.'
    subprocess.check_call(['sudo', 'date', '-u', '-Iseconds', '-s', date_out],
        stdout=None if verbose else open(os.devnull, 'w'))
    subprocess.check_call(['sudo', 'hwclock', '--systohc'],
        stdout=None if verbose else open(os.devnull, 'w'))
# Sync other VMs clock
for vm in qvm_collection.values():
@ -111,8 +113,8 @@ def main():
if verbose:
print >> sys.stderr, '--> Syncing \'%s\' clock.' % vm.name
try:
vm.run('date -u -R -s "%s"' % date_out, user="root",
gui=False, verbose=verbose)
vm.run_service("qubes.SetDateTime", user="root",
localcmd="date -u -Iseconds")
except Exception as e:
print >> sys.stderr, "ERROR syncing time in VM '%s': %s" % (vm.name, str(e))
pass

View File

@ -32,7 +32,7 @@ def main():
parser = OptionParser (usage)
parser.add_option ("--offline-mode", dest="offline_mode",
action="store_true", default=False,
help="Offline mode (ignored in this version)")
help="Offline mode")
(options, args) = parser.parse_args ()
if (len (args) != 1):

View File

@ -118,7 +118,12 @@ def main():
''')
fstrim_process.stdin.close()
    qubesutils.block_attach(qvm_collection, fstrim_vm,
        {
            'vm': 'dom0',
            'device': tvm.root_img,
            'mode': 'w',
        },
        mode='w',
        frontend='xvdi')

View File

@ -70,7 +70,7 @@ Requires: libvirt-python
%if x%{?backend_vmm} == xxen
Requires: xen-runtime
Requires: xen-hvm
Requires: libvirt-daemon-xen >= 1.2.20-4
%endif
Requires: createrepo
Requires: gnome-packagekit
@ -171,7 +171,6 @@ if ! grep -q ^qubes: /etc/group ; then
fi
%triggerin -- xen-runtime
sed -i 's/\/block /\/block.qubes /' /etc/udev/rules.d/xen-backend.rules
/usr/lib/qubes/fix-dir-perms.sh
%preun
@ -187,12 +186,12 @@ if [ "$1" = 0 ] ; then
chgrp root /etc/xen
chmod 700 /etc/xen
groupdel qubes
sed -i 's/\/block.qubes /\/block /' /etc/udev/rules.d/xen-backend.rules
fi
%files
%defattr(-,root,root,-)
%config(noreplace) %attr(0664,root,qubes) %{_sysconfdir}/qubes/qmemman.conf
%config(noreplace) %attr(0664,root,qubes) %{_sysconfdir}/qubes/storage.conf
/usr/bin/qvm-*
/usr/bin/qubes-*
/usr/bin/qmemmand

View File

@ -27,3 +27,7 @@ endif
cp regressions.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp run.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp run.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp storage.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp storage.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp storage_xen.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp storage_xen.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)

View File

@ -1,3 +1,412 @@
#!/usr/bin/python2 -O
# vim: fileencoding=utf-8
#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2014-2015
# Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
# Copyright (C) 2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import multiprocessing
import logging
import os
import shutil
import subprocess
import unittest
import lxml.etree
import sys
import qubes.backup
import qubes.qubes
import time
VMPREFIX = 'test-'
#: :py:obj:`True` if running in dom0, :py:obj:`False` otherwise
in_dom0 = False
#: :py:obj:`False` if outside of git repo,
#: path to root of the directory otherwise
in_git = False
try:
import libvirt
libvirt.openReadOnly(qubes.qubes.defaults['libvirt_uri']).close()
in_dom0 = True
except libvirt.libvirtError:
pass
try:
in_git = subprocess.check_output(
['git', 'rev-parse', '--show-toplevel'],
stderr=open(os.devnull, 'w')).strip()
except subprocess.CalledProcessError:
# git returned nonzero, we are outside git repo
pass
except OSError:
# command not found; let's assume we're outside
pass
def skipUnlessDom0(test_item):
'''Decorator that skips test outside dom0.
Some tests (especially integration tests) have to be run in more or less
working dom0. This is checked by connecting to libvirt.
''' # pylint: disable=invalid-name
return unittest.skipUnless(in_dom0, 'outside dom0')(test_item)
def skipUnlessGit(test_item):
'''Decorator that skips test outside git repo.
There are very few tests that can be run only in git. One example is
correctness of example code that won't get included in RPM.
''' # pylint: disable=invalid-name
return unittest.skipUnless(in_git, 'outside git tree')(test_item)
class _AssertNotRaisesContext(object):
"""A context manager used to implement TestCase.assertNotRaises methods.
Stolen from unittest and hacked. Regexp support stripped.
"""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
return True
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if issubclass(exc_type, self.expected):
raise self.failureException(
"{0} raised".format(exc_name))
else:
# pass through
return False
self.exception = exc_value # store for later retrieval
class BeforeCleanExit(BaseException):
pass
class QubesTestCase(unittest.TestCase):
'''Base class for Qubes unit tests.
'''
def __init__(self, *args, **kwargs):
super(QubesTestCase, self).__init__(*args, **kwargs)
self.longMessage = True
self.log = logging.getLogger('{}.{}.{}'.format(
self.__class__.__module__,
self.__class__.__name__,
self._testMethodName))
def __str__(self):
return '{}/{}/{}'.format(
'.'.join(self.__class__.__module__.split('.')[2:]),
self.__class__.__name__,
self._testMethodName)
def tearDown(self):
super(QubesTestCase, self).tearDown()
result = self._resultForDoCleanups
l = result.failures \
+ result.errors \
+ [(tc, None) for tc in result.unexpectedSuccesses]
if getattr(result, 'do_not_clean', False) \
and filter((lambda (tc, exc): tc is self), l):
raise BeforeCleanExit()
def assertNotRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail if an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertNotRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertNotRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertNotRaisesContext(excClass, self)
if callableObj is None:
return context
with context:
callableObj(*args, **kwargs)
def assertXMLEqual(self, xml1, xml2):
"""Check for equality of two XML objects.
:param xml1: first element
:param xml2: second element
:type xml1: :py:class:`lxml.etree._Element`
:type xml2: :py:class:`lxml.etree._Element`
""" # pylint: disable=invalid-name
self.assertEqual(xml1.tag, xml2.tag)
self.assertEqual(xml1.text, xml2.text)
self.assertItemsEqual(xml1.keys(), xml2.keys())
for key in xml1.keys():
self.assertEqual(xml1.get(key), xml2.get(key))
class SystemTestsMixin(object):
def setUp(self):
"""Set up the test.
.. warning::
This method instantiates QubesVmCollection and acquires the write lock
for it. You can use it as :py:attr:`qc`. You can (and probably
should) release the lock at the end of setUp in subclass
"""
super(SystemTestsMixin, self).setUp()
self.qc = qubes.qubes.QubesVmCollection()
self.qc.lock_db_for_writing()
self.qc.load()
self.conn = libvirt.open(qubes.qubes.defaults['libvirt_uri'])
self.remove_test_vms()
def tearDown(self):
super(SystemTestsMixin, self).tearDown()
# release the lock, because we have no way to check whether it was
# read or write lock
try:
self.qc.unlock_db()
except qubes.qubes.QubesException:
pass
self.kill_test_vms()
self.qc.lock_db_for_writing()
self.qc.load()
self.remove_test_vms()
self.qc.save()
self.qc.unlock_db()
del self.qc
self.conn.close()
def make_vm_name(self, name):
return VMPREFIX + name
def save_and_reload_db(self):
self.qc.save()
self.qc.unlock_db()
self.qc.lock_db_for_writing()
self.qc.load()
def kill_test_vms(self):
# do not keep write lock while killing VMs, because that may cause a
# deadlock with disk hotplug scripts (namely qvm-template-commit
# called when shutting down TemplateVm)
self.qc.lock_db_for_reading()
self.qc.load()
self.qc.unlock_db()
for vm in self.qc.values():
if vm.name.startswith(VMPREFIX):
if vm.is_running():
vm.force_shutdown()
def _remove_vm_qubes(self, vm):
vmname = vm.name
try:
# XXX .is_running() may throw libvirtError if undefined
if vm.is_running():
vm.force_shutdown()
except:
pass
try:
vm.remove_from_disk()
except:
pass
try:
vm.libvirt_domain.undefine()
except libvirt.libvirtError:
pass
self.qc.pop(vm.qid)
del vm
# Now ensure it really went away. This may not have happened,
# for example if vm.libvirtDomain malfunctioned.
try:
dom = self.conn.lookupByName(vmname)
except:
pass
else:
self._remove_vm_libvirt(dom)
self._remove_vm_disk(vmname)
def _remove_vm_libvirt(self, dom):
try:
dom.destroy()
except libvirt.libvirtError: # not running
pass
dom.undefine()
def _remove_vm_disk(self, vmname):
for dirspec in (
'qubes_appvms_dir',
'qubes_servicevms_dir',
'qubes_templates_dir'):
dirpath = os.path.join(qubes.qubes.system_path['qubes_base_dir'],
qubes.qubes.system_path[dirspec], vmname)
if os.path.exists(dirpath):
if os.path.isdir(dirpath):
shutil.rmtree(dirpath)
else:
os.unlink(dirpath)
def remove_vms(self, vms):
for vm in vms: self._remove_vm_qubes(vm)
self.save_and_reload_db()
def remove_test_vms(self):
"""Aggresively remove any domain that has name in testing namespace.
.. warning::
The test suite hereby claims any domain whose name starts with
:py:data:`VMPREFIX` as fair game. This is needed to enforce sane
test executing environment. If you have domains named ``test-*``,
don't run the tests.
"""
# first, remove them Qubes-way
something_removed = False
for vm in self.qc.values():
if vm.name.startswith(VMPREFIX):
self._remove_vm_qubes(vm)
something_removed = True
if something_removed:
self.save_and_reload_db()
# now remove what was only in libvirt
for dom in self.conn.listAllDomains():
if dom.name().startswith(VMPREFIX):
self._remove_vm_libvirt(dom)
# finally remove anything that is left on disk
vmnames = set()
for dirspec in (
'qubes_appvms_dir',
'qubes_servicevms_dir',
'qubes_templates_dir'):
dirpath = os.path.join(qubes.qubes.system_path['qubes_base_dir'],
qubes.qubes.system_path[dirspec])
for name in os.listdir(dirpath):
if name.startswith(VMPREFIX):
vmnames.add(name)
for vmname in vmnames:
self._remove_vm_disk(vmname)
def wait_for_window(self, title, timeout=30, show=True):
"""
Wait for a window with a given title. Depending on show parameter,
it will wait for either window to show or to disappear.
:param title: title of the window to wait for
:param timeout: timeout of the operation, in seconds
:param show: if True - wait for the window to be visible,
otherwise - to not be visible
:return: None
"""
wait_count = 0
while subprocess.call(['xdotool', 'search', '--name', title],
stdout=open(os.path.devnull, 'w'),
stderr=subprocess.STDOUT) == int(show):
wait_count += 1
if wait_count > timeout*10:
self.fail("Timeout while waiting for {} window to {}".format(
title, "show" if show else "hide")
)
time.sleep(0.1)
def enter_keys_in_window(self, title, keys):
"""
Search for window with given title, then enter listed keys there.
The function will wait for said window to appear.
:param title: title of window
:param keys: list of keys to enter, as for `xdotool key`
:return: None
"""
# 'xdotool search --sync' sometimes crashes on some race when
# accessing window properties
self.wait_for_window(title)
command = ['xdotool', 'search', '--name', title,
'windowactivate',
'key'] + keys
subprocess.check_call(command)
def shutdown_and_wait(self, vm, timeout=60):
vm.shutdown()
while timeout > 0:
if not vm.is_running():
return
time.sleep(1)
timeout -= 1
self.fail("Timeout while waiting for VM {} shutdown".format(vm.name))
class BackupTestsMixin(SystemTestsMixin):
def setUp(self):
super(BackupTestsMixin, self).setUp()
@ -7,39 +416,28 @@ class BackupTestsMixin(SystemTestsMixin):
if self.verbose:
print >>sys.stderr, "-> Creating backupvm"
# TODO: allow non-default template
self.backupvm = self.qc.add_new_vm("QubesAppVm",
name=self.make_vm_name('backupvm'),
template=self.qc.get_default_template())
self.backupvm.create_on_disk(verbose=self.verbose)
self.backupdir = os.path.join(os.environ["HOME"], "test-backup")
if os.path.exists(self.backupdir):
shutil.rmtree(self.backupdir)
os.mkdir(self.backupdir)
def tearDown(self):
super(BackupTestsMixin, self).tearDown()
shutil.rmtree(self.backupdir)
def print_progress(self, progress):
if self.verbose:
print >> sys.stderr, "\r-> Backing up files: {0}%...".format(progress)
def error_callback(self, message):
self.error_detected.put(message)
if self.verbose:
print >> sys.stderr, "ERROR: {0}".format(message)
def print_callback(self, msg):
if self.verbose:
print msg
def fill_image(self, path, size=None, sparse=False):
block_size = 4096
@ -58,17 +456,27 @@ class BackupTestsMixin(SystemTestsMixin):
f.close()
# NOTE: this was create_basic_vms
def create_backup_vms(self):
template=self.qc.get_default_template()
vms = []
vmname = self.make_vm_name('test-net')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testnet = self.qc.add_new_vm('QubesNetVm',
name=vmname, template=template)
testnet.create_on_disk(verbose=self.verbose)
vms.append(testnet)
self.fill_image(testnet.private_img, 20*1024*1024)
vmname = self.make_vm_name('test1')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm1 = self.qc.add_new_vm('QubesAppVm',
name=vmname, template=template)
testvm1.uses_default_netvm = False
testvm1.netvm = testnet
testvm1.create_on_disk(verbose=self.verbose)
vms.append(testvm1)
self.fill_image(testvm1.private_img, 100*1024*1024)
@ -85,9 +493,8 @@ class BackupTestsMixin(SystemTestsMixin):
return vms
def make_backup(self, vms, prepare_kwargs=dict(), do_kwargs=dict(),
                    target=None, expect_failure=False):
# XXX: backup_prepare and backup_do don't support host_collection
self.qc.unlock_db()
if target is None:
@ -98,20 +505,26 @@ class BackupTestsMixin(SystemTestsMixin):
print_callback=self.print_callback,
**prepare_kwargs)
except qubes.qubes.QubesException as e:
self.fail("QubesException during backup_prepare: %s" % str(e))
if not expect_failure:
self.fail("QubesException during backup_prepare: %s" % str(e))
else:
raise
try:
qubes.backup.backup_do(target, files_to_backup, "qubes",
progress_callback=self.print_progress,
**do_kwargs)
except qubes.qubes.QubesException as e:
self.fail("QubesException during backup_do: %s" % str(e))
if not expect_failure:
self.fail("QubesException during backup_do: %s" % str(e))
else:
raise
self.qc.lock_db_for_writing()
self.qc.load()
    def restore_backup(self, source=None, appvm=None, options=None,
                       expect_errors=None):
if source is None:
backupfile = os.path.join(self.backupdir,
sorted(os.listdir(self.backupdir))[-1])
@ -140,16 +553,43 @@ class BackupTestsMixin(SystemTestsMixin):
self.qc.load()
errors = []
if expect_errors is None:
expect_errors = []
while not self.error_detected.empty():
            current_error = self.error_detected.get()
            if any(map(current_error.startswith, expect_errors)):
                continue
            errors.append(current_error)
self.assertTrue(len(errors) == 0,
"Error(s) detected during backup_restore_do: %s" %
'\n'.join(errors))
if not appvm and not os.path.isdir(backupfile):
os.unlink(backupfile)
def create_sparse(self, path, size):
f = open(path, "w")
f.truncate(size)
f.close()
def load_tests(loader, tests, pattern):
# discard any tests from this module, because it hosts base classes
tests = unittest.TestSuite()
for modname in (
'qubes.tests.basic',
'qubes.tests.dom0_update',
'qubes.tests.network',
'qubes.tests.vm_qrexec_gui',
'qubes.tests.backup',
'qubes.tests.backupcompatibility',
'qubes.tests.regressions',
'qubes.tests.storage',
'qubes.tests.storage_xen',
):
tests.addTests(loader.loadTestsFromName(modname))
return tests
# vim: ts=4 sw=4 et

View File

@ -28,7 +28,7 @@ import os
import unittest
import sys
from qubes.qubes import QubesException, QubesTemplateVm
import qubes.tests
class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
@ -63,7 +63,6 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.restore_backup()
self.remove_vms(vms)
def test_004_sparse_multipart(self):
vms = []
@ -85,9 +84,84 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.restore_backup()
self.remove_vms(vms)
def test_005_compressed_custom(self):
vms = self.create_backup_vms()
self.make_backup(vms, do_kwargs={'compressed': "bzip2"})
self.remove_vms(vms)
self.restore_backup()
self.remove_vms(vms)
# TODO: iterate over templates
    def test_100_backup_dom0_no_restore(self):
self.make_backup([self.qc[0]])
# TODO: think of some safe way to test restore...
def test_200_restore_over_existing_directory(self):
"""
Regression test for #1386
:return:
"""
vms = self.create_backup_vms()
self.make_backup(vms)
self.remove_vms(vms)
test_dir = vms[0].dir_path
os.mkdir(test_dir)
with open(os.path.join(test_dir, 'some-file.txt'), 'w') as f:
f.write('test file\n')
self.restore_backup(
expect_errors=[
'*** Directory {} already exists! It has been moved'.format(
test_dir)
])
self.remove_vms(vms)
def test_210_auto_rename(self):
"""
Test for #869
:return:
"""
vms = self.create_backup_vms()
self.make_backup(vms)
self.restore_backup(options={
'rename-conflicting': True
})
for vm in vms:
self.assertIsNotNone(self.qc.get_vm_by_name(vm.name+'1'))
restored_vm = self.qc.get_vm_by_name(vm.name+'1')
if vm.netvm and not vm.uses_default_netvm:
self.assertEqual(restored_vm.netvm.name, vm.netvm.name+'1')
self.remove_vms(vms)
class TC_10_BackupVMMixin(qubes.tests.BackupTestsMixin):
def setUp(self):
super(TC_10_BackupVMMixin, self).setUp()
self.backupvm = self.qc.add_new_vm(
"QubesAppVm",
name=self.make_vm_name('backupvm'),
template=self.qc.get_vm_by_name(self.template)
)
self.backupvm.create_on_disk(verbose=self.verbose)
def test_100_send_to_vm_file_with_spaces(self):
vms = self.create_backup_vms()
self.backupvm.start()
self.backupvm.run("mkdir '/var/tmp/backup directory'", wait=True)
self.make_backup(vms,
do_kwargs={
'appvm': self.backupvm,
'compressed': True,
'encrypted': True},
target='/var/tmp/backup directory')
self.remove_vms(vms)
p = self.backupvm.run("ls /var/tmp/backup*/qubes-backup*",
passio_popen=True)
(backup_path, _) = p.communicate()
backup_path = backup_path.strip()
self.restore_backup(source=backup_path,
appvm=self.backupvm)
self.remove_vms(vms)
def test_110_send_to_vm_command(self):
vms = self.create_backup_vms()
self.backupvm.start()
self.make_backup(vms,
@ -100,3 +174,52 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.restore_backup(source='dd if=/var/tmp/backup-test',
appvm=self.backupvm)
self.remove_vms(vms)
def test_110_send_to_vm_no_space(self):
"""
        Check whether the backup properly reports failure when there is not
        enough space available.
:return:
"""
vms = self.create_backup_vms()
self.backupvm.start()
retcode = self.backupvm.run(
"truncate -s 50M /home/user/backup.img && "
"mkfs.ext4 -F /home/user/backup.img && "
"mkdir /home/user/backup && "
"mount /home/user/backup.img /home/user/backup -o loop &&"
"chmod 777 /home/user/backup",
user="root", wait=True)
if retcode != 0:
raise RuntimeError("Failed to prepare backup directory")
with self.assertRaises(QubesException):
self.make_backup(vms,
do_kwargs={
'appvm': self.backupvm,
'compressed': False,
'encrypted': True},
target='/home/user/backup',
expect_failure=True)
self.qc.lock_db_for_writing()
self.qc.load()
self.remove_vms(vms)
def load_tests(loader, tests, pattern):
try:
qc = qubes.qubes.QubesVmCollection()
qc.lock_db_for_reading()
qc.load()
qc.unlock_db()
templates = [vm.name for vm in qc.values() if
isinstance(vm, QubesTemplateVm)]
except OSError:
templates = []
for template in templates:
tests.addTests(loader.loadTestsFromTestCase(
type(
'TC_10_BackupVM_' + template,
(TC_10_BackupVMMixin, qubes.tests.QubesTestCase),
{'template': template})))
return tests

File diff suppressed because one or more lines are too long

View File

@ -34,8 +34,10 @@ VM_PREFIX = "test-"
@unittest.skipUnless(os.path.exists('/usr/bin/rpmsign') and
os.path.exists('/usr/bin/rpmbuild'),
'rpm-sign and/or rpm-build not installed')
class TC_00_Dom0UpgradeMixin(qubes.tests.SystemTestsMixin):
    """
    Tests for downloading dom0 updates using VMs based on different templates
    """
pkg_name = 'qubes-test-pkg'
dom0_update_common_opts = ['--disablerepo=*', '--enablerepo=test',
'--setopt=test.copy_local=1']
@@ -70,10 +72,9 @@ Expire-Date: 0
@classmethod
def setUpClass(cls):
super(TC_00_Dom0Upgrade, cls).setUpClass()
super(TC_00_Dom0UpgradeMixin, cls).setUpClass()
cls.tmpdir = tempfile.mkdtemp()
cls.cleanup_paths += [cls.tmpdir]
cls.keyid = cls.generate_key(cls.tmpdir)
@@ -95,17 +96,14 @@ enabled = 1
subprocess.check_call(['sudo', 'rm', '-f',
'/etc/yum.repos.d/test.repo'])
for dir in cls.cleanup_paths:
shutil.rmtree(dir)
cls.cleanup_paths = []
shutil.rmtree(cls.tmpdir)
def setUp(self):
self.qc = QubesVmCollection()
self.qc.lock_db_for_writing()
self.qc.load()
self.updatevm = self.qc.add_new_vm("QubesProxyVm",
name="%supdatevm" % VM_PREFIX,
template=self.qc.get_default_template())
super(TC_00_Dom0UpgradeMixin, self).setUp()
self.updatevm = self.qc.add_new_vm(
"QubesProxyVm",
name=self.make_vm_name("updatevm"),
template=self.qc.get_vm_by_name(self.template))
self.updatevm.create_on_disk(verbose=False)
self.saved_updatevm = self.qc.get_updatevm_vm()
self.qc.set_updatevm_vm(self.updatevm)
@@ -117,35 +115,13 @@ enabled = 1
os.path.join(self.tmpdir, 'pubkey.asc')])
self.updatevm.start()
def remove_vms(self, vms):
def tearDown(self):
self.qc.lock_db_for_writing()
self.qc.load()
self.qc.set_updatevm_vm(self.qc[self.saved_updatevm.qid])
for vm in vms:
if isinstance(vm, str):
vm = self.qc.get_vm_by_name(vm)
else:
vm = self.qc[vm.qid]
if vm.is_running():
try:
vm.force_shutdown()
except:
pass
try:
vm.remove_from_disk()
except OSError:
pass
self.qc.pop(vm.qid)
self.qc.save()
self.qc.unlock_db()
def tearDown(self):
vmlist = [vm for vm in self.qc.values() if vm.name.startswith(
VM_PREFIX)]
self.remove_vms(vmlist)
super(TC_00_Dom0UpgradeMixin, self).tearDown()
subprocess.call(['sudo', 'rpm', '-e', self.pkg_name], stderr=open(
os.devnull, 'w'))
@@ -202,7 +178,13 @@ Test package
p.stdin.write(open(filename).read())
p.stdin.close()
p.wait()
self.updatevm.run('cd /tmp/repo; createrepo .', wait=True)
retcode = self.updatevm.run('cd /tmp/repo; createrepo .', wait=True)
if retcode == 127:
self.skipTest("createrepo not installed in template {}".format(
self.template))
elif retcode != 0:
self.skipTest("createrepo failed with code {}, cannot perform the "
"test".format(retcode))
def test_000_update(self):
filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')
@@ -297,3 +279,23 @@ Test package
self.pkg_name)], stdout=open('/dev/null', 'w'))
self.assertEqual(retcode, 1,
'UNSIGNED package {}-1.0 installed'.format(self.pkg_name))
def load_tests(loader, tests, pattern):
try:
qc = qubes.qubes.QubesVmCollection()
qc.lock_db_for_reading()
qc.load()
qc.unlock_db()
templates = [vm.name for vm in qc.values() if
isinstance(vm, qubes.qubes.QubesTemplateVm)]
except OSError:
templates = []
for template in templates:
tests.addTests(loader.loadTestsFromTestCase(
type(
'TC_00_Dom0Upgrade_' + template,
(TC_00_Dom0UpgradeMixin, qubes.tests.QubesTestCase),
{'template': template})))
return tests

View File

@@ -320,12 +320,323 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
self.testvm1.start()
self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0)
self.testvm1.run("ip addr flush dev eth0", user="root")
self.testvm1.run("ip addr add 10.137.1.128/24 dev eth0", user="root")
self.testvm1.run("ip route add dev eth0", user="root")
self.testvm1.run("ip addr flush dev eth0", user="root", wait=True)
self.testvm1.run("ip addr add 10.137.1.128/24 dev eth0", user="root",
wait=True)
self.testvm1.run("ip route add default dev eth0", user="root",
wait=True)
self.assertNotEqual(self.run_cmd(self.testvm1, self.ping_ip), 0,
"Spoofed ping should be blocked")
class VmUpdatesMixin(qubes.tests.SystemTestsMixin):
"""
Tests for VM updates
"""
# packages are embedded below as base64 blobs so the tests also work
# when no package build tools are installed
"""
$ cat test-pkg.spec:
Name: test-pkg
Version: 1.0
Release: 1%{?dist}
Summary: Test package
Group: System
License: GPL
URL: http://example.com/
%description
Test package
%files
%changelog
$ rpmbuild -bb test-pkg.spec
$ cat test-pkg-1.0-1.fc21.x86_64.rpm | gzip | base64
"""
RPM_PACKAGE_GZIP_BASE64 = (
"H4sIAPzRLlYAA+2Y728URRjHn7ueUCkERKJVJDnTxLSxs7293o8WOER6ljYYrtKCLUSa3"
"bnZ64bd22VmTq8nr4wJbwxvjNHIG0x8oTHGGCHB8AcYE1/0lS80GgmQFCJU3wgB4ZjdfZ"
"q2xDe8NNlvMjfzmeeZH7tPbl98b35169cOUEpIJiTxT9SIrmVUs2hWh8dUAp54dOrM14s"
"JHK4D2DKl+j2qrVfjsuq3qEWbohjuAB2Lqk+p1o/8Z5QPmSi/YwnjezH+F8bLQZjqllW0"
"hvODRmFIL5hFk9JMXi/mi5ZuDleNwSEzP5wtmLnouNQnm3/6fndz7FLt9M/Hruj37gav4"
"tTjPnasWLFixYoVK1asWLFixYoV63+p0KNot9vnIPQc1vgYOwCSgXfxCoS+QzKHOVXVOj"
"Fn2ccIfI0k8nXkLuQbyJthxed4UrVnkG8i9yDfgsj3yCAv4foc8t+w1hf5B+Nl5Du43xj"
"yvxivIN9HpsgPkO2IU9uQfeRn8Xk/iJ4x1Y3nfxH1qecwfhH5+YgT25F7o/0SRdxvOppP"
"7MX9ZjB/DNnE/OOYX404uRGZIT+FbCFvQ3aQ8f0+/WF0XjJ8nyOw7H+BrmUA/a8pNZf2D"
"XrCqLG1cERbWHI8ajhznpBY9P0Tr8PkvJDMhTkp/Z0DA6xpuL7DNOq5A+DY9UYTmkOF2U"
"IO/sNt0wSnGvfdlZssD3rVIlLI9UUX37C6qXzHNntHPNfnTAhWHbUddtBwmegDjAUzZbu"
"m9lqZmzDmHc8Ik8WY8Tab4Myym4+Gx8V0qw8GtYyWIzrktEJwV9UHv3ktG471rAqHTmFQ"
"685V5uGqIalk06SWJr7tszR503Ac9cs493jJ8rhrSCIYbXBbzqt5v5+UZ0crh6bGR2dmJ"
"yuHD428VlLLLdakzJe2VxcKhFSFID73JKPS40RI7tXVCcQ3uOGWhPCJ2bAspiJ2i5Vy6n"
"jOqMerpEYpEe/Yks4xkU4Tt6BirmzUWanG6ozbFKhve9BsQRaLRTirzqk7hgUktXojKnf"
"n8jeg3X4QepP3i63po6oml+9t/CwJLya2Bn/ei6f7/4B3Ycdb0L3pt5Q5mNz16rWJ9fLk"
"vvOff/nxS7//8O2P2gvt7nDDnoV9L1du9N4+ucjl9u/8+a7dC5Nnvjlv9Ox5r+v9Cy0NE"
"m+c6rv60S/dZw98Gn6MNswcfQiWUvg3wBUAAA=="
)
"""
Minimal package generated by running dh_make on an empty directory,
then: cat test-pkg_1.0-1_amd64.deb | gzip | base64
"""
DEB_PACKAGE_GZIP_BASE64 = (
"H4sIACTXLlYAA1O0SSxKzrDjSklNykzM003KzEssqlRQUDA0MTG1NDQwNDVTUDBQAAEIa"
"WhgYGZioqBgogADCVxGegZcyfl5JUX5OXoliUV66VVE6DcwheuX7+ZgAAEW5rdXHb0PG4"
"iwf5j3WfMT6zWzzMuZgoE3jjYraNzbbFKWGms0SaRw/r2SV23WZ4IdP8preM4yqf0jt95"
"3c8qnacfNxJUkf9/w+/3X9ph2GEdgQdixrz/niHKKTnYXizf4oSC7tHOz2Zzq+/6vn8/7"
"ezQ7c1tmi7xZ3SGJ4yzhT2dcr7V+W3zM5ZPu/56PSv4Zdok+7Yv/V/6buWaKVlFkkV58S"
"N3GmLgnqzRmeZ3V3ymmurS5fGa85/LNx1bpZMin3S6dvXKqydp3ubP1vmyarJZb/qSh62"
"C8oIdxqm/BtvkGDza+On/Vfv2py7/0LV7VH+qR6a+bkKUbHXt5/SG187d+nps1a5PJfMO"
"i11dWcUe1HjwaW3Q5RHXn9LmcHy+tW9YcKf0768XVB1t3R0bKrzs5t9P+6r7rZ99svH10"
"+Q6F/o8tf1fO/32y+fWa14eifd+WxUy0jcxYH7N9/tUvmnUZL74pW32qLeuRU+ZwYGASa"
"GBgUWBgxM90ayy3VdmykkGDgYErJbEkERydFVWQmCMQo8aWZvAY/WteFRHFwMCYqXTPjI"
"lBkVEMGLsl+k8XP1D/z+gXyyDOvUemlnHqAVkvu0rRQ2fUFodkN3mtU9uwhqk8V+TqPEE"
"Nc7fzoQ4n71lqRs/7kbbT0+qOZuKH4r8mjzsc1k/YkCHN8Pjg48fbpE+teHa96LNcfu0V"
"5n2/Z2xa2KDvaCOx8cqBFxc514uZ3TmadXS+6cpzU7wSzq5SWfapJOD9n6wLXSwtlgxZh"
"xITzWW7buhx/bb291RcVlEfeC9K5hlrqunSzIMSZT7/Nqgc/qMvMNW227WI8ezB8mVuZh"
"0hERJSvysfburr4Dx0I9BW57UwR4+e1gxu49PcEt8sbK18Xpvt//Hj5UYm+Zc25q+T4xl"
"rJvxfVnh80oadq57OZxPaU1bbztv1yF365W4t45Yr+XrFzov237GVY1Zgf7NvE4+W2SuR"
"lQtLauR1TQ/mbOiIONYya6tU1jPGpWfk/i1+ttiXe3ZO14n0YOWggndznjGlGLyfVbBC6"
"MRP5aMM7aCco/s7sZqB8RlTQwADw8rnuT/sDHi7mUASjJFRAAbWwNLiAwAA"
)
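# Either blob can also be unpacked outside a VM; a minimal sketch that
# mirrors the in-VM "base64 -d | zcat" pipeline (works on Python 2 and 3;
# wbits=16+MAX_WBITS selects gzip framing):
#
#   import base64, zlib
#   rpm = zlib.decompress(
#       base64.b64decode(VmUpdatesMixin.RPM_PACKAGE_GZIP_BASE64),
#       16 + zlib.MAX_WBITS)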
def run_cmd(self, vm, cmd, user="root"):
p = vm.run(cmd, user=user, passio_popen=True, ignore_stderr=True)
p.stdin.close()
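# drain stdout so the guest command cannot block on a full pipe
# before wait()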
p.stdout.read()
return p.wait()
def setUp(self):
super(VmUpdatesMixin, self).setUp()
self.update_cmd = None
if self.template.count("debian"):
self.update_cmd = "set -o pipefail; apt-get update 2>&1 | " \
"{ ! grep '^W:\|^E:'; }"
self.install_cmd = "apt-get install -y {}"
self.install_test_cmd = "dpkg -l {}"
self.exit_code_ok = [0]
elif self.template.count("fedora"):
cmd = "yum"
try:
# assume template name in form "fedora-XX-suffix"
if int(self.template.split("-")[1]) > 21:
cmd = "dnf"
except ValueError:
pass
self.update_cmd = "{cmd} clean all; {cmd} check-update".format(
cmd=cmd)
self.install_cmd = cmd + " install -y {}"
self.install_test_cmd = "rpm -q {}"
self.exit_code_ok = [0, 100]
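# yum/dnf check-update exits with 100 when updates are available,
# hence 100 is accepted above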
else:
self.skipTest("Template {} not supported by this test".format(
self.template))
self.testvm1 = self.qc.add_new_vm(
"QubesAppVm",
name=self.make_vm_name('vm1'),
template=self.qc.get_vm_by_name(self.template))
self.testvm1.create_on_disk(verbose=False)
def test_000_simple_update(self):
self.save_and_reload_db()
self.qc.unlock_db()
# reload the VM to have all the properties properly set (especially
# default netvm)
self.testvm1 = self.qc[self.testvm1.qid]
self.testvm1.start()
p = self.testvm1.run(self.update_cmd, wait=True, user="root",
passio_popen=True, passio_stderr=True)
(stdout, stderr) = p.communicate()
self.assertIn(p.wait(), self.exit_code_ok,
"{}: {}\n{}".format(self.update_cmd, stdout, stderr)
)
def create_repo_apt(self):
pkg_file_name = "test-pkg_1.0-1_amd64.deb"
p = self.netvm_repo.run("mkdir /tmp/apt-repo && cd /tmp/apt-repo &&"
"base64 -d | zcat > {}".format(pkg_file_name),
passio_popen=True)
p.stdin.write(self.DEB_PACKAGE_GZIP_BASE64)
p.stdin.close()
if p.wait() != 0:
raise RuntimeError("Failed to write {}".format(pkg_file_name))
# do not assume dpkg-scanpackages is installed
packages_path = "dists/test/main/binary-amd64/Packages"
p = self.netvm_repo.run(
"mkdir -p /tmp/apt-repo/dists/test/main/binary-amd64 && "
"cd /tmp/apt-repo && "
"cat > {packages} && "
"echo MD5sum: $(openssl md5 -r {pkg} | cut -f 1 -d ' ')"
" >> {packages} && "
"echo SHA1: $(openssl sha1 -r {pkg} | cut -f 1 -d ' ')"
" >> {packages} && "
"echo SHA256: $(openssl sha256 -r {pkg} | cut -f 1 -d ' ')"
" >> {packages} && "
"gzip < {packages} > {packages}.gz".format(pkg=pkg_file_name,
packages=packages_path),
passio_popen=True, passio_stderr=True)
p.stdin.write(
"Package: test-pkg\n"
"Version: 1.0-1\n"
"Architecture: amd64\n"
"Maintainer: unknown <user@host>\n"
"Installed-Size: 25\n"
"Filename: {pkg}\n"
"Size: 994\n"
"Section: unknown\n"
"Priority: optional\n"
"Description: Test package\n".format(pkg=pkg_file_name)
)
p.stdin.close()
if p.wait() != 0:
raise RuntimeError("Failed to write Packages file: {}".format(
p.stderr.read()))
p = self.netvm_repo.run(
"mkdir -p /tmp/apt-repo/dists/test && "
"cd /tmp/apt-repo/dists/test && "
"cat > Release <<EOF && "
"echo '' $(sha1sum {p} | cut -f 1 -d ' ') $(stat -c %s {p}) {p}"
" >> Release && "
"echo '' $(sha1sum {z} | cut -f 1 -d ' ') $(stat -c %s {z}) {z}"
" >> Release"
.format(p="main/binary-amd64/Packages",
z="main/binary-amd64/Packages.gz"),
passio_popen=True, passio_stderr=True
)
p.stdin.write(
"Label: Test repo\n"
"Suite: test\n"
"Codename: test\n"
"Date: Tue, 27 Oct 2015 03:22:09 +0100\n"
"Architectures: amd64\n"
"Components: main\n"
"SHA1:\n"
"EOF\n"
)
p.stdin.close()
if p.wait() != 0:
raise RuntimeError("Failed to write Release file: {}".format(
p.stderr.read()))
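# The tree served over HTTP then looks like this (derived from the
# commands above):
#
#   /tmp/apt-repo/
#       test-pkg_1.0-1_amd64.deb
#       dists/test/Release
#       dists/test/main/binary-amd64/Packages
#       dists/test/main/binary-amd64/Packages.gz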
def create_repo_yum(self):
pkg_file_name = "test-pkg-1.0-1.fc21.x86_64.rpm"
p = self.netvm_repo.run("mkdir /tmp/yum-repo && cd /tmp/yum-repo &&"
"base64 -d | zcat > {}".format(pkg_file_name),
passio_popen=True, passio_stderr=True)
p.stdin.write(self.RPM_PACKAGE_GZIP_BASE64)
p.stdin.close()
if p.wait() != 0:
raise RuntimeError("Failed to write {}: {}".format(pkg_file_name,
p.stderr.read()))
# createrepo is installed by default in Fedora template
p = self.netvm_repo.run("createrepo /tmp/yum-repo",
passio_popen=True,
passio_stderr=True)
if p.wait() != 0:
raise RuntimeError("Failed to create yum metadata: {}".format(
p.stderr.read()))
def create_repo_and_serve(self):
if self.template.count("debian") or self.template.count("whonix"):
self.create_repo_apt()
self.netvm_repo.run("cd /tmp/apt-repo &&"
"python -m SimpleHTTPServer 8080")
elif self.template.count("fedora"):
self.create_repo_yum()
self.netvm_repo.run("cd /tmp/yum-repo &&"
"python -m SimpleHTTPServer 8080")
else:
# not reachable...
self.skipTest("Template {} not supported by this test".format(
self.template))
def configure_test_repo(self):
"""
Configure the test repository in test-vm and disable the rest of them.
The critical part is to use "localhost" - this will work only when
accessed through the update proxy, which is exactly what we want to
test here.
"""
if self.template.count("debian") or self.template.count("whonix"):
self.testvm1.run(
"rm -f /etc/apt/sources.list.d/* &&"
"echo 'deb [trusted=yes] http://localhost:8080 test main' "
"> /etc/apt/sources.list",
user="root")
elif self.template.count("fedora"):
self.testvm1.run(
"rm -f /etc/yum.repos.d/*.repo &&"
"echo '[test]' > /etc/yum.repos.d/test.repo &&"
"echo 'name=Test repo' >> /etc/yum.repos.d/test.repo &&"
"echo 'gpgcheck=0' >> /etc/yum.repos.d/test.repo &&"
"echo 'baseurl=http://localhost:8080/'"
" >> /etc/yum.repos.d/test.repo",
user="root"
)
else:
# not reachable...
self.skipTest("Template {} not supported by this test".format(
self.template))
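# For the Fedora branch above, /etc/yum.repos.d/test.repo ends up as:
#
#   [test]
#   name=Test repo
#   gpgcheck=0
#   baseurl=http://localhost:8080/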
def test_010_update_via_proxy(self):
"""
Test both whether the updates proxy works and whether it is actually
used by the VM
"""
if self.template.count("minimal"):
self.skipTest("Template {} not supported by this test".format(
self.template))
self.netvm_repo = self.qc.add_new_vm(
"QubesNetVm",
name=self.make_vm_name('net'),
template=self.qc.get_vm_by_name(self.template))
self.netvm_repo.create_on_disk(verbose=False)
self.testvm1.netvm = self.netvm_repo
# NetVM should have qubes-updates-proxy enabled by default
#self.netvm_repo.services['qubes-updates-proxy'] = True
# TODO: consider also adding a test for the template itself
self.testvm1.services['updates-proxy-setup'] = True
self.qc.save()
self.qc.unlock_db()
# Setup test repo
self.netvm_repo.start()
self.create_repo_and_serve()
# Configure local repo
self.testvm1.start()
self.configure_test_repo()
# update repository metadata
p = self.testvm1.run(self.update_cmd, wait=True, user="root",
passio_popen=True, passio_stderr=True)
(stdout, stderr) = p.communicate()
self.assertIn(p.wait(), self.exit_code_ok,
"{}: {}\n{}".format(self.update_cmd, stdout, stderr)
)
# install test package
p = self.testvm1.run(self.install_cmd.format('test-pkg'),
wait=True, user="root",
passio_popen=True, passio_stderr=True)
(stdout, stderr) = p.communicate()
self.assertIn(p.wait(), self.exit_code_ok,
"{}: {}\n{}".format(self.update_cmd, stdout, stderr)
)
# verify if it was really installed
p = self.testvm1.run(self.install_test_cmd.format('test-pkg'),
wait=True, user="root",
passio_popen=True, passio_stderr=True)
(stdout, stderr) = p.communicate()
self.assertIn(p.wait(), self.exit_code_ok,
"{}: {}\n{}".format(self.update_cmd, stdout, stderr)
)
def load_tests(loader, tests, pattern):
try:
@@ -343,4 +654,9 @@ def load_tests(loader, tests, pattern):
'VmNetworking_' + template,
(VmNetworkingMixin, qubes.tests.QubesTestCase),
{'template': template})))
tests.addTests(loader.loadTestsFromTestCase(
type(
'VmUpdates_' + template,
(VmUpdatesMixin, qubes.tests.QubesTestCase),
{'template': template})))
return tests

View File

@@ -1,4 +1,5 @@
#!/usr/bin/python2 -O
# coding=utf-8
#
# The Qubes OS Project, https://www.qubes-os.org/
@@ -23,11 +24,14 @@
#
import multiprocessing
import os
import time
import unittest
import qubes.qubes
import qubes.tests
import subprocess
class TC_00_Regressions(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
# Bug: #906
@@ -56,3 +60,22 @@ class TC_00_Regressions(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase)
self.assertIsNotNone(qc.get_vm_by_name(vmname1))
self.assertIsNotNone(qc.get_vm_by_name(vmname2))
def test_bug_1389_dispvm_qubesdb_crash(self):
"""
Sometimes the QubesDB instance in a DispVM crashes at startup.
Unfortunately we don't have a reliable way to reproduce it, so try twice
:return:
"""
self.qc.unlock_db()
for try_no in xrange(2):
p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
'qubes.VMShell', 'dom0', 'DEFAULT'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=open(os.devnull, 'w'))
p.stdin.write("qubesdb-read /name || echo ERROR\n")
dispvm_name = p.stdout.readline()
p.stdin.close()
self.assertTrue(dispvm_name.startswith("disp"),
"Try {} failed".format(try_no))

tests/storage.py Normal file (77 lines)
View File

@@ -0,0 +1,77 @@
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2015 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import qubes.storage
from qubes.qubes import defaults
from qubes.storage.xen import XenPool, XenStorage
from qubes.tests import QubesTestCase, SystemTestsMixin
class TC_00_Storage(SystemTestsMixin, QubesTestCase):
""" This class tests the utility methods from :mod:``qubes.storage`` """
def test_000_dump(self):
""" Dumps storage instance to a storage string """
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
vm = self.qc.add_new_vm('QubesAppVm', name=vmname,
pool_name='default', template=template)
storage = vm.storage
result = qubes.storage.dump(storage)
expected = 'qubes.storage.xen.XenStorage'
self.assertEquals(result, expected)
def test_001_load(self):
""" Loads storage driver from a storage string """
result = qubes.storage.load('qubes.storage.xen.XenStorage')
self.assertTrue(result is XenStorage)
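# dump() and load() are symmetric, so a round trip should hand back
# the original class; a sketch, assuming vm.storage is a XenStorage:
#
#   qubes.storage.load(qubes.storage.dump(vm.storage)) is XenStorage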
def test_002_default_pool_drivers(self):
""" The only predifined pool driver is xen """
result = defaults['pool_drivers'].keys()
expected = ["xen"]
self.assertEquals(result, expected)
def test_003_get_pool_klass(self):
""" Expect the default pool to be `XenPool` """
result = qubes.storage._get_pool_klass('default')
self.assertTrue(result is XenPool)
def test_004_pool_exists_default(self):
""" Expect the default pool to exists """
self.assertTrue(qubes.storage.pool_exists('default'))
def test_005_pool_exists_random(self):
""" Expect this pool to not a exist """
self.assertFalse(
qubes.storage.pool_exists('asdh312096r832598213iudhas'))
def test_006_add_remove_pool(self):
""" Tries to adding and removing a pool. """
pool_name = 'asdjhrp89132'
# make sure it really does not exist
qubes.storage.remove_pool(pool_name)
qubes.storage.add_pool(pool_name, driver='xen')
self.assertTrue(qubes.storage.pool_exists(pool_name))
qubes.storage.remove_pool(pool_name)
self.assertFalse(qubes.storage.pool_exists(pool_name))

tests/storage_xen.py Normal file (228 lines)
View File

@@ -0,0 +1,228 @@
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2015 Bahtiar `kalkin-` Gadimov <bahtiar@gadimov.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import shutil
import qubes.storage
from qubes.tests import QubesTestCase, SystemTestsMixin
from qubes.storage.xen import XenStorage
class TC_00_XenPool(SystemTestsMixin, QubesTestCase):
""" This class tests some properties of the 'default' pool. """
def test_000_default_pool_dir(self):
""" The predefined dir for the default pool should be ``/var/lib/qubes``
.. seealso::
:data:`qubes.qubes.defaults['pool_config']`
"""
vm = self._init_app_vm()
result = qubes.storage.get_pool("default", vm).dir_path
expected = '/var/lib/qubes/'
self.assertEquals(result, expected)
def test_001_default_storage_class(self):
""" Check when using default pool the Storage is ``XenStorage``. """
result = self._init_app_vm().storage
self.assertIsInstance(result, XenStorage)
def test_002_default_pool_name(self):
""" Default pool_name is 'default'. """
vm = self._init_app_vm()
self.assertEquals(vm.pool_name, "default")
def _init_app_vm(self):
""" Return initalised, but not created, AppVm. """
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
return self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
pool_name='default')
class TC_01_XenPool(SystemTestsMixin, QubesTestCase):
""" Test the paths for the default Xen file based storage (``XenStorage``).
"""
POOL_DIR = '/var/lib/qubes/test-pool'
APPVMS_DIR = '/var/lib/qubes/test-pool/appvms'
TEMPLATES_DIR = '/var/lib/qubes/test-pool/vm-templates'
SERVICE_DIR = '/var/lib/qubes/test-pool/servicevms'
def setUp(self):
""" Add a test file based storage pool """
super(TC_01_XenPool, self).setUp()
qubes.storage.add_pool('test-pool', driver='xen',
dir_path=self.POOL_DIR)
def tearDown(self):
""" Remove the file based storage pool after testing """
super(TC_01_XenPool, self).tearDown()
qubes.storage.remove_pool("test-pool")
shutil.rmtree(self.POOL_DIR, ignore_errors=True)
def test_001_pool_exists(self):
""" Check if the storage pool was added to the storage pool config """
self.assertTrue(qubes.storage.pool_exists('test-pool'))
def test_002_pool_dir_create(self):
""" Check if the storage pool dir and subdirs were created """
# The dir should not exist beforehand
self.assertFalse(os.path.exists(self.POOL_DIR))
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
pool_name='test-pool')
self.assertTrue(os.path.exists(self.POOL_DIR))
self.assertTrue(os.path.exists(self.APPVMS_DIR))
self.assertTrue(os.path.exists(self.SERVICE_DIR))
self.assertTrue(os.path.exists(self.TEMPLATES_DIR))
def test_003_pool_dir(self):
""" Check if the vm storage pool_dir is the same as specified """
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
pool_name='test-pool')
result = qubes.storage.get_pool('test-pool', vm).dir_path
self.assertEquals(self.POOL_DIR, result)
def test_004_app_vmdir(self):
""" Check the vm storage dir for an AppVm"""
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
pool_name='test-pool')
expected = os.path.join(self.APPVMS_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_005_hvm_vmdir(self):
""" Check the vm storage dir for a HVM"""
vmname = self.make_vm_name('hvm')
vm = self.qc.add_new_vm('QubesHVm', name=vmname,
pool_name='test-pool')
expected = os.path.join(self.APPVMS_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_006_net_vmdir(self):
""" Check the vm storage dir for a Netvm"""
vmname = self.make_vm_name('hvm')
vm = self.qc.add_new_vm('QubesNetVm', name=vmname,
pool_name='test-pool')
expected = os.path.join(self.SERVICE_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_007_proxy_vmdir(self):
""" Check the vm storage dir for a ProxyVm"""
vmname = self.make_vm_name('proxyvm')
vm = self.qc.add_new_vm('QubesProxyVm', name=vmname,
pool_name='test-pool')
expected = os.path.join(self.SERVICE_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_008_admin_vmdir(self):
""" Check the vm storage dir for a AdminVm"""
# TODO How to test AdminVm?
pass
def test_009_template_vmdir(self):
""" Check the vm storage dir for a TemplateVm"""
vmname = self.make_vm_name('templatevm')
vm = self.qc.add_new_vm('QubesTemplateVm', name=vmname,
pool_name='test-pool')
expected = os.path.join(self.TEMPLATES_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_010_template_hvm_vmdir(self):
""" Check the vm storage dir for a TemplateHVm"""
vmname = self.make_vm_name('templatehvm')
vm = self.qc.add_new_vm('QubesTemplateHVm', name=vmname,
pool_name='test-pool')
expected = os.path.join(self.TEMPLATES_DIR, vm.name)
result = vm.storage.vmdir
self.assertEquals(expected, result)
def test_011_appvm_file_images(self):
""" Check if all the needed image files are created for an AppVm"""
vmname = self.make_vm_name('appvm')
template = self.qc.get_default_template()
vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
pool_name='test-pool')
vm.create_on_disk(verbose=False)
expected_vmdir = os.path.join(self.APPVMS_DIR, vm.name)
self.assertEqualsAndExists(vm.storage.vmdir, expected_vmdir)
expected_private_path = os.path.join(expected_vmdir, 'private.img')
self.assertEqualsAndExists(vm.storage.private_img,
expected_private_path)
expected_volatile_path = os.path.join(expected_vmdir, 'volatile.img')
self.assertEqualsAndExists(vm.storage.volatile_img,
expected_volatile_path)
def test_012_hvm_file_images(self):
""" Check if all the needed image files are created for a HVm"""
vmname = self.make_vm_name('hvm')
vm = self.qc.add_new_vm('QubesHVm', name=vmname,
pool_name='test-pool')
vm.create_on_disk(verbose=False)
expected_vmdir = os.path.join(self.APPVMS_DIR, vm.name)
self.assertEqualsAndExists(vm.storage.vmdir, expected_vmdir)
expected_private_path = os.path.join(expected_vmdir, 'private.img')
self.assertEqualsAndExists(vm.storage.private_img,
expected_private_path)
expected_root_path = os.path.join(expected_vmdir, 'root.img')
self.assertEqualsAndExists(vm.storage.root_img, expected_root_path)
expected_volatile_path = os.path.join(expected_vmdir, 'volatile.img')
self.assertEqualsAndExists(vm.storage.volatile_img,
expected_volatile_path)
def assertEqualsAndExists(self, result_path, expected_path):
""" Check if the ``result_path``, matches ``expected_path`` and exists.
See also: :meth:``assertExist``
"""
self.assertEquals(result_path, expected_path)
self.assertExist(result_path)
def assertExist(self, path):
""" Assert that the given path exists. """
self.assertTrue(os.path.exists(path))

File diff suppressed because it is too large

View File

@@ -1 +1 @@
3.0.14
3.1.11

View File

@@ -9,8 +9,6 @@
<loader>hvmloader</loader>
<boot dev='cdrom'/>
<boot dev='hd'/>
<!-- server_ip is the address of stubdomain. It hosts it's own DNS server. -->
{disable_network1}<cmdline>-net lwip,client_ip={ip},server_ip={dns2},dns={dns1},gw={gateway},netmask={netmask}</cmdline>{disable_network2}
</os>
<features>
<pae/>
@@ -23,14 +21,18 @@
<on_reboot>destroy</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator type='stubdom'/>
{no_network_begin}<emulator type='stubdom'/>{no_network_end}
<!-- server_ip is the address of stubdomain. It hosts it's own DNS server. -->
{network_begin}<emulator type='stubdom' cmdline='-net lwip,client_ip={ip},server_ip={dns2},dns={dns1},gw={gateway},netmask={netmask}'/>{network_end}
{rootdev}
{privatedev}
{otherdevs}
{netdev}
{pcidevs}
<input type='tablet' bus='usb'/>
<video type='vga'/>
<video type='vga'>
<model type='xen' vram='16384'/>
</video>
</devices>
</domain>