diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..b99d8d48
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,9 @@
+sudo: required
+dist: trusty
+language: generic
+install: git clone https://github.com/QubesOS/qubes-builder ~/qubes-builder
+# debootstrap in trusty is old...
+before_script: sudo ln -s sid /usr/share/debootstrap/scripts/stretch
+script: ~/qubes-builder/scripts/travis-build
+env:
+ - DIST_DOM0=fc23 USE_QUBES_REPO_VERSION=3.2 USE_QUBES_REPO_TESTING=1
diff --git a/Makefile b/Makefile
index 2282173e..545e8089 100644
--- a/Makefile
+++ b/Makefile
@@ -72,12 +72,15 @@ endif
mkdir -p $(DESTDIR)/usr/libexec/qubes
cp qubes-rpc-policy/qubes.Filecopy.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.Filecopy
cp qubes-rpc-policy/qubes.OpenInVM.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.OpenInVM
+ cp qubes-rpc-policy/qubes.OpenURL.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.OpenURL
cp qubes-rpc-policy/qubes.VMShell.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.VMShell
cp qubes-rpc-policy/qubes.NotifyUpdates.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.NotifyUpdates
cp qubes-rpc-policy/qubes.NotifyTools.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.NotifyTools
cp qubes-rpc-policy/qubes.GetImageRGBA.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.GetImageRGBA
+ cp qubes-rpc-policy/qubes.GetRandomizedTime.policy $(DESTDIR)/etc/qubes-rpc/policy/qubes.GetRandomizedTime
cp qubes-rpc/qubes.NotifyUpdates $(DESTDIR)/etc/qubes-rpc/
cp qubes-rpc/qubes.NotifyTools $(DESTDIR)/etc/qubes-rpc/
+ cp qubes-rpc/qubes.GetRandomizedTime $(DESTDIR)/etc/qubes-rpc/
cp qubes-rpc/qubes-notify-updates $(DESTDIR)/usr/libexec/qubes/
cp qubes-rpc/qubes-notify-tools $(DESTDIR)/usr/libexec/qubes/
mkdir -p "$(DESTDIR)$(FILESDIR)"
diff --git a/core-modules/000QubesVm.py b/core-modules/000QubesVm.py
index f7411a97..3c0760a6 100644
--- a/core-modules/000QubesVm.py
+++ b/core-modules/000QubesVm.py
@@ -26,6 +26,7 @@ import datetime
import base64
import hashlib
import logging
+import grp
import lxml.etree
import os
import os.path
@@ -37,15 +38,16 @@ import time
import uuid
import xml.parsers.expat
import signal
+import pwd
from qubes import qmemman
from qubes import qmemman_algo
import libvirt
-import warnings
from qubes.qubes import dry_run,vmm
from qubes.qubes import register_qubes_vm_class
from qubes.qubes import QubesVmCollection,QubesException,QubesHost,QubesVmLabels
from qubes.qubes import defaults,system_path,vm_files,qubes_max_qid
+from qubes.storage import get_pool
qmemman_present = False
try:
@@ -109,6 +111,7 @@ class QubesVm(object):
"name": { "order": 1 },
"uuid": { "order": 0, "eval": 'uuid.UUID(value) if value else None' },
"dir_path": { "default": None, "order": 2 },
+ "pool_name": { "default":"default" },
"conf_file": {
"func": lambda value: self.absolute_path(value, self.name +
".conf"),
@@ -133,9 +136,10 @@ class QubesVm(object):
eval(value) if value.find("[") >= 0 else
eval("[" + value + "]") },
"pci_strictreset": {"default": True},
+ "pci_e820_host": {"default": True},
# Internal VM (not shown in qubes-manager, doesn't create appmenus entries
"internal": { "default": False, 'attr': '_internal' },
- "vcpus": { "default": None },
+ "vcpus": { "default": 2 },
"uses_default_kernel": { "default": True, 'order': 30 },
"uses_default_kernelopts": { "default": True, 'order': 30 },
"kernel": {
@@ -198,7 +202,8 @@ class QubesVm(object):
'kernelopts', 'services', 'installed_by_rpm',\
'uses_default_netvm', 'include_in_backups', 'debug',\
'qrexec_timeout', 'autostart', 'uses_default_dispvm_netvm',
- 'backup_content', 'backup_size', 'backup_path' ]:
+ 'backup_content', 'backup_size', 'backup_path', 'pool_name',\
+ 'pci_e820_host']:
attrs[prop]['save'] = lambda prop=prop: str(getattr(self, prop))
# Simple paths
for prop in ['conf_file', 'firewall_conf']:
@@ -326,11 +331,6 @@ class QubesVm(object):
if self.maxmem > self.memory * 10:
self.maxmem = self.memory * 10
- # By default allow use all VCPUs
- if self.vcpus is None and not vmm.offline_mode:
- qubes_host = QubesHost()
- self.vcpus = qubes_host.no_cpus
-
# Always set if meminfo-writer should be active or not
if 'meminfo-writer' not in self.services:
self.services['meminfo-writer'] = not (len(self.pcidevs) > 0)
@@ -345,7 +345,11 @@ class QubesVm(object):
self.services['qubes-update-check'] = False
# Initialize VM image storage class
- self.storage = defaults["storage_class"](self)
+ self.storage = get_pool(self.pool_name, self).getStorage()
+ self.dir_path = self.storage.vmdir
+ self.icon_path = os.path.join(self.storage.vmdir, 'icon.png')
+ self.conf_file = os.path.join(self.storage.vmdir, self.name + '.conf')
+
if hasattr(self, 'kernels_dir'):
modules_path = os.path.join(self.kernels_dir,
"modules.img")
@@ -561,6 +565,10 @@ class QubesVm(object):
return False
if len(name) > 31:
return False
+ if name == 'lost+found':
+ # avoid conflict when /var/lib/qubes/appvms is mounted on
+ # separate partition
+ return False
return re.match(r"^[a-zA-Z][a-zA-Z0-9_.-]*$", name) is not None
def pre_rename(self, new_name):
@@ -582,9 +590,18 @@ class QubesVm(object):
if self.installed_by_rpm:
raise QubesException("Cannot rename VM installed by RPM -- first clone VM and then use yum to remove package.")
+ assert self._collection is not None
+ if self._collection.get_vm_by_name(name):
+ raise QubesException("VM with this name already exists")
+
self.pre_rename(name)
- if self.libvirt_domain:
+ try:
self.libvirt_domain.undefine()
+ except libvirt.libvirtError as e:
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ pass
+ else:
+ raise
if self._qdb_connection:
self._qdb_connection.close()
self._qdb_connection = None
@@ -714,6 +731,8 @@ class QubesVm(object):
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return -1
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
@@ -766,9 +785,13 @@ class QubesVm(object):
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
# libxl_domain_info failed - domain no longer exists
- elif e.get_error_code() == libvirt.VIR_INTERNAL_ERROR:
+ elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
+ return 0
+ elif e.get_error_code() is None: # unknown...
return 0
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
def get_cputime(self):
@@ -783,9 +806,13 @@ class QubesVm(object):
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
# libxl_domain_info failed - domain no longer exists
- elif e.get_error_code() == libvirt.VIR_INTERNAL_ERROR:
+ elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
+ return 0
+ elif e.get_error_code() is None: # unknown...
return 0
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
def get_mem_static_max(self):
@@ -827,6 +854,8 @@ class QubesVm(object):
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return 0
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
def get_disk_utilization_root_img(self):
@@ -901,7 +930,14 @@ class QubesVm(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return False
+ # libxl_domain_info failed - domain no longer exists
+ elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
+ return False
+ elif e.get_error_code() is None: # unknown...
+ return False
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
def is_paused(self):
@@ -913,7 +949,14 @@ class QubesVm(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return False
+ # libxl_domain_info failed - domain no longer exists
+ elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
+ return False
+ elif e.get_error_code() is None: # unknown...
+ return False
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
def get_start_time(self):
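The hunks above and below repeat one error-handling pattern for libvirt calls. A minimal sketch of that pattern as a standalone helper (the helper name is hypothetical, not part of the patch):

    import sys
    import libvirt

    def domain_info_or(libvirt_domain, default):
        # Treat a vanished domain (VIR_ERR_NO_DOMAIN), a libxl internal error
        # and an unknown (None) error code as "domain gone" and return the
        # caller-provided default; log and re-raise anything else.
        try:
            return libvirt_domain.info()
        except libvirt.libvirtError as e:
            if e.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
                                      libvirt.VIR_ERR_INTERNAL_ERROR,
                                      None):
                return default
            print >>sys.stderr, "libvirt error code: {!r}".format(
                e.get_error_code())
            raise
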
@@ -924,7 +967,7 @@ class QubesVm(object):
uuid = self.uuid
start_time = vmm.xs.read('', "/vm/%s/start_time" % str(uuid))
- if start_time != '':
+ if start_time:
return datetime.datetime.fromtimestamp(float(start_time))
else:
return None
@@ -1061,6 +1104,7 @@ class QubesVm(object):
if self.is_netvm():
self.qdb.write("/qubes-netvm-gateway", self.gateway)
+ self.qdb.write("/qubes-netvm-primary-dns", self.gateway)
self.qdb.write("/qubes-netvm-secondary-dns", self.secondary_dns)
self.qdb.write("/qubes-netvm-netmask", self.netmask)
self.qdb.write("/qubes-netvm-network", self.network)
@@ -1069,6 +1113,7 @@ class QubesVm(object):
self.qdb.write("/qubes-ip", self.ip)
self.qdb.write("/qubes-netmask", self.netvm.netmask)
self.qdb.write("/qubes-gateway", self.netvm.gateway)
+ self.qdb.write("/qubes-primary-dns", self.netvm.gateway)
self.qdb.write("/qubes-secondary-dns", self.netvm.secondary_dns)
tzname = self.get_timezone()
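For reference, a minimal sketch of reading the new DNS entries from inside a VM (the QubesDB Python binding is assumed; the key paths are the ones written above):

    from qubesdb import QubesDB

    qdb = QubesDB()
    primary_dns = qdb.read('/qubes-primary-dns')      # the netvm gateway
    secondary_dns = qdb.read('/qubes-secondary-dns')
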
@@ -1148,6 +1193,7 @@ class QubesVm(object):
# If dynamic memory management disabled, set maxmem=mem
args['maxmem'] = args['mem']
args['vcpus'] = str(self.vcpus)
+ args['features'] = ''
if self.netvm is not None:
args['ip'] = self.ip
args['mac'] = self.mac
@@ -1156,8 +1202,10 @@ class QubesVm(object):
args['dns2'] = self.secondary_dns
args['netmask'] = self.netmask
args['netdev'] = self._format_net_dev(self.ip, self.mac, self.netvm.name)
- args['disable_network1'] = '';
- args['disable_network2'] = '';
+ args['network_begin'] = ''
+ args['network_end'] = ''
+ args['no_network_begin'] = ''
else:
args['ip'] = ''
args['mac'] = ''
@@ -1166,8 +1214,12 @@ class QubesVm(object):
args['dns2'] = ''
args['netmask'] = ''
args['netdev'] = ''
- args['disable_network1'] = '';
+ args['network_begin'] = ''
+ args['no_network_begin'] = ''
+ args['no_network_end'] = ''
+ if len(self.pcidevs) and self.pci_e820_host:
+ args['features'] = ''
args.update(self.storage.get_config_params())
if hasattr(self, 'kernelopts'):
args['kernelopts'] = self.kernelopts
@@ -1262,9 +1314,10 @@ class QubesVm(object):
hook(self, verbose, source_template=source_template)
def get_clone_attrs(self):
- attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \
- 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \
- '_mac', 'pcidevs', 'include_in_backups', '_label', 'default_user']
+ attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm',
+ 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts',
+ 'services', 'vcpus', '_mac', 'pcidevs', 'include_in_backups',
+ '_label', 'default_user', 'qrexec_timeout']
# fire hooks
for hook in self.hooks_get_clone_attrs:
@@ -1357,8 +1410,16 @@ class QubesVm(object):
# already undefined
pass
else:
+ print >>sys.stderr, "libvirt error code: {!r}".format(
+ e.get_error_code())
raise
+ if os.path.exists("/etc/systemd/system/multi-user.target.wants/qubes-vm@" + self.name + ".service"):
+ retcode = subprocess.call(["sudo", "systemctl", "-q", "disable",
+ "qubes-vm@" + self.name + ".service"])
+ if retcode != 0:
+ raise QubesException("Failed to delete autostart entry for VM")
+
self.storage.remove_from_disk()
def write_firewall_conf(self, conf):
@@ -1623,17 +1684,22 @@ class QubesVm(object):
if bool(input) + bool(passio_popen) + bool(localcmd) > 1:
raise ValueError("'input', 'passio_popen', 'localcmd' cannot be "
"used together")
+ if not wait and (localcmd or input):
+ raise ValueError("Cannot use wait=False with input or "
+ "localcmd specified")
if localcmd:
return self.run("QUBESRPC %s %s" % (service, source),
localcmd=localcmd, user=user, wait=wait, gui=gui)
elif input:
- return self.run("QUBESRPC %s %s" % (service, source),
- localcmd="echo %s" % input, user=user, wait=wait,
- gui=gui)
+ p = self.run("QUBESRPC %s %s" % (service, source),
+ user=user, wait=wait, gui=gui, passio_popen=True,
+ passio_stderr=True)
+ p.communicate(input)
+ return p.returncode
else:
return self.run("QUBESRPC %s %s" % (service, source),
passio_popen=passio_popen, user=user, wait=wait,
- gui=gui)
+ gui=gui, passio_stderr=passio_popen)
def attach_network(self, verbose = False, wait = True, netvm = None):
self.log.debug('attach_network(netvm={!r})'.format(netvm))
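A usage sketch of the adjusted run_service() semantics (service name and payload are illustrative):

    # With input=..., the data is now fed through Popen.communicate() and the
    # service exit code is returned; wait=False is rejected in that case.
    retcode = vm.run_service('test.Service', input='some data', user='user')

    # With passio_popen=True a Popen object is returned; stderr is now passed
    # through as well (passio_stderr follows passio_popen).
    p = vm.run_service('test.Service', passio_popen=True)
    stdout, stderr = p.communicate('payload\n')
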
@@ -1695,7 +1761,15 @@ class QubesVm(object):
if verbose:
print >> sys.stderr, "--> Starting Qubes GUId..."
- guid_cmd = [system_path["qubes_guid_path"],
+ guid_cmd = []
+ if os.getuid() == 0:
+            # try to always have guid running as a normal user, otherwise
+            # the clipboard file may be created as root, causing permission
+            # problems
+ qubes_group = grp.getgrnam('qubes')
+ guid_cmd = ['runuser', '-u', qubes_group.gr_mem[0], '--']
+
+ guid_cmd += [system_path["qubes_guid_path"],
"-d", str(self.xid), "-N", self.name,
"-c", self.label.color,
"-i", self.label.icon_path,
@@ -1706,6 +1780,33 @@ class QubesVm(object):
guid_cmd += ['-v', '-v']
elif not verbose:
guid_cmd += ['-q']
+ # Avoid using environment variables for checking the current session,
+ # because this script may be called with cleared env (like with sudo).
+ if subprocess.check_output(
+ ['xprop', '-root', '-notype', 'KDE_SESSION_VERSION']) == \
+ 'KDE_SESSION_VERSION = 5\n':
+            # native decoration plugin is used, so adjust window properties
+ # accordingly
+ guid_cmd += ['-T'] # prefix window titles with VM name
+ # get owner of X11 session
+ session_owner = None
+ for line in subprocess.check_output(['xhost']).splitlines():
+ if line == 'SI:localuser:root':
+ pass
+ elif line.startswith('SI:localuser:'):
+ session_owner = line.split(":")[2]
+ if session_owner is not None:
+ data_dir = os.path.expanduser(
+ '~{}/.local/share'.format(session_owner))
+ else:
+ # fallback to current user
+ data_dir = os.path.expanduser('~/.local/share')
+
+ guid_cmd += ['-p',
+ '_KDE_NET_WM_COLOR_SCHEME=s:{}'.format(
+ os.path.join(data_dir,
+ 'qubes-kde', self.label.name + '.colors'))]
+
retcode = subprocess.call (guid_cmd)
if (retcode != 0) :
raise QubesException("Cannot start qubes-guid!")
@@ -1734,13 +1835,21 @@ class QubesVm(object):
self.log.debug('start_qrexec_daemon()')
if verbose:
print >> sys.stderr, "--> Starting the qrexec daemon..."
+ qrexec = []
+ if os.getuid() == 0:
+ # try to always have qrexec running as normal user, otherwise
+ # many qrexec services would need to deal with root/user
+ # permission problems
+ qubes_group = grp.getgrnam('qubes')
+ qrexec = ['runuser', '-u', qubes_group.gr_mem[0], '--']
+
+ qrexec += ['env', 'QREXEC_STARTUP_TIMEOUT=' + str(self.qrexec_timeout),
+ system_path["qrexec_daemon_path"]]
+
qrexec_args = [str(self.xid), self.name, self.default_user]
if not verbose:
qrexec_args.insert(0, "-q")
- qrexec_env = os.environ
- qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)
- retcode = subprocess.call ([system_path["qrexec_daemon_path"]] +
- qrexec_args, env=qrexec_env)
+ retcode = subprocess.call(qrexec + qrexec_args)
if (retcode != 0) :
raise OSError ("Cannot execute qrexec-daemon!")
@@ -1769,10 +1878,19 @@ class QubesVm(object):
# force connection to a new daemon
self._qdb_connection = None
- retcode = subprocess.call ([
+ qubesdb_cmd = []
+ if os.getuid() == 0:
+ # try to always have qubesdb running as normal user, otherwise
+ # killing it at VM restart (see above) will always fail
+ qubes_group = grp.getgrnam('qubes')
+ qubesdb_cmd = ['runuser', '-u', qubes_group.gr_mem[0], '--']
+
+ qubesdb_cmd += [
system_path["qubesdb_daemon_path"],
str(self.xid),
- self.name])
+ self.name]
+
+ retcode = subprocess.call (qubesdb_cmd)
if retcode != 0:
raise OSError("ERROR: Cannot execute qubesdb-daemon!")
diff --git a/core-modules/001QubesResizableVm.py b/core-modules/001QubesResizableVm.py
index 88467ed1..f50ead70 100644
--- a/core-modules/001QubesResizableVm.py
+++ b/core-modules/001QubesResizableVm.py
@@ -31,11 +31,12 @@ from qubes.qubes import (
QubesException,
QubesVm,
)
+from time import sleep
class QubesResizableVm(QubesVm):
- def resize_root_img(self, size):
+ def resize_root_img(self, size, allow_start=False):
if self.template:
raise QubesException("Cannot resize root.img of template-based VM"
". Resize the root.img of the template "
@@ -56,12 +57,20 @@ class QubesResizableVm(QubesVm):
class QubesResizableVmWithResize2fs(QubesResizableVm):
- def resize_root_img(self, size):
- super(QubesResizableVmWithResize2fs, self).resize_root_img(size)
+ def resize_root_img(self, size, allow_start=False):
+ super(QubesResizableVmWithResize2fs, self).\
+ resize_root_img(size, allow_start=allow_start)
+ if not allow_start:
+ raise QubesException("VM start required to complete the "
+ "operation, but not allowed. Either run the "
+ "operation again allowing VM start this "
+ "time, or run resize2fs in the VM manually.")
self.start(start_guid=False)
self.run("resize2fs /dev/mapper/dmroot", user="root", wait=True,
gui=False)
self.shutdown()
+ while self.is_running():
+ sleep(1)
register_qubes_vm_class(QubesResizableVm)
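A usage sketch of the new allow_start flag (the size is illustrative):

    # Without allow_start=True the resize2fs step is refused with a
    # QubesException telling the caller to re-run with allow_start=True or to
    # run resize2fs manually; with it, the VM is started without GUI, resized
    # and shut down, and the call now waits until the VM is really halted.
    vm.resize_root_img(20 * 1024 ** 3, allow_start=True)
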
diff --git a/core-modules/005QubesNetVm.py b/core-modules/005QubesNetVm.py
index 33934f4b..904e74f1 100644
--- a/core-modules/005QubesNetVm.py
+++ b/core-modules/005QubesNetVm.py
@@ -42,6 +42,7 @@ class QubesNetVm(QubesVm):
attrs_config['dir_path']['func'] = \
lambda value: value if value is not None else \
os.path.join(system_path["qubes_servicevms_dir"], self.name)
+ attrs_config['uses_default_netvm']['func'] = lambda x: False
attrs_config['label']['default'] = defaults["servicevm_label"]
attrs_config['memory']['default'] = 300
diff --git a/core-modules/01QubesDisposableVm.py b/core-modules/01QubesDisposableVm.py
index 8fedb742..4e3ebdd8 100644
--- a/core-modules/01QubesDisposableVm.py
+++ b/core-modules/01QubesDisposableVm.py
@@ -168,8 +168,13 @@ class QubesDisposableVm(QubesVm):
if self.get_power_state() != "Halted":
raise QubesException ("VM is already running!")
- # skip netvm state checking - calling VM have the same netvm, so it
- # must be already running
+ if self.netvm is not None:
+ if self.netvm.qid != 0:
+ if not self.netvm.is_running():
+ if verbose:
+ print >> sys.stderr, "--> Starting NetVM {0}...".\
+ format(self.netvm.name)
+ self.netvm.start(verbose=verbose, **kwargs)
if verbose:
print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type)
@@ -232,5 +237,9 @@ class QubesDisposableVm(QubesVm):
return self.xid
+ def remove_from_disk(self):
+ # nothing to remove
+ pass
+
# register classes
register_qubes_vm_class(QubesDisposableVm)
diff --git a/core-modules/01QubesHVm.py b/core-modules/01QubesHVm.py
index c3b692a8..c98b67b5 100644
--- a/core-modules/01QubesHVm.py
+++ b/core-modules/01QubesHVm.py
@@ -100,8 +100,6 @@ class QubesHVm(QubesResizableVm):
(not 'xml_element' in kwargs or kwargs['xml_element'].get('guiagent_installed') is None):
self.services['meminfo-writer'] = False
- self.storage.rootcow_img = None
-
@property
def type(self):
return "HVM"
@@ -314,7 +312,16 @@ class QubesHVm(QubesResizableVm):
else:
return -1
+ def validate_drive_path(self, drive):
+ drive_type, drive_domain, drive_path = drive.split(':', 2)
+ if drive_domain == 'dom0':
+ if not os.path.exists(drive_path):
+ raise QubesException("Invalid drive path '{}'".format(
+ drive_path))
+
def start(self, *args, **kwargs):
+ if self.drive:
+ self.validate_drive_path(self.drive)
# make it available to storage.prepare_for_vm_startup, which is
# called before actually building VM libvirt configuration
self.storage.drive = self.drive
@@ -352,25 +359,28 @@ class QubesHVm(QubesResizableVm):
if (retcode != 0) :
raise QubesException("Cannot start qubes-guid!")
- def start_guid(self, verbose = True, notify_function = None,
- before_qrexec=False, **kwargs):
- # If user force the guiagent, start_guid will mimic a standard QubesVM
- if not before_qrexec and self.guiagent_installed:
- kwargs['extra_guid_args'] = kwargs.get('extra_guid_args', []) + \
- ['-Q']
- super(QubesHVm, self).start_guid(verbose, notify_function, **kwargs)
- stubdom_guid_pidfile = '/var/run/qubes/guid-running.%d' % self.stubdom_xid
- if os.path.exists(stubdom_guid_pidfile) and not self.debug:
- try:
- stubdom_guid_pid = int(open(stubdom_guid_pidfile, 'r').read())
- os.kill(stubdom_guid_pid, signal.SIGTERM)
- except Exception as ex:
- print >> sys.stderr, "WARNING: Failed to kill stubdom gui daemon: %s" % str(ex)
- elif before_qrexec and (not self.guiagent_installed or self.debug):
+ def start_guid(self, verbose=True, notify_function=None,
+ before_qrexec=False, **kwargs):
+ if not before_qrexec:
+ return
+
+ if not self.guiagent_installed or self.debug:
if verbose:
print >> sys.stderr, "--> Starting Qubes GUId (full screen)..."
self.start_stubdom_guid(verbose=verbose)
+ kwargs['extra_guid_args'] = kwargs.get('extra_guid_args', []) + \
+ ['-Q', '-n']
+
+ stubdom_guid_pidfile = \
+ '/var/run/qubes/guid-running.%d' % self.stubdom_xid
+ if not self.debug and os.path.exists(stubdom_guid_pidfile):
+ # Terminate stubdom guid once "real" gui agent connects
+ stubdom_guid_pid = int(open(stubdom_guid_pidfile, 'r').read())
+ kwargs['extra_guid_args'] += ['-K', str(stubdom_guid_pid)]
+
+ super(QubesHVm, self).start_guid(verbose, notify_function, **kwargs)
+
def start_qrexec_daemon(self, **kwargs):
if not self.qrexec_installed:
if kwargs.get('verbose', False):
diff --git a/core-modules/02QubesTemplateHVm.py b/core-modules/02QubesTemplateHVm.py
index eb5b309e..6452a8eb 100644
--- a/core-modules/02QubesTemplateHVm.py
+++ b/core-modules/02QubesTemplateHVm.py
@@ -29,7 +29,7 @@ import stat
import sys
import re
-from qubes.qubes import QubesHVm,register_qubes_vm_class,dry_run
+from qubes.qubes import QubesHVm,register_qubes_vm_class,dry_run,vmm
from qubes.qubes import QubesException,QubesVmCollection
from qubes.qubes import system_path,defaults
@@ -70,6 +70,7 @@ class QubesTemplateHVm(QubesHVm):
def is_appvm(self):
return False
+ @property
def rootcow_img(self):
return self.storage.rootcow_img
@@ -95,7 +96,15 @@ class QubesTemplateHVm(QubesHVm):
def commit_changes (self, verbose = False):
self.log.debug('commit_changes()')
- # nothing to do as long as root-cow.img is unused
- pass
+ if not vmm.offline_mode:
+ assert not self.is_running(), "Attempt to commit changes on running Template VM!"
+
+ if verbose:
+            print >> sys.stderr, "--> Committing template updates... COW: {0}...".format (self.rootcow_img)
+
+ if dry_run:
+ return
+
+ self.storage.commit_template_changes()
register_qubes_vm_class(QubesTemplateHVm)
diff --git a/core/backup.py b/core/backup.py
index 6b20212c..e9915d56 100644
--- a/core/backup.py
+++ b/core/backup.py
@@ -357,8 +357,8 @@ def backup_prepare(vms_list=None, exclude_list=None,
vms_not_for_backup = [vm.name for vm in qvm_collection.values()
if not vm.backup_content]
- print_callback("VMs not selected for backup: %s" % " ".join(
- vms_not_for_backup))
+ print_callback("VMs not selected for backup:\n%s" % "\n".join(sorted(
+ vms_not_for_backup)))
if there_are_running_vms:
raise QubesException("Please shutdown all VMs before proceeding.")
@@ -400,6 +400,10 @@ class SendWorker(Process):
stdin=subprocess.PIPE,
stdout=self.backup_stdout)
if final_proc.wait() >= 2:
+ if self.queue.full():
+ # if queue is already full, remove some entry to wake up
+ # main thread, so it will be able to notice error
+ self.queue.get()
# handle only exit code 2 (tar fatal error) or
# greater (call failed?)
raise QubesException(
@@ -445,9 +449,21 @@ def prepare_backup_header(target_directory, passphrase, compressed=False,
def backup_do(base_backup_dir, files_to_backup, passphrase,
progress_callback=None, encrypted=False, appvm=None,
compressed=False, hmac_algorithm=DEFAULT_HMAC_ALGORITHM,
- crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM):
+ crypto_algorithm=DEFAULT_CRYPTO_ALGORITHM,
+ tmpdir=None):
global running_backup_operation
+ def queue_put_with_check(proc, vmproc, queue, element):
+ if queue.full():
+ if not proc.is_alive():
+ if vmproc:
+ message = ("Failed to write the backup, VM output:\n" +
+ vmproc.stderr.read())
+ else:
+ message = "Failed to write the backup. Out of disk space?"
+ raise QubesException(message)
+ queue.put(element)
+
total_backup_sz = 0
passphrase = passphrase.encode('utf-8')
for f in files_to_backup:
@@ -495,7 +511,7 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
progress = blocks_backedup * 11 / total_backup_sz
progress_callback(progress)
- backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/backup_")
+ backup_tmpdir = tempfile.mkdtemp(prefix="backup_", dir=tmpdir)
running_backup_operation.tmpdir_to_remove = backup_tmpdir
# Tar with tape length does not deals well with stdout (close stdout between
@@ -552,15 +568,16 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
# be verified before untaring this.
# Prefix the path in archive with filename["subdir"] to have it
# verified during untar
- tar_cmdline = ["tar", "-Pc", '--sparse',
+ tar_cmdline = (["tar", "-Pc", '--sparse',
"-f", backup_pipe,
- '-C', os.path.dirname(filename["path"]),
- '--dereference',
- '--xform', 's:^%s:%s\\0:' % (
+ '-C', os.path.dirname(filename["path"])] +
+ (['--dereference'] if filename["subdir"] != "dom0-home/"
+ else []) +
+ ['--xform', 's:^%s:%s\\0:' % (
os.path.basename(filename["path"]),
filename["subdir"]),
os.path.basename(filename["path"])
- ]
+ ])
if compressed:
tar_cmdline.insert(-1,
"--use-compress-program=%s" % compression_filter)
@@ -650,7 +667,9 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
run_error)
# Send the chunk to the backup target
- to_send.put(os.path.relpath(chunkfile, backup_tmpdir))
+ queue_put_with_check(
+ send_proc, vmproc, to_send,
+ os.path.relpath(chunkfile, backup_tmpdir))
# Close HMAC
hmac.stdin.close()
@@ -668,7 +687,9 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
hmac_file.close()
# Send the HMAC to the backup target
- to_send.put(os.path.relpath(chunkfile, backup_tmpdir) + ".hmac")
+ queue_put_with_check(
+ send_proc, vmproc, to_send,
+ os.path.relpath(chunkfile, backup_tmpdir) + ".hmac")
if tar_sparse.poll() is None or run_error == "size_limit":
run_error = "paused"
@@ -680,7 +701,7 @@ def backup_do(base_backup_dir, files_to_backup, passphrase,
.poll()
pipe.close()
- to_send.put("FINISHED")
+ queue_put_with_check(send_proc, vmproc, to_send, "FINISHED")
send_proc.join()
shutil.rmtree(backup_tmpdir)
@@ -1553,6 +1574,8 @@ def backup_restore_set_defaults(options):
options['ignore-username-mismatch'] = False
if 'verify-only' not in options:
options['verify-only'] = False
+ if 'rename-conflicting' not in options:
+ options['rename-conflicting'] = False
return options
@@ -1620,6 +1643,22 @@ def backup_restore_header(source, passphrase,
return (restore_tmpdir, os.path.join(restore_tmpdir, "qubes.xml"),
header_data)
+def generate_new_name_for_conflicting_vm(orig_name, host_collection,
+ restore_info):
+ number = 1
+ if len(orig_name) > 29:
+ orig_name = orig_name[0:29]
+ new_name = orig_name
+ while (new_name in restore_info.keys() or
+ new_name in map(lambda x: x.get('rename_to', None),
+ restore_info.values()) or
+ host_collection.get_vm_by_name(new_name)):
+ new_name = str('{}{}'.format(orig_name, number))
+ number += 1
+ if number == 100:
+ # give up
+ return None
+ return new_name
def restore_info_verify(restore_info, host_collection):
options = restore_info['$OPTIONS$']
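An illustrative walk-through of the conflict renaming added above (the existing VM names are hypothetical):

    # With a VM named 'work' both on the host and in the backup, and
    # rename-conflicting enabled, restore_info_verify() stores
    # 'rename-to': 'work1' (then 'work2', ... up to 99 attempts), after
    # truncating the base name to 29 characters so the result stays a valid
    # VM name.
    new_name = generate_new_name_for_conflicting_vm('work', host_collection,
                                                    restore_info)
    # e.g. new_name == 'work1'
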
@@ -1637,7 +1676,16 @@ def restore_info_verify(restore_info, host_collection):
vm_info.pop('already-exists', None)
if not options['verify-only'] and \
host_collection.get_vm_by_name(vm) is not None:
- vm_info['already-exists'] = True
+ if options['rename-conflicting']:
+ new_name = generate_new_name_for_conflicting_vm(
+ vm, host_collection, restore_info
+ )
+ if new_name is not None:
+ vm_info['rename-to'] = new_name
+ else:
+ vm_info['already-exists'] = True
+ else:
+ vm_info['already-exists'] = True
# check template
vm_info.pop('missing-template', None)
@@ -1658,7 +1706,11 @@ def restore_info_verify(restore_info, host_collection):
# check netvm
vm_info.pop('missing-netvm', None)
- if vm_info['netvm']:
+ if vm_info['vm'].uses_default_netvm:
+ default_netvm = host_collection.get_default_netvm()
+ vm_info['netvm'] = default_netvm.name if \
+ default_netvm else None
+ elif vm_info['netvm']:
netvm_name = vm_info['netvm']
netvm_on_host = host_collection.get_vm_by_name(netvm_name)
@@ -1670,8 +1722,9 @@ def restore_info_verify(restore_info, host_collection):
if not (netvm_name in restore_info.keys() and
restore_info[netvm_name]['vm'].is_netvm()):
if options['use-default-netvm']:
- vm_info['netvm'] = host_collection \
- .get_default_netvm().name
+ default_netvm = host_collection.get_default_netvm()
+ vm_info['netvm'] = default_netvm.name if \
+ default_netvm else None
vm_info['vm'].uses_default_netvm = True
elif options['use-none-netvm']:
vm_info['netvm'] = None
@@ -1684,6 +1737,22 @@ def restore_info_verify(restore_info, host_collection):
'already-exists',
'excluded']])
+ # update references to renamed VMs:
+ for vm in restore_info.keys():
+ if vm in ['$OPTIONS$', 'dom0']:
+ continue
+ vm_info = restore_info[vm]
+ template_name = vm_info['template']
+ if (template_name in restore_info and
+ restore_info[template_name]['good-to-go'] and
+ 'rename-to' in restore_info[template_name]):
+ vm_info['template'] = restore_info[template_name]['rename-to']
+ netvm_name = vm_info['netvm']
+ if (netvm_name in restore_info and
+ restore_info[netvm_name]['good-to-go'] and
+ 'rename-to' in restore_info[netvm_name]):
+ vm_info['netvm'] = restore_info[netvm_name]['rename-to']
+
return restore_info
@@ -1956,8 +2025,11 @@ def backup_restore_print_summary(restore_info, print_callback=print_stdout):
s += " <-- No matching template on the host or in the backup found!"
elif 'missing-netvm' in vm_info:
s += " <-- No matching netvm on the host or in the backup found!"
- elif 'orig-template' in vm_info:
- s += " <-- Original template was '%s'" % (vm_info['orig-template'])
+ else:
+ if 'orig-template' in vm_info:
+ s += " <-- Original template was '%s'" % (vm_info['orig-template'])
+ if 'rename-to' in vm_info:
+ s += " <-- Will be renamed to '%s'" % vm_info['rename-to']
print_callback(s)
@@ -2105,33 +2177,35 @@ def backup_restore_do(restore_info,
error_callback("Skipping...")
continue
- if os.path.exists(vm.dir_path):
- move_to_path = tempfile.mkdtemp('', os.path.basename(
- vm.dir_path), os.path.dirname(vm.dir_path))
- try:
- os.rename(vm.dir_path, move_to_path)
- error_callback("*** Directory {} already exists! It has "
- "been moved to {}".format(vm.dir_path,
- move_to_path))
- except OSError:
- error_callback("*** Directory {} already exists and "
- "cannot be moved!".format(vm.dir_path))
- error_callback("Skipping...")
- continue
-
template = None
if vm.template is not None:
template_name = restore_info[vm.name]['template']
template = host_collection.get_vm_by_name(template_name)
new_vm = None
+ vm_name = vm.name
+ if 'rename-to' in restore_info[vm.name]:
+ vm_name = restore_info[vm.name]['rename-to']
try:
- new_vm = host_collection.add_new_vm(vm_class_name, name=vm.name,
- conf_file=vm.conf_file,
- dir_path=vm.dir_path,
+ new_vm = host_collection.add_new_vm(vm_class_name, name=vm_name,
template=template,
installed_by_rpm=False)
+ if os.path.exists(new_vm.dir_path):
+ move_to_path = tempfile.mkdtemp('', os.path.basename(
+ new_vm.dir_path), os.path.dirname(new_vm.dir_path))
+ try:
+ os.rename(new_vm.dir_path, move_to_path)
+ error_callback(
+ "*** Directory {} already exists! It has "
+ "been moved to {}".format(new_vm.dir_path,
+ move_to_path))
+ except OSError:
+ error_callback(
+ "*** Directory {} already exists and "
+ "cannot be moved!".format(new_vm.dir_path))
+ error_callback("Skipping...")
+ continue
if format_version == 1:
restore_vm_dir_v1(backup_location,
@@ -2166,6 +2240,13 @@ def backup_restore_do(restore_info,
error_callback("ERROR: {0}".format(err))
error_callback("*** Some VM property will not be restored")
+ try:
+ for service, value in vm.services.items():
+ new_vm.services[service] = value
+ except Exception as err:
+ error_callback("ERROR: {0}".format(err))
+ error_callback("*** Some VM property will not be restored")
+
try:
new_vm.appmenus_create(verbose=callable(print_callback))
except Exception as err:
@@ -2175,7 +2256,11 @@ def backup_restore_do(restore_info,
# Set network dependencies - only non-default netvm setting
for vm in vms.values():
- host_vm = host_collection.get_vm_by_name(vm.name)
+ vm_name = vm.name
+ if 'rename-to' in restore_info[vm.name]:
+ vm_name = restore_info[vm.name]['rename-to']
+ host_vm = host_collection.get_vm_by_name(vm_name)
+
if host_vm is None:
# Failed/skipped VM
continue
@@ -2231,6 +2316,10 @@ def backup_restore_do(restore_info,
if retcode != 0:
error_callback("*** Error while setting home directory owner")
+ if callable(print_callback):
+ print_callback("-> Done. Please install updates for all the restored "
+ "templates.")
+
shutil.rmtree(restore_tmpdir)
# vim:sw=4:et:
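A sketch of the restore options enabling the behaviour added above (the dict is merged with defaults by backup_restore_set_defaults()):

    options = {
        'rename-conflicting': True,   # new in this patch, default False
        'verify-only': False,
    }
    # restore_info_verify() will then set 'rename-to' on conflicting VMs
    # instead of marking them with 'already-exists'.
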
diff --git a/core/qubes.py b/core/qubes.py
index e910b66b..f3be12d3 100755
--- a/core/qubes.py
+++ b/core/qubes.py
@@ -224,7 +224,7 @@ class QubesHost(object):
cputime = vm.get_cputime()
previous[vm.xid] = {}
previous[vm.xid]['cpu_time'] = (
- cputime / vm.vcpus)
+ cputime / max(vm.vcpus, 1))
previous[vm.xid]['cpu_usage'] = 0
time.sleep(wait_time)
@@ -651,6 +651,8 @@ class QubesVmCollection(dict):
self.qubes_store_file.close()
def unlock_db(self):
+ if self.qubes_store_file is None:
+ return
# intentionally do not call explicit unlock to not unlock the file
# before all buffers are flushed
self.log.debug('unlock_db()')
@@ -711,18 +713,13 @@ class QubesVmCollection(dict):
def set_netvm_dependency(self, element):
kwargs = {}
- attr_list = ("qid", "uses_default_netvm", "netvm_qid")
+ attr_list = ("qid", "netvm_qid")
for attribute in attr_list:
kwargs[attribute] = element.get(attribute)
vm = self[int(kwargs["qid"])]
- if "uses_default_netvm" not in kwargs:
- vm.uses_default_netvm = True
- else:
- vm.uses_default_netvm = (
- True if kwargs["uses_default_netvm"] == "True" else False)
if vm.uses_default_netvm is True:
if vm.is_proxyvm():
netvm = self.get_default_fw_netvm()
diff --git a/core/qubesutils.py b/core/qubesutils.py
index 689138a3..ce15dc10 100644
--- a/core/qubesutils.py
+++ b/core/qubesutils.py
@@ -25,6 +25,7 @@
from __future__ import absolute_import
import string
+import errno
from lxml import etree
from lxml.etree import ElementTree, SubElement, Element
@@ -50,6 +51,9 @@ BLKSIZE = 512
AVAILABLE_FRONTENDS = ['xvd'+c for c in
string.lowercase[8:]+string.lowercase[:8]]
+class USBProxyNotInstalled(QubesException):
+ pass
+
def mbytes_to_kmg(size):
if size > 1024:
return "%d GiB" % (size/1024)
@@ -420,6 +424,8 @@ def block_attach(qvmc, vm, device, frontend=None, mode="w", auto_detach=False, w
SubElement(disk, 'target').set('dev', frontend)
if backend_vm.qid != 0:
SubElement(disk, 'backenddomain').set('name', device['vm'])
+ if mode == "r":
+ SubElement(disk, 'readonly')
vm.libvirt_domain.attachDevice(etree.tostring(disk, encoding='utf-8'))
try:
# trigger watches to update device status
@@ -463,263 +469,205 @@ def block_detach_all(vm):
usb_ver_re = re.compile(r"^(1|2)$")
usb_device_re = re.compile(r"^[0-9]+-[0-9]+(_[0-9]+)?$")
usb_port_re = re.compile(r"^$|^[0-9]+-[0-9]+(\.[0-9]+)?$")
+usb_desc_re = re.compile(r"^[ -~]{1,255}$")
+# should match valid VM name
+usb_connected_to_re = re.compile(r"^[a-zA-Z][a-zA-Z0-9_.-]*$")
-def usb_setup(backend_vm_xid, vm_xid, devid, usb_ver):
- """
- Attach frontend to the backend.
- backend_vm_xid - id of the backend domain
- vm_xid - id of the frontend domain
- devid - id of the pvusb controller
- """
- num_ports = 8
- trans = vmm.xs.transaction_start()
-
- be_path = "/local/domain/%d/backend/vusb/%d/%d" % (backend_vm_xid, vm_xid, devid)
- fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, devid)
-
- be_perm = [{'dom': backend_vm_xid}, {'dom': vm_xid, 'read': True} ]
- fe_perm = [{'dom': vm_xid}, {'dom': backend_vm_xid, 'read': True} ]
-
- # Create directories and set permissions
- vmm.xs.write(trans, be_path, "")
- vmm.xs.set_permissions(trans, be_path, be_perm)
-
- vmm.xs.write(trans, fe_path, "")
- vmm.xs.set_permissions(trans, fe_path, fe_perm)
-
- # Write backend information into the location that frontend looks for
- vmm.xs.write(trans, "%s/backend-id" % fe_path, str(backend_vm_xid))
- vmm.xs.write(trans, "%s/backend" % fe_path, be_path)
-
- # Write frontend information into the location that backend looks for
- vmm.xs.write(trans, "%s/frontend-id" % be_path, str(vm_xid))
- vmm.xs.write(trans, "%s/frontend" % be_path, fe_path)
-
- # Write USB Spec version field.
- vmm.xs.write(trans, "%s/usb-ver" % be_path, usb_ver)
-
- # Write virtual root hub field.
- vmm.xs.write(trans, "%s/num-ports" % be_path, str(num_ports))
- for port in range(1, num_ports+1):
- # Set all port to disconnected state
- vmm.xs.write(trans, "%s/port/%d" % (be_path, port), "")
-
- # Set state to XenbusStateInitialising
- vmm.xs.write(trans, "%s/state" % fe_path, "1")
- vmm.xs.write(trans, "%s/state" % be_path, "1")
- vmm.xs.write(trans, "%s/online" % be_path, "1")
-
- vmm.xs.transaction_end(trans)
-
-def usb_decode_device_from_xs(xs_encoded_device):
+def usb_decode_device_from_qdb(qdb_encoded_device):
""" recover actual device name (xenstore doesn't allow dot in key names, so it was translated to underscore) """
- return xs_encoded_device.replace('_', '.')
+ return qdb_encoded_device.replace('_', '.')
-def usb_encode_device_for_xs(device):
+def usb_encode_device_for_qdb(device):
""" encode actual device name (xenstore doesn't allow dot in key names, so translated it into underscore) """
return device.replace('.', '_')
-def usb_list():
+def usb_list_vm(qvmc, vm):
+ if not vm.is_running():
+ return {}
+
+ try:
+ untrusted_devices = vm.qdb.multiread('/qubes-usb-devices/')
+ except Error:
+ vm.refresh()
+ return {}
+
+ def get_dev_item(dev, item):
+ return untrusted_devices.get(
+ '/qubes-usb-devices/%s/%s' % (dev, item),
+ None)
+
+ devices = {}
+
+ untrusted_devices_names = list(set(map(lambda x: x.split("/")[2],
+ untrusted_devices.keys())))
+ for untrusted_dev_name in untrusted_devices_names:
+ if usb_device_re.match(untrusted_dev_name):
+ dev_name = untrusted_dev_name
+ untrusted_device_desc = get_dev_item(dev_name, 'desc')
+ if not usb_desc_re.match(untrusted_device_desc):
+ print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (
+ dev_name, vm.name)
+ continue
+ device_desc = untrusted_device_desc
+
+ untrusted_connected_to = get_dev_item(dev_name, 'connected-to')
+ if untrusted_connected_to:
+ if not usb_connected_to_re.match(untrusted_connected_to):
+ print >>sys.stderr, \
+ "Invalid %s device 'connected-to' in VM '%s'" % (
+ dev_name, vm.name)
+ continue
+ connected_to = qvmc.get_vm_by_name(untrusted_connected_to)
+ if connected_to is None:
+ print >>sys.stderr, \
+ "Device {} appears to be connected to {}, " \
+ "but such VM doesn't exist".format(
+ dev_name, untrusted_connected_to)
+ else:
+ connected_to = None
+
+ device = usb_decode_device_from_qdb(dev_name)
+
+ full_name = vm.name + ':' + device
+
+ devices[full_name] = {
+ 'vm': vm,
+ 'device': device,
+ 'qdb_path': '/qubes-usb-devices/' + dev_name,
+ 'name': full_name,
+ 'desc': device_desc,
+ 'connected-to': connected_to,
+ }
+ return devices
+
+
+def usb_list(qvmc, vm=None):
"""
Returns a dictionary of USB devices (for PVUSB backends running in all VM).
The dictionary is keyed by 'name' (see below), each element is a dictionary itself:
- vm = name of the backend domain
- xid = xid of the backend domain
-     device = <frontend device number>-<frontend port number>
-     name = <name of backend domain>:<frontend device>-<frontend port>
+     vm = backend domain object
+     device = device ID
+     name = <backend vm name>:<device id>
desc = description
"""
- # FIXME: any better idea of desc_re?
- desc_re = re.compile(r"^.{1,255}$")
+ if vm is not None:
+ if not vm.is_running():
+ return {}
+ else:
+ vm_list = [vm]
+ else:
+ vm_list = qvmc.values()
devices_list = {}
-
- xs_trans = vmm.xs.transaction_start()
- vm_list = vmm.xs.ls(xs_trans, '/local/domain')
-
- for xid in vm_list:
- vm_name = vmm.xs.read(xs_trans, '/local/domain/%s/name' % xid)
- vm_devices = vmm.xs.ls(xs_trans, '/local/domain/%s/qubes-usb-devices' % xid)
- if vm_devices is None:
- continue
- # when listing devices in xenstore we get encoded names
- for xs_encoded_device in vm_devices:
- # Sanitize device id
- if not usb_device_re.match(xs_encoded_device):
- print >> sys.stderr, "Invalid device id in backend VM '%s'" % vm_name
- continue
- device = usb_decode_device_from_xs(xs_encoded_device)
- device_desc = vmm.xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/desc' % (xid, xs_encoded_device))
- if not desc_re.match(device_desc):
- print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
- continue
- visible_name = "%s:%s" % (vm_name, device)
- # grab version
- usb_ver = vmm.xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (xid, xs_encoded_device))
- if usb_ver is None or not usb_ver_re.match(usb_ver):
- print >> sys.stderr, "Invalid %s device USB version in VM '%s'" % (device, vm_name)
- continue
- devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
- "vm": vm_name, "device":device,
- "desc":device_desc,
- "usb_ver":usb_ver}
-
- vmm.xs.transaction_end(xs_trans)
+ for vm in vm_list:
+ devices_list.update(usb_list_vm(qvmc, vm))
return devices_list
-def usb_check_attached(xs_trans, backend_vm, device):
- """
- Checks if the given device in the given backend attached to any frontend.
- Parameters:
- backend_vm - xid of the backend domain
- device - device name in the backend domain
- Returns None or a dictionary:
- vm - the name of the frontend domain
- xid - xid of the frontend domain
- frontend - frontend device number FIXME
- devid - frontend port number FIXME
- """
- # sample xs content: /local/domain/0/backend/vusb/4/0/port/1 = "7-5"
- attached_dev = None
- vms = vmm.xs.ls(xs_trans, '/local/domain/%d/backend/vusb' % backend_vm)
- if vms is None:
- return None
- for vm in vms:
- if not vm.isdigit():
- print >> sys.stderr, "Invalid VM id"
- continue
- frontend_devs = vmm.xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s' % (backend_vm, vm))
- if frontend_devs is None:
- continue
- for frontend_dev in frontend_devs:
- if not frontend_dev.isdigit():
- print >> sys.stderr, "Invalid frontend in VM %s" % vm
- continue
- ports = vmm.xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port' % (backend_vm, vm, frontend_dev))
- if ports is None:
- continue
- for port in ports:
- # FIXME: refactor, see similar loop in usb_find_unused_frontend(), use usb_list() instead?
- if not port.isdigit():
- print >> sys.stderr, "Invalid port in VM %s frontend %s" % (vm, frontend)
- continue
- dev = vmm.xs.read(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port/%s' % (backend_vm, vm, frontend_dev, port))
- if dev == "":
- continue
- # Sanitize device id
- if not usb_port_re.match(dev):
- print >> sys.stderr, "Invalid device id in backend VM %d @ %s/%s/port/%s" % \
- (backend_vm, vm, frontend_dev, port)
- continue
- if dev == device:
- frontend = "%s-%s" % (frontend_dev, port)
- #TODO
- vm_name = xl_ctx.domid_to_name(int(vm))
- if vm_name is None:
- # FIXME: should we wipe references to frontends running on nonexistent VMs?
- continue
- attached_dev = {"xid":int(vm), "frontend": frontend, "devid": device, "vm": vm_name}
- break
- return attached_dev
-
-#def usb_check_frontend_busy(vm, front_dev, port):
-# devport = frontend.split("-")
-# if len(devport) != 2:
-# raise QubesException("Malformed frontend syntax, must be in device-port format")
-# # FIXME:
-# # return vmm.xs.read('', '/local/domain/%d/device/vusb/%d/state' % (vm.xid, frontend)) == '4'
-# return False
-
-def usb_find_unused_frontend(xs_trans, backend_vm_xid, vm_xid, usb_ver):
- """
- Find an unused frontend/port to link the given backend with the given frontend.
- Creates new frontend if needed.
-    Returns frontend specification in <device>-<port> format.
- """
-
- # This variable holds an index of last frontend scanned by the loop below.
- # If nothing found, this value will be used to derive the index of a new frontend.
- last_frontend_dev = -1
-
- frontend_devs = vmm.xs.ls(xs_trans, "/local/domain/%d/device/vusb" % vm_xid)
- if frontend_devs is not None:
- for frontend_dev in frontend_devs:
- if not frontend_dev.isdigit():
- print >> sys.stderr, "Invalid frontend_dev in VM %d" % vm_xid
- continue
- frontend_dev = int(frontend_dev)
- fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, frontend_dev)
- if vmm.xs.read(xs_trans, "%s/backend-id" % fe_path) == str(backend_vm_xid):
- if vmm.xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/usb-ver' % (backend_vm_xid, vm_xid, frontend_dev)) != usb_ver:
- last_frontend_dev = frontend_dev
- continue
- # here: found an existing frontend already connected to right backend using an appropriate USB version
- ports = vmm.xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/port' % (backend_vm_xid, vm_xid, frontend_dev))
- if ports is None:
- print >> sys.stderr, "No ports in VM %d frontend_dev %d?" % (vm_xid, frontend_dev)
- last_frontend_dev = frontend_dev
- continue
- for port in ports:
- # FIXME: refactor, see similar loop in usb_check_attached(), use usb_list() instead?
- if not port.isdigit():
- print >> sys.stderr, "Invalid port in VM %d frontend_dev %d" % (vm_xid, frontend_dev)
- continue
- port = int(port)
- dev = vmm.xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%s/port/%s' % (backend_vm_xid, vm_xid, frontend_dev, port))
- # Sanitize device id
- if not usb_port_re.match(dev):
- print >> sys.stderr, "Invalid device id in backend VM %d @ %d/%d/port/%d" % \
- (backend_vm_xid, vm_xid, frontend_dev, port)
- continue
- if dev == "":
- return '%d-%d' % (frontend_dev, port)
- last_frontend_dev = frontend_dev
-
- # create a new frontend_dev and link it to the backend
- frontend_dev = last_frontend_dev + 1
- usb_setup(backend_vm_xid, vm_xid, frontend_dev, usb_ver)
- return '%d-%d' % (frontend_dev, 1)
-
-def usb_attach(vm, backend_vm, device, frontend=None, auto_detach=False, wait=True):
- device_attach_check(vm, backend_vm, device, frontend)
-
- xs_trans = vmm.xs.transaction_start()
-
- xs_encoded_device = usb_encode_device_for_xs(device)
- usb_ver = vmm.xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (backend_vm.xid, xs_encoded_device))
- if usb_ver is None or not usb_ver_re.match(usb_ver):
- vmm.xs.transaction_end(xs_trans)
- raise QubesException("Invalid %s device USB version in VM '%s'" % (device, backend_vm.name))
-
- if frontend is None:
- frontend = usb_find_unused_frontend(xs_trans, backend_vm.xid, vm.xid, usb_ver)
+def usb_check_attached(qvmc, device):
+ """Reread device attachment status"""
+ vm = device['vm']
+ untrusted_connected_to = vm.qdb.read(
+ '{}/connected-to'.format(device['qdb_path']))
+ if untrusted_connected_to:
+ if not usb_connected_to_re.match(untrusted_connected_to):
+ raise QubesException(
+ "Invalid %s device 'connected-to' in VM '%s'" % (
+ device['device'], vm.name))
+ connected_to = qvmc.get_vm_by_name(untrusted_connected_to)
+ if connected_to is None:
+ print >>sys.stderr, \
+ "Device {} appears to be connected to {}, " \
+ "but such VM doesn't exist".format(
+ device['device'], untrusted_connected_to)
else:
- # Check if any device attached at this frontend
- #if usb_check_frontend_busy(vm, frontend):
- # raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
- vmm.xs.transaction_end(xs_trans)
- raise NotImplementedError("Explicit USB frontend specification is not implemented yet")
+ connected_to = None
+ return connected_to
- # Check if this device is attached to some domain
- attached_vm = usb_check_attached(xs_trans, backend_vm.xid, device)
- vmm.xs.transaction_end(xs_trans)
+def usb_attach(qvmc, vm, device, auto_detach=False, wait=True):
+ if not vm.is_running():
+ raise QubesException("VM {} not running".format(vm.name))
- if attached_vm:
+ if not device['vm'].is_running():
+ raise QubesException("VM {} not running".format(device['vm'].name))
+
+ connected_to = usb_check_attached(qvmc, device)
+ if connected_to:
if auto_detach:
- usb_detach(backend_vm, attached_vm)
+ usb_detach(qvmc, device)
else:
- raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
+ raise QubesException("Device {} already connected, to {}".format(
+ device['name'], connected_to
+ ))
- # Run helper script
- xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-attach.py', str(vm.xid), device, frontend, str(backend_vm.xid) ]
- subprocess.check_call(xl_cmd)
+ # set qrexec policy to allow this device
+ policy_line = '{} {} allow\n'.format(vm.name, device['vm'].name)
+ policy_path = '/etc/qubes-rpc/policy/qubes.USB+{}'.format(device['device'])
+ policy_exists = os.path.exists(policy_path)
+ if not policy_exists:
+ try:
+ fd = os.open(policy_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+ with os.fdopen(fd, 'w') as f:
+ f.write(policy_line)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+ else:
+ with open(policy_path, 'r+') as f:
+ policy = f.readlines()
+ policy.insert(0, policy_line)
+ f.truncate(0)
+ f.seek(0)
+ f.write(''.join(policy))
+ try:
+ # and actual attach
+ p = vm.run_service('qubes.USBAttach', passio_popen=True, user='root')
+ (stdout, stderr) = p.communicate(
+ '{} {}\n'.format(device['vm'].name, device['device']))
+ if p.returncode == 127:
+ raise USBProxyNotInstalled(
+ "qubes-usb-proxy not installed in the VM")
+ elif p.returncode != 0:
+ # TODO: sanitize and include stdout
+ sanitized_stderr = ''.join([c for c in stderr if ord(c) >= 0x20])
+ raise QubesException('Device attach failed: {}'.format(
+ sanitized_stderr))
+ finally:
+ # FIXME: there is a race condition here - some other process might
+ # modify the file in the meantime. This may result in unexpected
+ # denials, but will not allow too much
+ if not policy_exists:
+ os.unlink(policy_path)
+ else:
+ with open(policy_path, 'r+') as f:
+ policy = f.readlines()
+ policy.remove('{} {} allow\n'.format(vm.name, device['vm'].name))
+ f.truncate(0)
+ f.seek(0)
+ f.write(''.join(policy))
-def usb_detach(backend_vm, attachment):
- xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-detach.py', str(attachment['xid']), attachment['devid'], attachment['frontend'], str(backend_vm.xid) ]
- subprocess.check_call(xl_cmd)
+def usb_detach(qvmc, vm, device):
+ connected_to = usb_check_attached(qvmc, device)
+ # detect race conditions; there is still race here, but much smaller
+ if connected_to is None or connected_to.qid != vm.qid:
+ raise QubesException(
+ "Device {} not connected to VM {}".format(
+ device['name'], vm.name))
-def usb_detach_all(vm):
- raise NotImplementedError("Detaching all devices from a given VM is not implemented yet")
+ p = device['vm'].run_service('qubes.USBDetach', passio_popen=True,
+ user='root')
+ (stdout, stderr) = p.communicate(
+ '{}\n'.format(device['device']))
+ if p.returncode != 0:
+ # TODO: sanitize and include stdout
+ raise QubesException('Device detach failed')
+
+def usb_detach_all(qvmc, vm):
+ for dev in usb_list(qvmc).values():
+ connected_to = dev['connected-to']
+ if connected_to is not None and connected_to.qid == vm.qid:
+ usb_detach(qvmc, connected_to, dev)
####### QubesWatch ######
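An end-to-end sketch of the reworked PVUSB helpers (VM names are placeholders; qvmc is a loaded QubesVmCollection):

    devices = usb_list(qvmc)              # e.g. {'sys-usb:2-1': {...}, ...}
    dev = devices['sys-usb:2-1']
    frontend = qvmc.get_vm_by_name('work')

    # Attaching temporarily creates /etc/qubes-rpc/policy/qubes.USB+2-1 and
    # calls the qubes.USBAttach service in the backend VM.
    usb_attach(qvmc, frontend, dev, auto_detach=True)

    # Detaching calls qubes.USBDetach; it raises QubesException if the device
    # is not connected to that VM.
    usb_detach(qvmc, frontend, dev)
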
@@ -760,7 +708,8 @@ class QubesWatch(object):
# which can just remove the domain
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
pass
- raise
+ else:
+ raise
# and for dom0
self._register_watches(None)
@@ -791,6 +740,10 @@ class QubesWatch(object):
return '/local/domain/%s/memory/meminfo' % xid
def _register_watches(self, libvirt_domain):
+ if libvirt_domain and libvirt_domain.ID() == 0:
+ # don't use libvirt object for dom0, to always have the same
+ # hardcoded "dom0" name
+ libvirt_domain = None
if libvirt_domain:
name = libvirt_domain.name()
if name in self._qdb:
@@ -811,6 +764,8 @@ class QubesWatch(object):
return
else:
name = "dom0"
+ if name in self._qdb:
+ return
self._qdb[name] = QubesDB(name)
try:
self._qdb[name].watch('/qubes-block-devices')
@@ -831,7 +786,10 @@ class QubesWatch(object):
self._register_watches(libvirt_domain)
def _unregister_watches(self, libvirt_domain):
- name = libvirt_domain.name()
+ if libvirt_domain and libvirt_domain.ID() == 0:
+ name = "dom0"
+ else:
+ name = libvirt_domain.name()
if name in self._qdb_events:
libvirt.virEventRemoveHandle(self._qdb_events[name])
del(self._qdb_events[name])
diff --git a/core/settings-wni-Windows_NT.py b/core/settings-wni-Windows_NT.py
deleted file mode 100644
index 6e646c9d..00000000
--- a/core/settings-wni-Windows_NT.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/python2
-
-from __future__ import absolute_import
-import _winreg
-import os
-import sys
-
-from qubes.storage.wni import QubesWniVmStorage
-
-DEFAULT_INSTALLDIR = 'c:\\program files\\Invisible Things Lab\\Qubes WNI'
-DEFAULT_STOREDIR = 'c:\\qubes'
-
-def apply(system_path, vm_files, defaults):
- system_path['qubes_base_dir'] = DEFAULT_STOREDIR
- installdir = DEFAULT_INSTALLDIR
- try:
- reg_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
- "Software\\Invisible Things Lab\\Qubes WNI")
- installdir = _winreg.QueryValueEx(reg_key, "InstallDir")[0]
- system_path['qubes_base_dir'] = \
- _winreg.QueryValueEx(reg_key, "StoreDir")[0]
- except WindowsError as e:
- print >>sys.stderr, \
- "WARNING: invalid installation: missing registry entries (%s)" \
- % str(e)
-
- system_path['config_template_pv'] = \
- os.path.join(installdir, 'vm-template.xml')
- system_path['config_template_hvm'] = \
- os.path.join(installdir, 'vm-template-hvm.xml')
- system_path['qubes_icon_dir'] = os.path.join(installdir, 'icons')
- system_path['qubesdb_daemon_path'] = \
- os.path.join(installdir, 'bin\\qubesdb-daemon.exe')
- system_path['qrexec_daemon_path'] = \
- os.path.join(installdir, 'bin\\qrexec-daemon.exe')
- system_path['qrexec_client_path'] = \
- os.path.join(installdir, 'bin\\qrexec-client.exe')
- system_path['qrexec_policy_dir'] = \
- os.path.join(installdir, 'qubes-rpc\\policy')
- # Specific to WNI - normally VM have this file
- system_path['qrexec_agent_path'] = \
- os.path.join(installdir, 'bin\\qrexec-agent.exe')
-
- defaults['libvirt_uri'] = 'wni:///'
- defaults['storage_class'] = QubesWniVmStorage
diff --git a/core/settings-xen-Linux.py b/core/settings-xen-Linux.py
index de5084f5..c413e8ae 100644
--- a/core/settings-xen-Linux.py
+++ b/core/settings-xen-Linux.py
@@ -2,7 +2,10 @@
from __future__ import absolute_import
-from qubes.storage.xen import QubesXenVmStorage
+from qubes.storage.xen import XenStorage, XenPool
+
def apply(system_path, vm_files, defaults):
- defaults['storage_class'] = QubesXenVmStorage
+ defaults['storage_class'] = XenStorage
+ defaults['pool_drivers'] = {'xen': XenPool}
+ defaults['pool_config'] = {'dir_path': '/var/lib/qubes/'}
diff --git a/core/storage/Makefile b/core/storage/Makefile
index ec59cc64..7c7af60e 100644
--- a/core/storage/Makefile
+++ b/core/storage/Makefile
@@ -1,5 +1,6 @@
OS ?= Linux
+SYSCONFDIR ?= /etc
PYTHON_QUBESPATH = $(PYTHON_SITEPATH)/qubes
all:
@@ -13,6 +14,8 @@ endif
mkdir -p $(DESTDIR)$(PYTHON_QUBESPATH)/storage
cp __init__.py $(DESTDIR)$(PYTHON_QUBESPATH)/storage
cp __init__.py[co] $(DESTDIR)$(PYTHON_QUBESPATH)/storage
+ mkdir -p $(DESTDIR)$(SYSCONFDIR)/qubes
+ cp storage.conf $(DESTDIR)$(SYSCONFDIR)/qubes/
ifneq ($(BACKEND_VMM),)
if [ -r $(BACKEND_VMM).py ]; then \
cp $(BACKEND_VMM).py $(DESTDIR)$(PYTHON_QUBESPATH)/storage && \
diff --git a/core/storage/README.md b/core/storage/README.md
new file mode 100644
index 00000000..7258512f
--- /dev/null
+++ b/core/storage/README.md
@@ -0,0 +1,3 @@
+# WNI File storage
+Before v3.1 there existed a draft WNI storage implementation. You can find it
+in the git history.
diff --git a/core/storage/__init__.py b/core/storage/__init__.py
index 29cf8654..b5a744aa 100644
--- a/core/storage/__init__.py
+++ b/core/storage/__init__.py
@@ -16,22 +16,24 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
#
from __future__ import absolute_import
+import ConfigParser
import os
import os.path
-import re
import shutil
import subprocess
import sys
-from qubes.qubes import vm_files,system_path,defaults
-from qubes.qubes import QubesException
import qubes.qubesutils
+from qubes.qubes import QubesException, defaults, system_path
+
+CONFIG_FILE = '/etc/qubes/storage.conf'
+
class QubesVmStorage(object):
"""
@@ -55,12 +57,10 @@ class QubesVmStorage(object):
else:
self.root_img_size = defaults['root_img_size']
- self.private_img = vm.absolute_path(vm_files["private_img"], None)
- if self.vm.template:
- self.root_img = self.vm.template.root_img
- else:
- self.root_img = vm.absolute_path(vm_files["root_img"], None)
- self.volatile_img = vm.absolute_path(vm_files["volatile_img"], None)
+ self.root_dev = "xvda"
+ self.private_dev = "xvdb"
+ self.volatile_dev = "xvdc"
+ self.modules_dev = "xvdd"
# For now compute this path still in QubesVm
self.modules_img = modules_img
@@ -69,9 +69,69 @@ class QubesVmStorage(object):
# Additional drive (currently used only by HVM)
self.drive = None
+ def format_disk_dev(self, path, script, vdev, rw=True, type="disk",
+ domain=None):
+ if path is None:
+ return ''
+ template = " \n" \
+ " \n" \
+ " \n" \
+ " \n" \
+ "{params}" \
+ " \n"
+ params = ""
+ if not rw:
+ params += " \n"
+ if domain:
+ params += " \n" % domain
+ if script:
+ params += " \n" % script
+ return template.format(path=path, vdev=vdev, type=type, params=params)
+
def get_config_params(self):
+ args = {}
+ args['rootdev'] = self.root_dev_config()
+ args['privatedev'] = self.private_dev_config()
+ args['volatiledev'] = self.volatile_dev_config()
+ args['otherdevs'] = self.other_dev_config()
+
+ return args
+
+ def root_dev_config(self):
raise NotImplementedError
+ def private_dev_config(self):
+ raise NotImplementedError
+
+ def volatile_dev_config(self):
+ raise NotImplementedError
+
+ def other_dev_config(self):
+ if self.modules_img is not None:
+ return self.format_disk_dev(self.modules_img,
+ None,
+ self.modules_dev,
+ self.modules_img_rw)
+ elif self.drive is not None:
+ (drive_type, drive_domain, drive_path) = self.drive.split(":")
+ if drive_type == "hd":
+ drive_type = "disk"
+
+ writable = False
+ if drive_type == "disk":
+ writable = True
+
+ if drive_domain.lower() == "dom0":
+ drive_domain = None
+
+ return self.format_disk_dev(drive_path, None,
+ self.modules_dev,
+ rw=writable,
+ type=drive_type,
+ domain=drive_domain)
+ else:
+ return ''
+
def _copy_file(self, source, destination):
"""
Effective file copy, preserving sparse files etc.
@@ -199,3 +259,188 @@ class QubesVmStorage(object):
print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\
format(self.private_img)
self.create_on_disk_private_img(verbose=False)
+
+
+def dump(o):
+ """ Returns a string represention of the given object
+
+ Args:
+ o (object): anything that response to `__module__` and `__class__`
+
+ Given the class :class:`qubes.storage.QubesVmStorage` it returns
+ 'qubes.storage.QubesVmStorage' as string
+ """
+ return o.__module__ + '.' + o.__class__.__name__
+
+
+def load(string):
+ """ Given a dotted full module string representation of a class it loads it
+
+ Args:
+ string (str) i.e. 'qubes.storage.xen.QubesXenVmStorage'
+
+ Returns:
+ type
+
+ See also:
+ :func:`qubes.storage.dump`
+ """
+ if not type(string) is str:
+ # This is a hack which allows giving a real class to a vm instead of a
+ # string as string_class parameter.
+ return string
+
+ components = string.split(".")
+ module_path = ".".join(components[:-1])
+ klass = components[-1:][0]
+ module = __import__(module_path, fromlist=[klass])
+ return getattr(module, klass)
+
+
+def get_pool(name, vm):
+ """ Instantiates the storage for the specified vm """
+ config = _get_storage_config_parser()
+
+ klass = _get_pool_klass(name, config)
+
+ keys = [k for k in config.options(name) if k != 'driver' and k != 'class']
+ values = [config.get(name, o) for o in keys]
+ config_kwargs = dict(zip(keys, values))
+
+ if name == 'default':
+ kwargs = defaults['pool_config'].copy()
+ kwargs.update(config_kwargs)
+ else:
+ kwargs = config_kwargs
+
+ return klass(vm, **kwargs)
+
+
+def pool_exists(name):
+ """ Check if the specified pool exists """
+ try:
+ _get_pool_klass(name)
+ return True
+ except StoragePoolException:
+ return False
+
+def add_pool(name, **kwargs):
+ """ Add a storage pool to config."""
+ config = _get_storage_config_parser()
+ config.add_section(name)
+ for key, value in kwargs.iteritems():
+ config.set(name, key, value)
+ _write_config(config)
+
+def remove_pool(name):
+ """ Remove a storage pool from config file. """
+ config = _get_storage_config_parser()
+ config.remove_section(name)
+ _write_config(config)
+
+def _write_config(config):
+ with open(CONFIG_FILE, 'w') as configfile:
+ config.write(configfile)
+
+def _get_storage_config_parser():
+ """ Instantiates a `ConfigParaser` for specified storage config file.
+
+ Returns:
+ RawConfigParser
+ """
+ config = ConfigParser.RawConfigParser()
+ config.read(CONFIG_FILE)
+ return config
+
+
+def _get_pool_klass(name, config=None):
+ """ Returns the storage klass for the specified pool.
+
+ Args:
+ name: The pool name.
+ config: If ``config`` is not specified
+ `_get_storage_config_parser()` is called.
+
+ Returns:
+ type: A class inheriting from `QubesVmStorage`
+ """
+ if config is None:
+ config = _get_storage_config_parser()
+
+ if not config.has_section(name):
+ raise StoragePoolException('Unknown storage pool ' + name)
+
+ if config.has_option(name, 'class'):
+ klass = load(config.get(name, 'class'))
+ elif config.has_option(name, 'driver'):
+ pool_driver = config.get(name, 'driver')
+ klass = defaults['pool_drivers'][pool_driver]
+ else:
+ raise StoragePoolException('Unknown storage pool driver ' + name)
+ return klass
+
+
+class StoragePoolException(QubesException):
+ pass
+
+
+class Pool(object):
+ def __init__(self, vm, dir_path):
+ assert vm is not None
+ assert dir_path is not None
+
+ self.vm = vm
+ self.dir_path = dir_path
+
+ self.create_dir_if_not_exists(self.dir_path)
+
+ self.vmdir = self.vmdir_path(vm, self.dir_path)
+
+ appvms_path = os.path.join(self.dir_path, 'appvms')
+ self.create_dir_if_not_exists(appvms_path)
+
+ servicevms_path = os.path.join(self.dir_path, 'servicevms')
+ self.create_dir_if_not_exists(servicevms_path)
+
+ vm_templates_path = os.path.join(self.dir_path, 'vm-templates')
+ self.create_dir_if_not_exists(vm_templates_path)
+
+ def vmdir_path(self, vm, pool_dir):
+ """ Returns the path to vmdir depending on the type of the VM.
+
+ The default QubesOS file storage saves the vm images in three
+ different directories depending on the ``QubesVM`` type:
+
+ * ``appvms`` for ``QubesAppVm`` or ``QubesHvm``
+ * ``vm-templates`` for ``QubesTemplateVm`` or ``QubesTemplateHvm``
+ * ``servicevms`` for any subclass of ``QubesNetVm``
+
+ Args:
+ vm: a QubesVM
+ pool_dir: the root directory of the pool
+
+ Returns:
+ string (str) absolute path to the directory where the vm files
+ are stored
+ """
+ if vm.is_appvm():
+ subdir = 'appvms'
+ elif vm.is_template():
+ subdir = 'vm-templates'
+ elif vm.is_netvm():
+ subdir = 'servicevms'
+ elif vm.is_disposablevm():
+ subdir = 'appvms'
+ return os.path.join(pool_dir, subdir, vm.template.name + '-dvm')
+ else:
+ raise QubesException(vm.type() + ' unknown vm type')
+
+ return os.path.join(pool_dir, subdir, vm.name)
+
+ def create_dir_if_not_exists(self, path):
+ """ Check if a directory exists in if not create it.
+
+ This method does not create any parent directories.
+ """
+ if not os.path.exists(path):
+ os.mkdir(path)
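
For orientation, a minimal sketch (not part of the patch) of the mechanism behind `load()`, `_get_pool_klass()` and `get_pool()`: a pool section from the config file is resolved to a class, and the remaining options become constructor keyword arguments. The 'pool-b' section and 'foo.bar.MyStorage' class are the hypothetical examples from the storage.conf added below, assumed here to be uncommented.

import ConfigParser

# Mirror of qubes.storage.load(): import the module and return the attribute.
def load_class(dotted):
    module_path, klass_name = dotted.rsplit('.', 1)
    module = __import__(module_path, fromlist=[klass_name])
    return getattr(module, klass_name)

config = ConfigParser.RawConfigParser()
config.read('/etc/qubes/storage.conf')

name = 'pool-b'  # hypothetical pool section from the example config below
# everything except 'driver' and 'class' is passed to the storage constructor
keys = [k for k in config.options(name) if k not in ('driver', 'class')]
kwargs = dict((k, config.get(name, k)) for k in keys)
storage_cls = load_class(config.get(name, 'class'))
# storage = storage_cls(vm, **kwargs)  # vm would come from QubesVmCollection
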
diff --git a/core/storage/storage.conf b/core/storage/storage.conf
new file mode 100644
index 00000000..e9d067e5
--- /dev/null
+++ b/core/storage/storage.conf
@@ -0,0 +1,12 @@
+[default] ; poolname
+driver=xen ; the default xen storage
+; class = qubes.storage.xen.XenStorage ; class always overwrites the driver
+;
+; To use your own storage adapter, you just need to specify the module path
+; and the class name
+; [pool-b]
+; class = foo.bar.MyStorage
+;
+; [test-dummy]
+; driver=dummy
+
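
As a usage sketch (assuming a dom0 with this patch installed), a custom pool section can also be managed programmatically through the helpers added in core/storage/__init__.py; the pool name and dir_path below are made up for illustration.

from qubes.storage import add_pool, pool_exists, remove_pool

if not pool_exists('example-pool'):
    # writes an [example-pool] section with driver=xen to /etc/qubes/storage.conf
    add_pool('example-pool', driver='xen', dir_path='/mnt/big-disk/qubes')

# ... and later, to drop the section again:
# remove_pool('example-pool')
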
diff --git a/core/storage/wni.py b/core/storage/wni.py
deleted file mode 100644
index a3571765..00000000
--- a/core/storage/wni.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python2
-#
-# The Qubes OS Project, http://www.qubes-os.org
-#
-# Copyright (C) 2013 Marek Marczykowski
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-#
-
-from __future__ import absolute_import
-
-import sys
-import os
-import os.path
-import win32api
-import win32net
-import win32netcon
-import win32security
-import win32profile
-import pywintypes
-import random
-
-from qubes.storage import QubesVmStorage
-from qubes.qubes import QubesException,system_path
-
-class QubesWniVmStorage(QubesVmStorage):
- """
- Class for VM storage of WNI VMs.
- """
-
- def __init__(self, *args, **kwargs):
- super(QubesWniVmStorage, self).__init__(*args, **kwargs)
- # Use the user profile as "private.img"
- self.home_root = win32profile.GetProfilesDirectory()
- # FIXME: the assignment below may not always be correct,
- # but GetUserProfileDirectory needs a user token...
- self.private_img = os.path.join(self.home_root, self._get_username())
-
- # Pass paths for WNI libvirt driver
- os.putenv("WNI_DRIVER_QUBESDB_PATH", system_path['qubesdb_daemon_path'])
- os.putenv("WNI_DRIVER_QREXEC_AGENT_PATH", system_path['qrexec_agent_path'])
-
- def _get_username(self, vmname = None):
- if vmname is None:
- vmname = self.vm.name
- return "qubes-vm-%s" % vmname
-
- def _get_random_password(self, vmname = None):
- if vmname is None:
- vmname = self.vm.name
- return '%x' % random.SystemRandom().getrandombits(256)
-
- def get_config_params(self):
- return {}
-
- def create_on_disk_private_img(self, verbose, source_template = None):
- # FIXME: this may not always be correct
- home_dir = os.path.join(self.home_root, self._get_username())
- # Create user data in information level 1 (PyUSER_INFO_1) format.
- user_data = {}
- user_data['name'] = self._get_username()
- user_data['full_name'] = self._get_username()
- # libvirt driver doesn't need to know the password anymore
- user_data['password'] = self._get_random_password()
- user_data['flags'] = (
- win32netcon.UF_NORMAL_ACCOUNT |
- win32netcon.UF_SCRIPT |
- win32netcon.UF_DONT_EXPIRE_PASSWD |
- win32netcon.UF_PASSWD_CANT_CHANGE
- )
- user_data['priv'] = win32netcon.USER_PRIV_USER
- user_data['home_dir'] = home_dir
- user_data['max_storage'] = win32netcon.USER_MAXSTORAGE_UNLIMITED
- # TODO: catch possible exception
- win32net.NetUserAdd(None, 1, user_data)
-
- def create_on_disk_root_img(self, verbose, source_template = None):
- pass
-
- def remove_from_disk(self):
- try:
- sid = win32security.LookupAccountName(None, self._get_username())[0]
- string_sid = win32security.ConvertSidToStringSid(sid)
- win32profile.DeleteProfile(string_sid)
- win32net.NetUserDel(None, self._get_username())
- except pywintypes.error, details:
- if details[0] == 2221:
- # "The user name cannot be found."
- raise IOError("User %s doesn't exist" % self._get_username())
- else:
- raise
-
- super(QubesWniVmStorage, self).remove_from_disk()
-
- def rename(self, old_name, new_name):
- super(QubesWniVmStorage, self).rename(old_name, new_name)
- user_data = {}
- user_data['name'] = self._get_username(new_name)
- win32net.NetUserSetInfo(None,
- self._get_username(old_name), 0, user_data)
- #TODO: rename user profile
-
- def verify_files(self):
- if not os.path.exists (self.vmdir):
- raise QubesException (
- "VM directory doesn't exist: {0}".\
- format(self.vmdir))
-
- try:
- # TemplateVm in WNI is quite virtual, so do not require the user
- if not self.vm.is_template():
- win32net.NetUserGetInfo(None, self._get_username(), 0)
- except pywintypes.error, details:
- if details[0] == 2221:
- # "The user name cannot be found."
- raise QubesException("User %s doesn't exist" % self._get_username())
- else:
- raise
-
- def reset_volatile_storage(self, verbose = False, source_template = None):
- pass
-
- def prepare_for_vm_startup(self, verbose = False):
- if self.vm.is_template():
- raise QubesException("Starting TemplateVM is not supported")
diff --git a/core/storage/xen.py b/core/storage/xen.py
index 00c3b3da..9ea7ba73 100644
--- a/core/storage/xen.py
+++ b/core/storage/xen.py
@@ -16,115 +16,92 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
#
from __future__ import absolute_import
import os
import os.path
+import re
import subprocess
import sys
-import re
-from qubes.storage import QubesVmStorage
from qubes.qubes import QubesException, vm_files
+from qubes.storage import Pool, QubesVmStorage
-class QubesXenVmStorage(QubesVmStorage):
+class XenStorage(QubesVmStorage):
"""
Class for VM storage of Xen VMs.
"""
- def __init__(self, vm, **kwargs):
- super(QubesXenVmStorage, self).__init__(vm, **kwargs)
+ def __init__(self, vm, vmdir, **kwargs):
+ """ Instantiate the storage.
- self.root_dev = "xvda"
- self.private_dev = "xvdb"
- self.volatile_dev = "xvdc"
- self.modules_dev = "xvdd"
+ Args:
+ vm: a QubesVM
+ vmdir: the root directory of the pool
+ """
+ assert vm is not None
+ assert vmdir is not None
+
+ super(XenStorage, self).__init__(vm, **kwargs)
+
+ self.vmdir = vmdir
if self.vm.is_template():
- self.rootcow_img = os.path.join(self.vmdir, vm_files["rootcow_img"])
+ self.rootcow_img = os.path.join(self.vmdir,
+ vm_files["rootcow_img"])
else:
self.rootcow_img = None
- def _format_disk_dev(self, path, script, vdev, rw=True, type="disk", domain=None):
- if path is None:
- return ''
- template = " \n" \
- " \n" \
- " \n" \
- " \n" \
- "{params}" \
- " \n"
- params = ""
- if not rw:
- params += " \n"
- if domain:
- params += " \n" % domain
- if script:
- params += " \n" % script
- return template.format(path=path, vdev=vdev, type=type,
- params=params)
+ self.private_img = os.path.join(vmdir, 'private.img')
+ if self.vm.template:
+ self.root_img = self.vm.template.root_img
+ else:
+ self.root_img = os.path.join(vmdir, 'root.img')
+ self.volatile_img = os.path.join(vmdir, 'volatile.img')
- def _get_rootdev(self):
- if self.vm.is_template() and \
- os.path.exists(os.path.join(self.vmdir, "root-cow.img")):
- return self._format_disk_dev(
- "{dir}/root.img:{dir}/root-cow.img".format(
- dir=self.vmdir),
+ def root_dev_config(self):
+ if self.vm.is_template() and self.rootcow_img:
+ return self.format_disk_dev(
+ "{root}:{rootcow}".format(
+ root=self.root_img, rootcow=self.rootcow_img),
"block-origin", self.root_dev, True)
- elif self.vm.template and not self.vm.template.storage.rootcow_img:
- # HVM template-based VM - template doesn't have own
- # root-cow.img, only one device-mapper layer
- return self._format_disk_dev(
- "{tpldir}/root.img:{vmdir}/volatile.img".format(
- tpldir=self.vm.template.dir_path,
- vmdir=self.vmdir),
+ elif self.vm.template and not hasattr(self.vm, 'kernel'):
+ # HVM template-based VM - only one device-mapper layer, in dom0
+ # (root+volatile)
+ # HVM detection based on the 'kernel' property is a massive hack, but it
+ # rests on the assumption that the VM needs a Qubes-specific kernel
+ # (actually an initramfs) to assemble the second device-mapper layer
+ return self.format_disk_dev(
+ "{root}:{volatile}".format(
+ root=self.vm.template.storage.root_img,
+ volatile=self.volatile_img),
"block-snapshot", self.root_dev, True)
elif self.vm.template:
# any other template-based VM - two device-mapper layers: one
# in dom0 (here) from root+root-cow, and another one from
# this+volatile.img
- return self._format_disk_dev(
- "{dir}/root.img:{dir}/root-cow.img".format(
- dir=self.vm.template.dir_path),
+ return self.format_disk_dev(
+ "{root}:{rootcow}".format(
+ root=self.root_img,
+ rootcow=self.vm.template.storage.rootcow_img),
"block-snapshot", self.root_dev, False)
else:
- return self._format_disk_dev(
- "{dir}/root.img".format(dir=self.vmdir),
+ return self.format_disk_dev(
+ "{root}".format(root=self.root_img),
None, self.root_dev, True)
- def get_config_params(self):
- args = {}
- args['rootdev'] = self._get_rootdev()
- args['privatedev'] = \
- self._format_disk_dev(self.private_img,
- None, self.private_dev, True)
- args['volatiledev'] = \
- self._format_disk_dev(self.volatile_img,
- None, self.volatile_dev, True)
- if self.modules_img is not None:
- args['otherdevs'] = \
- self._format_disk_dev(self.modules_img,
- None, self.modules_dev, self.modules_img_rw)
- elif self.drive is not None:
- (drive_type, drive_domain, drive_path) = self.drive.split(":")
- if drive_type == "hd":
- drive_type = "disk"
- if drive_domain.lower() == "dom0":
- drive_domain = None
+ def private_dev_config(self):
+ return self.format_disk_dev(self.private_img, None,
+ self.private_dev, True)
- args['otherdevs'] = self._format_disk_dev(drive_path, None,
- self.modules_dev,
- rw=True if drive_type == "disk" else False, type=drive_type,
- domain=drive_domain)
- else:
- args['otherdevs'] = ''
-
- return args
+ def volatile_dev_config(self):
+ return self.format_disk_dev(self.volatile_img, None,
+ self.volatile_dev, True)
def create_on_disk_private_img(self, verbose, source_template = None):
if source_template:
@@ -158,7 +135,7 @@ class QubesXenVmStorage(QubesVmStorage):
self.commit_template_changes()
def rename(self, old_name, new_name):
- super(QubesXenVmStorage, self).rename(old_name, new_name)
+ super(XenStorage, self).rename(old_name, new_name)
old_dirpath = os.path.join(os.path.dirname(self.vmdir), old_name)
if self.rootcow_img:
@@ -226,11 +203,11 @@ class QubesXenVmStorage(QubesVmStorage):
f_volatile.close()
f_root.close()
return
- super(QubesXenVmStorage, self).reset_volatile_storage(
+ super(XenStorage, self).reset_volatile_storage(
verbose=verbose, source_template=source_template)
def prepare_for_vm_startup(self, verbose):
- super(QubesXenVmStorage, self).prepare_for_vm_startup(verbose=verbose)
+ super(XenStorage, self).prepare_for_vm_startup(verbose=verbose)
if self.drive is not None:
(drive_type, drive_domain, drive_path) = self.drive.split(":")
@@ -249,3 +226,15 @@ class QubesXenVmStorage(QubesVmStorage):
raise QubesException(
"VM '{}' holding '{}' does not exists".format(
drive_domain, drive_path))
+ if self.rootcow_img and not os.path.exists(self.rootcow_img):
+ self.commit_template_changes()
+
+
+class XenPool(Pool):
+
+ def __init__(self, vm, dir_path):
+ super(XenPool, self).__init__(vm, dir_path)
+
+ def getStorage(self):
+ """ Returns an instantiated ``XenStorage``. """
+ return XenStorage(self.vm, vmdir=self.vmdir)
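
To make the generated libvirt snippet concrete, here is a standalone re-statement of the format_disk_dev() helper shown earlier, called for a plain (non template-based) root device; the image path is made up and the exact XML follows the reconstructed template above.

def format_disk_dev(path, script, vdev, rw=True, type="disk", domain=None):
    # same layout as QubesVmStorage.format_disk_dev() in core/storage/__init__.py
    template = ("    <disk type='block' device='{type}'>\n"
                "        <driver name='phy'></driver>\n"
                "        <source dev='{path}'></source>\n"
                "        <target dev='{vdev}' bus='xen'></target>\n"
                "{params}"
                "    </disk>\n")
    params = ""
    if not rw:
        params += "        <readonly></readonly>\n"
    if domain:
        params += "        <backenddomain name='%s'></backenddomain>\n" % domain
    if script:
        params += "        <script path='%s'></script>\n" % script
    return template.format(path=path, vdev=vdev, type=type, params=params)

print format_disk_dev('/var/lib/qubes/appvms/work/root.img', None, 'xvda')
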
diff --git a/dispvm/qfile-daemon-dvm b/dispvm/qfile-daemon-dvm
index a4ccd8b6..209e2228 100755
--- a/dispvm/qfile-daemon-dvm
+++ b/dispvm/qfile-daemon-dvm
@@ -28,7 +28,7 @@ import sys
import shutil
import time
-from qubes.qubes import QubesVmCollection
+from qubes.qubes import QubesVmCollection, QubesException
from qubes.qubes import QubesDispVmLabels
from qubes.notify import tray_notify, tray_notify_error, tray_notify_init
@@ -51,61 +51,67 @@ class QfileDaemonDvm:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
+ try:
- tar_process = subprocess.Popen(
- ['bsdtar', '-C', current_savefile_vmdir,
- '-xSUf', os.path.join(current_savefile_vmdir, 'saved-cows.tar')])
+ tar_process = subprocess.Popen(
+ ['bsdtar', '-C', current_savefile_vmdir,
+ '-xSUf', os.path.join(current_savefile_vmdir, 'saved-cows.tar')])
- qvm_collection.load()
- print >>sys.stderr, "time=%s, collection loaded" % (str(time.time()))
+ qvm_collection.load()
+ print >>sys.stderr, "time=%s, collection loaded" % (str(time.time()))
- vm = qvm_collection.get_vm_by_name(self.name)
- if vm is None:
- sys.stderr.write('Domain ' + self.name + ' does not exist ?')
- qvm_collection.unlock_db()
- return None
- label = vm.label
- if len(sys.argv) > 4 and len(sys.argv[4]) > 0:
- assert sys.argv[4] in QubesDispVmLabels.keys(), "Invalid label"
- label = QubesDispVmLabels[sys.argv[4]]
- disp_templ = self.get_disp_templ()
- vm_disptempl = qvm_collection.get_vm_by_name(disp_templ)
- if vm_disptempl is None:
- sys.stderr.write('Domain ' + disp_templ + ' does not exist ?')
- qvm_collection.unlock_db()
- return None
- dispvm = qvm_collection.add_new_vm('QubesDisposableVm',
- disp_template=vm_disptempl,
- label=label)
- print >>sys.stderr, "time=%s, VM created" % (str(time.time()))
- # By default inherit firewall rules from calling VM
- if os.path.exists(vm.firewall_conf):
+ vm = qvm_collection.get_vm_by_name(self.name)
+ if vm is None:
+ sys.stderr.write('Domain ' + self.name + ' does not exist ?')
+ return None
+ label = vm.label
+ if len(sys.argv) > 4 and len(sys.argv[4]) > 0:
+ assert sys.argv[4] in QubesDispVmLabels.keys(), "Invalid label"
+ label = QubesDispVmLabels[sys.argv[4]]
+ disp_templ = self.get_disp_templ()
+ vm_disptempl = qvm_collection.get_vm_by_name(disp_templ)
+ if vm_disptempl is None:
+ sys.stderr.write('Domain ' + disp_templ + ' does not exist ?')
+ return None
+ dispvm = qvm_collection.add_new_vm('QubesDisposableVm',
+ disp_template=vm_disptempl,
+ label=label)
+ print >>sys.stderr, "time=%s, VM created" % (str(time.time()))
+ # By default inherit firewall rules from calling VM
disp_firewall_conf = '/var/run/qubes/%s-firewall.xml' % dispvm.name
- shutil.copy(vm.firewall_conf, disp_firewall_conf)
dispvm.firewall_conf = disp_firewall_conf
- if len(sys.argv) > 5 and len(sys.argv[5]) > 0:
- assert os.path.exists(sys.argv[5]), "Invalid firewall.conf location"
- dispvm.firewall_conf = sys.argv[5]
- if vm.qid != 0:
- dispvm.uses_default_netvm = False
- # netvm can be changed before restore,
- # but cannot be enabled/disabled
- if (dispvm.netvm is None) == (vm.dispvm_netvm is None):
- dispvm.netvm = vm.dispvm_netvm
- # Wait for tar to finish
- if tar_process.wait() != 0:
- sys.stderr.write('Failed to unpack saved-cows.tar')
+ if os.path.exists(vm.firewall_conf):
+ shutil.copy(vm.firewall_conf, disp_firewall_conf)
+ elif vm.qid == 0 and os.path.exists(vm_disptempl.firewall_conf):
+ # for a DispVM called from dom0, copy firewall rules from the DispVM template
+ shutil.copy(vm_disptempl.firewall_conf, disp_firewall_conf)
+ if len(sys.argv) > 5 and len(sys.argv[5]) > 0:
+ assert os.path.exists(sys.argv[5]), "Invalid firewall.conf location"
+ dispvm.firewall_conf = sys.argv[5]
+ if vm.qid != 0:
+ dispvm.uses_default_netvm = False
+ # netvm can be changed before restore,
+ # but cannot be enabled/disabled
+ if (dispvm.netvm is None) == (vm.dispvm_netvm is None):
+ dispvm.netvm = vm.dispvm_netvm
+ # Wait for tar to finish
+ if tar_process.wait() != 0:
+ sys.stderr.write('Failed to unpack saved-cows.tar')
+ return None
+ print >>sys.stderr, "time=%s, VM starting" % (str(time.time()))
+ try:
+ dispvm.start()
+ except (MemoryError, QubesException) as e:
+ tray_notify_error(str(e))
+ raise
+ if vm.qid != 0:
+ # if need to enable/disable netvm, do it while DispVM is alive
+ if (dispvm.netvm is None) != (vm.dispvm_netvm is None):
+ dispvm.netvm = vm.dispvm_netvm
+ print >>sys.stderr, "time=%s, VM started" % (str(time.time()))
+ qvm_collection.save()
+ finally:
qvm_collection.unlock_db()
- return None
- print >>sys.stderr, "time=%s, VM starting" % (str(time.time()))
- dispvm.start()
- if vm.qid != 0:
- # if need to enable/disable netvm, do it while DispVM is alive
- if (dispvm.netvm is None) != (vm.dispvm_netvm is None):
- dispvm.netvm = vm.dispvm_netvm
- print >>sys.stderr, "time=%s, VM started" % (str(time.time()))
- qvm_collection.save()
- qvm_collection.unlock_db()
# Reload firewall rules
print >>sys.stderr, "time=%s, reloading firewall" % (str(time.time()))
for vm in qvm_collection.values():
@@ -144,7 +150,7 @@ class QfileDaemonDvm:
return self.do_get_dvm()
@staticmethod
- def remove_disposable_from_qdb(name):
+ def finish_disposable(name):
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
@@ -152,6 +158,12 @@ class QfileDaemonDvm:
if vm is None:
qvm_collection.unlock_db()
return False
+
+ try:
+ vm.force_shutdown()
+ except QubesException:
+ # VM already destroyed
+ pass
qvm_collection.pop(vm.qid)
qvm_collection.save()
qvm_collection.unlock_db()
@@ -159,6 +171,10 @@ class QfileDaemonDvm:
def main():
exec_index = sys.argv[1]
+ if exec_index == "FINISH":
+ QfileDaemonDvm.finish_disposable(sys.argv[2])
+ return
+
src_vmname = sys.argv[2]
user = sys.argv[3]
# accessed directly by get_dvm()
@@ -171,11 +187,14 @@ def main():
qfile = QfileDaemonDvm(src_vmname)
dispvm = qfile.get_dvm()
if dispvm is not None:
+ if exec_index == "LAUNCH":
+ print dispvm.name
+ return
+
print >>sys.stderr, "time=%s, starting VM process" % (str(time.time()))
subprocess.call(['/usr/lib/qubes/qrexec-client', '-d', dispvm.name,
user+':exec /usr/lib/qubes/qubes-rpc-multiplexer ' +
exec_index + " " + src_vmname])
- dispvm.force_shutdown()
- qfile.remove_disposable_from_qdb(dispvm.name)
+ QfileDaemonDvm.finish_disposable(dispvm.name)
main()
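
The main structural change above is that the qubes.xml write lock is now released in a finally block, so a failed DispVM creation or startup no longer leaves the collection locked. A minimal sketch of that pattern (assuming dom0):

from qubes.qubes import QubesVmCollection

qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
try:
    qvm_collection.load()
    # ... create and start the DispVM here; any exception still unlocks ...
    qvm_collection.save()
finally:
    qvm_collection.unlock_db()
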
diff --git a/dispvm/qubes-prepare-saved-domain.sh b/dispvm/qubes-prepare-saved-domain.sh
index 9c5c0f98..d5c3ad96 100755
--- a/dispvm/qubes-prepare-saved-domain.sh
+++ b/dispvm/qubes-prepare-saved-domain.sh
@@ -74,8 +74,13 @@ fstype=`df --output=fstype $VMDIR | tail -n 1`
if [ "$fstype" = "tmpfs" ]; then
# bsdtar doesn't work on tmpfs because FS_IOC_FIEMAP ioctl isn't supported
# there
- tar -cSf saved-cows.tar volatile.img
+ tar -cSf saved-cows.tar volatile.img || exit 1
else
- bsdtar -cSf saved-cows.tar volatile.img
+ errors=`bsdtar -cSf saved-cows.tar volatile.img 2>&1`
+ if [ -n "$errors" ]; then
+ echo "Failed to create saved-cows.tar: $errors" >&2
+ rm -f saved-cows.tar
+ exit 1
+ fi
fi
echo "DVM savefile created successfully."
diff --git a/doc/qvm-tools/qvm-add-appvm.rst b/doc/qvm-tools/qvm-add-appvm.rst
index 5fefab79..186e8603 100644
--- a/doc/qvm-tools/qvm-add-appvm.rst
+++ b/doc/qvm-tools/qvm-add-appvm.rst
@@ -6,7 +6,7 @@ NAME
====
qvm-add-appvm - add an already installed appvm to the Qubes DB
-WARNING: Noramlly you would not need this command, and you would use qvm-create instead!
+WARNING: Normally you should not need this command, and you should use qvm-create instead!
:Date: 2012-04-10
@@ -22,6 +22,8 @@ OPTIONS
Specify path to the template directory
-c CONF_FILE, --conf=CONF_FILE
Specify the Xen VM .conf file to use(relative to the template dir path)
+--force-root
+ Force to run, even with root privileges
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-backup-restore.rst b/doc/qvm-tools/qvm-backup-restore.rst
index e078f7fa..034b472e 100644
--- a/doc/qvm-tools/qvm-backup-restore.rst
+++ b/doc/qvm-tools/qvm-backup-restore.rst
@@ -16,22 +16,34 @@ OPTIONS
=======
-h, --help
Show this help message and exit
+--verify-only
+ Do not restore the data, only verify backup integrity
--skip-broken
Do not restore VMs that have missing templates or netvms
--ignore-missing
- Ignore missing templates or netvms, restore VMs anyway
+ Ignore missing templates and netvms, and restore the VMs anyway
--skip-conflicting
Do not restore VMs that are already present on the host
--force-root
- Force to run, even with root privileges
+ Force to run with root privileges
--replace-template=REPLACE_TEMPLATE
- Restore VMs using another template, syntax: old-template-name:new-template-name (might be repeated)
+ Restore VMs using another template, syntax: old-template-name:new-template-name (can be repeated)
-x EXCLUDE, --exclude=EXCLUDE
- Skip restore of specified VM (might be repeated)
+ Skip restore of specified VM (can be repeated)
--skip-dom0-home
- Do not restore dom0 user home dir
+ Do not restore dom0's user home directory
--ignore-username-mismatch
- Ignore dom0 username mismatch while restoring homedir
+ Ignore dom0 username mismatch when restoring dom0's user home directory
+-d APPVM, --dest-vm=APPVM
+ Restore from a backup located in a specific AppVM
+-e, --encrypted
+ The backup is encrypted
+-p, --passphrase-file
+ Read passphrase from file, or use '-' to read from stdin
+-z, --compressed
+ The backup is compressed
+--debug
+ Enable (a lot of) debug output
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-backup.rst b/doc/qvm-tools/qvm-backup.rst
index 9d4dba8e..11094836 100644
--- a/doc/qvm-tools/qvm-backup.rst
+++ b/doc/qvm-tools/qvm-backup.rst
@@ -18,6 +18,28 @@ OPTIONS
Show this help message and exit
-x EXCLUDE_LIST, --exclude=EXCLUDE_LIST
Exclude the specified VM from backup (might be repeated)
+--force-root
+ Force to run with root privileges
+-d, --dest-vm
+ Specify the destination VM to which the backup will be sent (implies -e)
+-e, --encrypt
+ Encrypt the backup
+--no-encrypt
+ Skip encryption even if sending the backup to a VM
+-p, --passphrase-file
+ Read passphrase from a file, or use '-' to read from stdin
+-E, --enc-algo
+ Specify a non-default encryption algorithm. For a list of supported algorithms, execute 'openssl list-cipher-algorithms' (implies -e)
+-H, --hmac-algo
+ Specify a non-default HMAC algorithm. For a list of supported algorithms, execute 'openssl list-message-digest-algorithms'
+-z, --compress
+ Compress the backup
+-Z, --compress-filter
+ Specify a non-default compression filter program (default: gzip)
+--tmpdir
+ Specify a temporary directory (if you have at least 1GB free RAM in dom0, use of /tmp is advised) (default: /var/tmp)
+--debug
+ Enable (a lot of) debug output
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-block.rst b/doc/qvm-tools/qvm-block.rst
index 839cfc6d..4110b0b7 100644
--- a/doc/qvm-tools/qvm-block.rst
+++ b/doc/qvm-tools/qvm-block.rst
@@ -6,7 +6,6 @@ NAME
====
qvm-block - list/set VM PCI devices.
-
:Date: 2012-04-10
SYNOPSIS
@@ -16,13 +15,14 @@ SYNOPSIS
| qvm-block -d [options]
| qvm-block -d [options]
-
OPTIONS
=======
-h, --help
Show this help message and exit
-l, --list
List block devices
+-A, --attach-file
+ Attach specified file instead of physical device
-a, --attach
Attach block device to specified VM
-d, --detach
@@ -33,6 +33,10 @@ OPTIONS
Force read-only mode
--no-auto-detach
Fail when device already connected to other VM
+--show-system-disks
+ List also system disks
+--force-root
+ Force to run, even with root privileges
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-check.rst b/doc/qvm-tools/qvm-check.rst
new file mode 100644
index 00000000..ac775ec8
--- /dev/null
+++ b/doc/qvm-tools/qvm-check.rst
@@ -0,0 +1,32 @@
+=========
+qvm-check
+=========
+
+NAME
+====
+qvm-check - determine the state of a VM; with no state options, check whether the VM exists
+
+:Date: 2013-06-23
+
+SYNOPSIS
+========
+| qvm-check [options]
+
+OPTIONS
+=======
+-h, --help
+ Show this help message and exit
+-q, --quiet
+ Be quiet
+--running
+ Determine if VM is running
+--paused
+ Determine if VM is paused
+--template
+ Determine if VM is a template
+
+AUTHORS
+=======
+| Joanna Rutkowska
+| Rafal Wojtczuk
+| Marek Marczykowski
diff --git a/doc/qvm-tools/qvm-clone-template.rst b/doc/qvm-tools/qvm-clone-template.rst
deleted file mode 100644
index b7808059..00000000
--- a/doc/qvm-tools/qvm-clone-template.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-==================
-qvm-clone-template
-==================
-
-NAME
-====
-qvm-clone-template - clones an existing template by copying all its disk files
-
-:Date: 2012-04-10
-
-SYNOPSIS
-========
-| qvm-clone-template [options]
-
-OPTIONS
-=======
--h, --help
- Show this help message and exit
--q, --quiet
- Be quiet
--p DIR_PATH, --path=DIR_PATH
- Specify path to the template directory
-
-AUTHORS
-=======
-| Joanna Rutkowska
-| Rafal Wojtczuk
-| Marek Marczykowski
-
diff --git a/doc/qvm-tools/qvm-clone.rst b/doc/qvm-tools/qvm-clone.rst
index 548a683f..b31c2d0a 100644
--- a/doc/qvm-tools/qvm-clone.rst
+++ b/doc/qvm-tools/qvm-clone.rst
@@ -20,6 +20,10 @@ OPTIONS
Be quiet
-p DIR_PATH, --path=DIR_PATH
Specify path to the template directory
+--force-root
+ Force to run, even with root privileges
+-P, --pool
+ Specify the storage pool to clone into
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-firewall.rst b/doc/qvm-tools/qvm-firewall.rst
index 903da725..ad3e5e9d 100644
--- a/doc/qvm-tools/qvm-firewall.rst
+++ b/doc/qvm-tools/qvm-firewall.rst
@@ -36,8 +36,12 @@ OPTIONS
-Y SET_YUM_PROXY, --yum-proxy=SET_YUM_PROXY
Set access to Qubes yum proxy (allow/deny).
*Note:* if set to "deny", access will be rejected even if policy set to "allow"
+-r, --reload
+ Reload firewall (implied by any change action)
-n, --numeric
Display port numbers instead of services (makes sense only with --list)
+--force-root
+ Force to run, even with root privileges
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-grow-root.rst b/doc/qvm-tools/qvm-grow-root.rst
new file mode 100644
index 00000000..1c733064
--- /dev/null
+++ b/doc/qvm-tools/qvm-grow-root.rst
@@ -0,0 +1,26 @@
+=============
+qvm-grow-root
+=============
+
+NAME
+====
+qvm-grow-root - increase root storage capacity of a specified VM
+
+:Date: 2014-03-21
+
+SYNOPSIS
+========
+| qvm-grow-root
+
+OPTIONS
+=======
+-h, --help
+ Show this help message and exit
+--allow-start
+ Allow VM to be started to complete the operation
+
+AUTHORS
+=======
+| Joanna Rutkowska
+| Rafal Wojtczuk
+| Marek Marczykowski
diff --git a/doc/qvm-tools/qvm-ls.rst b/doc/qvm-tools/qvm-ls.rst
index 2abbd9bc..9867dd1d 100644
--- a/doc/qvm-tools/qvm-ls.rst
+++ b/doc/qvm-tools/qvm-ls.rst
@@ -32,6 +32,8 @@ OPTIONS
Show date of last VM backup
--raw-list
List only VM names one per line
+--raw-data
+ Display specified data about specified VMs. Intended for bash parsing.
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-pci.rst b/doc/qvm-tools/qvm-pci.rst
index c2e667f8..c344c7ab 100644
--- a/doc/qvm-tools/qvm-pci.rst
+++ b/doc/qvm-tools/qvm-pci.rst
@@ -23,8 +23,14 @@ OPTIONS
List VM PCI devices
-a, --add
Add a PCI device to specified VM
+-C, --add-class
+ Add all devices of given class:
+ net - network interfaces,
+ usb - USB controllers
-d, --delete
Remove a PCI device from specified VM
+--offline-mode
+ Offline mode
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-prefs.rst b/doc/qvm-tools/qvm-prefs.rst
index 3120f143..1132f421 100644
--- a/doc/qvm-tools/qvm-prefs.rst
+++ b/doc/qvm-tools/qvm-prefs.rst
@@ -25,6 +25,10 @@ OPTIONS
Get a single property of a specified VM
-s, --set
Set properties of a specified VM
+--force-root
+ Force to run, even with root privileges
+--offline-mode
+ Offline mode
PROPERTIES
==========
@@ -49,6 +53,14 @@ pci_strictreset
cases it could make sense - for example when the VM to which it is assigned
is trusted one, or is running all the time.
+pci_e820_host
+ Accepted values: ``True``, ``False``
+
+ Give a VM with PCI devices a memory map (e820) of the host. This is
+ required for some devices to properly resolve address space conflicts.
+ This option is enabled by default for VMs with PCI devices and has no
+ effect for VMs without any.
+
label
Accepted values: ``red``, ``orange``, ``yellow``, ``green``, ``gray``,
``blue``, ``purple``, ``black``
@@ -134,7 +146,7 @@ mac
Can be used to force specific of virtual ethernet card in the VM. Setting
to ``auto`` will use automatic-generated MAC - based on VM id. Especially
- useful when some licencing depending on static MAC address.
+ useful when licensing requires a static MAC address.
For template-based HVM ``auto`` mode means to clone template MAC.
default_user
@@ -147,7 +159,7 @@ debug
Accepted values: ``on``, ``off``
Enables debug mode for VM. This can be used to turn on/off verbose logging
- in many qubes components at once (gui virtualization, VM kernel, some other
+ in many Qubes components at once (gui virtualization, VM kernel, some other
services).
For template-based HVM, enabling debug mode also disables automatic reset
root.img (actually volatile.img) before each VM startup, so changes made to
@@ -172,7 +184,7 @@ guiagent_installed
This HVM have gui agent installed. This option disables full screen GUI
virtualization and enables per-window seemless GUI mode. This option will
be automatically turned on during Qubes Windows Tools installation, but if
- you install qubes gui agent in some other OS, you need to turn this option
+ you install Qubes gui agent in some other OS, you need to turn this option
on manually. You can turn this option off to troubleshoot some early HVM OS
boot problems (enter safe mode etc), but the option will be automatically
enabled at first VM normal startup (and will take effect from the next
diff --git a/doc/qvm-tools/qvm-revert-template-changes.rst b/doc/qvm-tools/qvm-revert-template-changes.rst
index f250cda2..358964c6 100644
--- a/doc/qvm-tools/qvm-revert-template-changes.rst
+++ b/doc/qvm-tools/qvm-revert-template-changes.rst
@@ -17,7 +17,7 @@ OPTIONS
-h, --help
Show this help message and exit
--force
- Do not prompt for comfirmation
+ Do not prompt for confirmation
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-run.rst b/doc/qvm-tools/qvm-run.rst
index 15eb6779..00ed150e 100644
--- a/doc/qvm-tools/qvm-run.rst
+++ b/doc/qvm-tools/qvm-run.rst
@@ -40,6 +40,20 @@ OPTIONS
Pass stdin/stdout/stderr from remote program
--localcmd=LOCALCMD
With --pass-io, pass stdin/stdout/stderr to the given program
+--nogui
+ Run command without gui
+--filter-escape-chars
+ Filter terminal escape sequences (default if output is terminal)
+--no-filter-escape-chars
+ Do not filter terminal escape sequences - overrides --filter-escape-chars, DANGEROUS when output is terminal
+--no-color-output
+ Disable marking VM output with red color
+--no-color-stderr
+ Disable marking VM stderr with red color
+--color-output
+ Force marking VM output with given ANSI style (use 31 for red)
+--color-stderr
+ Force marking VM stderr with given ANSI style (use 31 for red)
--force
Force operation, even if may damage other VMs (eg. shutdown of NetVM)
diff --git a/doc/qvm-tools/qvm-service.rst b/doc/qvm-tools/qvm-service.rst
index 3a552b07..f85b6c08 100644
--- a/doc/qvm-tools/qvm-service.rst
+++ b/doc/qvm-tools/qvm-service.rst
@@ -30,7 +30,7 @@ OPTIONS
SUPPORTED SERVICES
==================
-This list can be incomplete as VM can implement any additional service without knowlege of qubes-core code.
+This list can be incomplete, as a VM can implement any additional service without the knowledge of the qubes-core code.
meminfo-writer
Default: enabled everywhere excluding NetVM
@@ -38,7 +38,7 @@ meminfo-writer
This service reports VM memory usage to dom0, which effectively enables dynamic memory management for the VM.
*Note:* this service is enforced to be set by dom0 code. If you try to
- remove it (reset to defult state), will be recreated with the rule: enabled
+ remove it (reset to default state), it will be recreated with the rule: enabled
if VM have no PCI devices assigned, otherwise disabled.
qubes-dvm
@@ -73,7 +73,7 @@ cups
Enable CUPS service. The user can disable cups in VM which do not need printing to speed up booting.
-cron
+crond
Default: disabled
Enable CRON service.
diff --git a/doc/qvm-tools/qvm-shutdown.rst b/doc/qvm-tools/qvm-shutdown.rst
index d38fc4ea..cefda324 100644
--- a/doc/qvm-tools/qvm-shutdown.rst
+++ b/doc/qvm-tools/qvm-shutdown.rst
@@ -22,6 +22,8 @@ OPTIONS
Force operation, even if may damage other VMs (eg. shutdown of NetVM)
--wait
Wait for the VM(s) to shutdown
+--wait-time
+ Timeout after which VM will be killed when --wait is used
--all
Shutdown all running VMs
--exclude=EXCLUDE_LIST
diff --git a/doc/qvm-tools/qvm-start.rst b/doc/qvm-tools/qvm-start.rst
index 3645b169..9b10b7ed 100644
--- a/doc/qvm-tools/qvm-start.rst
+++ b/doc/qvm-tools/qvm-start.rst
@@ -18,14 +18,26 @@ OPTIONS
Show this help message and exit
-q, --quiet
Be quiet
+--tray
+ Use tray notifications instead of stdout
--no-guid
Do not start the GUId (ignored)
---console
- Attach debugging console to the newly started VM
+--drive
+ Temporarily attach specified drive as CD/DVD or hard disk (can be specified with prefix 'hd:' or 'cdrom:', default is cdrom)
+--hddisk
+ Temporarily attach specified drive as hard disk
+--cdrom
+ Temporarily attach specified drive as CD/DVD
+--install-windows-tools
+ Attach Windows tools CDROM to the VM
--dvm
Do actions necessary when preparing DVM image
--custom-config=CUSTOM_CONFIG
Use custom Xen config instead of Qubes-generated one
+--skip-if-running
+ Do not fail if the VM is already running
+--debug
+ Enable debug mode for this VM (until its shutdown)
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-template-commit.rst b/doc/qvm-tools/qvm-template-commit.rst
index bb2bf857..60722c95 100644
--- a/doc/qvm-tools/qvm-template-commit.rst
+++ b/doc/qvm-tools/qvm-template-commit.rst
@@ -16,6 +16,8 @@ OPTIONS
=======
-h, --help
Show this help message and exit
+--offline-mode
+ Offline mode
AUTHORS
=======
diff --git a/doc/qvm-tools/qvm-usb.rst b/doc/qvm-tools/qvm-usb.rst
new file mode 100644
index 00000000..50c92d22
--- /dev/null
+++ b/doc/qvm-tools/qvm-usb.rst
@@ -0,0 +1,36 @@
+=======
+qvm-usb
+=======
+
+NAME
+====
+qvm-usb - List/set VM USB devices
+
+:Date: 2013-03-16
+
+SYNOPSIS
+========
+| qvm-usb -l [options]
+| qvm-usb -a [options] :
+| qvm-usb -d [options] :
+
+OPTIONS
+=======
+-h, --help
+ Show this help message and exit
+-l, --list
+ List devices
+-a, --attach
+ Attach specified device to specified VM
+-d, --detach
+ Detach specified device
+--no-auto-detach
+ Fail when device already connected to other VM
+--force-root
+ Force to run, even with root privileges
+
+AUTHORS
+=======
+| Joanna Rutkowska
+| Rafal Wojtczuk
+| Marek Marczykowski
diff --git a/linux/aux-tools/prepare-volatile-img.sh b/linux/aux-tools/prepare-volatile-img.sh
index 33dfc9b9..822affa9 100755
--- a/linux/aux-tools/prepare-volatile-img.sh
+++ b/linux/aux-tools/prepare-volatile-img.sh
@@ -1,9 +1,5 @@
#!/bin/sh
-if [ "`id -u`" != "0" ]; then
- exec sudo $0 $*
-fi
-
set -e
if ! echo $PATH | grep -q sbin; then
@@ -24,30 +20,6 @@ if [ -e "$FILENAME" ]; then
exit 1
fi
+umask 002
TOTAL_SIZE=$[ $ROOT_SIZE + $SWAP_SIZE + 512 ]
truncate -s ${TOTAL_SIZE}M "$FILENAME"
-sfdisk --no-reread -u M "$FILENAME" > /dev/null 2> /dev/null < /dev/null
- if [ "$created" = "yes" ]; then
- rm -f ${loopdev}p*
- fi
- losetup -d ${loopdev} || :
- chown --reference `dirname "$FILENAME"` "$FILENAME"
-) 200>"/var/run/qubes/prepare-volatile-img.lock"
diff --git a/linux/systemd/qubes-netvm.service b/linux/systemd/qubes-netvm.service
index 450a9903..bf556f3c 100644
--- a/linux/systemd/qubes-netvm.service
+++ b/linux/systemd/qubes-netvm.service
@@ -1,7 +1,7 @@
[Unit]
Description=Qubes NetVM startup
After=qubes-core.service qubes-qmemman.service libvirtd.service
-Before=plymouth-quit.service
+Before=systemd-user-sessions.service
[Service]
Type=oneshot
diff --git a/linux/systemd/qubes-vm@.service b/linux/systemd/qubes-vm@.service
index 07607d10..2eb09ec5 100644
--- a/linux/systemd/qubes-vm@.service
+++ b/linux/systemd/qubes-vm@.service
@@ -1,12 +1,12 @@
[Unit]
Description=Start Qubes VM %i
After=qubes-netvm.service
-Before=plymouth-quit.service
+Before=systemd-user-sessions.service
[Service]
Type=oneshot
Environment=DISPLAY=:0
-ExecStart=/usr/bin/qvm-start --no-guid %i
+ExecStart=/usr/bin/qvm-start --no-guid --skip-if-running %i
Group=qubes
RemainAfterExit=yes
diff --git a/qmemman/qmemman.py b/qmemman/qmemman.py
index 2d124ff6..8d1d60c7 100755
--- a/qmemman/qmemman.py
+++ b/qmemman/qmemman.py
@@ -37,7 +37,9 @@ slow_memset_react_msg="VM didn't give back all requested memory"
class DomainState:
def __init__(self, id):
self.meminfo = None #dictionary of memory info read from client
- self.memory_actual = None #the current memory size
+ self.memory_current = 0 # the current memory size
+ self.memory_actual = None # the current memory allocation (what VM
+ # is using or can use at any time)
self.memory_maximum = None #the maximum memory size
self.mem_used = None #used memory, computed based on meminfo
self.id = id #domain id
@@ -45,6 +47,9 @@ class DomainState:
self.no_progress = False #no react to memset
self.slow_memset_react = False #slow react to memset (after few tries still above target)
+ def __repr__(self):
+ return self.__dict__.__repr__()
+
class SystemState(object):
def __init__(self):
self.log = logging.getLogger('qmemman.systemstate')
@@ -65,25 +70,53 @@ class SystemState(object):
def add_domain(self, id):
self.log.debug('add_domain(id={!r})'.format(id))
self.domdict[id] = DomainState(id)
+ # TODO: move to DomainState.__init__
+ target_str = self.xs.read('', '/local/domain/' + id + '/memory/target')
+ if target_str:
+ self.domdict[id].last_target = int(target_str) * 1024
def del_domain(self, id):
self.log.debug('del_domain(id={!r})'.format(id))
self.domdict.pop(id)
def get_free_xen_memory(self):
- return int(self.xc.physinfo()['free_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
-# hosts = self.xend_session.session.xenapi.host.get_all()
-# host_record = self.xend_session.session.xenapi.host.get_record(hosts[0])
-# host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
-# ret = host_metrics_record["memory_free"]
-# return long(ret)
+ xen_free = int(self.xc.physinfo()['free_memory']*1024 *
+ self.MEM_OVERHEAD_FACTOR)
+ # now check for domains which have been assigned more memory than they
+ # really use - do not count it as "free", because the domain is free to
+ # use it at any time
+ # assumption: self.refresh_memactual was called before
+ # (so domdict[id].memory_actual is up to date)
+ assigned_but_unused = reduce(
+ lambda acc, dom: acc + max(0, dom.last_target-dom.memory_current),
+ self.domdict.values(),
+ 0
+ )
+ # If, at any time, Xen has less memory than XEN_FREE_MEM_MIN,
+ # it is a failure of qmemman. Collect as much data as possible to
+ # debug it
+ if xen_free < self.XEN_FREE_MEM_MIN:
+ self.log.error("Xen free = {!r} below acceptable value! "
+ "assigned_but_unused={!r}, domdict={!r}".format(
+ xen_free, assigned_but_unused, self.domdict))
+ elif xen_free < assigned_but_unused+self.XEN_FREE_MEM_MIN:
+ self.log.error("Xen free = {!r} too small for satisfy assignments! "
+ "assigned_but_unused={!r}, domdict={!r}".format(
+ xen_free, assigned_but_unused, self.domdict))
+ return xen_free - assigned_but_unused
#refresh information on memory assigned to all domains
def refresh_memactual(self):
for domain in self.xc.domain_getinfo():
id = str(domain['domid'])
if self.domdict.has_key(id):
- self.domdict[id].memory_actual = domain['mem_kb']*1024
+ # real memory usage
+ self.domdict[id].memory_current = domain['mem_kb']*1024
+ # what VM is using or can use
+ self.domdict[id].memory_actual = max(
+ self.domdict[id].memory_current,
+ self.domdict[id].last_target
+ )
self.domdict[id].memory_maximum = self.xs.read('', '/local/domain/%s/memory/static-max' % str(id))
if self.domdict[id].memory_maximum:
self.domdict[id].memory_maximum = int(self.domdict[id].memory_maximum)*1024
@@ -272,11 +305,11 @@ class SystemState(object):
self.log.debug('do_balance dom={!r} sleeping ntries={}'.format(
dom, ntries))
time.sleep(self.BALOON_DELAY)
+ self.refresh_memactual()
ntries -= 1
if ntries <= 0:
# Waiting haven't helped; Find which domain get stuck and
# abort balance (after distributing what we have)
- self.refresh_memactual()
for rq2 in memset_reqs:
dom2, mem2 = rq2
if dom2 == dom:
diff --git a/qmemman/qmemman_server.py b/qmemman/qmemman_server.py
index cf1bb2fb..ff8ae69c 100755
--- a/qmemman/qmemman_server.py
+++ b/qmemman/qmemman_server.py
@@ -42,6 +42,15 @@ LOG_PATH='/var/log/qubes/qmemman.log'
system_state = SystemState()
global_lock = thread.allocate_lock()
+# If XS_Watcher handles a meminfo event before @introduceDomain, it will use
+# an incomplete domain list and may redistribute memory that is already
+# allocated to some VM, but not yet used (see #1389).
+# To fix that, system_state should be updated (the domain list refreshed)
+# before processing other changes, every time some process has requested
+# memory for a new VM, before releasing the lock. XS_Watcher will then check
+# this flag before processing any other event.
+force_refresh_domain_list = False
def only_in_first_list(l1, l2):
ret=[]
@@ -65,41 +74,65 @@ class XS_Watcher:
self.log.debug('XS_Watcher()')
self.handle = xen.lowlevel.xs.xs()
- self.handle.watch('@introduceDomain', WatchType(XS_Watcher.domain_list_changed, None))
- self.handle.watch('@releaseDomain', WatchType(XS_Watcher.domain_list_changed, None))
+ self.handle.watch('@introduceDomain', WatchType(
+ XS_Watcher.domain_list_changed, False))
+ self.handle.watch('@releaseDomain', WatchType(
+ XS_Watcher.domain_list_changed, False))
self.watch_token_dict = {}
+ def domain_list_changed(self, refresh_only=False):
+ """
+ Check if any domain was created/destroyed. If it was, update
+ appropriate list. Then redistribute memory.
- def domain_list_changed(self, param):
- self.log.debug('domain_list_changed(param={!r})'.format(param))
+ :param refresh_only: If True, only refresh the domain list, do not
+ redistribute memory. In this mode, the caller must already hold
+ global_lock.
+ """
+ self.log.debug('domain_list_changed(only_refresh={!r})'.format(
+ refresh_only))
- curr = self.handle.ls('', '/local/domain')
- self.log.debug('curr={!r}'.format(curr))
+ got_lock = False
+ if not refresh_only:
+ self.log.debug('acquiring global_lock')
+ global_lock.acquire()
+ got_lock = True
+ self.log.debug('global_lock acquired')
+ try:
+ curr = self.handle.ls('', '/local/domain')
+ if curr is None:
+ return
- if curr == None:
- return
+ # check if domain is really there, it may happen that some empty
+ # directories are left in xenstore
+ curr = filter(
+ lambda x:
+ self.handle.read('',
+ '/local/domain/{}/domid'.format(x)
+ ) is not None,
+ curr
+ )
+ self.log.debug('curr={!r}'.format(curr))
- self.log.debug('acquiring global_lock')
- global_lock.acquire()
- self.log.debug('global_lock acquired')
+ for i in only_in_first_list(curr, self.watch_token_dict.keys()):
+ # new domain has been created
+ watch = WatchType(XS_Watcher.meminfo_changed, i)
+ self.watch_token_dict[i] = watch
+ self.handle.watch(get_domain_meminfo_key(i), watch)
+ system_state.add_domain(i)
- for i in only_in_first_list(curr, self.watch_token_dict.keys()):
-#new domain has been created
- watch = WatchType(XS_Watcher.meminfo_changed, i)
- self.watch_token_dict[i] = watch
- self.handle.watch(get_domain_meminfo_key(i), watch)
- system_state.add_domain(i)
+ for i in only_in_first_list(self.watch_token_dict.keys(), curr):
+ # domain destroyed
+ self.handle.unwatch(get_domain_meminfo_key(i), self.watch_token_dict[i])
+ self.watch_token_dict.pop(i)
+ system_state.del_domain(i)
+ finally:
+ if got_lock:
+ global_lock.release()
+ self.log.debug('global_lock released')
- for i in only_in_first_list(self.watch_token_dict.keys(), curr):
-#domain destroyed
- self.handle.unwatch(get_domain_meminfo_key(i), self.watch_token_dict[i])
- self.watch_token_dict.pop(i)
- system_state.del_domain(i)
-
- global_lock.release()
- self.log.debug('global_lock released')
-
- system_state.do_balance()
+ if not refresh_only:
+ system_state.do_balance()
def meminfo_changed(self, domain_id):
@@ -111,6 +144,8 @@ class XS_Watcher:
self.log.debug('acquiring global_lock')
global_lock.acquire()
self.log.debug('global_lock acquired')
+ if force_refresh_domain_list:
+ self.domain_list_changed(refresh_only=True)
system_state.refresh_meminfo(domain_id, untrusted_meminfo_key)
@@ -140,35 +175,41 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler):
self.log = logging.getLogger('qmemman.daemon.reqhandler')
got_lock = False
- # self.request is the TCP socket connected to the client
- while True:
- self.data = self.request.recv(1024).strip()
- self.log.debug('data={!r}'.format(self.data))
- if len(self.data) == 0:
- self.log.info('EOF')
+ try:
+ # self.request is the TCP socket connected to the client
+ while True:
+ self.data = self.request.recv(1024).strip()
+ self.log.debug('data={!r}'.format(self.data))
+ if len(self.data) == 0:
+ self.log.info('EOF')
+ if got_lock:
+ global force_refresh_domain_list
+ force_refresh_domain_list = True
+ return
+
+ # XXX something is wrong here: return without release?
if got_lock:
- global_lock.release()
- self.log.debug('global_lock released')
- return
+ self.log.warning('Second request over qmemman.sock?')
+ return
- # XXX something is wrong here: return without release?
+ self.log.debug('acquiring global_lock')
+ global_lock.acquire()
+ self.log.debug('global_lock acquired')
+
+ got_lock = True
+ if system_state.do_balloon(int(self.data)):
+ resp = "OK\n"
+ else:
+ resp = "FAIL\n"
+ self.log.debug('resp={!r}'.format(resp))
+ self.request.send(resp)
+ except BaseException as e:
+ self.log.exception(
+ "exception while handling request: {!r}".format(e))
+ finally:
if got_lock:
- self.log.warning('Second request over qmemman.sock?')
- return
-
- self.log.debug('acquiring global_lock')
- global_lock.acquire()
- self.log.debug('global_lock acquired')
-
- got_lock = True
- if system_state.do_balloon(int(self.data)):
- resp = "OK\n"
- else:
- resp = "FAIL\n"
- self.log.debug('resp={!r}'.format(resp))
- self.request.send(resp)
-
- # XXX no release of lock?
+ global_lock.release()
+ self.log.debug('global_lock released')
def start_server(server):
@@ -194,8 +235,13 @@ class QMemmanServer:
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-c", "--config", action="store", dest="config", default=config_path)
+ parser.add_option("-d", "--debug", action="store_true", dest="debug",
+ default=False, help="Enable debugging")
(options, args) = parser.parse_args()
+ if options.debug:
+ logging.root.setLevel(logging.DEBUG)
+
# close io
sys.stdin.close()
sys.stdout.close()
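
A simplified model (plain threading instead of xenstore watches; names made up) of the handshake introduced above: whoever requested memory for a not-yet-listed VM sets a flag before releasing the lock, and the watcher refreshes its domain list before acting on the next meminfo event.

import threading

global_lock = threading.Lock()
force_refresh_domain_list = False
domain_list = []

def handle_balloon_request(new_domain_id):
    global force_refresh_domain_list
    with global_lock:
        # ... free memory for the new domain here ...
        force_refresh_domain_list = True   # new domain is not in domain_list yet

def handle_meminfo_event(domain_id):
    global force_refresh_domain_list
    with global_lock:
        if force_refresh_domain_list:
            # stand-in for re-reading /local/domain from xenstore
            domain_list[:] = ['0', domain_id]
            force_refresh_domain_list = False
        # ... redistribute memory using the refreshed domain_list ...

handle_balloon_request('5')
handle_meminfo_event('5')
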
diff --git a/qubes-rpc-policy/qubes.GetRandomizedTime.policy b/qubes-rpc-policy/qubes.GetRandomizedTime.policy
new file mode 100644
index 00000000..0f00b0b6
--- /dev/null
+++ b/qubes-rpc-policy/qubes.GetRandomizedTime.policy
@@ -0,0 +1,6 @@
+## Note that policy parsing stops at the first match,
+## so adding anything below "$anyvm $anyvm action" line will have no effect
+
+## Please use a single # to start your custom comments
+
+$anyvm dom0 allow
diff --git a/qubes-rpc-policy/qubes.OpenInVM.policy b/qubes-rpc-policy/qubes.OpenInVM.policy
index 41217337..27303cc9 100644
--- a/qubes-rpc-policy/qubes.OpenInVM.policy
+++ b/qubes-rpc-policy/qubes.OpenInVM.policy
@@ -3,5 +3,8 @@
## Please use a single # to start your custom comments
+sys-whonix anon-whonix allow
+whonix-gw anon-whonix allow
+whonix-ws anon-whonix allow
$anyvm $dispvm allow
$anyvm $anyvm ask
diff --git a/qubes-rpc-policy/qubes.OpenURL.policy b/qubes-rpc-policy/qubes.OpenURL.policy
new file mode 100644
index 00000000..27303cc9
--- /dev/null
+++ b/qubes-rpc-policy/qubes.OpenURL.policy
@@ -0,0 +1,10 @@
+## Note that policy parsing stops at the first match,
+## so adding anything below "$anyvm $anyvm action" line will have no effect
+
+## Please use a single # to start your custom comments
+
+sys-whonix anon-whonix allow
+whonix-gw anon-whonix allow
+whonix-ws anon-whonix allow
+$anyvm $dispvm allow
+$anyvm $anyvm ask
diff --git a/qubes-rpc/qubes.GetRandomizedTime b/qubes-rpc/qubes.GetRandomizedTime
new file mode 100755
index 00000000..53341f21
--- /dev/null
+++ b/qubes-rpc/qubes.GetRandomizedTime
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Patrick Schleizer
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+## Similar code to Boot Clock Randomization.
+## https://www.whonix.org/wiki/Boot_Clock_Randomization
+
+set -e
+
+## Get a random 0 or 1.
+## Will use this to decide to use plus or minus.
+ZERO_OR_ONE="$(shuf -i0-1 -n1 --random-source=/dev/random)"
+
+## Create a random number between 0 and 180.
+DELAY="$(shuf -i0-180 -n1 --random-source=/dev/random)"
+
+## Create a random number between 0 and 999999999.
+##
+## Thanks to
+## https://stackoverflow.com/questions/22887891/how-can-i-get-a-random-dev-random-number-between-0-and-999999999-in-bash
+NANOSECONDS="$(shuf -i0-999999999 -n1 --random-source=/dev/random)"
+
+## Examples NANOSECONDS:
+## 117752805
+## 38653957
+
+## Add leading zeros, because `date` expects 9 digits.
+NANOSECONDS="$(printf '%0*d\n' 9 "$NANOSECONDS")"
+
+## Using
+## printf '%0*d\n' 9 "38653957"
+## 38653957
+## becomes
+## 038653957
+
+## Examples NANOSECONDS:
+## 117752805
+## 038653957
+
+if [ "$ZERO_OR_ONE" = "0" ]; then
+ PLUS_OR_MINUS="-"
+elif [ "$ZERO_OR_ONE" = "1" ]; then
+ PLUS_OR_MINUS="+"
+else
+ exit 2
+fi
+
+#OLD_TIME="$(date)"
+#OLD_TIME_NANOSECONDS="$(date +%s.%N)"
+
+OLD_UNIXTIME="$(date +%s)"
+
+NEW_TIME="$(( $OLD_UNIXTIME $PLUS_OR_MINUS $DELAY ))"
+
+NEW_TIME_NANOSECONDS="$NEW_TIME.$NANOSECONDS"
+
+echo "$NEW_TIME_NANOSECONDS"
+
+## Testing the `date` syntax:
+## date --date @1396733199.112834496
+## date --date "@$NEW_TIME_NANOSECONDS"
diff --git a/qvm-tools/qubes-bug-report b/qvm-tools/qubes-bug-report
new file mode 100755
index 00000000..9bb23fcc
--- /dev/null
+++ b/qvm-tools/qubes-bug-report
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+
+import subprocess
+import argparse
+import time
+import sys
+import os
+
+from os.path import expanduser
+
+#the term qube refers to a qubes vm
+
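+#checks whether a program is available inside the qube by running 'command -v' via qvm-run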
+def is_program_installed_in_qube( program, qube_name ):
+ is_installed = True
+
+ try:
+ command = 'command -v ' + program
+ subprocess.check_call([ 'qvm-run', qube_name, '--pass-io', '--no-color-output', command ], stdout = open( os.devnull, 'w' ) )
+
+ except subprocess.CalledProcessError:
+ is_installed = False
+
+ return is_installed
+
+
+#this function requires virsh
+#domstate only works for Xen domU (guests)
+def is_qube_running( qube_name ):
+ runs = False
+
+ out = subprocess.check_output([ "virsh", "-c", "xen:///", "domstate", qube_name ])
+ out = out.decode('utf-8').replace('\n', '')
+
+ if 'running' == out:
+ runs = True
+
+ return runs
+
+
+def get_qube_packages( qube_name ):
+ content = "## Qubes Packages\n\n"
+
+ #a qube can have more than one package manager installed (only one is functional)
+ pkg_cmd = { 'dpkg' : 'dpkg -l qubes-*', 'pacman' : 'pacman -Qs qubes', 'rpm' : 'rpm -qa qubes-*' }
+
+ if is_qube_running( qube_name ):
+
+ for package_manager in pkg_cmd.keys():
+ if is_program_installed_in_qube( package_manager, qube_name ):
+ pkg_list_cmd = pkg_cmd[package_manager]
+
+ try:
+ out = subprocess.check_output([ 'qvm-run', qube_name, '--pass-io', '--no-color-output', pkg_list_cmd ], stderr = open( os.devnull, 'w' ) )
+ out = out.decode('utf-8')
+ content += create_heading( ( "Package Manager: " + package_manager ), 3 )
+ content += wrap_code( out )
+ except subprocess.CalledProcessError:
+ pass #do nothing
+
+ else:
+ content += "**No packages listed, because Qube " + qube_name + " was not running**\n\n"
+
+ return content
+
+
+def get_dom0_packages():
+ content = create_heading( "Dom0 Packages", 2 )
+ out = subprocess.check_output([ "rpm", "-qa", "qubes-*" ])
+ out = out.decode('utf-8')
+ content += wrap_code( out )
+
+ return content
+
+
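+#wraps text in a Markdown code block (~~~)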
+def wrap_code( text ):
+ code = "~~~\n" + text + "~~~\n\n"
+
+ return code
+
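+#builds a Markdown heading; level 1 -> '#', level 2 -> '##', anything else -> '###'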
+def create_heading( heading, level ):
+ heading = heading + "\n\n"
+
+ if 1 == level:
+ heading = "# " + heading
+ elif 2 == level:
+ heading = "## " + heading
+ else:
+ heading = "### " + heading
+
+ return heading
+
+
+
+def get_log_file_content( qube_name ):
+ content = "## Log Files\n\n"
+ qubes_os_log = "/var/log/qubes/"
+ ext = ".log"
+
+ log_prefix = [ "guid", "pacat", "qubesdb", "qrexec" ]
+
+ #constructs for each log file prefix the full path and reads the log file
+ for prefix in log_prefix:
+ log_file = prefix + "." + qube_name + ext
+ content += create_heading( ( "Log File: " + log_file ), 3 )
+ content += wrap_code( get_log_file( qubes_os_log + log_file ) )
+
+ return content
+
+
+def get_qube_prefs( qube_name ):
+ qube_prefs = subprocess.check_output([ "qvm-prefs", qube_name ])
+ qube_prefs = qube_prefs.decode('utf-8')
+
+ content = create_heading( "Qube Prefs", 2 )
+ content += wrap_code( qube_prefs )
+
+ return content
+
+
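+#assembles the full Markdown report: qube prefs, dom0 packages, log files and qube packages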
+def report( qube_name ):
+ template = '''{title}
+{content}
+'''
+
+ title_text = create_heading( "Bug report: " + qube_name, 1 )
+
+ content_text = get_qube_prefs( qube_name )
+ content_text += get_dom0_packages()
+ content_text += get_log_file_content( qube_name )
+ content_text += get_qube_packages( qube_name )
+
+
+ report = template.format( title=title_text, content=content_text )
+
+ return report
+
+
+def write_report( report_content, file_path ):
+ with open( file_path, 'w' ) as report_file:
+ report_file.write( report_content )
+
+
+def send_report( dest_qube, file_path):
+ #if dest_qube is not running -> start dest_qube
+ if not is_qube_running( dest_qube ):
+ try:
+ subprocess.check_call([ "qvm-start", dest_qube ])
+ except subprocess.CalledProcessError:
+ print( "Error while starting: " + dest_qube, file = sys.stderr )
+
+ try:
+ subprocess.check_call([ "qvm-move-to-vm", dest_qube, file_path ])
+ except subprocess.CalledProcessError:
+ print( "Moving file bug-report failed", file = sys.stderr )
+
+
+def get_log_file( log_file ):
+ data = ""
+
+ #open and close the file
+ with open( log_file ) as log:
+ data = log.read()
+
+ return data
+
+
+def qube_exist( qube_name ):
+ exists = True
+
+ try:
+ #calls: qvm-check --quiet vmname
+ subprocess.check_call([ "qvm-check", "--quiet", qube_name ])
+
+ except subprocess.CalledProcessError:
+ exists = False
+
+ return exists
+
+
+def get_report_file_path( qube_name ):
+ #expanduser -> works cross-platform
+ home_dir = expanduser("~")
+ date = time.strftime("%H%M%S")
+ file_path = home_dir + "/" + qube_name + "_bug-report_" + date + ".md"
+
+ return file_path
+
+
+def main():
+ parser = argparse.ArgumentParser( description = 'Generates a bug report for a specific qube (Qubes VM)' )
+ parser.add_argument( 'vmname', metavar = '', type = str )
+ parser.add_argument( '-d', '--dest-vm', metavar = '', dest = "destvm", type = str, default = 'dom0', help = "send the report to the destination VM" )
+ parser.add_argument( '-p', '--print-report', action = 'store_const', const = "print_report", required = False, help = "prints the report without writing it or sending it to a destination VM" )
+ args = parser.parse_args()
+
+ if qube_exist( args.vmname ):
+
+ if qube_exist( args.destvm ):
+ #get the report
+ report_content = report( args.vmname )
+
+ #if -p or --print-report is an argument print the report
+ if args.print_report:
+ print( report_content )
+
+ #write and send the report
+ else:
+ file_path = get_report_file_path( args.vmname )
+ write_report( report_content, file_path )
+ print( "Report written to: " + file_path )
+
+ if 'dom0' != args.destvm:
+ send_report( args.destvm, file_path )
+ print( "Report sent to VM: " + args.destvm )
+
+ exit(0)
+
+ else:
+ print ( "Destination VM does not exist" )
+ exit(1)
+
+ else:
+ print( "VM does not exist" )
+ exit(1)
+
+
+#calls the main function -> program start point
+main()
diff --git a/qvm-tools/qubes-hcl-report b/qvm-tools/qubes-hcl-report
index 047a79d0..b484f666 100755
--- a/qvm-tools/qubes-hcl-report
+++ b/qvm-tools/qubes-hcl-report
@@ -18,7 +18,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-VERSION=2.4
+VERSION=2.5
COPY2VM="dom0"
SUPPORT_FILES=0
@@ -101,9 +101,10 @@ XEN_EXTRA=`cat $TEMP_DIR/xl-info |grep xen_extra |cut -d: -f2 |tr -d ' '`
QUBES=`cat $TEMP_DIR/qubes-release |cut -d '(' -f2 |cut -d ')' -f1`
XL_VTX=`cat $TEMP_DIR/xl-info |grep xen_caps | grep hvm`
XL_VTD=`cat $TEMP_DIR/xl-info |grep virt_caps |grep hvm_directio`
+XL_HAP=`cat $TEMP_DIR/xl-dmesg |grep 'HVM: Hardware Assisted Paging (HAP) detected'`
PCRS=`find /sys/devices/ -name pcrs`
-FILENAME="Qubes-HCL-${BRAND// /_}-${PRODUCT// /_}-$DATE"
+FILENAME="Qubes-HCL-${BRAND//[^[:alnum:]]/_}-${PRODUCT//[^[:alnum:]]/_}-$DATE"
if [[ $XL_VTX ]]
then
@@ -127,6 +128,12 @@ if [[ $XL_VTD ]]
fi
+if [ -n "$XL_HAP" ]; then
+ HAP="yes"
+else
+ HAP="no"
+fi
+
if [[ $PCRS ]]
then
# try tu run tcsd and: grep the logs, try get version info.
@@ -152,6 +159,7 @@ echo -e "Net:\n$NET\n"
echo -e "SCSI:\n$SCSI\n"
echo -e "HVM:\t\t$VTX"
echo -e "I/O MMU:\t$VTD"
+echo -e "HAP/SLAT:\t${HAP^}"
echo -e "TPM:\t\t$TPM"
echo
@@ -164,6 +172,8 @@ hvm:
'$HVM'
iommu:
'$IOMMU'
+slat:
+ '$HAP'
tpm:
'$TPM_s'
brand: |
@@ -209,7 +219,7 @@ versions:
FIXLINK
---
-" >> $HOME/$FILENAME.yml
+" >> "$HOME/$FILENAME.yml"
if [[ "$SUPPORT_FILES" == 1 ]]
@@ -217,7 +227,7 @@ if [[ "$SUPPORT_FILES" == 1 ]]
# cpio
cd $TEMP_DIR
- find -print0 |cpio --quiet -o -H crc --null |gzip >$HOME/$FILENAME.cpio.gz
+ find -print0 | cpio --quiet -o -H crc --null | gzip > "$HOME/$FILENAME.cpio.gz"
cd
fi
@@ -225,16 +235,16 @@ fi
if [[ "$COPY2VM" != "dom0" ]]
then
# Copy to VM
- qvm-start -q $COPY2VM 2>/dev/null
+ qvm-start -q $COPY2VM 2> /dev/null
if [[ -f "$HOME/$FILENAME.cpio.gz" ]]
then
- cat $HOME/$FILENAME.cpio.gz | qvm-run -a -q --pass-io $COPY2VM "cat >/home/user/$FILENAME.cpio.gz"
+ cat "$HOME/$FILENAME.cpio.gz" | qvm-run -a -q --pass-io $COPY2VM "cat > \"/home/user/$FILENAME.cpio.gz\""
fi
if [[ -f "$HOME/$FILENAME.yml" ]]
then
- cat $HOME/$FILENAME.yml | qvm-run -a -q --pass-io $COPY2VM "cat >/home/user/$FILENAME.yml"
+ cat "$HOME/$FILENAME.yml" | qvm-run -a -q --pass-io $COPY2VM "cat > \"/home/user/$FILENAME.yml\""
fi
fi
diff --git a/qvm-tools/qvm-backup b/qvm-tools/qvm-backup
index dff4e9e5..14791219 100755
--- a/qvm-tools/qvm-backup
+++ b/qvm-tools/qvm-backup
@@ -30,6 +30,7 @@ import qubes.backup
import os
import sys
import getpass
+from locale import getpreferredencoding
def print_progress(progress):
print >> sys.stderr, "\r-> Backing up files: {0}%...".format (progress),
@@ -40,46 +41,64 @@ def main():
parser.add_option ("-x", "--exclude", action="append",
dest="exclude_list", default=[],
- help="Exclude the specified VM from backup (may be "
+ help="Exclude the specified VM from the backup (may be "
"repeated)")
parser.add_option ("--force-root", action="store_true", dest="force_root", default=False,
- help="Force to run, even with root privileges")
+ help="Force to run even with root privileges")
parser.add_option ("-d", "--dest-vm", action="store", dest="appvm",
- help="The AppVM to send backups to (implies -e)")
+ help="Specify the destination VM to which the backup "
+ "will be sent (implies -e)")
parser.add_option ("-e", "--encrypt", action="store_true", dest="encrypt", default=False,
- help="Encrypts the backup")
+ help="Encrypt the backup")
parser.add_option ("--no-encrypt", action="store_true",
dest="no_encrypt", default=False,
- help="Skip encryption even if sending the backup to VM")
+ help="Skip encryption even if sending the backup to a "
+ "VM")
+ parser.add_option ("-p", "--passphrase-file", action="store",
+ dest="pass_file", default=None,
+ help="Read passphrase from a file, or use '-' to read "
+ "from stdin")
parser.add_option ("-E", "--enc-algo", action="store",
dest="crypto_algorithm", default=None,
- help="Specify non-default encryption algorithm. For "
- "list of supported algos execute 'openssl "
+ help="Specify a non-default encryption algorithm. For a "
+ "list of supported algorithms, execute 'openssl "
"list-cipher-algorithms' (implies -e)")
parser.add_option ("-H", "--hmac-algo", action="store",
dest="hmac_algorithm", default=None,
- help="Specify non-default hmac algorithm. For list of "
- "supported algos execute 'openssl "
+ help="Specify a non-default HMAC algorithm. For a list "
+ "of supported algorithms, execute 'openssl "
"list-message-digest-algorithms'")
parser.add_option ("-z", "--compress", action="store_true", dest="compress", default=False,
help="Compress the backup")
+ parser.add_option ("-Z", "--compress-filter", action="store",
+ dest="compress_filter", default=False,
+ help="Specify a non-default compression filter program "
+ "(default: gzip)")
+ parser.add_option("--tmpdir", action="store", dest="tmpdir", default=None,
+ help="Specify a temporary directory (if you have at least "
+ "1GB free RAM in dom0, use of /tmp is advised) ("
+ "default: /var/tmp)")
parser.add_option ("--debug", action="store_true", dest="debug",
default=False, help="Enable (a lot of) debug output")
(options, args) = parser.parse_args ()
if (len (args) < 1):
- print >> sys.stderr, "You must specify the target backup directory (e.g. /mnt/backup)"
- print >> sys.stderr, "qvm-backup will create a subdirectory there for each individual backup."
+ print >> sys.stderr, "You must specify the target backup directory "\
+ "(e.g. /mnt/backup)."
+ print >> sys.stderr, "qvm-backup will create a subdirectory there for "\
+ "each individual backup."
exit (0)
base_backup_dir = args[0]
if hasattr(os, "geteuid") and os.geteuid() == 0:
if not options.force_root:
- print >> sys.stderr, "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
- print >> sys.stderr, "Retry as unprivileged user."
- print >> sys.stderr, "... or use --force-root to continue anyway."
+ print >> sys.stderr, "*** Running this tool as root is strongly "\
+ "discouraged. This will lead to permissions "\
+ "problems."
+ print >> sys.stderr, "Retry as an unprivileged user, or use "\
+ "--force-root to continue anyway."
exit(1)
# Only for locking
@@ -123,14 +142,15 @@ def main():
backup_fs_free_sz = stat.f_bsize * stat.f_bavail
print
if (total_backup_sz > backup_fs_free_sz):
- print >>sys.stderr, "ERROR: Not enough space available on the backup filesystem!"
+ print >>sys.stderr, "ERROR: Not enough space available on the "\
+ "backup filesystem!"
exit(1)
print "-> Available space: {0}".format(size_to_human(backup_fs_free_sz))
else:
appvm = qvm_collection.get_vm_by_name(options.appvm)
if appvm is None:
- print >>sys.stderr, "ERROR: VM {0} does not exist".format(options.appvm)
+ print >>sys.stderr, "ERROR: VM {0} does not exist!".format(options.appvm)
exit(1)
stat = os.statvfs('/var/tmp')
@@ -138,50 +158,54 @@ def main():
print
if (backup_fs_free_sz < 1000000000):
print >>sys.stderr, "ERROR: Not enough space available " \
- "on the local filesystem (needs 1GB for temporary files)!"
+ "on the local filesystem (1GB required for temporary files)!"
exit(1)
if not appvm.is_running():
appvm.start(verbose=True)
if options.appvm:
- print >>sys.stderr, ("WARNING: VM {} excluded because it's used to "
- "store the backup.").format(options.appvm)
+ print >>sys.stderr, ("NOTE: VM {} will be excluded because it is "
+ "the backup destination.").format(options.appvm)
options.exclude_list.append(options.appvm)
if not options.encrypt:
- print >>sys.stderr, "WARNING: encryption will not be used"
+ print >>sys.stderr, "WARNING: The backup will NOT be encrypted!"
- prompt = raw_input ("Do you want to proceed? [y/N] ")
- if not (prompt == "y" or prompt == "Y"):
- exit (0)
+ if options.pass_file is not None:
+ f = open(options.pass_file) if options.pass_file != "-" else sys.stdin
+ passphrase = f.readline().rstrip()
+ if f is not sys.stdin:
+ f.close()
- if options.encrypt:
- passphrase = getpass.getpass("Please enter the pass phrase that will "
- "be used to encrypt and verify the "
- "backup: ")
else:
- passphrase = getpass.getpass("Please enter the pass phrase that will "
- "be used to verify the backup: ")
+ if raw_input("Do you want to proceed? [y/N] ").upper() != "Y":
+ exit(0)
- passphrase2 = getpass.getpass("Enter again for verification: ")
- if passphrase != passphrase2:
- print >>sys.stderr, "ERROR: Password mismatch"
- exit(1)
+ s = ("Please enter the passphrase that will be used to {}verify "
+ "the backup: ").format('encrypt and ' if options.encrypt else '')
+ passphrase = getpass.getpass(s)
- passphrase = passphrase.decode(sys.stdin.encoding)
+ if getpass.getpass("Enter again for verification: ") != passphrase:
+ print >>sys.stderr, "ERROR: Passphrase mismatch!"
+ exit(1)
+
+ encoding = sys.stdin.encoding or getpreferredencoding()
+ passphrase = passphrase.decode(encoding)
kwargs = {}
if options.hmac_algorithm:
kwargs['hmac_algorithm'] = options.hmac_algorithm
if options.crypto_algorithm:
kwargs['crypto_algorithm'] = options.crypto_algorithm
+ if options.tmpdir:
+ kwargs['tmpdir'] = options.tmpdir
try:
backup_do(base_backup_dir, files_to_backup, passphrase,
progress_callback=print_progress,
encrypted=options.encrypt,
- compressed=options.compress,
+ compressed=options.compress_filter or options.compress,
appvm=appvm, **kwargs)
except QubesException as e:
print >>sys.stderr, "ERROR: %s" % str(e)
diff --git a/qvm-tools/qvm-backup-restore b/qvm-tools/qvm-backup-restore
index c07119d6..aa0c62a3 100755
--- a/qvm-tools/qvm-backup-restore
+++ b/qvm-tools/qvm-backup-restore
@@ -31,6 +31,7 @@ from qubes.backup import backup_restore_do
import qubes.backup
import sys
from optparse import OptionParser
+from locale import getpreferredencoding
import os
import sys
@@ -42,23 +43,31 @@ def main():
parser.add_option ("--verify-only", action="store_true",
dest="verify_only", default=False,
- help="Do not restore the data, only verify backup "
- "integrify.")
+ help="Verify backup integrity without restoring any "
+ "data")
parser.add_option ("--skip-broken", action="store_true", dest="skip_broken", default=False,
- help="Do not restore VMs that have missing templates or netvms")
+ help="Do not restore VMs that have missing TemplateVMs "
+ "or NetVMs")
parser.add_option ("--ignore-missing", action="store_true", dest="ignore_missing", default=False,
- help="Ignore missing templates or netvms, restore VMs anyway")
+ help="Restore VMs even if their associated TemplateVMs "
+ "and NetVMs are missing")
parser.add_option ("--skip-conflicting", action="store_true", dest="skip_conflicting", default=False,
- help="Do not restore VMs that are already present on the host")
+ help="Do not restore VMs that are already present on "
+ "the host")
+
+ parser.add_option ("--rename-conflicting", action="store_true",
+ dest="rename_conflicting", default=False,
+ help="Restore VMs that are already present on the host "
+ "under different names")
parser.add_option ("--force-root", action="store_true", dest="force_root", default=False,
- help="Force to run, even with root privileges")
+ help="Force to run even with root privileges")
parser.add_option ("--replace-template", action="append", dest="replace_template", default=[],
- help="Restore VMs using another template, syntax: "
+ help="Restore VMs using another TemplateVM; syntax: "
"old-template-name:new-template-name (may be "
"repeated)")
@@ -66,17 +75,22 @@ def main():
help="Skip restore of specified VM (may be repeated)")
parser.add_option ("--skip-dom0-home", action="store_false", dest="dom0_home", default=True,
- help="Do not restore dom0 user home dir")
+ help="Do not restore dom0 user home directory")
parser.add_option ("--ignore-username-mismatch", action="store_true", dest="ignore_username_mismatch", default=False,
- help="Ignore dom0 username mismatch while restoring homedir")
+ help="Ignore dom0 username mismatch when restoring home "
+ "directory")
parser.add_option ("-d", "--dest-vm", action="store", dest="appvm",
- help="The AppVM to send backups to")
+ help="Specify VM containing the backup to be restored")
parser.add_option ("-e", "--encrypted", action="store_true", dest="decrypt", default=False,
help="The backup is encrypted")
+ parser.add_option ("-p", "--passphrase-file", action="store",
+ dest="pass_file", default=None,
+ help="Read passphrase from file, or use '-' to read from stdin")
+
parser.add_option ("-z", "--compressed", action="store_true", dest="compressed", default=False,
help="The backup is compressed")
@@ -86,7 +100,8 @@ def main():
(options, args) = parser.parse_args ()
if (len (args) < 1):
- print >> sys.stderr, "You must specify the backup directory (e.g. /mnt/backup/qubes-2010-12-01-235959)"
+ print >> sys.stderr, "You must specify the backup directory "\
+ "(e.g. /mnt/backup/qubes-2010-12-01-235959)"
exit (0)
backup_dir = args[0]
@@ -106,6 +121,8 @@ def main():
restore_options['use-default-netvm'] = True
if options.replace_template:
restore_options['replace-template'] = options.replace_template
+ if options.rename_conflicting:
+ restore_options['rename-conflicting'] = True
if not options.dom0_home:
restore_options['dom0-home'] = False
if options.ignore_username_mismatch:
@@ -124,8 +141,17 @@ def main():
print >>sys.stderr, "ERROR: VM {0} does not exist".format(options.appvm)
exit(1)
- passphrase = getpass.getpass("Please enter the pass phrase that will be used to decrypt/verify the backup: ")
- passphrase = passphrase.decode(sys.stdin.encoding)
+ if options.pass_file is not None:
+ f = open(options.pass_file) if options.pass_file != "-" else sys.stdin
+ passphrase = f.readline().rstrip()
+ if f is not sys.stdin:
+ f.close()
+ else:
+ passphrase = getpass.getpass("Please enter the passphrase to verify "
+ "and (if encrypted) decrypt the backup: ")
+
+ encoding = sys.stdin.encoding or getpreferredencoding()
+ passphrase = passphrase.decode(encoding)
print >> sys.stderr, "Checking backup content..."
@@ -178,69 +204,97 @@ def main():
print
if hasattr(os, "geteuid") and os.geteuid() == 0:
- print >> sys.stderr, "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
+ print >> sys.stderr, "*** Running this tool as root is strongly "\
+ "discouraged. This will lead to permissions "\
+ "problems."
if options.force_root:
- print >> sys.stderr, "Continuing as commanded. You have been warned."
+ print >> sys.stderr, "Continuing as commanded. You have been "\
+ "warned."
else:
- print >> sys.stderr, "Retry as unprivileged user."
- print >> sys.stderr, "... or use --force-root to continue anyway."
+ print >> sys.stderr, "Retry as an unprivileged user, or use "\
+ "--force-root to continue anyway."
exit(1)
if there_are_conflicting_vms:
- print >> sys.stderr, "*** There VMs with conflicting names on the host! ***"
+ print >> sys.stderr, "*** There are VMs with conflicting names on the "\
+ "host! ***"
if options.skip_conflicting:
- print >> sys.stderr, "Those VMs will not be restored, the host VMs will not be overwritten!"
+ print >> sys.stderr, "Those VMs will not be restored. The host "\
+ "VMs will NOT be overwritten."
else:
- print >> sys.stderr, "Remove VMs with conflicting names from the host before proceeding."
- print >> sys.stderr, "... or use --skip-conflicting to restore only those VMs that do not exist on the host."
+ print >> sys.stderr, "Remove VMs with conflicting names from the "\
+ "host before proceeding."
+ print >> sys.stderr, "Or use --skip-conflicting to restore only "\
+ "those VMs that do not exist on the host."
+ print >> sys.stderr, "Or use --rename-conflicting to restore " \
+ "those VMs under modified names (with "\
+ "numbers at the end)."
exit (1)
print "The above VMs will be copied and added to your system."
- print "Exisiting VMs will not be removed."
+ print "Existing VMs will NOT be removed."
if there_are_missing_templates:
- print >> sys.stderr, "*** One or more template VM is missing on the host! ***"
+ print >> sys.stderr, "*** One or more TemplateVMs are missing on the "\
+ "host! ***"
if not (options.skip_broken or options.ignore_missing):
- print >> sys.stderr, "Install it first, before proceeding with backup restore."
- print >> sys.stderr, "Or pass: --skip-broken or --ignore-missing switch."
+ print >> sys.stderr, "Install them before proceeding with the "\
+ "restore."
+ print >> sys.stderr, "Or pass: --skip-broken or --ignore-missing."
exit (1)
elif options.skip_broken:
- print >> sys.stderr, "... VMs that depend on it will not be restored (--skip-broken used)"
+ print >> sys.stderr, "Skipping broken entries: VMs that depend on "\
+ "missing TemplateVMs will NOT be restored."
elif options.ignore_missing:
- print >> sys.stderr, "... VMs that depend on it will be restored anyway (--ignore-missing used)"
+ print >> sys.stderr, "Ignoring missing entries: VMs that depend "\
+ "on missing TemplateVMs will NOT be restored."
else:
- print >> sys.stderr, "INTERNAL ERROR?!"
+ print >> sys.stderr, "INTERNAL ERROR! Please report this to the "\
+ "Qubes OS team!"
exit (1)
if there_are_missing_netvms:
- print >> sys.stderr, "*** One or more network VM is missing on the host! ***"
+ print >> sys.stderr, "*** One or more NetVMs are missing on the "\
+ "host! ***"
if not (options.skip_broken or options.ignore_missing):
- print >> sys.stderr, "Install it first, before proceeding with backup restore."
- print >> sys.stderr, "Or pass: --skip_broken or --ignore_missing switch."
+ print >> sys.stderr, "Install them before proceeding with the "\
+ "restore."
+ print >> sys.stderr, "Or pass: --skip-broken or --ignore-missing."
exit (1)
elif options.skip_broken:
- print >> sys.stderr, "... VMs that depend on it will not be restored (--skip-broken used)"
+ print >> sys.stderr, "Skipping broken entries: VMs that depend on "\
+ "missing NetVMs will NOT be restored."
elif options.ignore_missing:
- print >> sys.stderr, "... VMs that depend on it be restored anyway (--ignore-missing used)"
+ print >> sys.stderr, "Ignoring missing entries: VMs that depend "\
+ "on missing NetVMs will NOT be restored."
else:
- print >> sys.stderr, "INTERNAL ERROR?!"
+ print >> sys.stderr, "INTERNAL ERROR! Please report this to the "\
+ "Qubes OS team!"
exit (1)
if 'dom0' in restore_info.keys() and options.dom0_home:
if dom0_username_mismatch:
- print >> sys.stderr, "*** Dom0 username mismatch! This can break some settings ***"
+ print >> sys.stderr, "*** Dom0 username mismatch! This can break "\
+ "some settings! ***"
if not options.ignore_username_mismatch:
- print >> sys.stderr, "Skip dom0 home restore (--skip-dom0-home)"
- print >> sys.stderr, "Or pass: --ignore-username-mismatch to continue anyway"
+ print >> sys.stderr, "Skip restoring the dom0 home directory "\
+ "(--skip-dom0-home), or pass "\
+ "--ignore-username-mismatch to continue "\
+ "anyway."
exit(1)
else:
- print >> sys.stderr, "Continuing as directed"
- print >> sys.stderr, "While restoring user homedir, existing files/dirs will be backed up in 'home-pre-restore-' dir"
-
- prompt = raw_input ("Do you want to proceed? [y/N] ")
- if not (prompt == "y" or prompt == "Y"):
- exit (0)
+ print >> sys.stderr, "Continuing as directed."
+ print >> sys.stderr, "NOTE: Before restoring the dom0 home directory, "\
+ "a new directory named "\
+ "'home-pre-restore-' will be "\
+ "created inside the dom0 home directory. If any "\
+ "restored files conflict with existing files, "\
+ "the existing files will be moved to this new "\
+ "directory."
+ if options.pass_file is None:
+ if raw_input("Do you want to proceed? [y/N] ").upper() != "Y":
+ exit(0)
try:
backup_restore_do(restore_info,
diff --git a/qvm-tools/qvm-check b/qvm-tools/qvm-check
index 671aeda2..cc69113b 100755
--- a/qvm-tools/qvm-check
+++ b/qvm-tools/qvm-check
@@ -27,11 +27,18 @@ import sys
import time
def main():
- usage = "usage: %prog [options] "
+ usage = """usage: %prog [options] <vm-name>
+Specify no state option to check only whether the VM exists"""
parser = OptionParser (usage)
parser.add_option ("-q", "--quiet", action="store_false", dest="verbose", default=True)
-
+ parser.add_option ("--running", action="store_true", dest="running", default=False,
+ help="Determine if VM is running")
+ parser.add_option ("--paused", action="store_true", dest="paused", default=False,
+ help="Determine if VM is paused")
+ parser.add_option ("--template", action="store_true", dest="template", default=False,
+ help="Determine if VM is a template")
+
(options, args) = parser.parse_args ()
if (len (args) != 1):
parser.error ("You must specify VM name!")
@@ -48,8 +55,26 @@ def main():
print >> sys.stdout, "A VM with the name '{0}' does not exist in the system!".format(vmname)
exit(1)
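+ # For the state checks below the exit code follows shell convention:
+ # 0 if the condition holds, 1 if it does not.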
+ elif options.running:
+ vm_state = not vm.is_running()
+ if options.verbose:
+ print >> sys.stdout, "A VM with the name {0} is {1}running.".format(vmname, "not " * vm_state)
+ exit(vm_state)
+
+ elif options.paused:
+ vm_state = not vm.is_paused()
+ if options.verbose:
+ print >> sys.stdout, "A VM with the name {0} is {1}paused.".format(vmname, "not " * vm_state)
+ exit(vm_state)
+
+ elif options.template:
+ vm_state = not vm.is_template()
+ if options.verbose:
+ print >> sys.stdout, "A VM with the name {0} is {1}a template.".format(vmname, "not " * vm_state)
+ exit(vm_state)
+
else:
- if options.verbose:
+ if options.verbose:
print >> sys.stdout, "A VM with the name '{0}' does exist.".format(vmname)
exit(0)
diff --git a/qvm-tools/qvm-clone b/qvm-tools/qvm-clone
index e2eba3b3..5ec22c5c 100755
--- a/qvm-tools/qvm-clone
+++ b/qvm-tools/qvm-clone
@@ -17,37 +17,43 @@
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-#
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+import os
+import sys
+from optparse import OptionParser
from qubes.qubes import QubesVmCollection
-from qubes.qubes import QubesAppVm, QubesTemplateVm, QubesHVm
-from qubes.qubes import QubesException
-from optparse import OptionParser;
-import sys
-import os
+
def main():
usage = "usage: %prog [options] \n"\
"Clones an existing VM by copying all its disk files"
-
- parser = OptionParser (usage)
- parser.add_option ("-q", "--quiet", action="store_false", dest="verbose", default=True)
- parser.add_option ("-p", "--path", dest="dir_path",
- help="Specify path to the template directory")
- parser.add_option ("--force-root", action="store_true", dest="force_root", default=False,
- help="Force to run, even with root privileges")
- (options, args) = parser.parse_args ()
- if (len (args) != 2):
- parser.error ("You must specify at least the src and dst TemplateVM names!")
+ parser = OptionParser(usage)
+ parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
+ default=True)
+ parser.add_option("-p", "--path", dest="dir_path",
+ help="Specify path to the template directory")
+ parser.add_option("--force-root", action="store_true", dest="force_root",
+ default=False,
+ help="Force to run, even with root privileges")
+ parser.add_option("-P", "--pool", dest="pool_name",
+ help="Specify the storage pool to clone into")
+
+ (options, args) = parser.parse_args()
+ if (len(args) != 2):
+ parser.error(
+ "You must specify at least the src and dst TemplateVM names!")
srcname = args[0]
dstname = args[1]
if hasattr(os, "geteuid") and os.geteuid() == 0:
if not options.force_root:
- print >> sys.stderr, "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
+ print >> sys.stderr, "*** Running this tool as root is" + \
+ " strongly discouraged; this will lead to permissions " + \
+ "problems."
print >> sys.stderr, "Retry as unprivileged user."
print >> sys.stderr, "... or use --force-root to continue anyway."
exit(1)
@@ -57,12 +63,21 @@ def main():
qvm_collection.load()
src_vm = qvm_collection.get_vm_by_name(srcname)
- if src_vm is None:
- print >> sys.stderr, "ERROR: A VM with the name '{0}' does not exist in the system.".format(srcname)
+ if src_vm is None:
+ print >> sys.stderr, \
+ "ERROR: A VM with the name '{0}' does not exist in the system." \
+ .format(srcname)
exit(1)
+ if options.pool_name is None:
+ pool_name = src_vm.pool_name
+ else:
+ pool_name = options.pool_name
+
if qvm_collection.get_vm_by_name(dstname) is not None:
- print >> sys.stderr, "ERROR: A VM with the name '{0}' already exists in the system.".format(dstname)
+ print >> sys.stderr, \
+ "ERROR: A VM with the name '{0}' already exists in the system." \
+ .format(dstname)
exit(1)
if src_vm.is_disposablevm():
@@ -70,19 +85,21 @@ def main():
exit(1)
dst_vm = qvm_collection.add_new_vm(src_vm.__class__.__name__,
- name=dstname, template=src_vm.template,
- dir_path=options.dir_path, installed_by_rpm=False)
+ name=dstname, template=src_vm.template,
+ pool_name=pool_name,
+ dir_path=options.dir_path,
+ installed_by_rpm=False)
try:
dst_vm.clone_attrs(src_vm)
- dst_vm.clone_disk_files (src_vm=src_vm, verbose=options.verbose)
+ dst_vm.clone_disk_files(src_vm=src_vm, verbose=options.verbose)
except (IOError, OSError) as err:
print >> sys.stderr, "ERROR: {0}".format(err)
qvm_collection.pop(dst_vm.qid)
dst_vm.remove_from_disk()
- exit (1)
+ exit(1)
qvm_collection.save()
qvm_collection.unlock_db()
-
+
main()
diff --git a/qvm-tools/qvm-create b/qvm-tools/qvm-create
index a014febf..18e45c87 100755
--- a/qvm-tools/qvm-create
+++ b/qvm-tools/qvm-create
@@ -41,6 +41,8 @@ def main():
help="Specify the label to use for the new VM (e.g. red, yellow, green, ...)")
parser.add_option ("-p", "--proxy", action="store_true", dest="proxyvm", default=False,
help="Create ProxyVM")
+ parser.add_option ("-P", "--pool", dest="pool_name",
+ help="Specify which storage pool to use")
parser.add_option ("-H", "--hvm", action="store_true", dest="hvm", default=False,
help="Create HVM (standalone unless --template option used)")
parser.add_option ("--hvm-template", action="store_true", dest="hvm_template", default=False,
@@ -71,6 +73,11 @@ def main():
parser.error ("You must specify VM name!")
vmname = args[0]
+ if options.pool_name is None:
+ pool_name = "default"
+ else:
+ pool_name = options.pool_name
+
if (options.netvm + options.proxyvm + options.hvm + options.hvm_template) > 1:
parser.error ("You must specify at most one VM type switch")
@@ -123,6 +130,10 @@ def main():
if options.offline_mode:
vmm.offline_mode = True
+ if re.match('^disp\d+$', vmname):
+ print >> sys.stderr, 'The name "{0}" is reserved for internal use.'.format(vmname)
+ exit(1)
+
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
@@ -170,7 +181,9 @@ def main():
vmtype = "QubesAppVm"
try:
- vm = qvm_collection.add_new_vm(vmtype, name=vmname, template=new_vm_template, label = label)
+ vm = qvm_collection.add_new_vm(vmtype, name=vmname,
+ template=new_vm_template, label=label,
+ pool_name=pool_name)
except QubesException as err:
print >> sys.stderr, "ERROR: {0}".format(err)
exit (1)
diff --git a/qvm-tools/qvm-grow-root b/qvm-tools/qvm-grow-root
index 93c55bb2..5e650ab7 100755
--- a/qvm-tools/qvm-grow-root
+++ b/qvm-tools/qvm-grow-root
@@ -34,6 +34,10 @@ def main():
usage = "usage: %prog "
parser = OptionParser (usage)
+ parser.add_option("--allow-start", action="store_true",
+ dest="allow_start", default=False,
+ help="Allow VM to be started to complete the operation")
+
(options, args) = parser.parse_args ()
if (len (args) != 2):
parser.error ("You must specify VM name and new size!")
@@ -57,7 +61,7 @@ def main():
exit(1)
try:
- vm.resize_root_img(size_bytes)
+ vm.resize_root_img(size_bytes, allow_start=options.allow_start)
except (IOError, OSError, QubesException) as err:
print >> sys.stderr, "ERROR: {0}".format(err)
exit (1)
diff --git a/qvm-tools/qvm-ls b/qvm-tools/qvm-ls
index 42e6f4c7..ce8e8609 100755
--- a/qvm-tools/qvm-ls
+++ b/qvm-tools/qvm-ls
@@ -24,7 +24,7 @@
from qubes.qubes import QubesVmCollection
from qubes.qubes import QubesHost
from qubes.qubes import QubesException
-from optparse import OptionParser
+from argparse import ArgumentParser
import sys
@@ -91,119 +91,140 @@ fields = {
def main():
- usage = "usage: %prog [options] "
- parser = OptionParser (usage)
+ usage = "%(prog)s [options]"
+ parser = ArgumentParser ()
- parser.add_option ("-n", "--network", dest="network",
+ parser.add_argument ("VMs", action="store", nargs="*",
+ help="Specify VMs to be queried")
+
+ parser.add_argument ("-n", "--network", dest="network",
action="store_true", default=False,
help="Show network addresses assigned to VMs")
- parser.add_option ("-c", "--cpu", dest="cpu",
+ parser.add_argument ("-c", "--cpu", dest="cpu",
action="store_true", default=False,
help="Show CPU load")
- parser.add_option ("-m", "--mem", dest="mem",
+ parser.add_argument ("-m", "--mem", dest="mem",
action="store_true", default=False,
help="Show memory usage")
- parser.add_option ("-d", "--disk", dest="disk",
+ parser.add_argument ("-d", "--disk", dest="disk",
action="store_true", default=False,
help="Show VM disk utilization statistics")
- parser.add_option ("-k", "--kernel", dest="kernel",
+ parser.add_argument ("-k", "--kernel", dest="kernel",
action="store_true", default=False,
- help="Show VM kernel options")
+ help="Show VM kernel arguments")
- parser.add_option ("-i", "--ids", dest="ids",
+ parser.add_argument ("-i", "--ids", dest="ids",
action="store_true", default=False,
help="Show Qubes and Xen id#s")
- parser.add_option("-b", "--last-backup", dest="backup",
+ parser.add_argument("-b", "--last-backup", dest="backup",
action="store_true", default=False,
help="Show date of last VM backup")
- parser.add_option("--raw-list", dest="raw_list",
+ parser.add_argument("--raw-list", dest="raw_list",
action="store_true", default=False,
help="List only VM names one per line")
+ parser.add_argument("--raw-data", dest="raw_data",
+ action="store", nargs="+",
+ help="Display the specified data fields of the given VMs. "
+ "Intended for bash parsing.")
- (options, args) = parser.parse_args ()
+
+ arguments = parser.parse_args ()
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
qvm_collection.unlock_db()
- if options.raw_list:
+ if arguments.raw_list:
for vm in qvm_collection.values():
print vm.name
return
- fields_to_display = ["name", "on", "state", "updbl", "type", "template", "netvm", "label" ]
-
cpu_usages = None
- if (options.ids):
- fields_to_display += ["qid", "xid"]
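+ #raw-data mode prints only the requested fields, one VM per line,
+ #with values separated by '|' (see the output loop at the end of main)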
+ if arguments.raw_data:
+ fields_to_display = arguments.raw_data
+ if 'cpu' in arguments.raw_data:
+ qhost = QubesHost()
+ (measure_time, cpu_usages) = qhost.measure_cpu_usage(qvm_collection)
+ else:
+ fields_to_display = ["name", "on", "state", "updbl", "type", "template", "netvm", "label" ]
- if (options.cpu):
- qhost = QubesHost()
- (measure_time, cpu_usages) = qhost.measure_cpu_usage()
- fields_to_display += ["cpu"]
+ if (arguments.ids):
+ fields_to_display += ["qid", "xid"]
- if (options.mem):
- fields_to_display += ["mem"]
+ if (arguments.cpu):
+ qhost = QubesHost()
+ (measure_time, cpu_usages) = qhost.measure_cpu_usage(qvm_collection)
+ fields_to_display += ["cpu"]
- if options.backup:
- fields_to_display += ["last backup"]
+ if (arguments.mem):
+ fields_to_display += ["mem"]
- if (options.network):
- if 'template' in fields_to_display:
- fields_to_display.remove ("template")
- fields_to_display += ["ip", "ip back", "gateway/DNS"]
+ if arguments.backup:
+ fields_to_display += ["last backup"]
- if (options.disk):
- if 'template' in fields_to_display:
- fields_to_display.remove ("template")
- if 'netvm' in fields_to_display:
- fields_to_display.remove ("netvm")
- fields_to_display += ["priv-curr", "priv-max", "root-curr", "root-max", "disk" ]
+ if (arguments.network):
+ if 'template' in fields_to_display:
+ fields_to_display.remove ("template")
+ fields_to_display += ["ip", "ip back", "gateway/DNS"]
- if (options.kernel):
- fields_to_display += ["kernel", "kernelopts" ]
+ if (arguments.disk):
+ if 'template' in fields_to_display:
+ fields_to_display.remove ("template")
+ if 'netvm' in fields_to_display:
+ fields_to_display.remove ("netvm")
+ fields_to_display += ["priv-curr", "priv-max", "root-curr", "root-max", "disk" ]
+
+ if (arguments.kernel):
+ fields_to_display += ["kernel", "kernelopts" ]
vms_list = [vm for vm in qvm_collection.values()]
- if len(args) > 0:
- vms_list = [vm for vm in vms_list if vm.name in args]
- no_vms = len (vms_list)
- vms_to_display = []
- # Frist, the NetVMs...
- for netvm in vms_list:
- if netvm.is_netvm():
- vms_to_display.append (netvm)
+ #assume VMs are presented in desired order:
+ if len(arguments.VMs) > 0:
+ vms_to_display = [vm for vm in vms_list if vm.name in arguments.VMs]
+ #otherwise, order them: NetVMs first, then templateless AppVMs, then each template with its AppVMs:
+ else:
+ no_vms = len (vms_list)
+ vms_to_display = []
+ # First, the NetVMs...
+ for netvm in vms_list:
+ if netvm.is_netvm():
+ vms_to_display.append (netvm)
- # Now, the AppVMs without template (or with template not included in the list)...
- for appvm in vms_list:
- if appvm.is_appvm() and not appvm.is_template() and \
- (appvm.template is None or appvm.template not in vms_list):
- vms_to_display.append (appvm)
+ # Now, the AppVMs without template (or with template not included in the list)...
+ for appvm in vms_list:
+ if appvm.is_appvm() and not appvm.is_template() and \
+ (appvm.template is None or appvm.template not in vms_list):
+ vms_to_display.append (appvm)
- # Now, the template, and all its AppVMs...
- for tvm in vms_list:
- if tvm.is_template():
- vms_to_display.append (tvm)
- for vm in vms_list:
- if (vm.is_appvm() or vm.is_disposablevm()) and \
- vm.template and vm.template.qid == tvm.qid:
- vms_to_display.append(vm)
+ # Now, the template, and all its AppVMs...
+ for tvm in vms_list:
+ if tvm.is_template():
+ vms_to_display.append (tvm)
+ for vm in vms_list:
+ if (vm.is_appvm() or vm.is_disposablevm()) and \
+ vm.template and vm.template.qid == tvm.qid:
+ vms_to_display.append(vm)
- assert len(vms_to_display) == no_vms
+ assert len(vms_to_display) == no_vms
+
+ #We don't need a max_width if we divide the output by pipes!
# First calculate the maximum width of each field we want to display
# also collect data to display
for f in fields_to_display:
fields[f]["max_width"] = len(f)
+
+
data_to_display = []
for vm in vms_to_display:
data_row = {}
@@ -213,42 +234,44 @@ def main():
else:
data_row[f] = str(eval(fields[f]["func"]))
l = len(data_row[f])
- if l > fields[f]["max_width"]:
+ if 'max_width' in fields[f] and l > fields[f]["max_width"]:
fields[f]["max_width"] = l
data_to_display.append(data_row)
try:
vm.verify_files()
except QubesException as err:
print >> sys.stderr, "WARNING: VM '{0}' has corrupted files!".format(vm.name)
-
- # XXX: For what?
- total_width = 0;
- for f in fields_to_display:
- total_width += fields[f]["max_width"]
- # Display the header
- s = ""
- for f in fields_to_display:
- fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
- s += fmt.format('-')
- print s
- s = ""
- for f in fields_to_display:
- fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
- s += fmt.format(f)
- print s
- s = ""
- for f in fields_to_display:
- fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
- s += fmt.format('-')
- print s
-
- # ... and the actual data
- for row in data_to_display:
+ #A nicely formatted header is only needed for humans
+ if not arguments.raw_data:
+ # Display the header
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
+ s += fmt.format('-')
+ print s
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
- s += fmt.format(row[f])
+ s += fmt.format(f)
+ print s
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
+ s += fmt.format('-')
print s
+ # ... and the actual data
+ for row in data_to_display:
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
+ s += fmt.format(row[f])
+ print s
+
+ #won't look pretty, but is easy to parse!
+ else:
+ for row in data_to_display:
+ print '|'.join([row[f] for f in fields_to_display])
+
main()
diff --git a/qvm-tools/qvm-pci b/qvm-tools/qvm-pci
index 80ca4a1f..72c5d186 100755
--- a/qvm-tools/qvm-pci
+++ b/qvm-tools/qvm-pci
@@ -27,6 +27,25 @@ import subprocess
import os
import sys
from qubes.qubes import vmm
+import re
+
+
+def find_devices_of_class(klass):
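+ # Parse 'lspci -mm -n' output and yield the BDF (bus:dev.func) of every
+ # device whose numeric class code starts with the given prefix
+ # (e.g. "02" = network controllers, "0c03" = USB controllers).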
+ p = subprocess.Popen(["/sbin/lspci", "-mm", "-n"], stdout=subprocess.PIPE)
+ result = p.communicate()
+ retcode = p.returncode
+ if retcode != 0:
+ print "ERROR when executing lspci!"
+ raise IOError
+
+ rx_netdev = re.compile(r"^([0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f]) \"" +
+ klass)
+ for dev in str(result[0]).splitlines():
+ match = rx_netdev.match(dev)
+ if match is not None:
+ dev_bdf = match.group(1)
+ assert dev_bdf is not None
+ yield dev_bdf
def main():
@@ -39,6 +58,9 @@ def main():
parser.add_option ("-l", "--list", action="store_true", dest="do_list", default=False)
parser.add_option ("-a", "--add", action="store_true", dest="do_add", default=False)
parser.add_option ("-d", "--delete", action="store_true", dest="do_delete", default=False)
+ parser.add_option("-C", "--add-class", action="store_true",
+ dest="do_add_class", default=False,
+ help="Add all devices of given class (net, usb)")
parser.add_option ("--offline-mode", dest="offline_mode",
action="store_true", default=False,
help="Offline mode")
@@ -49,14 +71,15 @@ def main():
vmname = args[0]
- if options.do_list + options.do_add + options.do_delete > 1:
- print >> sys.stderr, "Only one of -l -a -d is allowed!"
- exit (1)
+ if options.do_list + options.do_add + options.do_delete + \
+ options.do_add_class > 1:
+ print >> sys.stderr, "Only one of -l -a -d -C is allowed!"
+ exit(1)
if options.offline_mode:
vmm.offline_mode = True
- if options.do_add or options.do_delete:
+ if options.do_add or options.do_delete or options.do_add_class:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
@@ -81,6 +104,26 @@ def main():
qvm_collection.save()
qvm_collection.unlock_db()
+ elif options.do_add_class:
+ if len(args) < 2:
+ print >> sys.stderr, "You must specify the PCI device class to add"
+ exit(1)
+
+ klass = args[1]
+
+ if klass == 'net':
+ devs = find_devices_of_class("02")
+ elif klass == 'usb':
+ devs = find_devices_of_class("0c03")
+ else:
+ print >> sys.stderr, "Supported classes: net, usb"
+ exit(1)
+
+ for dev in devs:
+ vm.pci_add(dev)
+ qvm_collection.save()
+ qvm_collection.unlock_db()
+
elif options.do_delete:
if len (args) < 2:
print >> sys.stderr, "You must specify the PCI device to delete"
diff --git a/qvm-tools/qvm-prefs b/qvm-tools/qvm-prefs
index dea3aa4f..a8180754 100755
--- a/qvm-tools/qvm-prefs
+++ b/qvm-tools/qvm-prefs
@@ -58,6 +58,7 @@ def do_list(vm):
print fmt.format ("config", vm.conf_file)
print fmt.format ("pcidevs", vm.pcidevs)
print fmt.format ("pci_strictreset", vm.pci_strictreset)
+ print fmt.format ("pci_e820_host", vm.pci_e820_host)
if vm.template is None:
print fmt.format ("root_img", vm.root_img)
if hasattr(vm, "rootcow_img") and vm.rootcow_img is not None:
@@ -228,6 +229,14 @@ def set_pci_strictreset(vms, vm, args):
vm.pci_strictreset = bool(eval(args[0].capitalize()))
return True
+def set_pci_e820_host(vms, vm, args):
+ if len (args) != 1:
+ print >> sys.stderr, "Missing value (True/False)!"
+ return False
+
+ vm.pci_e820_host = bool(eval(args[0].capitalize()))
+ return True
+
def set_netvm(vms, vm, args):
if len (args) != 1:
print >> sys.stderr, "Missing netvm name argument!"
@@ -485,6 +494,7 @@ properties = {
"include_in_backups": set_include_in_backups,
"pcidevs": set_pcidevs,
"pci_strictreset": set_pci_strictreset,
+ "pci_e820_host": set_pci_e820_host,
"label" : set_label,
"netvm" : set_netvm,
"dispvm_netvm" : set_dispvm_netvm,
@@ -536,7 +546,7 @@ def main():
default=False)
parser.add_option("-s", "--set", action="store_true", dest="do_set",
default=False)
- parser.add_option ("-g", "--gry", action="store_true", dest="do_get",
+ parser.add_option ("-g", "--get", action="store_true", dest="do_get",
default=False)
parser.add_option("--force-root", action="store_true", dest="force_root",
default=False,
diff --git a/qvm-tools/qvm-remove b/qvm-tools/qvm-remove
index ccb10212..b574d0de 100755
--- a/qvm-tools/qvm-remove
+++ b/qvm-tools/qvm-remove
@@ -79,6 +79,10 @@ def main():
exit (1)
try:
+ if options.remove_from_db_only:
+ # normally it is done by vm.remove_from_disk(), but it isn't
+ # called in this case
+ vm.libvirt_domain.undefine()
if vm.installed_by_rpm:
if options.verbose:
print >> sys.stderr, "--> VM installed by RPM, leaving all the files on disk"
diff --git a/qvm-tools/qvm-run b/qvm-tools/qvm-run
index f404a6c2..e255fc19 100755
--- a/qvm-tools/qvm-run
+++ b/qvm-tools/qvm-run
@@ -47,7 +47,9 @@ def vm_run_cmd(vm, cmd, options):
if options.verbose:
print >> sys.stderr, "Running command on VM: '{0}'...".format(vm.name)
if options.passio and options.color_output is not None:
- print "\033[0;%dm" % options.color_output,
+ sys.stdout.write("\033[0;{}m".format(options.color_output))
+ if options.passio and options.color_stderr is not None:
+ sys.stderr.write("\033[0;{}m".format(options.color_stderr))
try:
def tray_notify_generic(level, str):
@@ -65,6 +67,8 @@ def vm_run_cmd(vm, cmd, options):
except QubesException as err:
if options.passio and options.color_output is not None:
sys.stdout.write("\033[0m")
+ if options.passio and options.color_stderr is not None:
+ sys.stderr.write("\033[0m")
if options.tray:
tray_notify_error(str(err))
notify_error_qubes_manager(vm.name, str(err))
@@ -73,6 +77,8 @@ def vm_run_cmd(vm, cmd, options):
finally:
if options.passio and options.color_output is not None:
sys.stdout.write("\033[0m")
+ if options.passio and options.color_stderr is not None:
+ sys.stderr.write("\033[0m")
def main():
usage = "usage: %prog [options] [] []"
@@ -122,25 +128,44 @@ def main():
dest="color_output", default=None,
help="Disable marking VM output with red color")
+ parser.add_option("--no-color-stderr", action="store_false",
+ dest="color_stderr", default=None,
+ help="Disable marking VM stderr with red color")
+
parser.add_option("--color-output", action="store", type="int",
dest="color_output",
help="Force marking VM output with given ANSI style ("
"use 31 for red)")
+ parser.add_option("--color-stderr", action="store", type="int",
+ dest="color_stderr",
+ help="Force marking VM stderr with given ANSI style ("
+ "use 31 for red)")
+
(options, args) = parser.parse_args ()
- if options.passio and options.run_on_all_running:
+ if (options.passio and not options.localcmd) and options.run_on_all_running:
parser.error ("Options --all and --pass-io cannot be used together")
+ if options.localcmd and not options.passio:
+ print >> sys.stderr, "WARNING: option --localcmd has no effect " \
+ "without --pass-io"
+
if options.passio:
options.verbose = False
if options.color_output is None:
- if os.isatty(sys.stdout.fileno()):
+ if os.isatty(sys.stdout.fileno()) and not options.localcmd:
options.color_output = 31
elif options.color_output is False:
options.color_output = None
+ if options.color_stderr is None:
+ if os.isatty(sys.stderr.fileno()) and not options.localcmd:
+ options.color_stderr = 31
+ elif options.color_stderr is False:
+ options.color_stderr = None
+
if (options.pause or options.unpause):
takes_cmd_argument = False
else:
@@ -180,8 +205,6 @@ def main():
continue
if (options.unpause and vm.is_paused()) or (not options.unpause and vm.is_running()):
vms_list.append (vm)
- # disable options incompatible with --all
- options.passio = False
else:
vm = qvm_collection.get_vm_by_name(vmname)
if vm is None:
diff --git a/qvm-tools/qvm-shutdown b/qvm-tools/qvm-shutdown
index 37374cba..da3eeec2 100755
--- a/qvm-tools/qvm-shutdown
+++ b/qvm-tools/qvm-shutdown
@@ -36,6 +36,10 @@ def main():
help="Force operation, even if may damage other VMs (eg shutdown of NetVM)")
parser.add_option ("--wait", action="store_true", dest="wait_for_shutdown", default=False,
help="Wait for the VM(s) to shutdown")
+ parser.add_option("--wait-time", action="store", dest="wait_time",
+ default=defaults["shutdown_counter_max"],
+ help="Timeout after which the VM will be killed when --wait "
+ "is used")
parser.add_option ("--all", action="store_true", dest="shutdown_all", default=False,
help="Shutdown all running VMs")
parser.add_option ("--exclude", action="append", dest="exclude_list",
@@ -107,7 +111,7 @@ def main():
continue
else:
halting_vms.append(vm)
- if shutdown_counter > defaults["shutdown_counter_max"]:
+ if shutdown_counter > int(options.wait_time):
# kill the VM
if options.verbose:
print >> sys.stderr, "Killing the (apparently hanging) VM '{0}'...".format(vm.name)
diff --git a/qvm-tools/qvm-start b/qvm-tools/qvm-start
index 42deeb54..eeabe68e 100755
--- a/qvm-tools/qvm-start
+++ b/qvm-tools/qvm-start
@@ -57,6 +57,9 @@ def main():
help="Do actions necessary when preparing DVM image")
parser.add_option ("--custom-config", action="store", dest="custom_config", default=None,
help="Use custom Xen config instead of Qubes-generated one")
+ parser.add_option("--skip-if-running", action="store_true",
+ dest="skip_if_running", default=False,
+ help="Do not fail if the VM is already running")
parser.add_option ("--debug", action="store_true", dest="debug", default=False,
help="Enable debug mode for this VM (until its shutdown)")
@@ -83,7 +86,12 @@ def main():
exit(1)
if options.install_windows_tools:
- options.drive = 'cdrom:dom0:/usr/lib/qubes/qubes-windows-tools.iso'
+ windows_tools_path = '/usr/lib/qubes/qubes-windows-tools.iso'
+ if not os.path.exists(windows_tools_path):
+ print >> sys.stderr, "You need to install 'qubes-windows-tools' " \
+ "package in dom0 first"
+ exit(1)
+ options.drive = 'cdrom:dom0:{}'.format(windows_tools_path)
if options.drive_hd:
options.drive = 'hd:' + options.drive_hd
@@ -104,6 +112,9 @@ def main():
if options.debug:
vm.debug = True
+ if options.skip_if_running and vm.is_running():
+ return
+
try:
vm.verify_files()
xid = vm.start(verbose=options.verbose, preparing_dvm=options.preparing_dvm, start_guid=not options.noguid, notify_function=tray_notify_generic if options.tray else None)
diff --git a/qvm-tools/qvm-sync-clock b/qvm-tools/qvm-sync-clock
index 36e4cdfb..4cd6ba8d 100755
--- a/qvm-tools/qvm-sync-clock
+++ b/qvm-tools/qvm-sync-clock
@@ -22,6 +22,7 @@
#
import fcntl
+from optparse import OptionParser
from qubes.qubes import QubesVmCollection
import os.path
import os
@@ -41,9 +42,11 @@ def get_netvm_of_vm(vm):
return netvm
def main():
- verbose = False
- if len(sys.argv) > 1 and sys.argv[1] in [ '--verbose', '-v' ]:
- verbose = True
+ parser = OptionParser()
+ parser.add_option ("-v", "--verbose", action="store_true", dest="verbose", default=False)
+ parser.add_option ("-f", "--force", action="store_true", dest="force", default=False)
+
+ (options, args) = parser.parse_args ()
lockfile_name = "/var/run/qubes/qvm-sync-clock.lock"
if os.path.exists(lockfile_name):
@@ -74,43 +77,47 @@ def main():
sys.exit(1)
net_vm = get_netvm_of_vm(clock_vm)
- if verbose:
+ if options.verbose:
print >> sys.stderr, '--> Waiting for network for ClockVM.'
# Ignore retcode, try even if nm-online failed - user can setup network manually
# on-online has timeout 30sec by default
- net_vm.run('nm-online -x', verbose=verbose, gui=False, wait=True,
+ net_vm.run('nm-online -x', verbose=options.verbose, gui=False, wait=True,
ignore_stderr=True)
# Sync clock
if clock_vm.run('QUBESRPC qubes.SyncNtpClock dom0', user="root",
- verbose=verbose, gui=False, wait=True, ignore_stderr=True) \
+ verbose=options.verbose, gui=False, wait=True, ignore_stderr=True) \
!= 0:
- print >> sys.stderr, 'Time sync failed, aborting!'
- sys.exit(1)
+ if options.force:
+ print >> sys.stderr, 'Time sync failed! - Syncing with dom0 ' \
+ 'anyway as requested'
+ else:
+ print >> sys.stderr, 'Time sync failed! - Exiting'
+ sys.exit(1)
+ else:
+ # Use the date format based on RFC2822 to avoid localisation issues
+ p = clock_vm.run('date -u -Iseconds', verbose=options.verbose,
+ gui=False, passio_popen=True, ignore_stderr=True)
+ date_out = p.stdout.read(100)
+ date_out = date_out.strip()
+ if not re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:?00$', date_out):
+ print >> sys.stderr, 'Invalid date output, aborting!'
+ sys.exit(1)
- # Use the date format based on RFC2822 to avoid localisation issues
- p = clock_vm.run('date -u -Iseconds', verbose=verbose,
- gui=False, passio_popen=True, ignore_stderr=True)
- date_out = p.stdout.read(100)
- date_out = date_out.strip()
- if not re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+0000$', date_out):
- print >> sys.stderr, 'Invalid date output, aborting!'
- sys.exit(1)
+ # Sync dom0 time
+ if options.verbose:
+ print >> sys.stderr, '--> Syncing dom0 clock.'
- # Sync dom0 time
- if verbose:
- print >> sys.stderr, '--> Syncing dom0 clock.'
-
- subprocess.check_call(['sudo', 'date', '-u', '-Iseconds', '-s', date_out],
- stdout=None if verbose else open(os.devnull, 'w'))
- subprocess.check_call(['sudo', 'hwclock', '--systohc'],
- stdout=None if verbose else open(os.devnull, 'w'))
+ subprocess.check_call(['sudo', 'date', '-u', '-Iseconds', '-s', date_out],
+ stdout=None if options.verbose else open(os.devnull, 'w'))
+ subprocess.check_call(['sudo', 'hwclock', '--systohc'],
+ stdout=None if options.verbose else open(os.devnull, 'w'))
# Sync other VMs clock
for vm in qvm_collection.values():
if vm.is_running() and vm.qid != 0 and vm.qid != clock_vm.qid:
- if verbose:
+ if options.verbose:
print >> sys.stderr, '--> Syncing \'%s\' clock.' % vm.name
try:
vm.run_service("qubes.SetDateTime", user="root",
diff --git a/qvm-tools/qvm-top b/qvm-tools/qvm-top
new file mode 100755
index 00000000..16ebdb8d
--- /dev/null
+++ b/qvm-tools/qvm-top
@@ -0,0 +1,124 @@
+#!/usr/bin/python2
+# -*- encoding: utf8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2010 Joanna Rutkowska
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+from qubes.qubes import QubesVmCollection
+from qubes.qubes import QubesHost
+from qubes.qubes import QubesException
+from optparse import OptionParser
+import sys
+
+def main():
+ usage = "usage: %prog [options]"
+ parser = OptionParser (usage)
+
+ parser.add_option("--list", dest="list_top",
+ action="store_true", default=False,
+ help="n m : One line summary of top n vms with more than m cpu_time %")
+
+ (options, args) = parser.parse_args ()
+
+ qvm_collection = QubesVmCollection()
+ qvm_collection.lock_db_for_reading()
+ qvm_collection.load()
+ qvm_collection.unlock_db()
+
+ fields_to_display = ["name", "cpu", "mem"]
+
+ cpu_usages = None
+ qhost = QubesHost()
+ (measure_time, cpu_usages) = qhost.measure_cpu_usage(qvm_collection)
+
+ vms_list = [vm for vm in qvm_collection.values() if vm.is_running()]
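+    # sort running VMs by CPU usage, highest first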
+    vms_list = sorted(vms_list, key=lambda vm: 1 - cpu_usages[vm.get_xid()]['cpu_usage'])
+
+ no_vms = len (vms_list)
+ vms_to_display = vms_list
+
+ if options.list_top:
+ any_shown = False
+ ndisp = 3
+ cputh = 0
+ if len(args) > 0:
+ ndisp = int(args[0])
+ if len(args) > 1:
+ cputh = int(args[1])
+
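+        # print a one-line summary of the top ndisp VMs whose CPU usage exceeds cputh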
+ for vm in vms_to_display[:ndisp]:
+ cpu = cpu_usages[vm.get_xid()]['cpu_usage']
+ if cpu > cputh:
+ any_shown = True
+ sys.stdout.write("%d %s, " % (cpu, vm.name))
+
+ if any_shown:
+ sys.stdout.write(" ... | ")
+
+ totalMem = 0
+ dom0mem = 0
+ for vm in vms_to_display:
+ if not vm.name == "dom0":
+ totalMem += vm.get_mem()
+ else:
+ dom0mem = vm.get_mem()
+ totalMem /= 1024.0 * 1024.0
+ dom0mem /= 1024.0 * 1024.0
+ sys.stdout.write("%.1f G + %.1f G" % (totalMem, dom0mem))
+ return
+
+ max_width = { 'name': 0, 'cpu': 0, 'mem': 0 }
+ data_to_display = []
+ for vm in vms_to_display:
+ data_row = {}
+ data_row['name'] = vm.name
+ max_width['name'] = max(max_width['name'], len(data_row['name']))
+ data_row['cpu'] = "%.1f" % (cpu_usages[vm.get_xid()]['cpu_usage'])
+ max_width['cpu'] = max(max_width['cpu'], len(data_row['cpu']))
+ data_row['mem'] = "%d" % (vm.get_mem() / (1024.0))
+ max_width['mem'] = max(max_width['mem'], len(data_row['mem']))
+ data_to_display.append(data_row)
+
+ # Display the header
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:-^{0}}}-+".format(max_width[f] + 1)
+ s += fmt.format('-')
+ print s
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:>{0}}} |".format(max_width[f] + 1)
+ s += fmt.format(f)
+ print s
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:-^{0}}}-+".format(max_width[f] + 1)
+ s += fmt.format('-')
+ print s
+
+ # ... and the actual data
+ for row in data_to_display:
+ s = ""
+ for f in fields_to_display:
+ fmt="{{0:>{0}}} |".format(max_width[f] + 1)
+ s += fmt.format(row[f])
+ print s
+
+main()
diff --git a/qvm-tools/qvm-trim-template b/qvm-tools/qvm-trim-template
index f576822b..d5812210 100755
--- a/qvm-tools/qvm-trim-template
+++ b/qvm-tools/qvm-trim-template
@@ -92,10 +92,21 @@ def main():
touch_dvm_savefile = is_dvm_up_to_date(tvm, dvm_tmpl)
print >> sys.stderr, "Creating temporary VM..."
- fstrim_vm = qvm_collection.add_new_vm("QubesAppVm",
- template=tvm,
- name="{}-fstrim".format(tvm_name),
- netvm=None,
+ trim_vmname = "trim-{}".format(tvm_name[:31 - len('trim-')])
+ fstrim_vm = qvm_collection.get_vm_by_name(trim_vmname)
+ if fstrim_vm is not None:
+ if not fstrim_vm.internal:
+            print >>sys.stderr, \
+                "ERROR: VM '{}' already exists and is not marked as internal. " \
+                "Remove it manually.".format(trim_vmname)
+            sys.exit(1)
+ fstrim_vm.remove_from_disk()
+ qvm_collection.pop(fstrim_vm.qid)
+ fstrim_vm = qvm_collection.add_new_vm(
+ "QubesAppVm",
+ template=tvm,
+ name=trim_vmname,
+ netvm=None,
+ internal=True,
)
if not fstrim_vm:
print >> sys.stderr, "ERROR: Failed to create new VM"
diff --git a/qvm-tools/qvm-usb b/qvm-tools/qvm-usb
index 31556379..f2b4407e 100755
--- a/qvm-tools/qvm-usb
+++ b/qvm-tools/qvm-usb
@@ -27,8 +27,6 @@ from optparse import OptionParser
import sys
import os
-pvusb_enable_flagfile = '/var/lib/qubes/pvusb-enable.flag'
-
def main():
usage = "usage: %prog -l [options]\n"\
"usage: %prog -a [options] :\n"\
@@ -49,24 +47,6 @@ def main():
(options, args) = parser.parse_args ()
- if not os.path.exists(pvusb_enable_flagfile):
- print >> sys.stderr, ""
- print >> sys.stderr, "******* WARNING *** WARNING *** WARNING *** WARNING *******"
- print >> sys.stderr, "*** ***"
- print >> sys.stderr, "*** PVUSB passthrough kernel support is still unstable. ***"
- print >> sys.stderr, "*** It can CRASH your VMs. ***"
- print >> sys.stderr, "*** ***"
- print >> sys.stderr, "***********************************************************"
- print >> sys.stderr, ""
- print >> sys.stderr, "To use it, you need install kernel from \"unstable\" repository"
- print >> sys.stderr, "If you still want to enable it, type capital YES"
- print >> sys.stderr, ""
- prompt = raw_input ("Do you want enable PV USB support? ")
- if prompt == "YES":
- open(pvusb_enable_flagfile, "w").close()
- else:
- exit(1)
-
if hasattr(os, "geteuid") and os.geteuid() == 0:
if not options.force_root:
print >> sys.stderr, "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
@@ -78,11 +58,10 @@ def main():
print >> sys.stderr, "Only one of -l -a -d is allowed!"
exit (1)
- if options.do_attach or options.do_detach:
- qvm_collection = QubesVmCollection()
- qvm_collection.lock_db_for_reading()
- qvm_collection.load()
- qvm_collection.unlock_db()
+ qvm_collection = QubesVmCollection()
+ qvm_collection.lock_db_for_reading()
+ qvm_collection.load()
+ qvm_collection.unlock_db()
if options.do_attach:
if (len (args) != 2):
@@ -91,14 +70,17 @@ def main():
if vm is None:
parser.error ("Invalid VM name: %s" % args[0])
- # FIXME: here we assume that device is always in form "domain:dev", which can be changed in the future
+ # FIXME: here we assume that device is always in form "domain:dev",
+ # which can be changed in the future
if args[1].find(":") < 0:
- parser.error ("Invalid device syntax: %s" % args[1])
- dev_list = usb_list()
+ parser.error("Invalid device syntax: %s" % args[1])
+ backend_vm = qvm_collection.get_vm_by_name(args[1].split(":")[0])
+ if backend_vm is None:
+ parser.error("No such VM: {}".format(args[1].split(":")[0]))
+ dev_list = usb_list(qvm_collection, vm=backend_vm)
if not args[1] in dev_list.keys():
- parser.error ("Invalid device name: %s" % args[1])
+ parser.error("Invalid device name: %s" % args[1])
dev = dev_list[args[1]]
- backend_vm = qvm_collection.get_vm_by_name(dev['vm'])
assert backend_vm is not None
kwargs = {}
@@ -106,14 +88,14 @@ def main():
# kwargs['frontend'] = options.frontend
kwargs['auto_detach'] = options.auto_detach
try:
- usb_attach(vm, backend_vm, dev['device'], **kwargs)
+ usb_attach(qvm_collection, vm, dev, **kwargs)
except QubesException as e:
print >> sys.stderr, "ERROR: %s" % str(e)
sys.exit(1)
elif options.do_detach:
if (len (args) < 1):
parser.error ("You must provide device or vm name!")
- if len(args) > 1:
+ if len(args) > 1:
parser.error ("Too many parameters")
# Check if provided name is VM
vm = qvm_collection.get_vm_by_name(args[0])
@@ -123,36 +105,37 @@ def main():
# kwargs['frontend'] = options.frontend
# usb_detach(vm, **kwargs)
#else:
- usb_detach_all(vm)
+ usb_detach_all(qvm_collection, vm)
else:
# Maybe usbvm:device?
- # FIXME: nasty copy-paste from attach code half a page above
- # FIXME: here we assume that device is always in form "domain:dev", which can be changed in the future
+ # FIXME: nasty copy-paste from attach code half a page above
+ # FIXME: here we assume that device is always in form "domain:dev",
+ # which can be changed in the future
if args[0].find(":") < 0:
- parser.error ("Invalid device syntax: %s" % args[0])
- dev_list = usb_list()
+ parser.error("Invalid device syntax: %s" % args[0])
+ backend_vm = qvm_collection.get_vm_by_name(args[0].split(":")[0])
+ if backend_vm is None:
+ parser.error("No such VM: {}".format(args[0].split(":")[0]))
+ dev_list = usb_list(qvm_collection, vm=backend_vm)
if not args[0] in dev_list.keys():
- parser.error ("Invalid device name: %s" % args[0])
+ parser.error("Invalid device name: %s" % args[0])
dev = dev_list[args[0]]
- backend_vm = qvm_collection.get_vm_by_name(dev['vm'])
- assert backend_vm is not None
-
- attached_to = usb_check_attached('', backend_vm.xid, dev['device'])
+ attached_to = usb_check_attached(qvm_collection, dev)
if attached_to is None:
print >> sys.stderr, "WARNING: Device not connected to any VM"
exit(0)
- usb_detach(backend_vm, attached_to)
+ usb_detach(qvm_collection, attached_to, dev)
else:
- if len(args) > 0:
- parser.error ("Too many parameters")
+ if len(args) > 0:
+ parser.error("Too many parameters")
# do_list
- for dev in usb_list().values():
- attached_to = usb_check_attached('', dev['xid'], dev['device'])
+ for dev in usb_list(qvm_collection).values():
+ attached_to = dev['connected-to']
attached_to_str = ""
if attached_to:
- attached_to_str = " (attached to %s:%s)" % (attached_to['vm'], attached_to['frontend'])
- print "%s\t%s%s (USBv%s)" % (dev['name'], dev['desc'], attached_to_str, dev['usb_ver'])
+ attached_to_str = " (attached to %s)" % (attached_to.name)
+ print "%s\t%s%s" % (dev['name'], dev['desc'], attached_to_str)
exit (0)
main()
diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec
index 86612c48..bc3bebf0 100644
--- a/rpm_spec/core-dom0.spec
+++ b/rpm_spec/core-dom0.spec
@@ -27,6 +27,9 @@
%{!?version: %define version %(cat version)}
+# debug_package hack should be removed when BuildArch:noarch is enabled below
+%define debug_package %{nil}
+
%define _dracutmoddir /usr/lib/dracut/modules.d
%if %{fedora} < 17
%define _dracutmoddir /usr/share/dracut/modules.d
@@ -43,11 +46,14 @@ License: GPL
URL: http://www.qubes-os.org
BuildRequires: ImageMagick
BuildRequires: systemd-units
+# FIXME: Enable this and disable debug_package
+#BuildArch: noarch
Requires(post): systemd-units
Requires(preun): systemd-units
Requires(postun): systemd-units
Requires: python, pciutils, python-inotify, python-daemon
-Requires: qubes-core-dom0-linux >= 2.0.24
+Requires: qubes-core-dom0-linux >= 3.1.8
+Requires: qubes-core-dom0-doc
Requires: qubes-db-dom0
Requires: python-lxml
Requires: python-psutil
@@ -57,7 +63,7 @@ Requires: libvirt-python
%if x%{?backend_vmm} == xxen
Requires: xen-runtime
Requires: xen-hvm
-Requires: libvirt-daemon-xen >= 1.2.12-3
+Requires: libvirt-daemon-xen >= 1.2.20-6
%endif
Requires: createrepo
Requires: gnome-packagekit
@@ -154,7 +160,6 @@ if ! grep -q ^qubes: /etc/group ; then
fi
%triggerin -- xen-runtime
-sed -i 's/\/block /\/block.qubes /' /etc/udev/rules.d/xen-backend.rules
/usr/lib/qubes/fix-dir-perms.sh
%preun
@@ -170,12 +175,12 @@ if [ "$1" = 0 ] ; then
chgrp root /etc/xen
chmod 700 /etc/xen
groupdel qubes
- sed -i 's/\/block.qubes /\/block /' /etc/udev/rules.d/xen-backend.rules
fi
%files
%defattr(-,root,root,-)
%config(noreplace) %attr(0664,root,qubes) %{_sysconfdir}/qubes/qmemman.conf
+%config(noreplace) %attr(0664,root,qubes) %{_sysconfdir}/qubes/storage.conf
/usr/bin/qvm-*
/usr/bin/qubes-*
%dir %{python_sitearch}/qubes
@@ -245,11 +250,14 @@ fi
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.Filecopy
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.GetImageRGBA
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.OpenInVM
+%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.OpenURL
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.NotifyTools
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.NotifyUpdates
%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.VMShell
+%attr(0664,root,qubes) %config(noreplace) /etc/qubes-rpc/policy/qubes.GetRandomizedTime
/etc/qubes-rpc/qubes.NotifyTools
/etc/qubes-rpc/qubes.NotifyUpdates
+/etc/qubes-rpc/qubes.GetRandomizedTime
%attr(2770,root,qubes) %dir /var/log/qubes
%attr(0770,root,qubes) %dir /var/run/qubes
/etc/xdg/autostart/qubes-guid.desktop
diff --git a/tests/Makefile b/tests/Makefile
index 8523adde..243fe681 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -17,13 +17,31 @@ endif
cp backupcompatibility.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp basic.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp basic.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp block.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp block.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp dispvm.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp dispvm.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp dom0_update.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp dom0_update.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp extra.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp extra.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp hardware.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp hardware.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp hvm.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp hvm.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp mime.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp mime.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp network.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp network.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
- cp vm_qrexec_gui.py $(DESTDIR)$(PYTHON_TESTSPATH)
- cp vm_qrexec_gui.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp pvgrub.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp pvgrub.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp regressions.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp regressions.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
cp run.py $(DESTDIR)$(PYTHON_TESTSPATH)
cp run.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp storage.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp storage.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp storage_xen.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp storage_xen.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp vm_qrexec_gui.py $(DESTDIR)$(PYTHON_TESTSPATH)
+ cp vm_qrexec_gui.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
diff --git a/tests/__init__.py b/tests/__init__.py
index 8abea4b8..cc100bd1 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -23,21 +23,35 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+"""
+.. warning::
+ The test suite hereby claims any domain whose name starts with
+    :py:data:`VMPREFIX` as fair game. This is needed to enforce a sane
+    test execution environment. If you have domains named ``test-*``,
+ don't run the tests.
+"""
+from distutils import spawn
+import functools
+
import multiprocessing
import logging
import os
import shutil
import subprocess
+import tempfile
import unittest
+import unittest.case
import lxml.etree
import sys
+import pkg_resources
import qubes.backup
import qubes.qubes
import time
-VMPREFIX = 'test-'
+VMPREFIX = 'test-inst-'
+CLSVMPREFIX = 'test-cls-'
#: :py:obj:`True` if running in dom0, :py:obj:`False` otherwise
@@ -85,6 +99,32 @@ def skipUnlessGit(test_item):
return unittest.skipUnless(in_git, 'outside git tree')(test_item)
+def expectedFailureIfTemplate(templates):
+ """
+    Decorator for marking a specific test as expected to fail only for some
+    templates. The template name is compared as a substring, so 'whonix'
+    matches both 'whonix-ws' and 'whonix-gw'.
+    templates can be either a single string or an iterable of strings.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ template = self.template
+            if isinstance(templates, basestring):
+                should_expect_fail = templates in template
+            else:
+                should_expect_fail = any(x in template for x in templates)
+ if should_expect_fail:
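+                # rely on unittest's Python 2.7 internals to report the
+                # expected failure / unexpected success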
+ try:
+ func(self, *args, **kwargs)
+ except Exception:
+ raise unittest.case._ExpectedFailure(sys.exc_info())
+ raise unittest.case._UnexpectedSuccess()
+ else:
+ # Call directly:
+ func(self, *args, **kwargs)
+ return wrapper
+ return decorator
class _AssertNotRaisesContext(object):
"""A context manager used to implement TestCase.assertNotRaises methods.
@@ -133,14 +173,12 @@ class QubesTestCase(unittest.TestCase):
self.__class__.__name__,
self._testMethodName))
-
def __str__(self):
return '{}/{}/{}'.format(
- '.'.join(self.__class__.__module__.split('.')[2:]),
+ self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
-
def tearDown(self):
super(QubesTestCase, self).tearDown()
@@ -153,7 +191,6 @@ class QubesTestCase(unittest.TestCase):
and filter((lambda (tc, exc): tc is self), l):
raise BeforeCleanExit()
-
def assertNotRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail if an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
@@ -183,15 +220,14 @@ class QubesTestCase(unittest.TestCase):
with context:
callableObj(*args, **kwargs)
-
def assertXMLEqual(self, xml1, xml2):
- '''Check for equality of two XML objects.
+ """Check for equality of two XML objects.
:param xml1: first element
:param xml2: second element
:type xml1: :py:class:`lxml.etree._Element`
:type xml2: :py:class:`lxml.etree._Element`
- ''' # pylint: disable=invalid-name
+ """ # pylint: disable=invalid-name
self.assertEqual(xml1.tag, xml2.tag)
self.assertEqual(xml1.text, xml2.text)
@@ -202,13 +238,13 @@ class QubesTestCase(unittest.TestCase):
class SystemTestsMixin(object):
def setUp(self):
- '''Set up the test.
+ """Set up the test.
.. warning::
This method instantiates QubesVmCollection acquires write lock for
it. You can use is as :py:attr:`qc`. You can (and probably
should) release the lock at the end of setUp in subclass
- '''
+ """
super(SystemTestsMixin, self).setUp()
@@ -218,17 +254,24 @@ class SystemTestsMixin(object):
self.conn = libvirt.open(qubes.qubes.defaults['libvirt_uri'])
- self.remove_test_vms()
-
+ self._remove_test_vms(self.qc, self.conn)
def tearDown(self):
super(SystemTestsMixin, self).tearDown()
- try: self.qc.lock_db_for_writing()
- except qubes.qubes.QubesException: pass
+ # release the lock, because we have no way to check whether it was
+ # read or write lock
+ try:
+ self.qc.unlock_db()
+ except qubes.qubes.QubesException:
+ pass
+
+ self._kill_test_vms(self.qc)
+
+ self.qc.lock_db_for_writing()
self.qc.load()
- self.remove_test_vms()
+ self._remove_test_vms(self.qc, self.conn)
self.qc.save()
self.qc.unlock_db()
@@ -236,9 +279,36 @@ class SystemTestsMixin(object):
self.conn.close()
+ @classmethod
+ def tearDownClass(cls):
+ super(SystemTestsMixin, cls).tearDownClass()
- def make_vm_name(self, name):
- return VMPREFIX + name
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+
+ conn = libvirt.open(qubes.qubes.defaults['libvirt_uri'])
+
+ cls._kill_test_vms(qc, prefix=CLSVMPREFIX)
+
+ qc.lock_db_for_writing()
+ qc.load()
+
+ cls._remove_test_vms(qc, conn, prefix=CLSVMPREFIX)
+
+ qc.save()
+ qc.unlock_db()
+ del qc
+
+ conn.close()
+
+ @staticmethod
+ def make_vm_name(name, class_teardown=False):
+ if class_teardown:
+ return CLSVMPREFIX + name
+ else:
+ return VMPREFIX + name
def save_and_reload_db(self):
self.qc.save()
@@ -246,44 +316,64 @@ class SystemTestsMixin(object):
self.qc.lock_db_for_writing()
self.qc.load()
- def _remove_vm_qubes(self, vm):
+ @staticmethod
+ def _kill_test_vms(qc, prefix=VMPREFIX):
+ # do not keep write lock while killing VMs, because that may cause a
+ # deadlock with disk hotplug scripts (namely qvm-template-commit
+ # called when shutting down TemplateVm)
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ for vm in qc.values():
+ if vm.name.startswith(prefix):
+ if vm.is_running():
+ vm.force_shutdown()
+
+ @classmethod
+ def _remove_vm_qubes(cls, qc, conn, vm):
vmname = vm.name
try:
# XXX .is_running() may throw libvirtError if undefined
if vm.is_running():
vm.force_shutdown()
- except: pass
+ except:
+ pass
- try: vm.remove_from_disk()
- except: pass
+ try:
+ vm.remove_from_disk()
+ except:
+ pass
- try: vm.libvirt_domain.undefine()
- except libvirt.libvirtError: pass
+ try:
+ vm.libvirt_domain.undefine()
+ except libvirt.libvirtError:
+ pass
- self.qc.pop(vm.qid)
+ qc.pop(vm.qid)
del vm
# Now ensure it really went away. This may not have happened,
# for example if vm.libvirtDomain malfunctioned.
try:
- dom = self.conn.lookupByName(vmname)
- except: pass
+ dom = conn.lookupByName(vmname)
+ except:
+ pass
else:
- self._remove_vm_libvirt(dom)
+ cls._remove_vm_libvirt(dom)
- self._remove_vm_disk(vmname)
+ cls._remove_vm_disk(vmname)
-
- def _remove_vm_libvirt(self, dom):
+ @staticmethod
+ def _remove_vm_libvirt(dom):
try:
dom.destroy()
except libvirt.libvirtError: # not running
pass
dom.undefine()
-
- def _remove_vm_disk(self, vmname):
+ @staticmethod
+ def _remove_vm_disk(vmname):
for dirspec in (
'qubes_appvms_dir',
'qubes_servicevms_dir',
@@ -296,35 +386,33 @@ class SystemTestsMixin(object):
else:
os.unlink(dirpath)
-
def remove_vms(self, vms):
- for vm in vms: self._remove_vm_qubes(vm)
+ for vm in vms:
+ self._remove_vm_qubes(self.qc, self.conn, vm)
self.save_and_reload_db()
+ @classmethod
+ def _remove_test_vms(cls, qc, conn, prefix=VMPREFIX):
+ """Aggresively remove any domain that has name in testing namespace.
- def remove_test_vms(self):
- '''Aggresively remove any domain that has name in testing namespace.
-
- .. warning::
- The test suite hereby claims any domain whose name starts with
- :py:data:`VMPREFIX` as fair game. This is needed to enforce sane
- test executing environment. If you have domains named ``test-*``,
- don't run the tests.
- '''
+ """
# first, remove them Qubes-way
something_removed = False
- for vm in self.qc.values():
- if vm.name.startswith(VMPREFIX):
- self._remove_vm_qubes(vm)
+ for vm in qc.values():
+ if vm.name.startswith(prefix):
+ cls._remove_vm_qubes(qc, conn, vm)
something_removed = True
if something_removed:
- self.save_and_reload_db()
+ qc.save()
+ qc.unlock_db()
+ qc.lock_db_for_writing()
+ qc.load()
# now remove what was only in libvirt
- for dom in self.conn.listAllDomains():
- if dom.name().startswith(VMPREFIX):
- self._remove_vm_libvirt(dom)
+ for dom in conn.listAllDomains():
+ if dom.name().startswith(prefix):
+ cls._remove_vm_libvirt(dom)
# finally remove anything that is left on disk
vmnames = set()
@@ -335,10 +423,34 @@ class SystemTestsMixin(object):
dirpath = os.path.join(qubes.qubes.system_path['qubes_base_dir'],
qubes.qubes.system_path[dirspec])
for name in os.listdir(dirpath):
- if name.startswith(VMPREFIX):
+ if name.startswith(prefix):
vmnames.add(name)
for vmname in vmnames:
- self._remove_vm_disk(vmname)
+ cls._remove_vm_disk(vmname)
+
+ def qrexec_policy(self, service, source, destination, allow=True):
+ """
+        Allow (or deny) a qrexec call for the duration of the test
+        :param service: service name
+        :param source: source VM name
+        :param destination: destination VM name
+        :param allow: add an 'allow' rule if True, a 'deny' rule otherwise
+        :return: None
+ """
+
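+        # the rule is prepended so it takes precedence over existing entries;
+        # the cleanup handler registered below removes the very same line again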
+ def add_remove_rule(add=True):
+ with open('/etc/qubes-rpc/policy/{}'.format(service), 'r+') as policy:
+ policy_rules = policy.readlines()
+ rule = "{} {} {}\n".format(source, destination,
+ 'allow' if allow else 'deny')
+ if add:
+ policy_rules.insert(0, rule)
+ else:
+ policy_rules.remove(rule)
+ policy.truncate(0)
+ policy.seek(0)
+ policy.write(''.join(policy_rules))
+ add_remove_rule(add=True)
+ self.addCleanup(add_remove_rule, add=False)
def wait_for_window(self, title, timeout=30, show=True):
"""
@@ -355,7 +467,7 @@ class SystemTestsMixin(object):
wait_count = 0
while subprocess.call(['xdotool', 'search', '--name', title],
stdout=open(os.path.devnull, 'w'),
- stderr=subprocess.STDOUT) == 0:
+ stderr=subprocess.STDOUT) == int(show):
wait_count += 1
if wait_count > timeout*10:
self.fail("Timeout while waiting for {} window to {}".format(
@@ -363,7 +475,31 @@ class SystemTestsMixin(object):
)
time.sleep(0.1)
+ def enter_keys_in_window(self, title, keys):
+ """
+ Search for window with given title, then enter listed keys there.
+ The function will wait for said window to appear.
+
+ :param title: title of window
+ :param keys: list of keys to enter, as for `xdotool key`
+ :return: None
+ """
+
+        # 'xdotool search --sync' sometimes crashes due to a race when
+        # accessing window properties
+ self.wait_for_window(title)
+ command = ['xdotool', 'search', '--name', title,
+ 'windowactivate', '--sync',
+ 'key'] + keys
+ subprocess.check_call(command)
+
def shutdown_and_wait(self, vm, timeout=60):
+ """
+
+ :param vm: VM object
+ :param timeout: timeout after which fail the test
+ :return:
+ """
vm.shutdown()
while timeout > 0:
if not vm.is_running():
@@ -372,6 +508,77 @@ class SystemTestsMixin(object):
timeout -= 1
self.fail("Timeout while waiting for VM {} shutdown".format(vm.name))
+ def prepare_hvm_system_linux(self, vm, init_script, extra_files=None):
+ if not os.path.exists('/usr/lib/grub/i386-pc'):
+ self.skipTest('grub2 not installed')
+ if not spawn.find_executable('grub2-install'):
+ self.skipTest('grub2-tools not installed')
+ if not spawn.find_executable('dracut'):
+ self.skipTest('dracut not installed')
+ # create a single partition
+ p = subprocess.Popen(['sfdisk', '-q', '-L', vm.storage.root_img],
+ stdin=subprocess.PIPE,
+ stdout=open(os.devnull, 'w'),
+ stderr=subprocess.STDOUT)
+ p.communicate('2048,\n')
+ assert p.returncode == 0, 'sfdisk failed'
+ # TODO: check if root_img is really file, not already block device
+ p = subprocess.Popen(['sudo', 'losetup', '-f', '-P', '--show',
+ vm.storage.root_img], stdout=subprocess.PIPE)
+ (loopdev, _) = p.communicate()
+ loopdev = loopdev.strip()
+ looppart = loopdev + 'p1'
+ assert p.returncode == 0, 'losetup failed'
+ subprocess.check_call(['sudo', 'mkfs.ext2', '-q', '-F', looppart])
+ mountpoint = tempfile.mkdtemp()
+ subprocess.check_call(['sudo', 'mount', looppart, mountpoint])
+ try:
+ subprocess.check_call(['sudo', 'grub2-install',
+ '--target', 'i386-pc',
+ '--modules', 'part_msdos ext2',
+ '--boot-directory', mountpoint, loopdev],
+ stderr=open(os.devnull, 'w')
+ )
+ grub_cfg = '{}/grub2/grub.cfg'.format(mountpoint)
+ subprocess.check_call(
+ ['sudo', 'chown', '-R', os.getlogin(), mountpoint])
+ with open(grub_cfg, 'w') as f:
+ f.write(
+ "set timeout=1\n"
+ "menuentry 'Default' {\n"
+ " linux /vmlinuz root=/dev/xvda1 "
+ "rd.driver.blacklist=bochs_drm "
+ "rd.driver.blacklist=uhci_hcd\n"
+ " initrd /initrd\n"
+ "}"
+ )
+ p = subprocess.Popen(['uname', '-r'], stdout=subprocess.PIPE)
+ (kernel_version, _) = p.communicate()
+ kernel_version = kernel_version.strip()
+ kernel = '/boot/vmlinuz-{}'.format(kernel_version)
+ shutil.copy(kernel, os.path.join(mountpoint, 'vmlinuz'))
+ init_path = os.path.join(mountpoint, 'init')
+ with open(init_path, 'w') as f:
+ f.write(init_script)
+ os.chmod(init_path, 0755)
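+            # build an initramfs that runs init_script just before pivoting
+            # to the new root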
+ dracut_args = [
+ '--kver', kernel_version,
+ '--include', init_path,
+ '/usr/lib/dracut/hooks/pre-pivot/initscript.sh',
+ '--no-hostonly', '--nolvmconf', '--nomdadmconf',
+ ]
+ if extra_files:
+ dracut_args += ['--install', ' '.join(extra_files)]
+ subprocess.check_call(
+ ['dracut'] + dracut_args + [os.path.join(mountpoint,
+ 'initrd')],
+ stderr=open(os.devnull, 'w')
+ )
+ finally:
+ subprocess.check_call(['sudo', 'umount', mountpoint])
+ shutil.rmtree(mountpoint)
+ subprocess.check_call(['sudo', 'losetup', '-d', loopdev])
+
class BackupTestsMixin(SystemTestsMixin):
def setUp(self):
super(BackupTestsMixin, self).setUp()
@@ -381,39 +588,28 @@ class BackupTestsMixin(SystemTestsMixin):
if self.verbose:
print >>sys.stderr, "-> Creating backupvm"
- # TODO: allow non-default template
- self.backupvm = self.qc.add_new_vm("QubesAppVm",
- name=self.make_vm_name('backupvm'),
- template=self.qc.get_default_template())
- self.backupvm.create_on_disk(verbose=self.verbose)
-
self.backupdir = os.path.join(os.environ["HOME"], "test-backup")
if os.path.exists(self.backupdir):
shutil.rmtree(self.backupdir)
os.mkdir(self.backupdir)
-
def tearDown(self):
super(BackupTestsMixin, self).tearDown()
shutil.rmtree(self.backupdir)
-
def print_progress(self, progress):
if self.verbose:
print >> sys.stderr, "\r-> Backing up files: {0}%...".format(progress)
-
def error_callback(self, message):
self.error_detected.put(message)
if self.verbose:
print >> sys.stderr, "ERROR: {0}".format(message)
-
def print_callback(self, msg):
if self.verbose:
print msg
-
def fill_image(self, path, size=None, sparse=False):
block_size = 4096
@@ -432,17 +628,28 @@ class BackupTestsMixin(SystemTestsMixin):
f.close()
-
# NOTE: this was create_basic_vms
def create_backup_vms(self):
template=self.qc.get_default_template()
vms = []
+ vmname = self.make_vm_name('test-net')
+ if self.verbose:
+ print >>sys.stderr, "-> Creating %s" % vmname
+ testnet = self.qc.add_new_vm('QubesNetVm',
+ name=vmname, template=template)
+ testnet.create_on_disk(verbose=self.verbose)
+ testnet.services['ntpd'] = True
+ vms.append(testnet)
+ self.fill_image(testnet.private_img, 20*1024*1024)
+
vmname = self.make_vm_name('test1')
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm1 = self.qc.add_new_vm('QubesAppVm',
name=vmname, template=template)
+ testvm1.uses_default_netvm = False
+ testvm1.netvm = testnet
testvm1.create_on_disk(verbose=self.verbose)
vms.append(testvm1)
self.fill_image(testvm1.private_img, 100*1024*1024)
@@ -451,6 +658,8 @@ class BackupTestsMixin(SystemTestsMixin):
if self.verbose:
print >>sys.stderr, "-> Creating %s" % vmname
testvm2 = self.qc.add_new_vm('QubesHVm', name=vmname)
+ # fixup - uses_default_netvm=True anyway
+ testvm2.netvm = self.qc.get_default_netvm()
testvm2.create_on_disk(verbose=self.verbose)
self.fill_image(testvm2.root_img, 1024*1024*1024, True)
vms.append(testvm2)
@@ -459,9 +668,8 @@ class BackupTestsMixin(SystemTestsMixin):
return vms
-
def make_backup(self, vms, prepare_kwargs=dict(), do_kwargs=dict(),
- target=None):
+ target=None, expect_failure=False):
# XXX: bakup_prepare and backup_do don't support host_collection
self.qc.unlock_db()
if target is None:
@@ -472,19 +680,24 @@ class BackupTestsMixin(SystemTestsMixin):
print_callback=self.print_callback,
**prepare_kwargs)
except qubes.qubes.QubesException as e:
- self.fail("QubesException during backup_prepare: %s" % str(e))
+ if not expect_failure:
+ self.fail("QubesException during backup_prepare: %s" % str(e))
+ else:
+ raise
try:
qubes.backup.backup_do(target, files_to_backup, "qubes",
progress_callback=self.print_progress,
**do_kwargs)
except qubes.qubes.QubesException as e:
- self.fail("QubesException during backup_do: %s" % str(e))
+ if not expect_failure:
+ self.fail("QubesException during backup_do: %s" % str(e))
+ else:
+ raise
self.qc.lock_db_for_writing()
self.qc.load()
-
def restore_backup(self, source=None, appvm=None, options=None,
expect_errors=None):
if source is None:
@@ -528,7 +741,6 @@ class BackupTestsMixin(SystemTestsMixin):
if not appvm and not os.path.isdir(backupfile):
os.unlink(backupfile)
-
def create_sparse(self, path, size):
f = open(path, "w")
f.truncate(size)
@@ -543,13 +755,21 @@ def load_tests(loader, tests, pattern):
'qubes.tests.basic',
'qubes.tests.dom0_update',
'qubes.tests.network',
+ 'qubes.tests.dispvm',
'qubes.tests.vm_qrexec_gui',
+ 'qubes.tests.mime',
+ 'qubes.tests.hvm',
+ 'qubes.tests.pvgrub',
'qubes.tests.backup',
'qubes.tests.backupcompatibility',
'qubes.tests.regressions',
+ 'qubes.tests.storage',
+ 'qubes.tests.storage_xen',
+ 'qubes.tests.block',
+ 'qubes.tests.hardware',
+ 'qubes.tests.extra',
):
tests.addTests(loader.loadTestsFromName(modname))
-
return tests
diff --git a/tests/backup.py b/tests/backup.py
index 15f63014..554f8dc8 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -28,7 +28,7 @@ import os
import unittest
import sys
-
+from qubes.qubes import QubesException, QubesTemplateVm
import qubes.tests
class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
@@ -37,6 +37,32 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.make_backup(vms)
self.remove_vms(vms)
self.restore_backup()
+ for vm in vms:
+ restored_vm = self.qc.get_vm_by_name(vm.name)
+ for prop in ('name', 'kernel', 'uses_default_kernel',
+ 'uses_default_netvm', 'memory', 'maxmem', 'kernelopts',
+ 'uses_default_kernelopts', 'services', 'vcpus', 'pcidevs',
+ 'include_in_backups', 'default_user', 'qrexec_timeout',
+ 'autostart', 'pci_strictreset', 'pci_e820_host', 'debug',
+ 'internal'):
+ if prop not in vm.get_attrs_config():
+ continue
+ self.assertEquals(
+ getattr(vm, prop), getattr(restored_vm, prop),
+ "VM {} - property {} not properly restored".format(
+ vm.name, prop))
+ for prop in ('netvm', 'template', 'label'):
+ orig_value = getattr(vm, prop)
+ restored_value = getattr(restored_vm, prop)
+ if orig_value and restored_value:
+ self.assertEquals(orig_value.name, restored_value.name,
+ "VM {} - property {} not properly restored".format(
+ vm.name, prop))
+ else:
+ self.assertEquals(orig_value, restored_value,
+ "VM {} - property {} not properly restored".format(
+ vm.name, prop))
+
self.remove_vms(vms)
def test_001_compressed_backup(self):
@@ -63,7 +89,6 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.restore_backup()
self.remove_vms(vms)
-
def test_004_sparse_multipart(self):
vms = []
@@ -85,22 +110,17 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
self.restore_backup()
self.remove_vms(vms)
-
- # TODO: iterate over templates
- def test_100_send_to_vm(self):
+ def test_005_compressed_custom(self):
vms = self.create_backup_vms()
- self.backupvm.start()
- self.make_backup(vms,
- do_kwargs={
- 'appvm': self.backupvm,
- 'compressed': True,
- 'encrypted': True},
- target='dd of=/var/tmp/backup-test')
+ self.make_backup(vms, do_kwargs={'compressed': "bzip2"})
self.remove_vms(vms)
- self.restore_backup(source='dd if=/var/tmp/backup-test',
- appvm=self.backupvm)
+ self.restore_backup()
self.remove_vms(vms)
+ def test_100_backup_dom0_no_restore(self):
+ self.make_backup([self.qc[0]])
+ # TODO: think of some safe way to test restore...
+
def test_200_restore_over_existing_directory(self):
"""
Regression test for #1386
@@ -119,3 +139,115 @@ class TC_00_Backup(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
test_dir)
])
self.remove_vms(vms)
+
+ def test_210_auto_rename(self):
+ """
+ Test for #869
+ :return:
+ """
+ vms = self.create_backup_vms()
+ self.make_backup(vms)
+ self.restore_backup(options={
+ 'rename-conflicting': True
+ })
+ for vm in vms:
+ self.assertIsNotNone(self.qc.get_vm_by_name(vm.name+'1'))
+ restored_vm = self.qc.get_vm_by_name(vm.name+'1')
+ if vm.netvm and not vm.uses_default_netvm:
+ self.assertEqual(restored_vm.netvm.name, vm.netvm.name+'1')
+
+ self.remove_vms(vms)
+
+class TC_10_BackupVMMixin(qubes.tests.BackupTestsMixin):
+ def setUp(self):
+ super(TC_10_BackupVMMixin, self).setUp()
+ self.backupvm = self.qc.add_new_vm(
+ "QubesAppVm",
+ name=self.make_vm_name('backupvm'),
+ template=self.qc.get_vm_by_name(self.template)
+ )
+ self.backupvm.create_on_disk(verbose=self.verbose)
+
+ def test_100_send_to_vm_file_with_spaces(self):
+ vms = self.create_backup_vms()
+ self.backupvm.start()
+ self.backupvm.run("mkdir '/var/tmp/backup directory'", wait=True)
+ self.make_backup(vms,
+ do_kwargs={
+ 'appvm': self.backupvm,
+ 'compressed': True,
+ 'encrypted': True},
+ target='/var/tmp/backup directory')
+ self.remove_vms(vms)
+ p = self.backupvm.run("ls /var/tmp/backup*/qubes-backup*",
+ passio_popen=True)
+ (backup_path, _) = p.communicate()
+ backup_path = backup_path.strip()
+ self.restore_backup(source=backup_path,
+ appvm=self.backupvm)
+ self.remove_vms(vms)
+
+ def test_110_send_to_vm_command(self):
+ vms = self.create_backup_vms()
+ self.backupvm.start()
+ self.make_backup(vms,
+ do_kwargs={
+ 'appvm': self.backupvm,
+ 'compressed': True,
+ 'encrypted': True},
+ target='dd of=/var/tmp/backup-test')
+ self.remove_vms(vms)
+ self.restore_backup(source='dd if=/var/tmp/backup-test',
+ appvm=self.backupvm)
+ self.remove_vms(vms)
+
+    def test_111_send_to_vm_no_space(self):
+ """
+        Check whether backup properly reports failure when there is not
+        enough space available
+        :return: None
+ """
+ vms = self.create_backup_vms()
+ self.backupvm.start()
+ retcode = self.backupvm.run(
+            # Debian 7's losetup is too old to handle the loop-control device
+ "mknod /dev/loop0 b 7 0;"
+ "truncate -s 50M /home/user/backup.img && "
+ "mkfs.ext4 -F /home/user/backup.img && "
+ "mkdir /home/user/backup && "
+ "mount /home/user/backup.img /home/user/backup -o loop &&"
+ "chmod 777 /home/user/backup",
+ user="root", wait=True)
+ if retcode != 0:
+ raise RuntimeError("Failed to prepare backup directory")
+ with self.assertRaises(QubesException):
+ self.make_backup(vms,
+ do_kwargs={
+ 'appvm': self.backupvm,
+ 'compressed': False,
+ 'encrypted': True},
+ target='/home/user/backup',
+ expect_failure=True)
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+ self.remove_vms(vms)
+
+
+def load_tests(loader, tests, pattern):
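+    # generate a TC_10_BackupVM_<template> test class for every installed template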
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, QubesTemplateVm)]
+ except OSError:
+ templates = []
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ 'TC_10_BackupVM_' + template,
+ (TC_10_BackupVMMixin, qubes.tests.QubesTestCase),
+ {'template': template})))
+
+ return tests
diff --git a/tests/backupcompatibility.py b/tests/backupcompatibility.py
index 775db92f..e9a2e84b 100644
--- a/tests/backupcompatibility.py
+++ b/tests/backupcompatibility.py
@@ -146,6 +146,19 @@ compression-filter=gzip
'''
class TC_00_BackupCompatibility(qubes.tests.BackupTestsMixin, qubes.tests.QubesTestCase):
+ def tearDown(self):
+ self.qc.unlock_db()
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+
+        # Remove VMs here, as this class uses the 'test-' prefix instead of 'test-inst-'
+ self._remove_test_vms(self.qc, self.conn, prefix="test-")
+
+ self.qc.save()
+ self.qc.unlock_db()
+
+ super(TC_00_BackupCompatibility, self).tearDown()
+
def create_whitelisted_appmenus(self, filename):
f = open(filename, "w")
f.write("gnome-terminal.desktop\n")
@@ -167,14 +180,20 @@ class TC_00_BackupCompatibility(qubes.tests.BackupTestsMixin, qubes.tests.QubesT
def create_volatile_img(self, filename):
self.create_sparse(filename, 11.5*2**30)
- sfdisk_input="0,1024,S\n,10240,L\n"
- p = subprocess.Popen(["/usr/sbin/sfdisk", "--no-reread", "-u",
- "M",
- filename], stdout=open("/dev/null","w"),
- stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
- p.communicate(input=sfdisk_input)
- self.assertEqual(p.returncode, 0, "sfdisk failed with code %d" % p
- .returncode)
+        # here used to be an sfdisk call with "0,1024,S\n,10240,L\n" input,
+        # but since sfdisk likes to change its command arguments in
+        # incompatible ways, embed the partition table verbatim here
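+        # the blob spans offsets 0x1b0-0x1ff: the same two partition entries
+        # (swap + Linux) plus the MBR boot signature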
+ ptable = (
+ '\x00\x00\x00\x00\x00\x00\x00\x00\xab\x39\xd5\xd4\x00\x00\x20\x00'
+ '\x00\x21\xaa\x82\x82\x28\x08\x00\x00\x00\x00\x00\x00\x20\xaa\x00'
+ '\x82\x29\x15\x83\x9c\x79\x08\x00\x00\x20\x00\x00\x01\x40\x00\x00'
+ '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+            '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x55\xaa'
+ )
+ with open(filename, 'r+') as f:
+ f.seek(0x1b0)
+ f.write(ptable)
+
# TODO: mkswap
def fullpath(self, name):
@@ -424,6 +443,7 @@ class TC_00_BackupCompatibility(qubes.tests.BackupTestsMixin, qubes.tests.QubesT
self.restore_backup(self.backupdir, options={
'use-default-template': True,
+ 'use-default-netvm': True,
})
self.assertIsNotNone(self.qc.get_vm_by_name("test-template-clone"))
self.assertIsNotNone(self.qc.get_vm_by_name("test-testproxy"))
diff --git a/tests/basic.py b/tests/basic.py
index d3a524c2..c07bd272 100644
--- a/tests/basic.py
+++ b/tests/basic.py
@@ -22,15 +22,18 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+from distutils import spawn
import multiprocessing
import os
import shutil
import subprocess
+import tempfile
import unittest
import time
-from qubes.qubes import QubesVmCollection, QubesException, system_path
+from qubes.qubes import QubesVmCollection, QubesException, system_path, vmm
+import libvirt
import qubes.qubes
import qubes.tests
@@ -51,6 +54,26 @@ class TC_00_Basic(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
with self.assertNotRaises(qubes.qubes.QubesException):
vm.verify_files()
+ def test_010_remove(self):
+ vmname = self.make_vm_name('appvm')
+ vm = self.qc.add_new_vm('QubesAppVm',
+ name=vmname, template=self.qc.get_default_template())
+ vm.create_on_disk(verbose=False)
+ # check for QubesOS/qubes-issues#1930
+ vm.autostart = True
+ self.save_and_reload_db()
+ vm = self.qc[vm.qid]
+ vm.remove_from_disk()
+ self.qc.pop(vm.qid)
+ self.save_and_reload_db()
+ self.assertNotIn(vm.qid, self.qc)
+ self.assertFalse(os.path.exists(vm.dir_path))
+ self.assertFalse(os.path.exists(
+ '/etc/systemd/system/multi-user.target.wants/'
+ 'qubes-vm@{}.service'.format(vm.name)))
+ with self.assertRaises(libvirt.libvirtError):
+ vmm.libvirt_conn.lookupByName(vm.name)
+
class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
def setUp(self):
@@ -113,6 +136,14 @@ class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
'/etc/systemd/system/multi-user.target.wants/'
'qubes-vm@{}.service'.format(self.vmname)))
+ def test_001_rename_libvirt_undefined(self):
+ self.vm.libvirt_domain.undefine()
+ self.vm._libvirt_domain = None
+
+ newname = self.make_vm_name('newname')
+ with self.assertNotRaises(libvirt.libvirtError):
+ self.vm.set_name(newname)
+
def test_010_netvm(self):
if self.qc.get_default_netvm() is None:
self.skip("Set default NetVM before running this test")
@@ -258,6 +289,45 @@ class TC_01_Properties(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
self.assertEquals(testvm1.get_firewall_conf(),
testvm3.get_firewall_conf())
+ def test_020_name_conflict_app(self):
+ with self.assertRaises(QubesException):
+ self.vm2 = self.qc.add_new_vm('QubesAppVm',
+ name=self.vmname, template=self.qc.get_default_template())
+ self.vm2.create_on_disk(verbose=False)
+
+ def test_021_name_conflict_hvm(self):
+ with self.assertRaises(QubesException):
+ self.vm2 = self.qc.add_new_vm('QubesHVm',
+ name=self.vmname, template=self.qc.get_default_template())
+ self.vm2.create_on_disk(verbose=False)
+
+ def test_022_name_conflict_net(self):
+ with self.assertRaises(QubesException):
+ self.vm2 = self.qc.add_new_vm('QubesNetVm',
+ name=self.vmname, template=self.qc.get_default_template())
+ self.vm2.create_on_disk(verbose=False)
+
+ def test_030_rename_conflict_app(self):
+ vm2name = self.make_vm_name('newname')
+
+ self.vm2 = self.qc.add_new_vm('QubesAppVm',
+ name=vm2name, template=self.qc.get_default_template())
+ self.vm2.create_on_disk(verbose=False)
+
+ with self.assertRaises(QubesException):
+ self.vm2.set_name(self.vmname)
+
+ def test_031_rename_conflict_net(self):
+ vm3name = self.make_vm_name('newname')
+
+ self.vm3 = self.qc.add_new_vm('QubesNetVm',
+ name=vm3name, template=self.qc.get_default_template())
+ self.vm3.create_on_disk(verbose=False)
+
+ with self.assertRaises(QubesException):
+ self.vm3.set_name(self.vmname)
+
+
class TC_02_QvmPrefs(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
def setup_appvm(self):
self.testvm = self.qc.add_new_vm(
@@ -571,5 +641,181 @@ class TC_02_QvmPrefs(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
self.execute_tests('kernel', [('default', '', False)])
self.execute_tests('kernelopts', [('default', '', False)])
+class TC_03_QvmRevertTemplateChanges(qubes.tests.SystemTestsMixin,
+ qubes.tests.QubesTestCase):
+
+ def setup_pv_template(self):
+ self.test_template = self.qc.add_new_vm(
+ "QubesTemplateVm",
+ name=self.make_vm_name("pv-clone"),
+ )
+ self.test_template.clone_attrs(src_vm=self.qc.get_default_template())
+ self.test_template.clone_disk_files(
+ src_vm=self.qc.get_default_template(),
+ verbose=False)
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+
+ def setup_hvm_template(self):
+ self.test_template = self.qc.add_new_vm(
+ "QubesTemplateHVm",
+ name=self.make_vm_name("hvm"),
+ )
+ self.test_template.create_on_disk(verbose=False)
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+
+ def get_rootimg_checksum(self):
+ p = subprocess.Popen(['sha1sum', self.test_template.root_img],
+ stdout=subprocess.PIPE)
+ return p.communicate()[0]
+
+ def _do_test(self):
+ checksum_before = self.get_rootimg_checksum()
+ self.test_template.start(verbose=False)
+ self.shutdown_and_wait(self.test_template)
+ checksum_changed = self.get_rootimg_checksum()
+ if checksum_before == checksum_changed:
+ self.log.warning("template not modified, test result will be "
+ "unreliable")
+ with self.assertNotRaises(subprocess.CalledProcessError):
+ subprocess.check_call(['sudo', 'qvm-revert-template-changes',
+ '--force', self.test_template.name])
+
+ checksum_after = self.get_rootimg_checksum()
+ self.assertEquals(checksum_before, checksum_after)
+
+ def test_000_revert_pv(self):
+ """
+ Test qvm-revert-template-changes for PV template
+ """
+ self.setup_pv_template()
+ self._do_test()
+
+ def test_000_revert_hvm(self):
+ """
+ Test qvm-revert-template-changes for HVM template
+ """
+ # TODO: have some system there, so the root.img will get modified
+ self.setup_hvm_template()
+ self._do_test()
+
+
+class TC_30_Gui_daemon(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+ @unittest.skipUnless(spawn.find_executable('xdotool'),
+ "xdotool not installed")
+ def test_000_clipboard(self):
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name('vm1'),
+ template=self.qc.get_default_template())
+ testvm1.create_on_disk(verbose=False)
+ testvm2 = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name('vm2'),
+ template=self.qc.get_default_template())
+ testvm2.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ testvm1.start()
+ testvm2.start()
+
+ window_title = 'user@{}'.format(testvm1.name)
+ testvm1.run('zenity --text-info --editable --title={}'.format(
+ window_title))
+
+ self.wait_for_window(window_title)
+ time.sleep(0.5)
+ test_string = "test{}".format(testvm1.xid)
+
+ # Type and copy some text
+ subprocess.check_call(['xdotool', 'search', '--name', window_title,
+ 'windowactivate', '--sync',
+ 'type', '{}'.format(test_string)])
+        # second xdotool call because 'type --terminator' does not work (SEGV);
+        # additionally, do not use 'search' here, so the window stack will be
+        # empty and xdotool will use XTEST instead of generating events
+        # manually - this is much better, as the events will carry a correct
+        # timestamp (so gui-daemon will not drop the copy request)
+ subprocess.check_call(['xdotool',
+ 'key', 'ctrl+a', 'ctrl+c', 'ctrl+shift+c',
+ 'Escape'])
+
+ clipboard_content = \
+ open('/var/run/qubes/qubes-clipboard.bin', 'r').read().strip()
+ self.assertEquals(clipboard_content, test_string,
+ "Clipboard copy operation failed - content")
+ clipboard_source = \
+ open('/var/run/qubes/qubes-clipboard.bin.source',
+ 'r').read().strip()
+ self.assertEquals(clipboard_source, testvm1.name,
+ "Clipboard copy operation failed - owner")
+
+ # Then paste it to the other window
+ window_title = 'user@{}'.format(testvm2.name)
+ p = testvm2.run('zenity --entry --title={} > test.txt'.format(
+ window_title), passio_popen=True)
+ self.wait_for_window(window_title)
+
+ subprocess.check_call(['xdotool', 'key', '--delay', '100',
+ 'ctrl+shift+v', 'ctrl+v', 'Return'])
+ p.wait()
+
+ # And compare the result
+ (test_output, _) = testvm2.run('cat test.txt',
+ passio_popen=True).communicate()
+ self.assertEquals(test_string, test_output.strip())
+
+ clipboard_content = \
+ open('/var/run/qubes/qubes-clipboard.bin', 'r').read().strip()
+ self.assertEquals(clipboard_content, "",
+ "Clipboard not wiped after paste - content")
+ clipboard_source = \
+            open('/var/run/qubes/qubes-clipboard.bin.source',
+                 'r').read().strip()
+ self.assertEquals(clipboard_source, "",
+ "Clipboard not wiped after paste - owner")
+
+class TC_05_StandaloneVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+ def test_000_create_start(self):
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ template=None,
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False,
+ source_template=self.qc.get_default_template())
+ self.qc.save()
+ self.qc.unlock_db()
+ testvm1.start()
+ self.assertEquals(testvm1.get_power_state(), "Running")
+
+ def test_100_resize_root_img(self):
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ template=None,
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False,
+ source_template=self.qc.get_default_template())
+ self.qc.save()
+ self.qc.unlock_db()
+ with self.assertRaises(QubesException):
+ testvm1.resize_root_img(20*1024**3)
+ testvm1.resize_root_img(20*1024**3, allow_start=True)
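+        # with allow_start=True the resize is expected to start the VM to grow
+        # the filesystem, so wait for it to shut down again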
+ timeout = 60
+ while testvm1.is_running():
+ time.sleep(1)
+ timeout -= 1
+ if timeout == 0:
+ self.fail("Timeout while waiting for VM shutdown")
+ self.assertEquals(testvm1.get_root_img_sz(), 20*1024**3)
+ testvm1.start()
+ p = testvm1.run('df --output=size /|tail -n 1',
+ passio_popen=True)
+ # new_size in 1k-blocks
+ (new_size, _) = p.communicate()
+ # some safety margin for FS metadata
+ self.assertGreater(int(new_size.strip()), 19*1024**2)
+
+
# vim: ts=4 sw=4 et
diff --git a/tests/block.py b/tests/block.py
new file mode 100644
index 00000000..3d9dcbac
--- /dev/null
+++ b/tests/block.py
@@ -0,0 +1,310 @@
+# vim: fileencoding=utf-8
+#
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2016
+# Marek Marczykowski-Górecki
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+import os
+
+import qubes.tests
+import qubes.qubesutils
+import subprocess
+
+# the same class for both dom0 and VMs
+class TC_00_List(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+ template = None
+
+ def setUp(self):
+ super(TC_00_List, self).setUp()
+ self.img_path = '/tmp/test.img'
+ self.mount_point = '/tmp/test-dir'
+ if self.template is not None:
+ self.vm = self.qc.add_new_vm(
+ "QubesAppVm",
+ name=self.make_vm_name("vm"),
+ template=self.qc.get_vm_by_name(self.template))
+ self.vm.create_on_disk(verbose=False)
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+ self.vm.start()
+ else:
+ self.qc.unlock_db()
+ self.vm = self.qc[0]
+
+ def tearDown(self):
+ super(TC_00_List, self).tearDown()
+ if self.template is None:
+ if os.path.exists(self.mount_point):
+ subprocess.call(['sudo', 'umount', self.mount_point])
+ subprocess.call(['sudo', 'rmdir', self.mount_point])
+ subprocess.call(['sudo', 'dmsetup', 'remove', 'test-dm'])
+ if os.path.exists(self.img_path):
+ loopdev = subprocess.check_output(['losetup', '-j',
+ self.img_path])
+ for dev in loopdev.splitlines():
+ subprocess.call(
+ ['sudo', 'losetup', '-d', dev.split(':')[0]])
+ subprocess.call(['sudo', 'rm', '-f', self.img_path])
+
+ def run_script(self, script, user="user"):
+ if self.template is None:
+ if user == "user":
+ subprocess.check_call(script, shell=True)
+ elif user == "root":
+ subprocess.check_call(['sudo', 'sh', '-c', script])
+ else:
+ retcode = self.vm.run(script, user=user, wait=True)
+ if retcode != 0:
+                raise subprocess.CalledProcessError(retcode, script)
+
+ def test_000_list_loop(self):
+ if self.template is None:
+ self.skipTest('loop devices excluded in dom0')
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "losetup -f {path}; "
+ "udevadm settle".format(path=self.img_path), user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ found = False
+ for dev in dev_list.keys():
+ if dev_list[dev]['desc'] == self.img_path:
+ self.assertTrue(dev.startswith(self.vm.name + ':loop'))
+ self.assertEquals(dev_list[dev]['mode'], 'w')
+ self.assertEquals(dev_list[dev]['size'], 1024 * 1024 * 128)
+ self.assertEquals(
+ dev_list[dev]['device'], '/dev/' + dev.split(':')[1])
+ found = True
+
+ if not found:
+ self.fail("Device {} not found in {!r}".format(self.img_path, dev_list))
+
+ def test_001_list_loop_mounted(self):
+ if self.template is None:
+ self.skipTest('loop devices excluded in dom0')
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "mkfs.ext4 -q -F {path}; "
+ "mkdir -p {mntdir}; "
+ "mount {path} {mntdir} -o loop; "
+ "udevadm settle".format(
+ path=self.img_path,
+ mntdir=self.mount_point),
+ user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ for dev in dev_list.keys():
+ if dev_list[dev]['desc'] == self.img_path:
+ self.fail(
+                    'Device {} ({}) should not be listed because it is mounted'
+ .format(dev, self.img_path))
+
+ def test_010_list_dm(self):
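+        # create a 128 MiB backing file, attach it to a loop device and wrap
+        # it in a linear device-mapper target (262144 sectors * 512 B = 128 MiB)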
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "loopdev=`losetup -f`; "
+ "losetup $loopdev {path}; "
+ "dmsetup create test-dm --table \"0 262144 linear $(cat "
+ "/sys/block/$(basename $loopdev)/dev) 0\";"
+ "udevadm settle".format(path=self.img_path), user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ found = False
+ for dev in dev_list.keys():
+ if dev.startswith(self.vm.name + ':loop'):
+ self.assertNotEquals(dev_list[dev]['desc'], self.img_path,
+ "Device {} ({}) should not be listed as it is used in "
+ "device-mapper".format(dev, self.img_path)
+ )
+ elif dev_list[dev]['desc'] == 'test-dm':
+ self.assertEquals(dev_list[dev]['mode'], 'w')
+ self.assertEquals(dev_list[dev]['size'], 1024 * 1024 * 128)
+ self.assertEquals(
+ dev_list[dev]['device'], '/dev/' + dev.split(':')[1])
+ found = True
+
+ if not found:
+ self.fail("Device {} not found in {!r}".format('test-dm', dev_list))
+
+ def test_011_list_dm_mounted(self):
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "loopdev=`losetup -f`; "
+ "losetup $loopdev {path}; "
+ "dmsetup create test-dm --table \"0 262144 linear $(cat "
+ "/sys/block/$(basename $loopdev)/dev) 0\";"
+ "mkfs.ext4 -q -F /dev/mapper/test-dm;"
+ "mkdir -p {mntdir};"
+ "mount /dev/mapper/test-dm {mntdir};"
+ "udevadm settle".format(
+ path=self.img_path,
+ mntdir=self.mount_point),
+ user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ for dev in dev_list.keys():
+ if dev.startswith(self.vm.name + ':loop'):
+ self.assertNotEquals(dev_list[dev]['desc'], self.img_path,
+ "Device {} ({}) should not be listed as it is used in "
+ "device-mapper".format(dev, self.img_path)
+ )
+ else:
+ self.assertNotEquals(dev_list[dev]['desc'], 'test-dm',
+ "Device {} ({}) should not be listed as it is "
+ "mounted".format(dev, 'test-dm')
+ )
+
+ def test_012_list_dm_delayed(self):
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "loopdev=`losetup -f`; "
+ "losetup $loopdev {path}; "
+ "udevadm settle; "
+ "dmsetup create test-dm --table \"0 262144 linear $(cat "
+ "/sys/block/$(basename $loopdev)/dev) 0\";"
+ "udevadm settle".format(path=self.img_path), user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ found = False
+ for dev in dev_list.keys():
+ if dev.startswith(self.vm.name + ':loop'):
+ self.assertNotEquals(dev_list[dev]['desc'], self.img_path,
+ "Device {} ({}) should not be listed as it is used in "
+ "device-mapper".format(dev, self.img_path)
+ )
+ elif dev_list[dev]['desc'] == 'test-dm':
+ self.assertEquals(dev_list[dev]['mode'], 'w')
+ self.assertEquals(dev_list[dev]['size'], 1024 * 1024 * 128)
+ self.assertEquals(
+ dev_list[dev]['device'], '/dev/' + dev.split(':')[1])
+ found = True
+
+ if not found:
+ self.fail("Device {} not found in {!r}".format('test-dm', dev_list))
+
+ def test_013_list_dm_removed(self):
+ if self.template is None:
+ self.skipTest('test not supported in dom0 - loop devices excluded '
+ 'in dom0')
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "loopdev=`losetup -f`; "
+ "losetup $loopdev {path}; "
+ "dmsetup create test-dm --table \"0 262144 linear $(cat "
+ "/sys/block/$(basename $loopdev)/dev) 0\";"
+ "udevadm settle;"
+ "dmsetup remove test-dm;"
+ "udevadm settle".format(path=self.img_path), user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ found = False
+ for dev in dev_list.keys():
+ if dev_list[dev]['desc'] == self.img_path:
+ self.assertTrue(dev.startswith(self.vm.name + ':loop'))
+ self.assertEquals(dev_list[dev]['mode'], 'w')
+ self.assertEquals(dev_list[dev]['size'], 1024 * 1024 * 128)
+ self.assertEquals(
+ dev_list[dev]['device'], '/dev/' + dev.split(':')[1])
+ found = True
+
+ if not found:
+ self.fail("Device {} not found in {!r}".format(self.img_path, dev_list))
+
+ def test_020_list_loop_partition(self):
+ if self.template is None:
+ self.skipTest('loop devices excluded in dom0')
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "echo ,,L | sfdisk {path};"
+ "loopdev=`losetup -f`; "
+ "losetup -P $loopdev {path}; "
+ "blockdev --rereadpt $loopdev; "
+ "udevadm settle".format(path=self.img_path), user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ found = False
+ for dev in dev_list.keys():
+ if dev_list[dev]['desc'] == self.img_path:
+ self.assertTrue(dev.startswith(self.vm.name + ':loop'))
+ self.assertEquals(dev_list[dev]['mode'], 'w')
+ self.assertEquals(dev_list[dev]['size'], 1024 * 1024 * 128)
+ self.assertEquals(
+ dev_list[dev]['device'], '/dev/' + dev.split(':')[1])
+ self.assertIn(dev + 'p1', dev_list)
+ found = True
+
+ if not found:
+ self.fail("Device {} not found in {!r}".format(self.img_path, dev_list))
+
+ def test_021_list_loop_partition_mounted(self):
+ if self.template is None:
+ self.skipTest('loop devices excluded in dom0')
+ self.run_script(
+ "set -e;"
+ "truncate -s 128M {path}; "
+ "echo ,,L | sfdisk {path};"
+ "loopdev=`losetup -f`; "
+ "losetup -P $loopdev {path}; "
+ "blockdev --rereadpt $loopdev; "
+ "mkfs.ext4 -q -F ${{loopdev}}p1; "
+ "mkdir -p {mntdir}; "
+ "mount ${{loopdev}}p1 {mntdir}; "
+ "udevadm settle".format(
+ path=self.img_path, mntdir=self.mount_point),
+ user="root")
+
+ dev_list = qubes.qubesutils.block_list_vm(self.vm)
+ for dev in dev_list.keys():
+ if dev_list[dev]['desc'] == self.img_path:
+ self.fail(
+ 'Device {} ({}) should not be listed because its '
+ 'partition is mounted'
+ .format(dev, self.img_path))
+ elif dev.startswith(self.vm.name + ':loop') and dev.endswith('p1'):
+ # FIXME: risky assumption that only tests create partitioned
+ # loop devices
+ self.fail(
+ 'Device {} ({}) should not be listed because it is mounted'
+ .format(dev, self.img_path))
+
+
+def load_tests(loader, tests, pattern):
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, qubes.qubes.QubesTemplateVm)]
+ except OSError:
+ templates = []
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ 'TC_00_List_' + template,
+ (TC_00_List, qubes.tests.QubesTestCase),
+ {'template': template})))
+
+ return tests
diff --git a/tests/dispvm.py b/tests/dispvm.py
new file mode 100644
index 00000000..164a202c
--- /dev/null
+++ b/tests/dispvm.py
@@ -0,0 +1,448 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+from distutils import spawn
+import qubes.tests
+import subprocess
+import tempfile
+import unittest
+import os
+import time
+
+class TC_04_DispVM(qubes.tests.SystemTestsMixin,
+ qubes.tests.QubesTestCase):
+
+ @staticmethod
+ def get_dispvm_template_name():
+ vmdir = os.readlink('/var/lib/qubes/dvmdata/vmdir')
+ return os.path.basename(vmdir)
+
+ def test_000_firewall_propagation(self):
+ """
+ Check firewall propagation VM->DispVM, when the VM has some firewall rules
+ """
+
+ # FIXME: currently qubes.xml doesn't contain this information...
+ dispvm_template_name = self.get_dispvm_template_name()
+ dispvm_template = self.qc.get_vm_by_name(dispvm_template_name)
+
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name('vm1'),
+ template=self.qc.get_default_template())
+ testvm1.create_on_disk(verbose=False)
+ firewall = testvm1.get_firewall_conf()
+ firewall['allowDns'] = False
+ firewall['allowYumProxy'] = False
+ firewall['rules'] = [{'address': '1.2.3.4',
+ 'netmask': 24,
+ 'proto': 'tcp',
+ 'portBegin': 22,
+ 'portEnd': 22,
+ }]
+ testvm1.write_firewall_conf(firewall)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ testvm1.start()
+
+ p = testvm1.run("qvm-run --dispvm 'qubesdb-read /name; echo ERROR;"
+ " read x'",
+ passio_popen=True)
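+ # The DispVM prints its own name and then blocks on 'read x', so it stays
+ # alive while we inspect its firewall configuration; writing a newline to
+ # p.stdin at the end of the test lets it terminate.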
+
+ dispvm_name = p.stdout.readline().strip()
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+ dispvm = self.qc.get_vm_by_name(dispvm_name)
+ self.assertIsNotNone(dispvm, "DispVM {} not found in qubes.xml".format(
+ dispvm_name))
+ # check if firewall was propagated to the DispVM
+ self.assertEquals(testvm1.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ # and only there (#1608)
+ self.assertNotEquals(dispvm_template.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ # then modify some rule
+ firewall = dispvm.get_firewall_conf()
+ firewall['rules'] = [{'address': '4.3.2.1',
+ 'netmask': 24,
+ 'proto': 'tcp',
+ 'portBegin': 22,
+ 'portEnd': 22,
+ }]
+ dispvm.write_firewall_conf(firewall)
+ # and check again that it wasn't saved anywhere else (#1608)
+ self.assertNotEquals(dispvm_template.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ self.assertNotEquals(testvm1.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ p.stdin.write('\n')
+ p.wait()
+
+ def test_001_firewall_propagation(self):
+ """
+ Check firewall propagation VM->DispVM, when the VM has no firewall rules
+ """
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name('vm1'),
+ template=self.qc.get_default_template())
+ testvm1.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ # FIXME: currently qubes.xml doesn't contain this information...
+ dispvm_template_name = self.get_dispvm_template_name()
+ dispvm_template = self.qc.get_vm_by_name(dispvm_template_name)
+ original_firewall = None
+ if os.path.exists(dispvm_template.firewall_conf):
+ original_firewall = tempfile.TemporaryFile()
+ with open(dispvm_template.firewall_conf) as f:
+ original_firewall.write(f.read())
+ try:
+
+ firewall = dispvm_template.get_firewall_conf()
+ firewall['allowDns'] = False
+ firewall['allowYumProxy'] = False
+ firewall['rules'] = [{'address': '1.2.3.4',
+ 'netmask': 24,
+ 'proto': 'tcp',
+ 'portBegin': 22,
+ 'portEnd': 22,
+ }]
+ dispvm_template.write_firewall_conf(firewall)
+
+ testvm1.start()
+
+ p = testvm1.run("qvm-run --dispvm 'qubesdb-read /name; echo ERROR;"
+ " read x'",
+ passio_popen=True)
+
+ dispvm_name = p.stdout.readline().strip()
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+ dispvm = self.qc.get_vm_by_name(dispvm_name)
+ self.assertIsNotNone(dispvm, "DispVM {} not found in qubes.xml".format(
+ dispvm_name))
+ # check if firewall was propagated to the DispVM from the right VM
+ self.assertEquals(testvm1.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ # and only there (#1608)
+ self.assertNotEquals(dispvm_template.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ # then modify some rule
+ firewall = dispvm.get_firewall_conf()
+ firewall['rules'] = [{'address': '4.3.2.1',
+ 'netmask': 24,
+ 'proto': 'tcp',
+ 'portBegin': 22,
+ 'portEnd': 22,
+ }]
+ dispvm.write_firewall_conf(firewall)
+ # and check again that it wasn't saved anywhere else (#1608)
+ self.assertNotEquals(dispvm_template.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ self.assertNotEquals(testvm1.get_firewall_conf(),
+ dispvm.get_firewall_conf())
+ p.stdin.write('\n')
+ p.wait()
+ finally:
+ if original_firewall:
+ original_firewall.seek(0)
+ with open(dispvm_template.firewall_conf, 'w') as f:
+ f.write(original_firewall.read())
+ original_firewall.close()
+ else:
+ os.unlink(dispvm_template.firewall_conf)
+
+ def test_002_cleanup(self):
+ self.qc.unlock_db()
+ p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
+ 'qubes.VMShell', 'dom0', 'DEFAULT'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
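+ # qfile-daemon-dvm starts a DispVM for the qubes.VMShell service and is
+ # expected to remove it once the service call (our stdin/stdout stream)
+ # ends; the assertions below verify that cleanup.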
+ (stdout, _) = p.communicate(input="echo test; qubesdb-read /name; "
+ "echo ERROR\n")
+ self.assertEquals(p.returncode, 0)
+ lines = stdout.splitlines()
+ self.assertEqual(lines[0], "test")
+ dispvm_name = lines[1]
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+ dispvm = self.qc.get_vm_by_name(dispvm_name)
+ self.assertIsNone(dispvm, "DispVM {} still exists in qubes.xml".format(
+ dispvm_name))
+
+ def test_003_cleanup_destroyed(self):
+ """
+ Check if DispVM is properly removed even if it terminated itself (#1660)
+ :return:
+ """
+ self.qc.unlock_db()
+ p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
+ 'qubes.VMShell', 'dom0', 'DEFAULT'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+ p.stdin.write("qubesdb-read /name\n")
+ p.stdin.write("echo ERROR\n")
+ p.stdin.write("sudo poweroff\n")
+ # deliberately do not close p.stdin - wait for the automatic disconnect
+ # when the domain is destroyed
+ timeout = 30
+ while timeout > 0:
+ if p.poll() is not None:
+ break
+ time.sleep(1)
+ timeout -= 1
+ # also catches returncode None, i.e. a timeout
+ self.assertEquals(p.returncode, 0)
+ lines = p.stdout.read().splitlines()
+ dispvm_name = lines[0]
+ self.assertNotEquals(dispvm_name, "ERROR")
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+ dispvm = self.qc.get_vm_by_name(dispvm_name)
+ self.assertIsNone(dispvm, "DispVM {} still exists in qubes.xml".format(
+ dispvm_name))
+
+
+class TC_20_DispVMMixin(qubes.tests.SystemTestsMixin):
+ def test_000_prepare_dvm(self):
+ self.qc.unlock_db()
+ retcode = subprocess.call(['/usr/bin/qvm-create-default-dvm',
+ self.template],
+ stderr=open(os.devnull, 'w'))
+ self.assertEqual(retcode, 0)
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+ self.assertIsNotNone(self.qc.get_vm_by_name(
+ self.template + "-dvm"))
+ # TODO: check mtime of snapshot file
+
+ def test_010_simple_dvm_run(self):
+ self.qc.unlock_db()
+ p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
+ 'qubes.VMShell', 'dom0', 'DEFAULT'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+ (stdout, _) = p.communicate(input="echo test")
+ self.assertEqual(stdout, "test\n")
+ # TODO: check if DispVM is destroyed
+
+ @unittest.skipUnless(spawn.find_executable('xdotool'),
+ "xdotool not installed")
+ def test_020_gui_app(self):
+ self.qc.unlock_db()
+ p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
+ 'qubes.VMShell', 'dom0', 'DEFAULT'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+
+ # wait for DispVM startup:
+ p.stdin.write("echo test\n")
+ p.stdin.flush()
+ l = p.stdout.readline()
+ self.assertEqual(l, "test\n")
+
+ # potential race condition, but our tests are supposed to be running
+ # on a dedicated machine, so this should not be a problem
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+
+ max_qid = 0
+ for vm in self.qc.values():
+ if not vm.is_disposablevm():
+ continue
+ if vm.qid > max_qid:
+ max_qid = vm.qid
+ dispvm = self.qc[max_qid]
+ self.assertNotEqual(dispvm.qid, 0, "DispVM not found in qubes.xml")
+ self.assertTrue(dispvm.is_running())
+ try:
+ window_title = 'user@%s' % (dispvm.template.name + "-dvm")
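+ # The xterm below sets its own window title via the OSC 0 escape
+ # sequence and then blocks on 'read', so the window stays open until
+ # Return is sent to it via xdotool.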
+ p.stdin.write("xterm -e "
+ "\"sh -c 'echo \\\"\033]0;{}\007\\\";read x;'\"\n".
+ format(window_title))
+ self.wait_for_window(window_title)
+
+ time.sleep(0.5)
+ self.enter_keys_in_window(window_title, ['Return'])
+ # Wait for window to close
+ self.wait_for_window(window_title, show=False)
+ finally:
+ p.stdin.close()
+
+ wait_count = 0
+ while dispvm.is_running():
+ wait_count += 1
+ if wait_count > 100:
+ self.fail("Timeout while waiting for DispVM destruction")
+ time.sleep(0.1)
+ wait_count = 0
+ while p.poll() is None:
+ wait_count += 1
+ if wait_count > 100:
+ self.fail("Timeout while waiting for qfile-daemon-dvm "
+ "termination")
+ time.sleep(0.1)
+ self.assertEqual(p.returncode, 0)
+
+ self.qc.lock_db_for_reading()
+ self.qc.load()
+ self.qc.unlock_db()
+ self.assertIsNone(self.qc.get_vm_by_name(dispvm.name),
+ "DispVM not removed from qubes.xml")
+
+ def _handle_editor(self, winid):
+ (window_title, _) = subprocess.Popen(
+ ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE).\
+ communicate()
+ window_title = window_title.strip().\
+ replace('(', '\(').replace(')', '\)')
+ time.sleep(1)
+ if "gedit" in window_title:
+ subprocess.check_call(['xdotool', 'windowactivate', '--sync', winid,
+ 'type', 'Test test 2'])
+ subprocess.check_call(['xdotool', 'key', '--window', winid,
+ 'key', 'Return'])
+ time.sleep(0.5)
+ subprocess.check_call(['xdotool',
+ 'key', 'ctrl+s', 'ctrl+q'])
+ elif "LibreOffice" in window_title:
+ # wait for the actual editor window (what we have now may be the splash screen)
+ search = subprocess.Popen(['xdotool', 'search', '--sync',
+ '--onlyvisible', '--all', '--name', '--class', 'disp*|Writer'],
+ stdout=subprocess.PIPE,
+ stderr=open(os.path.devnull, 'w'))
+ retcode = search.wait()
+ if retcode == 0:
+ winid = search.stdout.read().strip()
+ time.sleep(0.5)
+ subprocess.check_call(['xdotool', 'windowactivate', '--sync', winid,
+ 'type', 'Test test 2'])
+ subprocess.check_call(['xdotool', 'key', '--window', winid,
+ 'key', 'Return'])
+ time.sleep(0.5)
+ subprocess.check_call(['xdotool',
+ 'key', '--delay', '100', 'ctrl+s',
+ 'Return', 'ctrl+q'])
+ elif "emacs" in window_title:
+ subprocess.check_call(['xdotool', 'windowactivate', '--sync', winid,
+ 'type', 'Test test 2'])
+ subprocess.check_call(['xdotool', 'key', '--window', winid,
+ 'key', 'Return'])
+ time.sleep(0.5)
+ subprocess.check_call(['xdotool',
+ 'key', 'ctrl+x', 'ctrl+s'])
+ subprocess.check_call(['xdotool',
+ 'key', 'ctrl+x', 'ctrl+c'])
+ elif "vim" in window_title or "user@" in window_title:
+ subprocess.check_call(['xdotool', 'windowactivate', '--sync', winid,
+ 'key', 'i', 'type', 'Test test 2'])
+ subprocess.check_call(['xdotool', 'key', '--window', winid,
+ 'key', 'Return'])
+ subprocess.check_call(
+ ['xdotool',
+ 'key', 'Escape', 'colon', 'w', 'q', 'Return'])
+ else:
+ self.fail("Unknown editor window: {}".format(window_title))
+
+ @unittest.skipUnless(spawn.find_executable('xdotool'),
+ "xdotool not installed")
+ def test_030_edit_file(self):
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name('vm1'),
+ template=self.qc.get_vm_by_name(
+ self.template))
+ testvm1.create_on_disk(verbose=False)
+ self.qc.save()
+
+ testvm1.start()
+ testvm1.run("echo test1 > /home/user/test.txt", wait=True)
+
+ self.qc.unlock_db()
+ p = testvm1.run("qvm-open-in-dvm /home/user/test.txt",
+ passio_popen=True)
+
+ wait_count = 0
+ winid = None
+ while True:
+ search = subprocess.Popen(['xdotool', 'search',
+ '--onlyvisible', '--class', 'disp*'],
+ stdout=subprocess.PIPE,
+ stderr=open(os.path.devnull, 'w'))
+ retcode = search.wait()
+ if retcode == 0:
+ winid = search.stdout.read().strip()
+ # get window title
+ (window_title, _) = subprocess.Popen(
+ ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE). \
+ communicate()
+ window_title = window_title.strip()
+ # ignore LibreOffice splash screen and window with no title
+ # set yet
+ if window_title and not window_title.startswith("LibreOffice")\
+ and not window_title == 'VMapp command':
+ break
+ wait_count += 1
+ if wait_count > 100:
+ self.fail("Timeout while waiting for editor window")
+ time.sleep(0.3)
+
+ time.sleep(0.5)
+ self._handle_editor(winid)
+ p.wait()
+ p = testvm1.run("cat /home/user/test.txt",
+ passio_popen=True)
+ (test_txt_content, _) = p.communicate()
+ # Drop BOM if added by editor
+ if test_txt_content.startswith('\xef\xbb\xbf'):
+ test_txt_content = test_txt_content[3:]
+ self.assertEqual(test_txt_content, "Test test 2\ntest1\n")
+
+def load_tests(loader, tests, pattern):
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, qubes.qubes.QubesTemplateVm)]
+ except OSError:
+ templates = []
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ 'TC_20_DispVM_' + template,
+ (TC_20_DispVMMixin, qubes.tests.QubesTestCase),
+ {'template': template})))
+
+ return tests
\ No newline at end of file
diff --git a/tests/dom0_update.py b/tests/dom0_update.py
index 12fcc95d..164e436a 100644
--- a/tests/dom0_update.py
+++ b/tests/dom0_update.py
@@ -39,8 +39,8 @@ class TC_00_Dom0UpgradeMixin(qubes.tests.SystemTestsMixin):
Tests for downloading dom0 updates using VMs based on different templates
"""
pkg_name = 'qubes-test-pkg'
- dom0_update_common_opts = ['--disablerepo=*', '--enablerepo=test',
- '--setopt=test.copy_local=1']
+ dom0_update_common_opts = ['--disablerepo=*', '--enablerepo=test']
+ update_flag_path = '/var/lib/qubes/updates/dom0-updates-available'
@classmethod
def generate_key(cls, keydir):
@@ -84,9 +84,9 @@ Expire-Date: 0
p.stdin.write('''
[test]
name = Test
-baseurl = file:///tmp/repo
+baseurl = http://localhost:8080/
enabled = 1
- ''')
+''')
p.stdin.close()
p.wait()
@@ -100,6 +100,10 @@ enabled = 1
def setUp(self):
super(TC_00_Dom0UpgradeMixin, self).setUp()
+ if self.template.startswith('whonix-'):
+ # Whonix redirects all traffic through Tor, so the repository
+ # at http://localhost:8080/ is unavailable
+ self.skipTest("Test not supported for this template")
self.updatevm = self.qc.add_new_vm(
"QubesProxyVm",
name=self.make_vm_name("updatevm"),
@@ -114,6 +118,7 @@ enabled = 1
subprocess.check_call(['sudo', 'rpm', '--import',
os.path.join(self.tmpdir, 'pubkey.asc')])
self.updatevm.start()
+ self.repo_running = False
def tearDown(self):
self.qc.lock_db_for_writing()
@@ -185,12 +190,27 @@ Test package
elif retcode != 0:
self.skipTest("createrepo failed with code {}, cannot perform the "
"test".format(retcode))
+ self.start_repo()
+
+ def start_repo(self):
+ if not self.repo_running:
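+ # serve /tmp/repo over HTTP on port 8080 - the same URL configured
+ # as baseurl in the test yum repository definition above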
+ self.updatevm.run("cd /tmp/repo &&"
+ "python -m SimpleHTTPServer 8080")
+ self.repo_running = True
def test_000_update(self):
+ """Dom0 update tests
+
+ Check that a package update is:
+ - detected
+ - installed
+ and that the "updates pending" flag is cleared afterwards
+ """
filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')
subprocess.check_call(['sudo', 'rpm', '-i', filename])
filename = self.create_pkg(self.tmpdir, self.pkg_name, '2.0')
self.send_pkg(filename)
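+ # pre-create the 'updates pending' flag so we can verify that
+ # qubes-dom0-update clears it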
+ open(self.update_flag_path, 'a').close()
logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')
try:
@@ -210,6 +230,68 @@ Test package
self.pkg_name)], stdout=open(os.devnull, 'w'))
self.assertEqual(retcode, 0, 'Package {}-2.0 not installed after '
'update'.format(self.pkg_name))
+ self.assertFalse(os.path.exists(self.update_flag_path),
+ "'updates pending' flag not cleared")
+
+ def test_005_update_flag_clear(self):
+ """Check if 'updates pending' flag is creared"""
+
+ # create a package (but do not install it) to initialize the repo in the VM
+ filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')
+ self.send_pkg(filename)
+ open(self.update_flag_path, 'a').close()
+
+ logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')
+ try:
+ subprocess.check_call(['sudo', 'qubes-dom0-update', '-y'] +
+ self.dom0_update_common_opts,
+ stdout=open(logpath, 'w'),
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ self.fail("qubes-dom0-update failed: " + open(
+ logpath).read())
+
+ with open(logpath) as f:
+ dom0_update_output = f.read()
+ self.assertFalse('Errno' in dom0_update_output or
+ 'Couldn\'t' in dom0_update_output,
+ "qubes-dom0-update reported an error: {}".
+ format(dom0_update_output))
+
+ self.assertFalse(os.path.exists(self.update_flag_path),
+ "'updates pending' flag not cleared")
+
+ def test_006_update_flag_clear(self):
+ """Check if 'updates pending' flag is creared, using --clean"""
+
+ # create a package (but do not install it) to initialize the repo in the VM
+ filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')
+ self.send_pkg(filename)
+ open(self.update_flag_path, 'a').close()
+
+ # also remove repodata to test #1685
+ if os.path.exists('/var/lib/qubes/updates/repodata'):
+ shutil.rmtree('/var/lib/qubes/updates/repodata')
+ logpath = os.path.join(self.tmpdir, 'dom0-update-output.txt')
+ try:
+ subprocess.check_call(['sudo', 'qubes-dom0-update', '-y',
+ '--clean'] +
+ self.dom0_update_common_opts,
+ stdout=open(logpath, 'w'),
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ self.fail("qubes-dom0-update failed: " + open(
+ logpath).read())
+
+ with open(logpath) as f:
+ dom0_update_output = f.read()
+ self.assertFalse('Errno' in dom0_update_output or
+ 'Couldn\'t' in dom0_update_output,
+ "qubes-dom0-update reported an error: {}".
+ format(dom0_update_output))
+
+ self.assertFalse(os.path.exists(self.update_flag_path),
+ "'updates pending' flag not cleared")
def test_010_instal(self):
filename = self.create_pkg(self.tmpdir, self.pkg_name, '1.0')
diff --git a/tests/extra.py b/tests/extra.py
new file mode 100644
index 00000000..d818c503
--- /dev/null
+++ b/tests/extra.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python2 -O
+# vim: fileencoding=utf-8
+
+#
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2016
+# Marek Marczykowski-Górecki
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import pkg_resources
+import qubes.tests
+import qubes.qubes
+
+
+class ExtraTestCase(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+
+ template = None
+
+ def setUp(self):
+ super(ExtraTestCase, self).setUp()
+ self.qc.unlock_db()
+ self.default_netvm = None
+
+ def create_vms(self, names):
+ """
+ Create AppVMs for the duration of the test. Will be automatically
+ removed after completing the test.
+ :param names: list of VM names to create (each of them will be
+ prefixed with some test specific string)
+ :return: list of created VM objects
+ """
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+ if self.template:
+ template = self.qc.get_vm_by_name(self.template)
+ else:
+ template = self.qc.get_default_template()
+ for vmname in names:
+ vm = self.qc.add_new_vm("QubesAppVm",
+ name=self.make_vm_name(vmname),
+ template=template,
+ uses_default_netvm=False,
+ netvm=self.default_netvm)
+ vm.create_on_disk(verbose=False)
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+
+ # get objects after reload
+ vms = []
+ for vmname in names:
+ vms.append(self.qc.get_vm_by_name(self.make_vm_name(vmname)))
+ return vms
+
+ def enable_network(self):
+ """
+ Enable access to the network. Must be called before creating VMs.
+ """
+ self.default_netvm = self.qc.get_default_netvm()
+ if self.template.startswith('whonix-ws'):
+ whonix_netvm = self.qc.get_vm_by_name('sys-whonix')
+ if whonix_netvm:
+ self.default_netvm = whonix_netvm
+
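+# The entry points below are how external packages plug in extra tests. A
+# hypothetical package (names are illustrative only) would declare in its
+# setup.py:
+#
+#     entry_points={
+#         'qubes.tests.extra': [
+#             'example = example_pkg.tests:list_tests'],
+#         'qubes.tests.extra.for_template': [
+#             'example = example_pkg.tests:list_template_tests'],
+#     }
+#
+# where each referenced callable returns an iterable of test case classes;
+# classes registered for templates get the template name set as the
+# 'template' attribute.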
+def load_tests(loader, tests, pattern):
+ for entry in pkg_resources.iter_entry_points('qubes.tests.extra'):
+ for test_case in entry.load()():
+ tests.addTests(loader.loadTestsFromTestCase(test_case))
+
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, qubes.qubes.QubesTemplateVm)]
+ except OSError:
+ templates = []
+
+ for entry in pkg_resources.iter_entry_points(
+ 'qubes.tests.extra.for_template'):
+ for test_case in entry.load()():
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ '{}_{}_{}'.format(
+ entry.name, test_case.__name__, template),
+ (test_case,),
+ {'template': template}
+ )
+ ))
+
+ return tests
diff --git a/tests/hardware.py b/tests/hardware.py
new file mode 100644
index 00000000..8ae282e5
--- /dev/null
+++ b/tests/hardware.py
@@ -0,0 +1,75 @@
+#!/usr/bin/python2
+# -*- coding: utf-8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+import os
+
+import qubes.tests
+import time
+import subprocess
+from unittest import expectedFailure
+
+
+class TC_00_HVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+ def setUp(self):
+ super(TC_00_HVM, self).setUp()
+ self.vm = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm1'))
+ self.vm.create_on_disk(verbose=False)
+
+ @expectedFailure
+ def test_000_pci_passthrough_presence(self):
+ pcidev = os.environ.get('QUBES_TEST_PCIDEV', None)
+ if pcidev is None:
+ self.skipTest('Specify PCI device with QUBES_TEST_PCIDEV '
+ 'environment variable')
+ self.vm.pcidevs = [pcidev]
+ self.vm.pci_strictreset = False
+ self.qc.save()
+ self.qc.unlock_db()
+
+ init_script = (
+ "#!/bin/sh\n"
+ "set -e\n"
+ "lspci -n > /dev/xvdb\n"
+ "poweroff\n"
+ )
+
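+ # The minimal HVM system runs lspci and writes the output to its second
+ # disk (/dev/xvdb, which maps to private.img here), so dom0 can read it
+ # back below and compare against the host's lspci entry for the device.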
+ self.prepare_hvm_system_linux(self.vm, init_script,
+ ['/usr/sbin/lspci'])
+ self.vm.start()
+ timeout = 60
+ while timeout > 0:
+ if not self.vm.is_running():
+ break
+ time.sleep(1)
+ timeout -= 1
+ if self.vm.is_running():
+ self.fail("Timeout while waiting for VM shutdown")
+
+ with open(self.vm.storage.private_img, 'r') as f:
+ lspci_vm = f.read(512).strip('\0')
+ p = subprocess.Popen(['lspci', '-ns', pcidev], stdout=subprocess.PIPE)
+ (lspci_host, _) = p.communicate()
+ # strip BDF, as it is different in VM
+ pcidev_desc = ' '.join(lspci_host.strip().split(' ')[1:])
+ self.assertIn(pcidev_desc, lspci_vm)
diff --git a/tests/hvm.py b/tests/hvm.py
new file mode 100644
index 00000000..9cc3e132
--- /dev/null
+++ b/tests/hvm.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+import qubes.tests
+from qubes.qubes import QubesException
+
+class TC_10_HVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
+ # TODO: test with some OS inside
+ # TODO: windows tools tests
+
+ def test_000_create_start(self):
+ testvm1 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+ testvm1.start()
+ self.assertEquals(testvm1.get_power_state(), "Running")
+
+ def test_010_create_start_template(self):
+ templatevm = self.qc.add_new_vm("QubesTemplateHVm",
+ name=self.make_vm_name('template'))
+ templatevm.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ templatevm.start()
+ self.assertEquals(templatevm.get_power_state(), "Running")
+
+ def test_020_create_start_template_vm(self):
+ templatevm = self.qc.add_new_vm("QubesTemplateHVm",
+ name=self.make_vm_name('template'))
+ templatevm.create_on_disk(verbose=False)
+ testvm2 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm2'),
+ template=templatevm)
+ testvm2.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ testvm2.start()
+ self.assertEquals(testvm2.get_power_state(), "Running")
+
+ def test_030_prevent_simultaneous_start(self):
+ templatevm = self.qc.add_new_vm("QubesTemplateHVm",
+ name=self.make_vm_name('template'))
+ templatevm.create_on_disk(verbose=False)
+ testvm2 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm2'),
+ template=templatevm)
+ testvm2.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+
+ templatevm.start()
+ self.assertEquals(templatevm.get_power_state(), "Running")
+ self.assertRaises(QubesException, testvm2.start)
+ templatevm.force_shutdown()
+ testvm2.start()
+ self.assertEquals(testvm2.get_power_state(), "Running")
+ self.assertRaises(QubesException, templatevm.start)
+
+ def test_100_resize_root_img(self):
+ testvm1 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False)
+ self.qc.save()
+ self.qc.unlock_db()
+ testvm1.resize_root_img(30*1024**3)
+ self.assertEquals(testvm1.get_root_img_sz(), 30*1024**3)
+ testvm1.start()
+ self.assertEquals(testvm1.get_power_state(), "Running")
+ # TODO: launch some OS there and check the size
+
+ def test_200_start_invalid_drive(self):
+ """Regression test for #1619"""
+ testvm1 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False)
+ testvm1.drive = 'hd:dom0:/invalid'
+ self.qc.save()
+ self.qc.unlock_db()
+ try:
+ testvm1.start()
+ except Exception as e:
+ self.assertIsInstance(e, QubesException)
+ else:
+ self.fail('No exception raised')
+
+ def test_201_start_invalid_drive_cdrom(self):
+ """Regression test for #1619"""
+ testvm1 = self.qc.add_new_vm("QubesHVm",
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False)
+ testvm1.drive = 'cdrom:dom0:/invalid'
+ self.qc.save()
+ self.qc.unlock_db()
+ try:
+ testvm1.start()
+ except Exception as e:
+ self.assertIsInstance(e, QubesException)
+ else:
+ self.fail('No exception raised')
+
diff --git a/tests/mime.py b/tests/mime.py
new file mode 100644
index 00000000..660a0338
--- /dev/null
+++ b/tests/mime.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+from distutils import spawn
+import os
+import re
+import subprocess
+import time
+import unittest
+
+import qubes.tests
+import qubes.qubes
+from qubes.qubes import QubesVmCollection
+
+@unittest.skipUnless(
+ spawn.find_executable('xprop') and
+ spawn.find_executable('xdotool') and
+ spawn.find_executable('wmctrl'),
+ "xprop or xdotool or wmctrl not installed")
+class TC_50_MimeHandlers(qubes.tests.SystemTestsMixin):
+ @classmethod
+ def setUpClass(cls):
+ if cls.template == 'whonix-gw' or 'minimal' in cls.template:
+ raise unittest.SkipTest(
+ 'Template {} not supported by this test'.format(cls.template))
+
+ if cls.template == 'whonix-ws':
+ # TODO: remove when Whonix-based DispVMs work (Whonix 13?)
+ raise unittest.SkipTest(
+ 'Template {} not supported by this test'.format(cls.template))
+
+ qc = QubesVmCollection()
+
+ cls._kill_test_vms(qc, prefix=qubes.tests.CLSVMPREFIX)
+
+ qc.lock_db_for_writing()
+ qc.load()
+
+ cls._remove_test_vms(qc, qubes.qubes.vmm.libvirt_conn,
+ prefix=qubes.tests.CLSVMPREFIX)
+
+ cls.source_vmname = cls.make_vm_name('source', True)
+ source_vm = qc.add_new_vm("QubesAppVm",
+ template=qc.get_vm_by_name(cls.template),
+ name=cls.source_vmname)
+ source_vm.create_on_disk(verbose=False)
+
+ cls.target_vmname = cls.make_vm_name('target', True)
+ target_vm = qc.add_new_vm("QubesAppVm",
+ template=qc.get_vm_by_name(cls.template),
+ name=cls.target_vmname)
+ target_vm.create_on_disk(verbose=False)
+
+ qc.save()
+ qc.unlock_db()
+ source_vm.start()
+ target_vm.start()
+
+ # make sure that DispVMs will be started from the same template
+ retcode = subprocess.call(['/usr/bin/qvm-create-default-dvm',
+ cls.template],
+ stderr=open(os.devnull, 'w'))
+ assert retcode == 0, "Error preparing DispVM"
+
+ def setUp(self):
+ super(TC_50_MimeHandlers, self).setUp()
+ self.source_vm = self.qc.get_vm_by_name(self.source_vmname)
+ self.target_vm = self.qc.get_vm_by_name(self.target_vmname)
+
+ def get_window_class(self, winid, dispvm=False):
+ (vm_winid, _) = subprocess.Popen(
+ ['xprop', '-id', winid, '_QUBES_VMWINDOWID'],
+ stdout=subprocess.PIPE
+ ).communicate()
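+ # xprop output looks roughly like:
+ #   _QUBES_VMWINDOWID(WINDOW): window id # 0x1600007
+ # (assumed format) - take the hex id after the '#'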
+ vm_winid = vm_winid.split("#")[1].strip('\n" ')
+ if dispvm:
+ (vmname, _) = subprocess.Popen(
+ ['xprop', '-id', winid, '_QUBES_VMNAME'],
+ stdout=subprocess.PIPE
+ ).communicate()
+ vmname = vmname.split("=")[1].strip('\n" ')
+ window_class = None
+ while window_class is None:
+ # XXX to use self.qc.get_vm_by_name would require reloading
+ # qubes.xml, so use qvm-run instead
+ xprop = subprocess.Popen(
+ ['qvm-run', '-p', vmname, 'xprop -id {} WM_CLASS'.format(
+ vm_winid)], stdout=subprocess.PIPE)
+ (window_class, _) = xprop.communicate()
+ if xprop.returncode != 0:
+ self.skipTest("xprop failed, not installed?")
+ if 'not found' in window_class:
+ # WM_CLASS not set yet, wait a little
+ time.sleep(0.1)
+ window_class = None
+ else:
+ window_class = None
+ while window_class is None:
+ xprop = self.target_vm.run(
+ 'xprop -id {} WM_CLASS'.format(vm_winid),
+ passio_popen=True)
+ (window_class, _) = xprop.communicate()
+ if xprop.returncode != 0:
+ self.skipTest("xprop failed, not installed?")
+ if 'not found' in window_class:
+ # WM_CLASS not set yet, wait a little
+ time.sleep(0.1)
+ window_class = None
+ # output: WM_CLASS(STRING) = "gnome-terminal-server", "Gnome-terminal"
+ try:
+ window_class = window_class.split("=")[1].split(",")[0].strip('\n" ')
+ except IndexError:
+ raise Exception(
+ "Unexpected output from xprop: '{}'".format(window_class))
+
+ return window_class
+
+ def open_file_and_check_viewer(self, filename, expected_app_titles,
+ expected_app_classes, dispvm=False):
+ self.qc.unlock_db()
+ if dispvm:
+ p = self.source_vm.run("qvm-open-in-dvm {}".format(filename),
+ passio_popen=True)
+ vmpattern = "disp*"
+ else:
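+ # allow qubes.OpenInVM and qubes.OpenURL calls from the source VM
+ # to the target VM for this test, so qvm-open-in-vm below is not
+ # blocked by the qrexec policy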
+ self.qrexec_policy('qubes.OpenInVM', self.source_vm.name,
+ self.target_vmname)
+ self.qrexec_policy('qubes.OpenURL', self.source_vm.name,
+ self.target_vmname)
+ p = self.source_vm.run("qvm-open-in-vm {} {}".format(
+ self.target_vmname, filename), passio_popen=True)
+ vmpattern = self.target_vmname
+ wait_count = 0
+ winid = None
+ window_title = None
+ while True:
+ search = subprocess.Popen(['xdotool', 'search',
+ '--onlyvisible', '--class', vmpattern],
+ stdout=subprocess.PIPE,
+ stderr=open(os.path.devnull, 'w'))
+ retcode = search.wait()
+ if retcode == 0:
+ winid = search.stdout.read().strip()
+ # get window title
+ (window_title, _) = subprocess.Popen(
+ ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE). \
+ communicate()
+ window_title = window_title.strip()
+ # ignore LibreOffice splash screen and window with no title
+ # set yet
+ if window_title and not window_title.startswith("LibreOffice")\
+ and not window_title == 'VMapp command':
+ break
+ wait_count += 1
+ if wait_count > 100:
+ self.fail("Timeout while waiting for editor window")
+ time.sleep(0.3)
+
+ # get window class
+ window_class = self.get_window_class(winid, dispvm)
+ # close the window - we've got the window class, it is no longer needed
+ subprocess.check_call(['wmctrl', '-i', '-c', winid])
+ p.wait()
+ self.wait_for_window(window_title, show=False)
+
+ def check_matches(obj, patterns):
+ return any((pat.search(obj) if isinstance(pat, type(re.compile('')))
+ else pat in obj) for pat in patterns)
+
+ if not check_matches(window_title, expected_app_titles) and \
+ not check_matches(window_class, expected_app_classes):
+ self.fail("Opening file {} resulted in window '{} ({})', which is "
+ "none of {!r} ({!r})".format(
+ filename, window_title, window_class,
+ expected_app_titles, expected_app_classes))
+
+ def prepare_txt(self, filename):
+ p = self.source_vm.run("cat > {}".format(filename), passio_popen=True)
+ p.stdin.write("This is test\n")
+ p.stdin.close()
+ retcode = p.wait()
+ assert retcode == 0, "Failed to write {} file".format(filename)
+
+ def prepare_pdf(self, filename):
+ self.prepare_txt("/tmp/source.txt")
+ cmd = "convert /tmp/source.txt {}".format(filename)
+ retcode = self.source_vm.run(cmd, wait=True)
+ assert retcode == 0, "Failed to run '{}'".format(cmd)
+
+ def prepare_doc(self, filename):
+ self.prepare_txt("/tmp/source.txt")
+ cmd = "unoconv -f doc -o {} /tmp/source.txt".format(filename)
+ retcode = self.source_vm.run(cmd, wait=True)
+ if retcode != 0:
+ self.skipTest("Failed to run '{}', not installed?".format(cmd))
+
+ def prepare_pptx(self, filename):
+ self.prepare_txt("/tmp/source.txt")
+ cmd = "unoconv -f pptx -o {} /tmp/source.txt".format(filename)
+ retcode = self.source_vm.run(cmd, wait=True)
+ if retcode != 0:
+ self.skipTest("Failed to run '{}', not installed?".format(cmd))
+
+ def prepare_png(self, filename):
+ self.prepare_txt("/tmp/source.txt")
+ cmd = "convert /tmp/source.txt {}".format(filename)
+ retcode = self.source_vm.run(cmd, wait=True)
+ if retcode != 0:
+ self.skipTest("Failed to run '{}', not installed?".format(cmd))
+
+ def prepare_jpg(self, filename):
+ self.prepare_txt("/tmp/source.txt")
+ cmd = "convert /tmp/source.txt {}".format(filename)
+ retcode = self.source_vm.run(cmd, wait=True)
+ if retcode != 0:
+ self.skipTest("Failed to run '{}', not installed?".format(cmd))
+
+ def test_000_txt(self):
+ filename = "/home/user/test_file.txt"
+ self.prepare_txt(filename)
+ self.open_file_and_check_viewer(filename, ["vim", "user@"],
+ ["gedit", "emacs", "libreoffice"])
+
+ def test_001_pdf(self):
+ filename = "/home/user/test_file.pdf"
+ self.prepare_pdf(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["evince"])
+
+ def test_002_doc(self):
+ filename = "/home/user/test_file.doc"
+ self.prepare_doc(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["libreoffice", "abiword"])
+
+ def test_003_pptx(self):
+ filename = "/home/user/test_file.pptx"
+ self.prepare_pptx(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["libreoffice"])
+
+ def test_004_png(self):
+ filename = "/home/user/test_file.png"
+ self.prepare_png(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"])
+
+ def test_005_jpg(self):
+ filename = "/home/user/test_file.jpg"
+ self.prepare_jpg(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"])
+
+ def test_006_jpeg(self):
+ filename = "/home/user/test_file.jpeg"
+ self.prepare_jpg(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"])
+
+ def test_010_url(self):
+ self.open_file_and_check_viewer("https://www.qubes-os.org/", [],
+ ["Firefox", "Iceweasel", "Navigator"])
+
+ def test_100_txt_dispvm(self):
+ filename = "/home/user/test_file.txt"
+ self.prepare_txt(filename)
+ self.open_file_and_check_viewer(filename, ["vim", "user@"],
+ ["gedit", "emacs", "libreoffice"],
+ dispvm=True)
+
+ def test_101_pdf_dispvm(self):
+ filename = "/home/user/test_file.pdf"
+ self.prepare_pdf(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["evince"],
+ dispvm=True)
+
+ def test_102_doc_dispvm(self):
+ filename = "/home/user/test_file.doc"
+ self.prepare_doc(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["libreoffice", "abiword"],
+ dispvm=True)
+
+ def test_103_pptx_dispvm(self):
+ filename = "/home/user/test_file.pptx"
+ self.prepare_pptx(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["libreoffice"],
+ dispvm=True)
+
+ def test_104_png_dispvm(self):
+ filename = "/home/user/test_file.png"
+ self.prepare_png(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"],
+ dispvm=True)
+
+ def test_105_jpg_dispvm(self):
+ filename = "/home/user/test_file.jpg"
+ self.prepare_jpg(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"],
+ dispvm=True)
+
+ def test_106_jpeg_dispvm(self):
+ filename = "/home/user/test_file.jpeg"
+ self.prepare_jpg(filename)
+ self.open_file_and_check_viewer(filename, [],
+ ["shotwell", "eog", "display"],
+ dispvm=True)
+
+ def test_110_url_dispvm(self):
+ self.open_file_and_check_viewer("https://www.qubes-os.org/", [],
+ ["Firefox", "Iceweasel", "Navigator"],
+ dispvm=True)
+
+def load_tests(loader, tests, pattern):
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, qubes.qubes.QubesTemplateVm)]
+ except OSError:
+ templates = []
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ 'TC_50_MimeHandlers_' + template,
+ (TC_50_MimeHandlers, qubes.tests.QubesTestCase),
+ {'template': template})))
+ return tests
\ No newline at end of file
diff --git a/tests/network.py b/tests/network.py
index fe202b9c..a1c57a09 100644
--- a/tests/network.py
+++ b/tests/network.py
@@ -54,6 +54,9 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
def setUp(self):
super(VmNetworkingMixin, self).setUp()
+ if self.template.startswith('whonix-'):
+ self.skipTest("Test not supported here - Whonix uses its own "
+ "firewall settings")
self.testnetvm = self.qc.add_new_vm("QubesNetVm",
name=self.make_vm_name('netvm1'),
template=self.qc.get_vm_by_name(self.template))
@@ -85,6 +88,8 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
run_netvm_cmd("ip link set test0 up")
run_netvm_cmd("ip addr add {}/24 dev test0".format(self.test_ip))
run_netvm_cmd("iptables -I INPUT -d {} -j ACCEPT".format(self.test_ip))
+ # ignore failure
+ self.run_cmd(self.testnetvm, "killall --wait dnsmasq")
run_netvm_cmd("dnsmasq -a {ip} -A /{name}/{ip} -i test0 -z".format(
ip=self.test_ip, name=self.test_name))
run_netvm_cmd("echo nameserver {} > /etc/resolv.conf".format(
@@ -121,6 +126,7 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
"Ping by IP from AppVM failed")
+ @qubes.tests.expectedFailureIfTemplate('debian-7')
@unittest.skipUnless(spawn.find_executable('xdotool'),
"xdotool not installed")
def test_020_simple_proxyvm_nm(self):
@@ -159,8 +165,8 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
# check for nm-applet presence
self.assertEqual(subprocess.call([
- 'xdotool', 'search', '--all', '--name',
- '--class', '^(NetworkManager Applet|{})$'.format(self.proxy.name)],
+ 'xdotool', 'search', '--class', '{}:nm-applet'.format(
+ self.proxy.name)],
stdout=open('/dev/null', 'w')), 0, "nm-applet window not found")
self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0,
"Ping by IP failed (after NM reconnection")
@@ -320,12 +326,29 @@ class VmNetworkingMixin(qubes.tests.SystemTestsMixin):
self.testvm1.start()
self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0)
- self.testvm1.run("ip addr flush dev eth0", user="root")
- self.testvm1.run("ip addr add 10.137.1.128/24 dev eth0", user="root")
- self.testvm1.run("ip route add dev eth0", user="root")
+ self.testvm1.run("ip addr flush dev eth0", user="root", wait=True)
+ self.testvm1.run("ip addr add 10.137.1.128/24 dev eth0", user="root",
+ wait=True)
+ self.testvm1.run("ip route add default dev eth0", user="root",
+ wait=True)
self.assertNotEqual(self.run_cmd(self.testvm1, self.ping_ip), 0,
"Spoofed ping should be blocked")
+ def test_100_late_xldevd_startup(self):
+ """Regression test for #1990"""
+ self.qc.unlock_db()
+ # Simulate late xl devd startup
+ cmd = "systemctl stop xendriverdomain"
+ if self.run_cmd(self.testnetvm, cmd) != 0:
+ self.fail("Command '%s' failed" % cmd)
+ self.testvm1.start()
+
+ cmd = "systemctl start xendriverdomain"
+ if self.run_cmd(self.testnetvm, cmd) != 0:
+ self.fail("Command '%s' failed" % cmd)
+
+ self.assertEqual(self.run_cmd(self.testvm1, self.ping_ip), 0)
+
class VmUpdatesMixin(qubes.tests.SystemTestsMixin):
"""
Tests for VM updates
@@ -495,10 +518,10 @@ class VmUpdatesMixin(qubes.tests.SystemTestsMixin):
p = self.netvm_repo.run(
"mkdir -p /tmp/apt-repo/dists/test && "
"cd /tmp/apt-repo/dists/test && "
- "cat > Release < Release && "
+ "echo '' $(sha256sum {p} | cut -f 1 -d ' ') $(stat -c %s {p}) {p}"
" >> Release && "
- "echo '' $(sha1sum {z} | cut -f 1 -d ' ') $(stat -c %s {z}) {z}"
+ "echo '' $(sha256sum {z} | cut -f 1 -d ' ') $(stat -c %s {z}) {z}"
" >> Release"
.format(p="main/binary-amd64/Packages",
z="main/binary-amd64/Packages.gz"),
@@ -508,11 +531,10 @@ class VmUpdatesMixin(qubes.tests.SystemTestsMixin):
"Label: Test repo\n"
"Suite: test\n"
"Codename: test\n"
- "Date: Tue, 27 Oct 2015 03:22:09 +0100\n"
+ "Date: Tue, 27 Oct 2015 03:22:09 UTC\n"
"Architectures: amd64\n"
"Components: main\n"
- "SHA1:\n"
- "EOF\n"
+ "SHA256:\n"
)
p.stdin.close()
if p.wait() != 0:
diff --git a/tests/pvgrub.py b/tests/pvgrub.py
new file mode 100644
index 00000000..5467d2a3
--- /dev/null
+++ b/tests/pvgrub.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2016 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+import os
+import unittest
+import qubes.tests
+@unittest.skipUnless(os.path.exists('/var/lib/qubes/vm-kernels/pvgrub2'),
+ 'grub-xen package not installed')
+class TC_40_PVGrub(qubes.tests.SystemTestsMixin):
+ def setUp(self):
+ super(TC_40_PVGrub, self).setUp()
+ supported = False
+ if self.template.startswith('fedora-'):
+ supported = True
+ elif self.template.startswith('debian-'):
+ supported = True
+ if not supported:
+ self.skipTest("Template {} not supported by this test".format(
+ self.template))
+
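+ # With kernel set to 'pvgrub2' the VM boots its own kernel from /boot via
+ # PV GRUB, so a kernel must be installed and grub.cfg generated inside the
+ # VM (or its template) first.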
+ def install_packages(self, vm):
+ if self.template.startswith('fedora-'):
+ cmd_install1 = 'dnf clean expire-cache && ' \
+ 'dnf install -y qubes-kernel-vm-support grub2-tools'
+ cmd_install2 = 'dnf install -y kernel && ' \
+ 'KVER=$(rpm -q --qf %{VERSION}-%{RELEASE}.%{ARCH} kernel) && ' \
+ 'dnf install --allowerasing -y kernel-devel-$KVER && ' \
+ 'dkms autoinstall -k $KVER'
+ cmd_update_grub = 'grub2-mkconfig -o /boot/grub2/grub.cfg'
+ elif self.template.startswith('debian-'):
+ cmd_install1 = 'apt-get update && apt-get install -y ' \
+ 'qubes-kernel-vm-support grub2-common'
+ cmd_install2 = 'apt-get install -y linux-image-amd64'
+ cmd_update_grub = 'mkdir /boot/grub && update-grub2'
+ else:
+ assert False, "Unsupported template?!"
+
+ for cmd in [cmd_install1, cmd_install2, cmd_update_grub]:
+ p = vm.run(cmd, user="root", passio_popen=True, passio_stderr=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEquals(p.returncode, 0,
+ "Failed command: {}\nSTDOUT: {}\nSTDERR: {}"
+ .format(cmd, stdout, stderr))
+
+ def get_kernel_version(self, vm):
+ if self.template.startswith('fedora-'):
+ cmd_get_kernel_version = 'rpm -q kernel|sort -n|tail -1|' \
+ 'cut -d - -f 2-'
+ elif self.template.startswith('debian-'):
+ cmd_get_kernel_version = \
+ 'dpkg-query --showformat=\'${Package}\\n\' --show ' \
+ '\'linux-image-*-amd64\'|sort -n|tail -1|cut -d - -f 3-'
+ else:
+ raise RuntimeError("Unsupported template?!")
+
+ p = vm.run(cmd_get_kernel_version, user="root", passio_popen=True)
+ (kver, _) = p.communicate()
+ self.assertEquals(p.returncode, 0,
+ "Failed command: {}".format(cmd_get_kernel_version))
+ return kver.strip()
+
+ def test_000_standalone_vm(self):
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ template=None,
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False,
+ source_template=self.qc.get_vm_by_name(
+ self.template))
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+ testvm1 = self.qc[testvm1.qid]
+ testvm1.start()
+ self.install_packages(testvm1)
+ kver = self.get_kernel_version(testvm1)
+ self.shutdown_and_wait(testvm1)
+
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+ testvm1 = self.qc[testvm1.qid]
+ testvm1.kernel = 'pvgrub2'
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+ testvm1 = self.qc[testvm1.qid]
+ testvm1.start()
+ p = testvm1.run('uname -r', passio_popen=True)
+ (actual_kver, _) = p.communicate()
+ self.assertEquals(actual_kver.strip(), kver)
+
+ def test_010_template_based_vm(self):
+ test_template = self.qc.add_new_vm("QubesTemplateVm",
+ template=None,
+ name=self.make_vm_name('template'))
+ test_template.clone_attrs(self.qc.get_vm_by_name(self.template))
+ test_template.clone_disk_files(
+ src_vm=self.qc.get_vm_by_name(self.template),
+ verbose=False)
+
+ testvm1 = self.qc.add_new_vm("QubesAppVm",
+ template=test_template,
+ name=self.make_vm_name('vm1'))
+ testvm1.create_on_disk(verbose=False,
+ source_template=test_template)
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+ test_template = self.qc[test_template.qid]
+ testvm1 = self.qc[testvm1.qid]
+ test_template.start()
+ self.install_packages(test_template)
+ kver = self.get_kernel_version(test_template)
+ self.shutdown_and_wait(test_template)
+
+ self.qc.lock_db_for_writing()
+ self.qc.load()
+ test_template = self.qc[test_template.qid]
+ test_template.kernel = 'pvgrub2'
+ testvm1 = self.qc[testvm1.qid]
+ testvm1.kernel = 'pvgrub2'
+ self.save_and_reload_db()
+ self.qc.unlock_db()
+
+ # Check if TemplateBasedVM boots and has the right kernel
+ testvm1 = self.qc[testvm1.qid]
+ testvm1.start()
+ p = testvm1.run('uname -r', passio_popen=True)
+ (actual_kver, _) = p.communicate()
+ self.assertEquals(actual_kver.strip(), kver)
+
+ # And the same for the TemplateVM itself
+ test_template = self.qc[test_template.qid]
+ test_template.start()
+ p = test_template.run('uname -r', passio_popen=True)
+ (actual_kver, _) = p.communicate()
+ self.assertEquals(actual_kver.strip(), kver)
+
+def load_tests(loader, tests, pattern):
+ try:
+ qc = qubes.qubes.QubesVmCollection()
+ qc.lock_db_for_reading()
+ qc.load()
+ qc.unlock_db()
+ templates = [vm.name for vm in qc.values() if
+ isinstance(vm, qubes.qubes.QubesTemplateVm)]
+ except OSError:
+ templates = []
+ for template in templates:
+ tests.addTests(loader.loadTestsFromTestCase(
+ type(
+ 'TC_40_PVGrub_' + template,
+ (TC_40_PVGrub, qubes.tests.QubesTestCase),
+ {'template': template})))
+ return tests
\ No newline at end of file
diff --git a/tests/regressions.py b/tests/regressions.py
index 70c03586..673de721 100644
--- a/tests/regressions.py
+++ b/tests/regressions.py
@@ -1,4 +1,5 @@
#!/usr/bin/python2 -O
+# coding=utf-8
#
# The Qubes OS Project, https://www.qubes-os.org/
@@ -23,11 +24,14 @@
#
import multiprocessing
+import os
import time
import unittest
import qubes.qubes
import qubes.tests
+import subprocess
+
class TC_00_Regressions(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
# Bug: #906
@@ -56,3 +60,21 @@ class TC_00_Regressions(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase)
self.assertIsNotNone(qc.get_vm_by_name(vmname1))
self.assertIsNotNone(qc.get_vm_by_name(vmname2))
+ def test_bug_1389_dispvm_qubesdb_crash(self):
+ """
+ Sometimes QubesDB instance in DispVM crashes at startup.
+ Unfortunately we don't have a reliable way to reproduce it, so try twice
+ :return:
+ """
+ self.qc.unlock_db()
+ for try_no in xrange(2):
+ p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
+ 'qubes.VMShell', 'dom0', 'DEFAULT'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+ p.stdin.write("qubesdb-read /name || echo ERROR\n")
+ dispvm_name = p.stdout.readline()
+ p.stdin.close()
+ self.assertTrue(dispvm_name.startswith("disp"),
+ "Try {} failed".format(try_no))
diff --git a/tests/run.py b/tests/run.py
index d4064898..78441eb3 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -133,7 +133,11 @@ class QubesTestResult(unittest.TestResult):
def addError(self, test, err): # pylint: disable=invalid-name
super(QubesTestResult, self).addError(test, err)
- test.log.critical('ERROR ({err[0].__name__}: {err[1]!r})'.format(err=err))
+ try:
+ test.log.critical(
+ 'ERROR ({err[0].__name__}: {err[1]!r})'.format(err=err))
+ except AttributeError:
+ pass
if self.showAll:
self.stream.writeln(
'{color[red]}{color[bold]}ERROR{color[normal]} ({})'.format(
@@ -157,7 +161,10 @@ class QubesTestResult(unittest.TestResult):
def addSkip(self, test, reason): # pylint: disable=invalid-name
super(QubesTestResult, self).addSkip(test, reason)
- test.log.warning('skipped ({})'.format(reason))
+ try:
+ test.log.warning('skipped ({})'.format(reason))
+ except AttributeError:
+ pass
if self.showAll:
self.stream.writeln(
'{color[cyan]}skipped{color[normal]} ({})'.format(
@@ -307,7 +314,7 @@ def main():
for name in args.names:
suite.addTests(
[test for test in list_test_cases(alltests)
- if (str(test)+'/').startswith(name.replace('.', '/')+'/')])
+ if str(test).startswith(name)])
else:
suite.addTests(loader.loadTestsFromName('qubes.tests'))
diff --git a/tests/storage.py b/tests/storage.py
new file mode 100644
index 00000000..2c32fa73
--- /dev/null
+++ b/tests/storage.py
@@ -0,0 +1,77 @@
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2015 Bahtiar `kalkin-` Gadimov
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import qubes.storage
+from qubes.qubes import defaults
+from qubes.storage.xen import XenPool, XenStorage
+from qubes.tests import QubesTestCase, SystemTestsMixin
+
+
+class TC_00_Storage(SystemTestsMixin, QubesTestCase):
+
+ """ This class tests the utility methods from :mod:``qubes.storage`` """
+
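+ # qubes.storage.dump() is expected to serialize a storage instance to its
+ # fully qualified class path, and qubes.storage.load() to resolve such a
+ # string back to the class object (see the two tests below).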
+ def test_000_dump(self):
+ """ Dumps storage instance to a storage string """
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ vm = self.qc.add_new_vm('QubesAppVm', name=vmname,
+ pool_name='default', template=template)
+ storage = vm.storage
+ result = qubes.storage.dump(storage)
+ expected = 'qubes.storage.xen.XenStorage'
+ self.assertEquals(result, expected)
+
+ def test_001_load(self):
+ """ Loads storage driver from a storage string """
+ result = qubes.storage.load('qubes.storage.xen.XenStorage')
+ self.assertTrue(result is XenStorage)
+
+ def test_002_default_pool_drivers(self):
+ """ The only predifined pool driver is xen """
+ result = defaults['pool_drivers'].keys()
+ expected = ["xen"]
+ self.assertEquals(result, expected)
+
+ def test_003_get_pool_klass(self):
+ """ Expect the default pool to be `XenPool` """
+ result = qubes.storage._get_pool_klass('default')
+ self.assertTrue(result is XenPool)
+
+ def test_004_pool_exists_default(self):
+ """ Expect the default pool to exists """
+ self.assertTrue(qubes.storage.pool_exists('default'))
+
+ def test_005_pool_exists_random(self):
+ """ Expect this pool to not a exist """
+ self.assertFalse(
+ qubes.storage.pool_exists('asdh312096r832598213iudhas'))
+
+ def test_006_add_remove_pool(self):
+ """ Tries to adding and removing a pool. """
+ pool_name = 'asdjhrp89132'
+
+ # make sure it really does not exist
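+ # (removing a non-existent pool is expected to be a no-op)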
+ qubes.storage.remove_pool(pool_name)
+
+ qubes.storage.add_pool(pool_name, driver='xen')
+ self.assertTrue(qubes.storage.pool_exists(pool_name))
+
+ qubes.storage.remove_pool(pool_name)
+ self.assertFalse(qubes.storage.pool_exists(pool_name))
diff --git a/tests/storage_xen.py b/tests/storage_xen.py
new file mode 100644
index 00000000..a6503929
--- /dev/null
+++ b/tests/storage_xen.py
@@ -0,0 +1,228 @@
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2015 Bahtiar `kalkin-` Gadimov
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import shutil
+import qubes.storage
+from qubes.tests import QubesTestCase, SystemTestsMixin
+from qubes.storage.xen import XenStorage
+
+
+class TC_00_XenPool(SystemTestsMixin, QubesTestCase):
+
+ """ This class tests some properties of the 'default' pool. """
+
+ def test000_default_pool_dir(self):
+ """ The predefined dir for the default pool should be ``/var/lib/qubes``
+
+ .. seealso::
+ Data :data:``qubes.qubes.defaults['pool_config']``.
+ """
+ vm = self._init_app_vm()
+ result = qubes.storage.get_pool("default", vm).dir_path
+ expected = '/var/lib/qubes/'
+ self.assertEquals(result, expected)
+
+ def test001_default_storage_class(self):
+ """ Check when using default pool the Storage is ``XenStorage``. """
+ result = self._init_app_vm().storage
+ self.assertIsInstance(result, XenStorage)
+
+ def test_002_default_pool_name(self):
+ """ Default pool_name is 'default'. """
+ vm = self._init_app_vm()
+ self.assertEquals(vm.pool_name, "default")
+
+ def _init_app_vm(self):
+ """ Return initalised, but not created, AppVm. """
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ return self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
+ pool_name='default')
+
+
+class TC_01_XenPool(SystemTestsMixin, QubesTestCase):
+
+ """ Test the paths for the default Xen file based storage (``XenStorage``).
+ """
+
+ POOL_DIR = '/var/lib/qubes/test-pool'
+ APPVMS_DIR = '/var/lib/qubes/test-pool/appvms'
+ TEMPLATES_DIR = '/var/lib/qubes/test-pool/vm-templates'
+ SERVICE_DIR = '/var/lib/qubes/test-pool/servicevms'
+
+ def setUp(self):
+ """ Add a test file based storage pool """
+ super(TC_01_XenPool, self).setUp()
+ qubes.storage.add_pool('test-pool', driver='xen',
+ dir_path=self.POOL_DIR)
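+ # the pool directory itself is created lazily, on first VM creation
+ # (see test_002_pool_dir_create)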
+
+ def tearDown(self):
+ """ Remove the file based storage pool after testing """
+ super(TC_01_XenPool, self).tearDown()
+ qubes.storage.remove_pool("test-pool")
+ shutil.rmtree(self.POOL_DIR, ignore_errors=True)
+
+ def test_001_pool_exists(self):
+ """ Check if the storage pool was added to the storage pool config """
+ self.assertTrue(qubes.storage.pool_exists('test-pool'))
+
+ def test_002_pool_dir_create(self):
+ """ Check if the storage pool dir and subdirs were created """
+
+ # The dir should not exist beforehand
+ self.assertFalse(os.path.exists(self.POOL_DIR))
+
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
+ pool_name='test-pool')
+
+ self.assertTrue(os.path.exists(self.POOL_DIR))
+ self.assertTrue(os.path.exists(self.APPVMS_DIR))
+ self.assertTrue(os.path.exists(self.SERVICE_DIR))
+ self.assertTrue(os.path.exists(self.TEMPLATES_DIR))
+
+ def test_003_pool_dir(self):
+ """ Check if the vm storage pool_dir is the same as specified """
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
+ pool_name='test-pool')
+ result = qubes.storage.get_pool('test-pool', vm).dir_path
+ self.assertEquals(self.POOL_DIR, result)
+
+ def test_004_app_vmdir(self):
+ """ Check the vm storage dir for an AppVm"""
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.APPVMS_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_005_hvm_vmdir(self):
+ """ Check the vm storage dir for a HVM"""
+ vmname = self.make_vm_name('hvm')
+ vm = self.qc.add_new_vm('QubesHVm', name=vmname,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.APPVMS_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_006_net_vmdir(self):
+ """ Check the vm storage dir for a Netvm"""
+ vmname = self.make_vm_name('hvm')
+ vm = self.qc.add_new_vm('QubesNetVm', name=vmname,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.SERVICE_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_007_proxy_vmdir(self):
+ """ Check the vm storage dir for a ProxyVm"""
+ vmname = self.make_vm_name('proxyvm')
+ vm = self.qc.add_new_vm('QubesProxyVm', name=vmname,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.SERVICE_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_008_admin_vmdir(self):
+ """ Check the vm storage dir for a AdminVm"""
+ # TODO How to test AdminVm?
+ pass
+
+ def test_009_template_vmdir(self):
+ """ Check the vm storage dir for a TemplateVm"""
+ vmname = self.make_vm_name('templatevm')
+ vm = self.qc.add_new_vm('QubesTemplateVm', name=vmname,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.TEMPLATES_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_010_template_hvm_vmdir(self):
+ """ Check the vm storage dir for a TemplateHVm"""
+ vmname = self.make_vm_name('templatehvm')
+ vm = self.qc.add_new_vm('QubesTemplateHVm', name=vmname,
+ pool_name='test-pool')
+
+ expected = os.path.join(self.TEMPLATES_DIR, vm.name)
+ result = vm.storage.vmdir
+ self.assertEquals(expected, result)
+
+ def test_011_appvm_file_images(self):
+ """ Check if all the needed image files are created for an AppVm"""
+
+ vmname = self.make_vm_name('appvm')
+ template = self.qc.get_default_template()
+ vm = self.qc.add_new_vm('QubesAppVm', name=vmname, template=template,
+ pool_name='test-pool')
+ vm.create_on_disk(verbose=False)
+
+ expected_vmdir = os.path.join(self.APPVMS_DIR, vm.name)
+ self.assertEqualsAndExists(vm.storage.vmdir, expected_vmdir)
+
+ expected_private_path = os.path.join(expected_vmdir, 'private.img')
+ self.assertEqualsAndExists(vm.storage.private_img,
+ expected_private_path)
+
+ expected_volatile_path = os.path.join(expected_vmdir, 'volatile.img')
+ self.assertEqualsAndExists(vm.storage.volatile_img,
+ expected_volatile_path)
+
+ def test_012_hvm_file_images(self):
+ """ Check if all the needed image files are created for a HVm"""
+
+ vmname = self.make_vm_name('hvm')
+ vm = self.qc.add_new_vm('QubesHVm', name=vmname,
+ pool_name='test-pool')
+ vm.create_on_disk(verbose=False)
+
+ expected_vmdir = os.path.join(self.APPVMS_DIR, vm.name)
+ self.assertEqualsAndExists(vm.storage.vmdir, expected_vmdir)
+
+ expected_private_path = os.path.join(expected_vmdir, 'private.img')
+ self.assertEqualsAndExists(vm.storage.private_img,
+ expected_private_path)
+
+ expected_root_path = os.path.join(expected_vmdir, 'root.img')
+ self.assertEqualsAndExists(vm.storage.root_img, expected_root_path)
+
+ expected_volatile_path = os.path.join(expected_vmdir, 'volatile.img')
+ self.assertEqualsAndExists(vm.storage.volatile_img,
+ expected_volatile_path)
+
+ def assertEqualsAndExists(self, result_path, expected_path):
+ """ Check if the ``result_path``, matches ``expected_path`` and exists.
+
+ See also: :meth:``assertExist``
+ """
+ self.assertEquals(result_path, expected_path)
+ self.assertExist(result_path)
+
+ def assertExist(self, path):
+ """ Assert that the given path exists. """
+ self.assertTrue(os.path.exists(path))
diff --git a/tests/vm_qrexec_gui.py b/tests/vm_qrexec_gui.py
index 6d62f6f8..e3d3539a 100644
--- a/tests/vm_qrexec_gui.py
+++ b/tests/vm_qrexec_gui.py
@@ -33,6 +33,7 @@ import time
from qubes.qubes import QubesVmCollection, defaults, QubesException
import qubes.tests
+import re
TEST_DATA = "0123456789" * 1024
@@ -54,24 +55,6 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
self.testvm1 = self.qc[self.testvm1.qid]
self.testvm2 = self.qc[self.testvm2.qid]
- def enter_keys_in_window(self, title, keys):
- """
- Search for window with given title, then enter listed keys there.
- The function will wait for said window to appear.
-
- :param title: title of window
- :param keys: list of keys to enter, as for `xdotool key`
- :return: None
- """
-
- # 'xdotool search --sync' sometimes crashes on some race when
- # accessing window properties
- self.wait_for_window(title)
- command = ['xdotool', 'search', '--name', title,
- 'windowactivate',
- 'key'] + keys
- subprocess.check_call(command)
-
def test_000_start_shutdown(self):
self.testvm1.start()
self.assertEquals(self.testvm1.get_power_state(), "Running")
@@ -144,7 +127,7 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
time.sleep(0.5)
subprocess.check_call(
['xdotool', 'search', '--name', title,
- 'windowactivate', 'type', 'exit\n'])
+ 'windowactivate', '--sync', 'type', 'exit\n'])
wait_count = 0
while subprocess.call(['xdotool', 'search', '--name', title],
@@ -185,7 +168,7 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
time.sleep(0.5)
subprocess.check_call(
['xdotool', 'search', '--name', title,
- 'windowactivate', 'type', 'exit\n'])
+ 'windowactivate', '--sync', 'type', 'exit\n'])
wait_count = 0
while subprocess.call(['xdotool', 'search', '--name', title],
@@ -546,17 +529,123 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
self.fail("Timeout, probably deadlock")
self.assertEqual(result.value, 0, "Service call failed")
- @unittest.skipUnless(spawn.find_executable('xdotool'),
- "xdotool not installed")
+ def test_080_qrexec_service_argument_allow_default(self):
+ """Qrexec service call with argument"""
+ self.testvm1.start()
+ self.testvm2.start()
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument", user="root",
+ passio_popen=True)
+ p.communicate("/bin/echo $1")
+
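+ # with no argument-specific policy installed, the base test.Argument
+ # policy applies to calls with any argument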
+ with open("/etc/qubes-rpc/policy/test.Argument", "w") as policy:
+ policy.write("%s %s allow" % (self.testvm1.name, self.testvm2.name))
+ self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.Argument")
+
+ p = self.testvm1.run("/usr/lib/qubes/qrexec-client-vm {} "
+ "test.Argument+argument".format(self.testvm2.name),
+ passio_popen=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "argument\n")
+
+ def test_081_qrexec_service_argument_allow_specific(self):
+ """Qrexec service call with argument - allow only specific value"""
+ self.testvm1.start()
+ self.testvm2.start()
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument", user="root",
+ passio_popen=True)
+ p.communicate("/bin/echo $1")
+
+ with open("/etc/qubes-rpc/policy/test.Argument", "w") as policy:
+ policy.write("$anyvm $anyvm deny")
+ self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.Argument")
+
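+ # an argument-specific policy file takes precedence over the base one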
+ with open("/etc/qubes-rpc/policy/test.Argument+argument", "w") as \
+ policy:
+ policy.write("%s %s allow" % (self.testvm1.name, self.testvm2.name))
+ self.addCleanup(os.unlink,
+ "/etc/qubes-rpc/policy/test.Argument+argument")
+
+ p = self.testvm1.run("/usr/lib/qubes/qrexec-client-vm {} "
+ "test.Argument+argument".format(self.testvm2.name),
+ passio_popen=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "argument\n")
+
+ def test_082_qrexec_service_argument_deny_specific(self):
+ """Qrexec service call with argument - deny specific value"""
+ self.testvm1.start()
+ self.testvm2.start()
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument", user="root",
+ passio_popen=True)
+ p.communicate("/bin/echo $1")
+
+ with open("/etc/qubes-rpc/policy/test.Argument", "w") as policy:
+ policy.write("$anyvm $anyvm allow")
+ self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.Argument")
+
+ with open("/etc/qubes-rpc/policy/test.Argument+argument", "w") as \
+ policy:
+ policy.write("%s %s deny" % (self.testvm1.name, self.testvm2.name))
+ self.addCleanup(os.unlink,
+ "/etc/qubes-rpc/policy/test.Argument+argument")
+
+ p = self.testvm1.run("/usr/lib/qubes/qrexec-client-vm {} "
+ "test.Argument+argument".format(self.testvm2.name),
+ passio_popen=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "")
+ self.assertEqual(p.returncode, 1, "Service request should be denied")
+
+ def test_083_qrexec_service_argument_specific_implementation(self):
+ """Qrexec service call with argument - argument specific
+ implementatation"""
+ self.testvm1.start()
+ self.testvm2.start()
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument", user="root",
+ passio_popen=True)
+ p.communicate("/bin/echo $1")
+
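+ # an argument-specific service implementation should be preferred
+ # over the generic test.Argument script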
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument+argument",
+ user="root", passio_popen=True)
+ p.communicate("/bin/echo specific: $1")
+
+ with open("/etc/qubes-rpc/policy/test.Argument", "w") as policy:
+ policy.write("%s %s allow" % (self.testvm1.name, self.testvm2.name))
+ self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.Argument")
+
+ p = self.testvm1.run("/usr/lib/qubes/qrexec-client-vm {} "
+ "test.Argument+argument".format(self.testvm2.name),
+ passio_popen=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "specific: argument\n")
+
+ def test_084_qrexec_service_argument_extra_env(self):
+ """Qrexec service call with argument - extra env variables"""
+ self.testvm1.start()
+ self.testvm2.start()
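+ # the service should see the full call name and the bare argument
+ # in its environment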
+ p = self.testvm2.run("cat > /etc/qubes-rpc/test.Argument", user="root",
+ passio_popen=True)
+ p.communicate("/bin/echo $QREXEC_SERVICE_FULL_NAME "
+ "$QREXEC_SERVICE_ARGUMENT")
+
+ with open("/etc/qubes-rpc/policy/test.Argument", "w") as policy:
+ policy.write("%s %s allow" % (self.testvm1.name, self.testvm2.name))
+ self.addCleanup(os.unlink, "/etc/qubes-rpc/policy/test.Argument")
+
+ p = self.testvm1.run("/usr/lib/qubes/qrexec-client-vm {} "
+ "test.Argument+argument".format(self.testvm2.name),
+ passio_popen=True)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "test.Argument+argument argument\n")
+
def test_100_qrexec_filecopy(self):
self.testvm1.start()
self.testvm2.start()
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm2.name)
p = self.testvm1.run("qvm-copy-to-vm %s /etc/passwd" %
self.testvm2.name, passio_popen=True,
passio_stderr=True)
- # Confirm transfer
- subprocess.check_call(
- ['xdotool', 'search', '--sync', '--name', 'Question', 'key', 'y'])
p.wait()
self.assertEqual(p.returncode, 0, "qvm-copy-to-vm failed: %s" %
p.stderr.read())
@@ -566,15 +655,55 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
wait=True)
self.assertEqual(retcode, 0, "file differs")
- @unittest.skipUnless(spawn.find_executable('xdotool'),
- "xdotool not installed")
+ def test_105_qrexec_filemove(self):
+ self.testvm1.start()
+ self.testvm2.start()
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm2.name)
+ retcode = self.testvm1.run("cp /etc/passwd passwd", wait=True)
+ assert retcode == 0, "Failed to prepare source file"
+ p = self.testvm1.run("qvm-move-to-vm %s passwd" %
+ self.testvm2.name, passio_popen=True,
+ passio_stderr=True)
+ p.wait()
+ self.assertEqual(p.returncode, 0, "qvm-move-to-vm failed: %s" %
+ p.stderr.read())
+ retcode = self.testvm2.run("diff /etc/passwd "
+ "/home/user/QubesIncoming/{}/passwd".format(
+ self.testvm1.name),
+ wait=True)
+ self.assertEqual(retcode, 0, "file differs")
+ retcode = self.testvm1.run("test -f passwd", wait=True)
+ self.assertEqual(retcode, 1, "source file not removed")
+
+ def test_101_qrexec_filecopy_with_autostart(self):
+ self.testvm1.start()
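+ # testvm2 is deliberately not started here - qvm-copy-to-vm is
+ # expected to start it automatically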
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm2.name)
+ p = self.testvm1.run("qvm-copy-to-vm %s /etc/passwd" %
+ self.testvm2.name, passio_popen=True,
+ passio_stderr=True)
+ p.wait()
+ self.assertEqual(p.returncode, 0, "qvm-copy-to-vm failed: %s" %
+ p.stderr.read())
+ # workaround for libvirt bug (domain ID isn't updated when the domain
+ # is started from another application) - details in
+ # QubesOS/qubes-core-libvirt@63ede4dfb4485c4161dd6a2cc809e8fb45ca664f
+ self.testvm2._libvirt_domain = None
+ self.assertTrue(self.testvm2.is_running())
+ retcode = self.testvm2.run("diff /etc/passwd "
+ "/home/user/QubesIncoming/{}/passwd".format(
+ self.testvm1.name),
+ wait=True)
+ self.assertEqual(retcode, 0, "file differs")
+
def test_110_qrexec_filecopy_deny(self):
self.testvm1.start()
self.testvm2.start()
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm2.name, allow=False)
p = self.testvm1.run("qvm-copy-to-vm %s /etc/passwd" %
self.testvm2.name, passio_popen=True)
- # Deny transfer
- self.enter_keys_in_window('Question', ['n'])
p.wait()
self.assertNotEqual(p.returncode, 0, "qvm-copy-to-vm unexpectedly "
"succeeded")
@@ -586,15 +715,13 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
@unittest.skip("Xen gntalloc driver crashes when page is mapped in the "
"same domain")
- @unittest.skipUnless(spawn.find_executable('xdotool'),
- "xdotool not installed")
def test_120_qrexec_filecopy_self(self):
self.testvm1.start()
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm1.name)
p = self.testvm1.run("qvm-copy-to-vm %s /etc/passwd" %
self.testvm1.name, passio_popen=True,
passio_stderr=True)
- # Confirm transfer
- self.enter_keys_in_window('Question', ['y'])
p.wait()
self.assertEqual(p.returncode, 0, "qvm-copy-to-vm failed: %s" %
p.stderr.read())
@@ -604,6 +731,41 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
wait=True)
self.assertEqual(retcode, 0, "file differs")
+ @unittest.skipUnless(spawn.find_executable('xdotool'),
+ "xdotool not installed")
+ def test_130_qrexec_filemove_disk_full(self):
+ self.testvm1.start()
+ self.testvm2.start()
+ self.qrexec_policy('qubes.Filecopy', self.testvm1.name,
+ self.testvm2.name)
+ # Prepare test file
+ prepare_cmd = ("yes teststring | dd of=testfile bs=1M "
+ "count=50 iflag=fullblock")
+ retcode = self.testvm1.run(prepare_cmd, wait=True)
+ if retcode != 0:
+ raise RuntimeError("Failed '{}' in {}".format(prepare_cmd,
+ self.testvm1.name))
+ # Prepare target directory with limited size
+ prepare_cmd = (
+ "mkdir -p /home/user/QubesIncoming && "
+ "chown user /home/user/QubesIncoming && "
+ "mount -t tmpfs none /home/user/QubesIncoming -o size=48M"
+ )
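+ # the 48M tmpfs cannot hold the 50M test file, so the move must fail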
+ retcode = self.testvm2.run(prepare_cmd, user="root", wait=True)
+ if retcode != 0:
+ raise RuntimeError("Failed '{}' in {}".format(prepare_cmd,
+ self.testvm2.name))
+ p = self.testvm1.run("qvm-move-to-vm %s testfile" %
+ self.testvm2.name, passio_popen=True,
+ passio_stderr=True)
+ # Close GUI error message
+ self.enter_keys_in_window('Error', ['Return'])
+ p.wait()
+ self.assertNotEqual(p.returncode, 0, "qvm-move-to-vm should fail")
+ retcode = self.testvm1.run("test -f testfile", wait=True)
+ self.assertEqual(retcode, 0, "testfile should not be deleted in "
+ "source VM")
+
def test_200_timezone(self):
"""Test whether timezone setting is properly propagated to the VM"""
if "whonix" in self.template:
@@ -623,6 +785,8 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
def test_210_time_sync(self):
"""Test time synchronization mechanism"""
+ if self.template.startswith('whonix-'):
+ self.skipTest('qvm-sync-clock disabled for Whonix VMs')
self.testvm1.start()
self.testvm2.start()
(start_time, _) = subprocess.Popen(["date", "-u", "+%s"],
@@ -650,6 +814,10 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
self.assertEquals(retcode, 0,
"qvm-sync-clock failed with code {}".
format(retcode))
+ # qvm-sync-clock is asynchronous - it spawns the qubes.SetDateTime
+ # service, sends it the timestamp value and exits without waiting for
+ # the time to actually be set
+ time.sleep(1)
(vm_time, _) = self.testvm1.run("date -u +%s",
passio_popen=True).communicate()
self.assertAlmostEquals(int(vm_time), int(start_time), delta=30)
@@ -677,7 +845,9 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
# First offline test
self.testvm1.resize_private_img(4*1024**3)
self.testvm1.start()
- p = self.testvm1.run('df --output=size /rw|tail -n 1',
+ df_cmd = '( df --output=size /rw || df /rw | awk \'{print $2}\' )|' \
+ 'tail -n 1'
+ p = self.testvm1.run(df_cmd,
passio_popen=True)
# new_size in 1k-blocks
(new_size, _) = p.communicate()
@@ -685,489 +855,157 @@ class TC_00_AppVMMixin(qubes.tests.SystemTestsMixin):
self.assertGreater(int(new_size.strip()), 3.8*1024**2)
# Then online test
self.testvm1.resize_private_img(6*1024**3)
- p = self.testvm1.run('df --output=size /rw|tail -n 1',
+ p = self.testvm1.run(df_cmd,
passio_popen=True)
# new_size in 1k-blocks
(new_size, _) = p.communicate()
# some safety margin for FS metadata
self.assertGreater(int(new_size.strip()), 5.8*1024**2)
-
-class TC_05_StandaloneVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
- def test_000_create_start(self):
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- template=None,
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False,
- source_template=self.qc.get_default_template())
- self.qc.save()
- self.qc.unlock_db()
- testvm1.start()
- self.assertEquals(testvm1.get_power_state(), "Running")
-
- def test_100_resize_root_img(self):
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- template=None,
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False,
- source_template=self.qc.get_default_template())
- self.qc.save()
- self.qc.unlock_db()
- testvm1.resize_root_img(20*1024**3)
- timeout = 60
- while testvm1.is_running():
- time.sleep(1)
- timeout -= 1
- if timeout == 0:
- self.fail("Timeout while waiting for VM shutdown")
- self.assertEquals(testvm1.get_root_img_sz(), 20*1024**3)
- testvm1.start()
- p = testvm1.run('df --output=size /|tail -n 1',
- passio_popen=True)
- # new_size in 1k-blocks
- (new_size, _) = p.communicate()
- # some safety margin for FS metadata
- self.assertGreater(int(new_size.strip()), 19*1024**2)
-
-
-class TC_10_HVM(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
- # TODO: test with some OS inside
- # TODO: windows tools tests
-
- def test_000_create_start(self):
- testvm1 = self.qc.add_new_vm("QubesHVm",
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
- testvm1.start()
- self.assertEquals(testvm1.get_power_state(), "Running")
-
- def test_010_create_start_template(self):
- templatevm = self.qc.add_new_vm("QubesTemplateHVm",
- name=self.make_vm_name('template'))
- templatevm.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
-
- templatevm.start()
- self.assertEquals(templatevm.get_power_state(), "Running")
-
- def test_020_create_start_template_vm(self):
- templatevm = self.qc.add_new_vm("QubesTemplateHVm",
- name=self.make_vm_name('template'))
- templatevm.create_on_disk(verbose=False)
- testvm2 = self.qc.add_new_vm("QubesHVm",
- name=self.make_vm_name('vm2'),
- template=templatevm)
- testvm2.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
-
- testvm2.start()
- self.assertEquals(testvm2.get_power_state(), "Running")
-
- def test_030_prevent_simultaneus_start(self):
- templatevm = self.qc.add_new_vm("QubesTemplateHVm",
- name=self.make_vm_name('template'))
- templatevm.create_on_disk(verbose=False)
- testvm2 = self.qc.add_new_vm("QubesHVm",
- name=self.make_vm_name('vm2'),
- template=templatevm)
- testvm2.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
-
- templatevm.start()
- self.assertEquals(templatevm.get_power_state(), "Running")
- self.assertRaises(QubesException, testvm2.start)
- templatevm.force_shutdown()
- testvm2.start()
- self.assertEquals(testvm2.get_power_state(), "Running")
- self.assertRaises(QubesException, templatevm.start)
-
- def test_100_resize_root_img(self):
- testvm1 = self.qc.add_new_vm("QubesHVm",
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
- testvm1.resize_root_img(30*1024**3)
- self.assertEquals(testvm1.get_root_img_sz(), 30*1024**3)
- testvm1.start()
- self.assertEquals(testvm1.get_power_state(), "Running")
- # TODO: launch some OS there and check the size
-
-class TC_20_DispVMMixin(qubes.tests.SystemTestsMixin):
- def test_000_prepare_dvm(self):
- self.qc.unlock_db()
- retcode = subprocess.call(['/usr/bin/qvm-create-default-dvm',
- self.template],
- stderr=open(os.devnull, 'w'))
- self.assertEqual(retcode, 0)
- self.qc.lock_db_for_writing()
- self.qc.load()
- self.assertIsNotNone(self.qc.get_vm_by_name(
- self.template + "-dvm"))
- # TODO: check mtime of snapshot file
-
- def test_010_simple_dvm_run(self):
- self.qc.unlock_db()
- p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
- 'qubes.VMShell', 'dom0', 'DEFAULT'],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=open(os.devnull, 'w'))
- (stdout, _) = p.communicate(input="echo test")
- self.assertEqual(stdout, "test\n")
- # TODO: check if DispVM is destroyed
-
@unittest.skipUnless(spawn.find_executable('xdotool'),
"xdotool not installed")
- def test_020_gui_app(self):
- self.qc.unlock_db()
- p = subprocess.Popen(['/usr/lib/qubes/qfile-daemon-dvm',
- 'qubes.VMShell', 'dom0', 'DEFAULT'],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=open(os.devnull, 'w'))
+ def test_300_bug_1028_gui_memory_pinning(self):
+ """
+ If a VM window's composition buffers are relocated in memory, the GUI
+ daemon will still use the old pointers and display stale pages
+ :return:
+ """
+ self.testvm1.memory = 800
+ self.testvm1.maxmem = 800
+ # exclude from memory balancing
+ self.testvm1.services['meminfo-writer'] = False
+ self.testvm1.start()
+ # and allow large map count
+ self.testvm1.run("echo 256000 > /proc/sys/vm/max_map_count",
+ user="root", wait=True)
+ allocator_c = (
+ "#include \n"
+ "#include \n"
+ "#include \n"
+ "\n"
+ "int main(int argc, char **argv) {\n"
+ " int total_pages;\n"
+ " char *addr, *iter;\n"
+ "\n"
+ " total_pages = atoi(argv[1]);\n"
+ " addr = mmap(NULL, total_pages * 0x1000, PROT_READ | "
+ "PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);\n"
+ " if (addr == MAP_FAILED) {\n"
+ " perror(\"mmap\");\n"
+ " exit(1);\n"
+ " }\n"
+ " printf(\"Stage1\\n\");\n"
+ " fflush(stdout);\n"
+ " getchar();\n"
+ " for (iter = addr; iter < addr + total_pages*0x1000; iter += "
+ "0x2000) {\n"
+ " if (mlock(iter, 0x1000) == -1) {\n"
+ " perror(\"mlock\");\n"
+ " fprintf(stderr, \"%d of %d\\n\", (iter-addr)/0x1000, "
+ "total_pages);\n"
+ " exit(1);\n"
+ " }\n"
+ " }\n"
+ " printf(\"Stage2\\n\");\n"
+ " fflush(stdout);\n"
+ " for (iter = addr+0x1000; iter < addr + total_pages*0x1000; "
+ "iter += 0x2000) {\n"
+ " if (munmap(iter, 0x1000) == -1) {\n"
+ " perror(\"munmap\");\n"
+ " exit(1);\n"
+ " }\n"
+ " }\n"
+ " printf(\"Stage3\\n\");\n"
+ " fflush(stdout);\n"
+ " fclose(stdout);\n"
+ " getchar();\n"
+ "\n"
+ " return 0;\n"
+ "}\n")
- # wait for DispVM startup:
- p.stdin.write("echo test\n")
- p.stdin.flush()
- l = p.stdout.readline()
- self.assertEqual(l, "test\n")
+ p = self.testvm1.run("cat > allocator.c", passio_popen=True)
+ p.communicate(allocator_c)
+ p = self.testvm1.run("gcc allocator.c -o allocator",
+ passio_popen=True, passio_stderr=True)
+ (stdout, stderr) = p.communicate()
+ if p.returncode != 0:
+ self.skipTest("allocator compile failed: {}".format(stderr))
- # potential race condition, but our tests are supposed to be
- # running on dedicated machine, so should not be a problem
- self.qc.lock_db_for_reading()
- self.qc.load()
- self.qc.unlock_db()
+ # drop caches to have even more memory pressure
+ self.testvm1.run("echo 3 > /proc/sys/vm/drop_caches",
+ user="root", wait=True)
- max_qid = 0
- for vm in self.qc.values():
- if not vm.is_disposablevm():
- continue
- if vm.qid > max_qid:
- max_qid = vm.qid
- dispvm = self.qc[max_qid]
- self.assertNotEqual(dispvm.qid, 0, "DispVM not found in qubes.xml")
- self.assertTrue(dispvm.is_running())
- try:
- window_title = 'user@%s' % (dispvm.template.name + "-dvm")
- p.stdin.write("xterm -e "
- "\"sh -s -c 'echo \\\"\033]0;{}\007\\\";read;'\"\n".
- format(window_title))
- self.wait_for_window(window_title)
+ # now fragment all free memory
+ p = self.testvm1.run("grep ^MemFree: /proc/meminfo|awk '{print $2}'",
+ passio_popen=True)
+ memory_pages = int(p.communicate()[0].strip())
+ memory_pages /= 4  # MemFree is in kB - convert to number of 4k pages
+ alloc1 = self.testvm1.run(
+ "ulimit -l unlimited; exec /home/user/allocator {}".format(
+ memory_pages),
+ user="root", passio_popen=True, passio_stderr=True)
+ # wait for the memory to be allocated; can't use just .read(), because
+ # detecting EOF is unreliable while the process is still running
+ alloc1.stdin.write("\n")
+ alloc1.stdin.flush()
+ alloc_out = alloc1.stdout.read(len("Stage1\nStage2\nStage3\n"))
- time.sleep(0.5)
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'windowactivate', 'key', 'Return'])
+ if "Stage3" not in alloc_out:
+ # read stderr only in case of failed assert, but still have nice
+ # failure message (don't use self.fail() directly)
+ self.assertIn("Stage3", alloc_out, alloc1.stderr.read())
- wait_count = 0
- while subprocess.call(['xdotool', 'search', '--name', window_title],
- stdout=open(os.path.devnull, 'w'),
- stderr=subprocess.STDOUT) == 0:
- wait_count += 1
- if wait_count > 100:
- self.fail("Timeout while waiting for gnome-terminal "
- "termination")
- time.sleep(0.1)
- finally:
- p.stdin.close()
+ # now launch some window - it should get a fragmented composition
+ # buffer; it is important to have some changing content there, to
+ # generate content update events (aka damage notify)
+ proc = self.testvm1.run("gnome-terminal --full-screen -e top",
+ passio_popen=True)
- wait_count = 0
- while dispvm.is_running():
- wait_count += 1
- if wait_count > 100:
- self.fail("Timeout while waiting for DispVM destruction")
- time.sleep(0.1)
- wait_count = 0
- while p.poll() is None:
- wait_count += 1
- if wait_count > 100:
- self.fail("Timeout while waiting for qfile-daemon-dvm "
- "termination")
- time.sleep(0.1)
- self.assertEqual(p.returncode, 0)
+ # help xdotool a little...
+ time.sleep(2)
+ # get window ID
+ search = subprocess.Popen(['xdotool', 'search', '--sync',
+ '--onlyvisible', '--class', self.testvm1.name + ':.*erminal'],
+ stdout=subprocess.PIPE)
+ winid = search.communicate()[0].strip()
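+ # _QUBES_VMWINDOWID holds the ID of the corresponding window inside
+ # the VM, so the same window can be captured from both sides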
+ xprop = subprocess.Popen(['xprop', '-notype', '-id', winid,
+ '_QUBES_VMWINDOWID'], stdout=subprocess.PIPE)
+ vm_winid = xprop.stdout.read().strip().split(' ')[4]
- self.qc.lock_db_for_reading()
- self.qc.load()
- self.qc.unlock_db()
- self.assertIsNone(self.qc.get_vm_by_name(dispvm.name),
- "DispVM not removed from qubes.xml")
+ # now free the fragmented memory and trigger compaction
+ alloc1.stdin.write("\n")
+ alloc1.wait()
+ self.testvm1.run("echo 1 > /proc/sys/vm/compact_memory", user="root")
- def _handle_editor(self, winid):
- (window_title, _) = subprocess.Popen(
- ['xdotool', 'getwindowname', winid], stdout=subprocess.PIPE).\
- communicate()
- window_title = window_title.strip().\
- replace('(', '\(').replace(')', '\)')
- time.sleep(1)
- if "gedit" in window_title:
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'windowactivate', 'type', 'test test 2\n'])
- time.sleep(0.5)
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'key', 'ctrl+s', 'ctrl+q'])
- elif "emacs" in window_title:
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'windowactivate', 'type', 'test test 2\n'])
- time.sleep(0.5)
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'key', 'ctrl+x', 'ctrl+s'])
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'key', 'ctrl+x', 'ctrl+c'])
- elif "vim" in window_title:
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'windowactivate', 'key', 'i',
- 'type', 'test test 2\n'])
- subprocess.check_call(
- ['xdotool', 'search', '--name', window_title,
- 'key', 'Escape', 'colon', 'w', 'q', 'Return'])
- else:
- self.fail("Unknown editor window: {}".format(window_title))
+ # now the window may already be "broken"; to be sure, allocate (= zero
+ # out) some memory
+ alloc2 = self.testvm1.run(
+ "ulimit -l unlimited; /home/user/allocator {}".format(memory_pages),
+ user="root", passio_popen=True, passio_stderr=True)
+ alloc2.stdout.read(len("Stage1\n"))
- @unittest.skipUnless(spawn.find_executable('xdotool'),
- "xdotool not installed")
- def test_030_edit_file(self):
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- name=self.make_vm_name('vm1'),
- template=self.qc.get_vm_by_name(
- self.template))
- testvm1.create_on_disk(verbose=False)
- self.qc.save()
+ # wait for damage notify - top updates every 3 sec by default
+ time.sleep(6)
- testvm1.start()
- testvm1.run("echo test1 > /home/user/test.txt", wait=True)
+ # now take a screenshot of the window, from both dom0 and the VM;
+ # choose the pnm format, as it doesn't carry any extra metadata - easy
+ # to compare
+ p = self.testvm1.run("import -window {} pnm:-".format(vm_winid),
+ passio_popen=True, passio_stderr=True)
+ (vm_image, stderr) = p.communicate()
+ if p.returncode != 0:
+ raise Exception("Failed to get VM window image: {}".format(
+ stderr))
- self.qc.unlock_db()
- p = testvm1.run("qvm-open-in-dvm /home/user/test.txt",
- passio_popen=True)
+ p = subprocess.Popen(["import", "-window", winid, "pnm:-"],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (dom0_image, stderr) = p.communicate()
+ if p.returncode != 0:
+ raise Exception("Failed to get dom0 window image: {}".format(
+ stderr))
- wait_count = 0
- winid = None
- while True:
- search = subprocess.Popen(['xdotool', 'search',
- '--onlyvisible', '--class', 'disp*'],
- stdout=subprocess.PIPE,
- stderr=open(os.path.devnull, 'w'))
- retcode = search.wait()
- if retcode == 0:
- winid = search.stdout.read().strip()
- break
- wait_count += 1
- if wait_count > 100:
- self.fail("Timeout while waiting for editor window")
- time.sleep(0.3)
-
- self._handle_editor(winid)
- p.wait()
- p = testvm1.run("cat /home/user/test.txt",
- passio_popen=True)
- (test_txt_content, _) = p.communicate()
- self.assertEqual(test_txt_content, "test test 2\ntest1\n")
-
-
-class TC_30_Gui_daemon(qubes.tests.SystemTestsMixin, qubes.tests.QubesTestCase):
- @unittest.skipUnless(spawn.find_executable('xdotool'),
- "xdotool not installed")
- def test_000_clipboard(self):
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- name=self.make_vm_name('vm1'),
- template=self.qc.get_default_template())
- testvm1.create_on_disk(verbose=False)
- testvm2 = self.qc.add_new_vm("QubesAppVm",
- name=self.make_vm_name('vm2'),
- template=self.qc.get_default_template())
- testvm2.create_on_disk(verbose=False)
- self.qc.save()
- self.qc.unlock_db()
-
- testvm1.start()
- testvm2.start()
-
- window_title = 'user@{}'.format(testvm1.name)
- testvm1.run('zenity --text-info --editable --title={}'.format(
- window_title))
-
- self.wait_for_window(window_title)
- time.sleep(0.5)
- test_string = "test{}".format(testvm1.xid)
-
- # Type and copy some text
- subprocess.check_call(['xdotool', 'search', '--name', window_title,
- 'windowactivate',
- 'type', '{}'.format(test_string)])
- # second xdotool call because type --terminator do not work (SEGV)
- # additionally do not use search here, so window stack will be empty
- # and xdotool will use XTEST instead of generating events manually -
- # this will be much better - at least because events will have
- # correct timestamp (so gui-daemon would not drop the copy request)
- subprocess.check_call(['xdotool',
- 'key', 'ctrl+a', 'ctrl+c', 'ctrl+shift+c',
- 'Escape'])
-
- clipboard_content = \
- open('/var/run/qubes/qubes-clipboard.bin', 'r').read().strip()
- self.assertEquals(clipboard_content, test_string,
- "Clipboard copy operation failed - content")
- clipboard_source = \
- open('/var/run/qubes/qubes-clipboard.bin.source',
- 'r').read().strip()
- self.assertEquals(clipboard_source, testvm1.name,
- "Clipboard copy operation failed - owner")
-
- # Then paste it to the other window
- window_title = 'user@{}'.format(testvm2.name)
- p = testvm2.run('zenity --entry --title={} > test.txt'.format(
- window_title), passio_popen=True)
- self.wait_for_window(window_title)
-
- subprocess.check_call(['xdotool', 'key', '--delay', '100',
- 'ctrl+shift+v', 'ctrl+v', 'Return'])
- p.wait()
-
- # And compare the result
- (test_output, _) = testvm2.run('cat test.txt',
- passio_popen=True).communicate()
- self.assertEquals(test_string, test_output.strip())
-
- clipboard_content = \
- open('/var/run/qubes/qubes-clipboard.bin', 'r').read().strip()
- self.assertEquals(clipboard_content, "",
- "Clipboard not wiped after paste - content")
- clipboard_source = \
- open('/var/run/qubes/qubes-clipboard.bin.source', 'r').read(
-
- ).strip()
- self.assertEquals(clipboard_source, "",
- "Clipboard not wiped after paste - owner")
-
-
-@unittest.skipUnless(os.path.exists('/var/lib/qubes/vm-kernels/pvgrub2'),
- 'grub-xen package not installed')
-class TC_40_PVGrub(qubes.tests.SystemTestsMixin):
- def setUp(self):
- super(TC_40_PVGrub, self).setUp()
- supported = False
- if self.template.startswith('fedora-'):
- supported = True
- elif self.template.startswith('debian-'):
- supported = True
- if not supported:
- self.skipTest("Template {} not supported by this test".format(
- self.template))
-
- def install_packages(self, vm):
- if self.template.startswith('fedora-'):
- cmd_install1 = 'yum install -y qubes-kernel-vm-support grub2-tools'
- cmd_install2 = 'yum install -y kernel kernel-devel'
- cmd_update_grub = 'grub2-mkconfig -o /boot/grub2/grub.cfg'
- elif self.template.startswith('debian-'):
- cmd_install1 = 'apt-get update && apt-get install -y ' \
- 'qubes-kernel-vm-support grub2-common'
- cmd_install2 = 'apt-get install -y linux-image-amd64'
- cmd_update_grub = 'mkdir /boot/grub && update-grub2'
- else:
- assert False, "Unsupported template?!"
-
- for cmd in [cmd_install1, cmd_install2, cmd_update_grub]:
- p = vm.run(cmd, user="root", passio_popen=True, passio_stderr=True)
- (stdout, stderr) = p.communicate()
- self.assertEquals(p.returncode, 0,
- "Failed command: {}\nSTDOUT: {}\nSTDERR: {}"
- .format(cmd, stdout, stderr))
-
- def get_kernel_version(self, vm):
- if self.template.startswith('fedora-'):
- cmd_get_kernel_version = 'rpm -q kernel|sort -n|tail -1|' \
- 'cut -d - -f 2-'
- elif self.template.startswith('debian-'):
- cmd_get_kernel_version = \
- 'dpkg-query --showformat=\'${Package}\\n\' --show ' \
- '\'linux-image-*-amd64\'|sort -n|tail -1|cut -d - -f 3-'
- else:
- raise RuntimeError("Unsupported template?!")
-
- p = vm.run(cmd_get_kernel_version, user="root", passio_popen=True)
- (kver, _) = p.communicate()
- self.assertEquals(p.returncode, 0,
- "Failed command: {}".format(cmd_get_kernel_version))
- return kver.strip()
-
- def test_000_standalone_vm(self):
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- template=None,
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False,
- source_template=self.qc.get_vm_by_name(
- self.template))
- self.save_and_reload_db()
- self.qc.unlock_db()
- testvm1 = self.qc[testvm1.qid]
- testvm1.start()
- self.install_packages(testvm1)
- kver = self.get_kernel_version(testvm1)
- self.shutdown_and_wait(testvm1)
-
- self.qc.lock_db_for_writing()
- self.qc.load()
- testvm1 = self.qc[testvm1.qid]
- testvm1.kernel = 'pvgrub2'
- self.save_and_reload_db()
- self.qc.unlock_db()
- testvm1 = self.qc[testvm1.qid]
- testvm1.start()
- p = testvm1.run('uname -r', passio_popen=True)
- (actual_kver, _) = p.communicate()
- self.assertEquals(actual_kver.strip(), kver)
-
- def test_010_template_based_vm(self):
- test_template = self.qc.add_new_vm("QubesTemplateVm",
- template=None,
- name=self.make_vm_name('template'))
- test_template.clone_attrs(self.qc.get_vm_by_name(self.template))
- test_template.clone_disk_files(
- src_vm=self.qc.get_vm_by_name(self.template),
- verbose=False)
-
- testvm1 = self.qc.add_new_vm("QubesAppVm",
- template=test_template,
- name=self.make_vm_name('vm1'))
- testvm1.create_on_disk(verbose=False,
- source_template=test_template)
- self.save_and_reload_db()
- self.qc.unlock_db()
- test_template = self.qc[test_template.qid]
- testvm1 = self.qc[testvm1.qid]
- test_template.start()
- self.install_packages(test_template)
- kver = self.get_kernel_version(test_template)
- self.shutdown_and_wait(test_template)
-
- self.qc.lock_db_for_writing()
- self.qc.load()
- test_template = self.qc[test_template.qid]
- test_template.kernel = 'pvgrub2'
- testvm1 = self.qc[testvm1.qid]
- testvm1.kernel = 'pvgrub2'
- self.save_and_reload_db()
- self.qc.unlock_db()
-
- # Check if TemplateBasedVM boots and has the right kernel
- testvm1 = self.qc[testvm1.qid]
- testvm1.start()
- p = testvm1.run('uname -r', passio_popen=True)
- (actual_kver, _) = p.communicate()
- self.assertEquals(actual_kver.strip(), kver)
-
- # And the same for the TemplateVM itself
- test_template = self.qc[test_template.qid]
- test_template.start()
- p = test_template.run('uname -r', passio_popen=True)
- (actual_kver, _) = p.communicate()
- self.assertEquals(actual_kver.strip(), kver)
+ if vm_image != dom0_image:
+ self.fail("Dom0 window doesn't match VM window content")
def load_tests(loader, tests, pattern):
@@ -1187,15 +1025,4 @@ def load_tests(loader, tests, pattern):
(TC_00_AppVMMixin, qubes.tests.QubesTestCase),
{'template': template})))
- tests.addTests(loader.loadTestsFromTestCase(
- type(
- 'TC_20_DispVM_' + template,
- (TC_20_DispVMMixin, qubes.tests.QubesTestCase),
- {'template': template})))
- tests.addTests(loader.loadTestsFromTestCase(
- type(
- 'TC_40_PVGrub_' + template,
- (TC_40_PVGrub, qubes.tests.QubesTestCase),
- {'template': template})))
-
return tests
diff --git a/version b/version
index ff365e06..e650c01d 100644
--- a/version
+++ b/version
@@ -1 +1 @@
-3.1.3
+3.2.9
diff --git a/vm-config/xen-vm-template-hvm.xml b/vm-config/xen-vm-template-hvm.xml
index 13431b33..728b17c4 100644
--- a/vm-config/xen-vm-template-hvm.xml
+++ b/vm-config/xen-vm-template-hvm.xml
@@ -9,20 +9,21 @@
hvmloader
- {disable_network1}-net lwip,client_ip={ip},server_ip={dns2},dns={dns1},gw={gateway},netmask={netmask}{disable_network2}
+ {features}
destroy
destroy
destroy
-
+ {no_network_begin}{no_network_end}
+ {network_begin}{network_end}
{rootdev}
{privatedev}
{otherdevs}
diff --git a/vm-config/xen-vm-template.xml b/vm-config/xen-vm-template.xml
index ccb89c93..bbb7ee0a 100644
--- a/vm-config/xen-vm-template.xml
+++ b/vm-config/xen-vm-template.xml
@@ -10,6 +10,7 @@
{kerneldir}/initramfs
root=/dev/mapper/dmroot ro nomodeset console=hvc0 rd_NO_PLYMOUTH 3 {kernelopts}
+ {features}