2015-10-17 00:10:15 +02:00
|
|
|
# pylint: disable=invalid-name
|
2015-01-19 18:03:23 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# The Qubes OS Project, https://www.qubes-os.org/
|
|
|
|
#
|
|
|
|
# Copyright (C) 2014-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
|
2015-10-17 00:10:15 +02:00
|
|
|
# Copyright (C) 2014-2015
|
|
|
|
# Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
2015-01-19 18:03:23 +01:00
|
|
|
# Copyright (C) 2014-2015 Wojtek Porczyk <woju@invisiblethingslab.com>
|
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# This library is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU Lesser General Public
|
|
|
|
# License as published by the Free Software Foundation; either
|
|
|
|
# version 2.1 of the License, or (at your option) any later version.
|
2015-01-19 18:03:23 +01:00
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# This library is distributed in the hope that it will be useful,
|
2015-01-19 18:03:23 +01:00
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2017-10-12 00:11:50 +02:00
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
# Lesser General Public License for more details.
|
2015-01-19 18:03:23 +01:00
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# You should have received a copy of the GNU Lesser General Public
|
|
|
|
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
|
2015-01-19 18:03:23 +01:00
|
|
|
#
|
2015-01-05 14:41:59 +01:00
|
|
|
|
2016-03-13 03:36:20 +01:00
|
|
|
"""
|
|
|
|
.. warning::
|
|
|
|
The test suite hereby claims any domain whose name starts with
|
|
|
|
:py:data:`VMPREFIX` as fair game. This is needed to enforce sane
|
|
|
|
test executing environment. If you have domains named ``test-*``,
|
|
|
|
don't run the tests.
|
|
|
|
"""
|
2016-04-11 13:03:12 +02:00
|
|
|
|
2017-04-18 10:16:14 +02:00
|
|
|
import asyncio
|
2015-01-05 15:39:14 +01:00
|
|
|
import collections
|
2016-02-28 03:43:04 +01:00
|
|
|
import functools
|
2015-10-17 00:10:15 +02:00
|
|
|
import logging
|
2015-01-13 23:17:18 +01:00
|
|
|
import os
|
2017-04-18 10:16:14 +02:00
|
|
|
import pathlib
|
2017-06-20 16:14:16 +02:00
|
|
|
import shlex
|
2015-10-17 00:10:15 +02:00
|
|
|
import shutil
|
2015-01-13 23:17:18 +01:00
|
|
|
import subprocess
|
2015-10-17 00:10:15 +02:00
|
|
|
import sys
|
2016-02-26 10:59:20 +01:00
|
|
|
import tempfile
|
2016-09-25 16:31:31 +02:00
|
|
|
import time
|
2016-02-10 19:23:09 +01:00
|
|
|
import traceback
|
2015-01-05 14:41:59 +01:00
|
|
|
import unittest
|
2017-04-18 10:16:14 +02:00
|
|
|
import warnings
|
2016-09-25 16:31:31 +02:00
|
|
|
from distutils import spawn
|
2015-01-05 14:41:59 +01:00
|
|
|
|
2017-07-26 02:59:05 +02:00
|
|
|
import gc
|
2015-01-11 01:19:03 +01:00
|
|
|
import lxml.etree
|
2017-03-16 19:54:22 +01:00
|
|
|
import pkg_resources
|
2015-01-11 01:19:03 +01:00
|
|
|
|
2017-08-28 14:28:17 +02:00
|
|
|
import qubes
|
2017-06-05 19:00:18 +02:00
|
|
|
import qubes.api
|
|
|
|
import qubes.api.admin
|
2017-07-04 05:22:39 +02:00
|
|
|
import qubes.api.internal
|
2018-12-05 05:40:50 +01:00
|
|
|
import qubes.api.misc
|
2016-09-25 16:31:31 +02:00
|
|
|
import qubes.backup
|
2015-01-05 17:01:13 +01:00
|
|
|
import qubes.config
|
2016-08-09 04:26:05 +02:00
|
|
|
import qubes.devices
|
2015-01-05 15:39:14 +01:00
|
|
|
import qubes.events
|
2016-03-10 11:22:52 +01:00
|
|
|
import qubes.exc
|
2018-01-16 21:32:15 +01:00
|
|
|
import qubes.ext.pci
|
2016-03-13 14:03:53 +01:00
|
|
|
import qubes.vm.standalonevm
|
2017-08-28 14:28:17 +02:00
|
|
|
import qubes.vm.templatevm
|
2015-01-05 15:39:14 +01:00
|
|
|
|
2015-10-27 11:39:59 +01:00
|
|
|
#: path of the qubes.xml copy used by per-test instances
XMLPATH = '/var/lib/qubes/qubes-test.xml'
#: path of the qubes.xml copy used for class-wide (setUpClass) state
CLASS_XMLPATH = '/var/lib/qubes/qubes-class-test.xml'
#: default template used by tests
TEMPLATE = 'fedora-23'
#: name prefix of VMs created per-test; anything matching is fair game
VMPREFIX = 'test-inst-'
#: name prefix of VMs created in setUpClass
CLSVMPREFIX = 'test-cls-'


#: LVM thin pool used by storage tests, in ``volume_group/thin_pool`` form;
#: overridable through the ``DEFAULT_LVM_POOL`` environment variable.
DEFAULT_LVM_POOL = os.environ.get('DEFAULT_LVM_POOL', 'qubes_dom0/pool00')


#: pool configuration handed to tests that need an LVM-thin pool
POOL_CONF = {'name': 'test-lvm',
             'driver': 'lvm_thin',
             'volume_group': DEFAULT_LVM_POOL.split('/')[0],
             'thin_pool': DEFAULT_LVM_POOL.split('/')[1]}
|
|
|
|
|
2015-01-05 17:01:13 +01:00
|
|
|
#: :py:obj:`True` if running in dom0, :py:obj:`False` otherwise
in_dom0 = False

#: :py:obj:`False` if outside of git repo,
#: path to root of the directory otherwise
in_git = False

# Probe for dom0 by actually connecting to libvirt; a failed connection
# leaves in_dom0 at False.
# NOTE(review): only libvirt.libvirtError is caught -- if the `import libvirt`
# itself fails, the except clause raises NameError; presumably libvirt is
# always installed where this module is used -- confirm.
try:
    import libvirt
    libvirt.openReadOnly(qubes.config.defaults['libvirt_uri']).close()
    in_dom0 = True
except libvirt.libvirtError:
    pass

# Register the asyncio-based libvirt event implementation only in dom0;
# cleanup_loop() later drains it between tests.
if in_dom0:
    import libvirtaio
    libvirt_event_impl = libvirtaio.virEventRegisterAsyncIOImpl()
else:
    libvirt_event_impl = None

# Detect a git checkout; on success in_git holds the repo top-level path
# (a str), and logging is redirected to /tmp so tests do not touch the
# system log files.
try:
    in_git = subprocess.check_output(
        ['git', 'rev-parse', '--show-toplevel']).decode().strip()
    qubes.log.LOGPATH = '/tmp'
    qubes.log.LOGFILE = '/tmp/qubes.log'
except subprocess.CalledProcessError:
    # git returned nonzero, we are outside git repo
    pass
except OSError:
    # command not found; let's assume we're outside
    pass

# Syslog handler installed lazily by SystemTestCase.setUp (module-global so
# it is only attached to the root logger once).
ha_syslog = None
|
2017-08-14 23:07:25 +02:00
|
|
|
|
2015-01-05 17:01:13 +01:00
|
|
|
def skipUnlessDom0(test_item):
    '''Decorator that skips test outside dom0.

    Some tests (especially integration tests) have to be run in more or less
    working dom0. This is checked by connecting to libvirt.
    '''

    skip_decorator = unittest.skipUnless(in_dom0, 'outside dom0')
    return skip_decorator(test_item)
|
|
|
|
|
2015-01-13 23:17:18 +01:00
|
|
|
def skipUnlessGit(test_item):
    '''Decorator that skips test outside git repo.

    There are very few tests that can be run only in git. One example is
    correctness of example code that won't get included in RPM.
    '''

    skip_decorator = unittest.skipUnless(in_git, 'outside git tree')
    return skip_decorator(test_item)
|
|
|
|
|
2017-08-14 23:07:25 +02:00
|
|
|
def skipUnlessEnv(varname):
    '''Decorator generator for skipping tests without environment variable set.

    Some tests require working X11 display, like those using GTK library, which
    segfaults without connection to X.
    Other require their own, custom variables.
    '''

    reason = 'no {} set'.format(varname)
    return unittest.skipUnless(os.getenv(varname), reason)
|
|
|
|
|
2015-01-13 23:17:18 +01:00
|
|
|
|
2015-01-05 15:39:14 +01:00
|
|
|
class TestEmitter(qubes.events.Emitter):
    '''Dummy event emitter which records events fired on it.

    Events are counted in :py:attr:`fired_events` attribute, which is
    :py:class:`collections.Counter` instance. For each event, a
    ``(event, kwargs)`` key is counted, where *event* is the event name
    (a string) and *kwargs* is a :py:class:`frozenset` of the keyword
    arguments' items (dict values become frozensets of their items, list
    values become tuples, so the whole key stays hashable).

    >>> emitter = TestEmitter()
    >>> emitter.fired_events
    Counter()
    >>> emitter.fire_event('event', spam='eggs', foo='bar')
    >>> emitter.fired_events
    Counter({('event', frozenset({('foo', 'bar'), ('spam', 'eggs')})): 1})
    '''

    def __init__(self, *args, **kwargs):
        super(TestEmitter, self).__init__(*args, **kwargs)

        #: :py:class:`collections.Counter` instance
        self.fired_events = collections.Counter()

    @staticmethod
    def _hashable_kwargs(kwargs):
        '''Normalize event kwargs into a hashable frozenset of items.

        Dict values are turned into frozensets of their items and list
        values into tuples, so the result can serve as a Counter key.
        '''
        return frozenset(
            (key,
                frozenset(value.items()) if isinstance(value, dict)
                else tuple(value) if isinstance(value, list)
                else value)
            for key, value in kwargs.items()
        )

    def fire_event(self, event, **kwargs):
        effects = super(TestEmitter, self).fire_event(event, **kwargs)
        self.fired_events[(event, self._hashable_kwargs(kwargs))] += 1
        return effects

    async def fire_event_async(self, event, pre_event=False, **kwargs):
        # async def/await replaces the deprecated @asyncio.coroutine /
        # yield from combination (removed in Python 3.11); the kwargs
        # normalization is now shared with fire_event, so list-valued
        # kwargs no longer raise TypeError only on the async path.
        effects = await super(TestEmitter, self).fire_event_async(
            event, pre_event=pre_event, **kwargs)
        self.fired_events[(event, self._hashable_kwargs(kwargs))] += 1
        return effects
|
|
|
|
|
2015-01-05 15:39:14 +01:00
|
|
|
|
2016-02-28 03:43:04 +01:00
|
|
|
def expectedFailureIfTemplate(templates):
    """
    Decorator for marking specific test as expected to fail only for some
    templates. Template name is compared as substring, so 'whonix' will
    handle both 'whonix-ws' and 'whonix-gw'.

    templates can be either a single string, or an iterable

    NOTE(review): this relies on the private unittest internals
    ``unittest.case._ExpectedFailure`` and ``unittest.case._UnexpectedSuccess``;
    ``_ExpectedFailure`` was removed in newer CPython versions -- confirm
    against the Python version this suite targets.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # self.template is set by the test class using this decorator
            template = self.template
            if isinstance(templates, str):
                # single string: substring match against that one name
                should_expect_fail = template in templates
            else:
                # iterable: substring match against any of the names
                should_expect_fail = any([template in x for x in templates])
            if should_expect_fail:
                try:
                    func(self, *args, **kwargs)
                except Exception:
                    # test failed as expected -- report "expected failure"
                    raise unittest.case._ExpectedFailure(sys.exc_info())
                # test passed although it was expected to fail
                raise unittest.case._UnexpectedSuccess()
            else:
                # Call directly:
                func(self, *args, **kwargs)
        return wrapper
    return decorator
|
2015-01-05 15:39:14 +01:00
|
|
|
|
2019-09-23 03:58:33 +02:00
|
|
|
|
|
|
|
def wait_on_fail(func):
    """Test decorator for debugging. It pause test execution on failure and wait
    for user input. It's useful to manually inspect system state just after test
    fails, before executing any cleanup.

    Usage: decorate a test you are debugging.
    DO IT ONLY TEMPORARILY, DO NOT COMMIT!
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            func(self, *args, **kwargs)
        except BaseException:
            # report the failure, then block on stdin before re-raising so
            # the system can be inspected prior to any cleanup
            print('FAIL\n')
            traceback.print_exc()
            print('Press return to continue:', end='')
            sys.stdout.flush()
            stdin_reader = asyncio.StreamReader(loop=self.loop)
            protocol_factory = (
                lambda: asyncio.StreamReaderProtocol(stdin_reader))
            _transport, _protocol = self.loop.run_until_complete(
                self.loop.connect_read_pipe(protocol_factory, sys.stdin))
            self.loop.run_until_complete(stdin_reader.readline())
            raise
    return wrapper
|
|
|
|
|
|
|
|
|
2015-10-17 00:10:15 +02:00
|
|
|
class _AssertNotRaisesContext(object):
|
|
|
|
"""A context manager used to implement TestCase.assertNotRaises methods.
|
|
|
|
|
|
|
|
Stolen from unittest and hacked. Regexp support stripped.
|
|
|
|
""" # pylint: disable=too-few-public-methods
|
|
|
|
|
|
|
|
def __init__(self, expected, test_case, expected_regexp=None):
|
|
|
|
if expected_regexp is not None:
|
|
|
|
raise NotImplementedError('expected_regexp is unsupported')
|
|
|
|
|
|
|
|
self.expected = expected
|
|
|
|
self.exception = None
|
|
|
|
|
|
|
|
self.failureException = test_case.failureException
|
|
|
|
|
|
|
|
|
|
|
|
def __enter__(self):
|
|
|
|
return self
|
|
|
|
|
|
|
|
|
|
|
|
def __exit__(self, exc_type, exc_value, tb):
|
|
|
|
if exc_type is None:
|
|
|
|
return True
|
|
|
|
|
|
|
|
if issubclass(exc_type, self.expected):
|
|
|
|
raise self.failureException(
|
2016-02-10 19:23:09 +01:00
|
|
|
"{!r} raised, traceback:\n{!s}".format(
|
|
|
|
exc_value, ''.join(traceback.format_tb(tb))))
|
2015-10-17 00:10:15 +02:00
|
|
|
else:
|
|
|
|
# pass through
|
|
|
|
return False
|
|
|
|
|
2017-10-03 11:56:55 +02:00
|
|
|
self.exception = exc_value # store for later retrieval
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2017-04-18 10:16:14 +02:00
|
|
|
class _QrexecPolicyContext(object):
    '''Context manager for SystemTestCase.qrexec_policy

    On entry it prepends a single policy rule for *service* to
    ``/etc/qubes-rpc/policy/<service>`` (creating the file if needed);
    on exit it removes that rule again, deleting the file only if this
    context created it.
    '''

    def __init__(self, service, source, destination, allow=True, action=None):
        # accept either VM objects (use their .name) or plain strings
        try:
            source = source.name
        except AttributeError:
            pass

        try:
            destination = destination.name
        except AttributeError:
            pass

        #: path of the policy file for this service
        self._filename = pathlib.Path('/etc/qubes-rpc/policy') / service
        if action is None:
            action = 'allow' if allow else 'deny'
        #: the exact rule line added on __enter__ and removed on __exit__
        self._rule = '{} {} {}\n'.format(source, destination, action)
        #: whether this context created the policy file itself
        self._did_create = False
        #: open file handle, kept between load() and save()
        self._handle = None

    def load(self):
        '''Open the policy file (creating it if missing) and return its
        lines; keeps the handle open for a subsequent save().'''
        if self._handle is None:
            try:
                self._handle = self._filename.open('r+')
            except FileNotFoundError:
                self._handle = self._filename.open('w+')
                self._did_create = True
        self._handle.seek(0)
        return self._handle.readlines()

    def save(self, rules):
        '''Overwrite the policy file with *rules* (a list of lines).'''
        assert self._handle is not None
        self._handle.truncate(0)
        self._handle.seek(0)
        self._handle.write(''.join(rules))
        self._handle.flush()

    def close(self):
        '''Close and forget the open policy-file handle.'''
        assert self._handle is not None
        self._handle.close()
        self._handle = None

    def __enter__(self):
        rules = self.load()
        # prepend so this rule takes precedence over existing ones
        rules.insert(0, self._rule)
        self.save(rules)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if not self._did_create:
            # file existed before: remove only our rule, keep the rest
            try:
                rules = self.load()
                rules.remove(self._rule)
                self.save(rules)
            finally:
                self.close()
        else:
            # we created the file, so remove it entirely
            self.close()
            self._filename.unlink()
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2016-10-24 15:15:06 +02:00
|
|
|
class substitute_entry_points(object):
    '''Monkey-patch pkg_resources to substitute one group in iter_entry_points

    This is for testing plugins, like device classes.

    :param str group: The group that is to be overloaded.
    :param str tempgroup: The substitute group.

    Inside this context, iterating over entry points of the overloaded group
    actually iterates over the substitute group instead.

    This context manager is stackable. To substitute more than one entry point
    group, just nest two contexts.
    ''' # pylint: disable=invalid-name

    def __init__(self, group, tempgroup):
        self.group = group
        self.tempgroup = tempgroup
        self._orig_iter_entry_points = None

    def _iter_entry_points(self, group, *args, **kwargs):
        # Redirect lookups of the overloaded group to the substitute one;
        # every other group passes through untouched.
        target = self.tempgroup if group == self.group else group
        return self._orig_iter_entry_points(target, *args, **kwargs)

    def __enter__(self):
        # stash the original and install our redirecting shim
        self._orig_iter_entry_points = pkg_resources.iter_entry_points
        pkg_resources.iter_entry_points = self._iter_entry_points
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # restore the original function unconditionally
        pkg_resources.iter_entry_points = self._orig_iter_entry_points
        self._orig_iter_entry_points = None
|
|
|
|
|
|
|
|
|
2015-01-05 14:41:59 +01:00
|
|
|
class QubesTestCase(unittest.TestCase):
    '''Base class for Qubes unit tests.

    Provides a per-test logger, an asyncio event loop with strict cleanup
    checks, garbage-collection leak detection for Qubes/libvirt objects,
    and extra assertion helpers (XML, devices, events).
    '''

    def __init__(self, *args, **kwargs):
        super(QubesTestCase, self).__init__(*args, **kwargs)
        self.longMessage = True
        # per-test logger named <module>.<class>.<test method>
        self.log = logging.getLogger('{}.{}.{}'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self._testMethodName))
        # make assertEqual on DeviceManager use assertDevicesEqual
        self.addTypeEqualityFunc(qubes.devices.DeviceManager,
            self.assertDevicesEqual)

        # event loop; assigned in setUp, removed in cleanup_loop
        self.loop = None


    def __str__(self):
        # "<module>/<class>/<test method>" -- used in test reports
        return '{}/{}/{}'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self._testMethodName)

    def setUp(self):
        super().setUp()
        # cleanups run in reverse registration order:
        # cleanup_traceback -> cleanup_loop -> cleanup_gc
        self.addCleanup(self.cleanup_gc)

        self.loop = asyncio.get_event_loop()
        self.addCleanup(self.cleanup_loop)
        self.addCleanup(self.cleanup_traceback)

    def cleanup_traceback(self):
        '''Remove local variables reference from tracebacks to allow garbage
        collector to clean all Qubes*() objects, otherwise file descriptors
        held by them will leak'''
        # NOTE(review): reads unittest's private self._outcome -- tied to a
        # specific unittest implementation; verify on Python upgrades.
        exc_infos = [e for test_case, e in self._outcome.errors
                     if test_case is self]
        if self._outcome.expectedFailure:
            exc_infos.append(self._outcome.expectedFailure)
        for exc_info in exc_infos:
            if exc_info is None:
                continue
            ex = exc_info[1]
            # walk the exception chain, dropping frame locals at each link
            while ex is not None:
                if isinstance(ex, qubes.exc.QubesVMError):
                    ex.vm = None
                traceback.clear_frames(ex.__traceback__)
                ex = ex.__context__

    def cleanup_gc(self):
        '''Fail the test if any Qubes/libvirt object survived the test.'''
        gc.collect()
        leaked = [obj for obj in gc.get_objects() + gc.garbage
            if isinstance(obj,
                (qubes.Qubes, qubes.vm.BaseVM,
                libvirt.virConnect, libvirt.virDomain))]

        if leaked:
            # best-effort leak diagnostics; objgraph is optional
            try:
                import objgraph
                # NOTE(review): `extra_info` is not defined in this chunk --
                # presumably a module-level helper elsewhere in the file;
                # confirm, otherwise this raises NameError when objgraph
                # is installed.
                objgraph.show_backrefs(leaked,
                    max_depth=15, extra_info=extra_info,
                    filename='/tmp/objgraph-{}.png'.format(self.id()))
            except ImportError:
                pass

        # do not keep leaked object references in locals()
        leaked = bool(leaked)
        assert not leaked

    def cleanup_loop(self):
        '''Check if the loop is empty'''
        # XXX BEWARE this is touching undocumented, implementation-specific
        # attributes of the loop. This is most certainly unsupported and likely
        # will break when messing with: Python version, kernel family, loop
        # implementation, a combination thereof, or other things.
        # KEYWORDS for searching:
        #   win32, SelectorEventLoop, ProactorEventLoop, uvloop, gevent

        global libvirt_event_impl

        # really destroy all objects that could have used loop and/or libvirt
        gc.collect()

        # Check for unfinished libvirt business.
        if libvirt_event_impl is not None:
            try:
                self.loop.run_until_complete(asyncio.wait_for(
                    libvirt_event_impl.drain(), timeout=4))
            except asyncio.TimeoutError:
                raise AssertionError('libvirt event impl drain timeout')

        # this is stupid, but apparently it requires two passes
        # to cleanup SIGCHLD handlers
        self.loop.stop()
        self.loop.run_forever()
        self.loop.stop()
        self.loop.run_forever()

        # Check there are no Tasks left.
        assert not self.loop._ready
        assert not self.loop._scheduled

        # Check the loop watches no descriptors.
        # NOTE the loop has a pipe for self-interrupting, created once per
        # lifecycle, and it is unwatched only at loop.close(); so we cannot just
        # check selector for non-emptiness
        assert len(self.loop._selector.get_map()) \
            == int(self.loop._ssock is not None)

        del self.loop

    def assertNotRaises(self, excClass, callableObj=None, *args, **kwargs):
        """Fail if an exception of class excClass is raised
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        raised, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.

        If called with callableObj omitted or None, will return a
        context object used like this::

             with self.assertRaises(SomeException):
                 do_something()

        The context manager keeps a reference to the exception as
        the 'exception' attribute. This allows you to inspect the
        exception after the assertion::

            with self.assertRaises(SomeException) as cm:
                do_something()
            the_exception = cm.exception
            self.assertEqual(the_exception.error_code, 3)
        """
        context = _AssertNotRaisesContext(excClass, self)
        if callableObj is None:
            return context
        with context:
            callableObj(*args, **kwargs)


    def assertXMLEqual(self, xml1, xml2, msg=''):
        '''Check for equality of two XML objects.

        Compares tag, text (stripped), attributes and, recursively,
        children of both elements; *msg* accumulates the element path
        for failure messages.

        :param xml1: first element
        :param xml2: second element
        :type xml1: :py:class:`lxml.etree._Element`
        :type xml2: :py:class:`lxml.etree._Element`
        '''

        self.assertEqual(xml1.tag, xml2.tag)
        msg += '/' + str(xml1.tag)

        if xml1.text is not None and xml2.text is not None:
            self.assertEqual(xml1.text.strip(), xml2.text.strip(), msg)
        else:
            self.assertEqual(xml1.text, xml2.text, msg)
        self.assertCountEqual(xml1.keys(), xml2.keys(), msg)
        for key in xml1.keys():
            self.assertEqual(xml1.get(key), xml2.get(key), msg)

        self.assertEqual(len(xml1), len(xml2), msg + ' children count')
        for child1, child2 in zip(xml1, xml2):
            self.assertXMLEqual(child1, child2, msg=msg)

    def assertDevicesEqual(self, devices1, devices2, msg=None):
        '''Check that two device collections hold the same devices,
        compared per device class by string representation.'''
        self.assertEqual(devices1.keys(), devices2.keys(), msg)
        for dev_class in devices1.keys():
            self.assertEqual(
                [str(dev) for dev in devices1[dev_class]],
                [str(dev) for dev in devices2[dev_class]],
                "Devices of class {} differs{}".format(
                    dev_class, (": " + msg) if msg else "")
            )

    def assertEventFired(self, subject, event, kwargs=None):
        '''Check whether event was fired on given emitter and fail if it did
        not.

        :param subject: emitter which is being checked
        :type emitter: :py:class:`TestEmitter`
        :param str event: event identifier
        :param dict kwargs: when given, all items must appear in kwargs passed \
            to an event
        '''

        # sentinel: never equal to any real value, so a missing key
        # cannot accidentally match
        will_not_match = object()
        for ev, ev_kwargs in subject.fired_events:
            if ev != event:
                continue
            if kwargs is not None:
                ev_kwargs = dict(ev_kwargs)
                if any(ev_kwargs.get(k, will_not_match) != v
                        for k, v in kwargs.items()):
                    continue

            return

        self.fail('event {!r} {}did not fire on {!r}'.format(
            event, ('' if kwargs is None else '{!r} '.format(kwargs)), subject))


    def assertEventNotFired(self, subject, event, kwargs=None):
        '''Check whether event was fired on given emitter. Fail if it did.

        :param subject: emitter which is being checked
        :type emitter: :py:class:`TestEmitter`
        :param str event: event identifier
        :param list kwargs: when given, all items must appear in kwargs passed \
            to an event
        '''

        # sentinel: never equal to any real value, so a missing key
        # cannot accidentally match
        will_not_match = object()
        for ev, ev_kwargs in subject.fired_events:
            if ev != event:
                continue
            if kwargs is not None:
                ev_kwargs = dict(ev_kwargs)
                if any(ev_kwargs.get(k, will_not_match) != v
                        for k, v in kwargs.items()):
                    continue

            self.fail('event {!r} {}did fire on {!r}'.format(
                event,
                ('' if kwargs is None else '{!r} '.format(kwargs)),
                subject))

        return


    def assertXMLIsValid(self, xml, file=None, schema=None):
        '''Check whether given XML fulfills Relax NG schema.

        Schema can be given in a couple of ways:

        - As separate file. This is most common, and also the only way to
          handle file inclusion. Call with file name as second argument.

        - As string containing actual schema. Put that string in *schema*
          keyword argument.

        :param lxml.etree._Element xml: XML element instance to check
        :param str file: filename of Relax NG schema
        :param str schema: optional explicit schema string
        ''' # pylint: disable=redefined-builtin

        if schema is not None and file is None:
            relaxng = schema
            if isinstance(relaxng, str):
                relaxng = lxml.etree.XML(relaxng)
            # pylint: disable=protected-access
            if isinstance(relaxng, lxml.etree._Element):
                relaxng = lxml.etree.RelaxNG(relaxng)

        elif file is not None and schema is None:
            if not os.path.isabs(file):
                # resolve relative schema names against the git checkout
                # (if any), then the installed location
                basedirs = ['/usr/share/doc/qubes/relaxng']
                if in_git:
                    basedirs.insert(0, os.path.join(in_git, 'relaxng'))
                for basedir in basedirs:
                    abspath = os.path.join(basedir, file)
                    if os.path.exists(abspath):
                        file = abspath
                        break
            relaxng = lxml.etree.RelaxNG(file=file)

        else:
            raise TypeError("There should be excactly one of 'file' and "
                "'schema' arguments specified.")

        # We have to be extra careful here in case someone messed up with
        # self.failureException. It should by default be AssertionError, just
        # what is spewed by RelaxNG(), but who knows what might happen.
        try:
            relaxng.assert_(xml)
        except self.failureException:
            raise
        except AssertionError as e:
            self.fail(str(e))

    @staticmethod
    def make_vm_name(name, class_teardown=False):
        # prefix the name so remove_test_vms() can claim it later
        if class_teardown:
            return CLSVMPREFIX + name
        else:
            return VMPREFIX + name
|
|
|
|
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2017-07-12 19:01:15 +02:00
|
|
|
class SystemTestCase(QubesTestCase):
|
2016-02-10 17:16:06 +01:00
|
|
|
"""
|
|
|
|
Mixin for integration tests. All the tests here should use self.app
|
|
|
|
object and when need qubes.xml path - should use :py:data:`XMLPATH`
|
|
|
|
defined in this file.
|
2017-07-12 19:01:15 +02:00
|
|
|
Every VM created by test, must use :py:meth:`SystemTestCase.make_vm_name`
|
2016-02-10 17:16:06 +01:00
|
|
|
for VM name.
|
|
|
|
By default self.app represents empty collection, if anything is needed
|
|
|
|
there from the real collection it can be imported from self.host_app in
|
2017-07-12 19:01:15 +02:00
|
|
|
:py:meth:`SystemTestCase.setUp`. But *can not be modified* in any way -
|
2016-02-10 17:16:06 +01:00
|
|
|
this include both changing attributes in
|
2017-07-12 19:01:15 +02:00
|
|
|
:py:attr:`SystemTestCase.host_app` and modifying files of such imported
|
2016-02-10 17:16:06 +01:00
|
|
|
VM. If test need to make some modification, it must clone the VM first.
|
|
|
|
|
|
|
|
If some group of tests needs class-wide initialization, first of all the
|
|
|
|
author should consider if it is really needed. But if so, setUpClass can
|
|
|
|
be used to create Qubes(CLASS_XMLPATH) object and create/import required
|
|
|
|
stuff there. VMs created in :py:meth:`TestCase.setUpClass` should
|
|
|
|
use self.make_vm_name('...', class_teardown=True) for name creation.
|
2017-07-25 23:15:24 +02:00
|
|
|
Such (group of) test need to take care about
|
|
|
|
:py:meth:`TestCase.tearDownClass` implementation itself.
|
2016-02-10 17:16:06 +01:00
|
|
|
"""
|
|
|
|
    # noinspection PyAttributeOutsideInit
    def setUp(self):
        """Prepare an isolated Qubes() collection and start qubesd servers.

        Skips the test entirely when not running in dom0.  Any state
        created here is torn down by :py:meth:`cleanup_app` (registered
        via addCleanup).
        """
        if not in_dom0:
            self.skipTest('outside dom0')
        super(SystemTestCase, self).setUp()
        self.remove_test_vms()

        # lazily install a single, process-wide syslog handler so test
        # progress is visible in the system log
        global ha_syslog
        if ha_syslog is None:
            ha_syslog = logging.handlers.SysLogHandler('/dev/log')
            ha_syslog.setFormatter(
                logging.Formatter('%(name)s[%(process)d]: %(message)s'))
            logging.root.addHandler(ha_syslog)

        self.log.critical('starting')

        # need some information from the real qubes.xml - at least installed
        # templates; should not be used for testing, only to initialize self.app
        self.host_app = qubes.Qubes(os.path.join(
            qubes.config.qubes_base_dir,
            qubes.config.system_path['qubes_store_filename']))
        # prefer the class-wide snapshot (created by setUpClass) if present
        if os.path.exists(CLASS_XMLPATH):
            shutil.copy(CLASS_XMLPATH, XMLPATH)
        else:
            shutil.copy(self.host_app.store, XMLPATH)
        self.app = qubes.Qubes(XMLPATH)
        # make child tools (qvm-*) operate on the test collection too
        os.environ['QUBES_XML_PATH'] = XMLPATH
        self.app.register_event_handlers()

        # start qubesd API servers bound to the test collection
        self.qubesd = self.loop.run_until_complete(
            qubes.api.create_servers(
                qubes.api.admin.QubesAdminAPI,
                qubes.api.internal.QubesInternalAPI,
                qubes.api.misc.QubesMiscAPI,
                app=self.app, debug=True))

        self.addCleanup(self.cleanup_app)

        self.app.add_handler('domain-delete', self.close_qdb_on_remove)
|
|
|
|
|
|
|
|
def close_qdb_on_remove(self, app, event, vm, **kwargs):
|
|
|
|
# only close QubesDB connection, do not perform other (destructive)
|
|
|
|
# actions of vm.close()
|
|
|
|
if vm._qdb_connection_watch is not None:
|
|
|
|
asyncio.get_event_loop().remove_reader(
|
|
|
|
vm._qdb_connection_watch.watch_fd())
|
|
|
|
vm._qdb_connection_watch.close()
|
|
|
|
vm._qdb_connection_watch = None
|
2017-08-31 20:22:05 +02:00
|
|
|
|
|
|
|
    def cleanup_app(self):
        """Undo :py:meth:`setUp`: remove test VMs, stop qubesd servers,
        abort client connections and release all qubes-related resources
        (most importantly file descriptors).
        """
        self.remove_test_vms()

        # shut down API servers and remove their socket files
        server = None
        for server in self.qubesd:
            for sock in server.sockets:
                os.unlink(sock.getsockname())
            server.close()
        del server

        # close all existing connections, especially this will interrupt
        # running admin.Events calls, which do keep reference to Qubes() and
        # libvirt connection
        conn = None
        for conn in qubes.api.QubesDaemonProtocol.connections:
            if conn.transport:
                conn.transport.abort()
        del conn

        self.loop.run_until_complete(asyncio.wait([
            server.wait_closed() for server in self.qubesd]))
        del self.qubesd

        # remove all references to any complex qubes objects, to release
        # resources - most importantly file descriptors; this object will live
        # during the whole test run, but all the file descriptors would be
        # depleted earlier
        self.app.close()
        self.host_app.close()
        del self.app
        del self.host_app
        for attr in dir(self):
            obj_type = type(getattr(self, attr))
            if obj_type.__module__.startswith('qubes'):
                delattr(self, attr)

        # then trigger garbage collector to really destroy those objects
        gc.collect()
|
|
|
|
|
2016-02-10 17:11:50 +01:00
|
|
|
def init_default_template(self, template=None):
|
|
|
|
if template is None:
|
|
|
|
template = self.host_app.default_template
|
2017-01-18 22:16:46 +01:00
|
|
|
elif isinstance(template, str):
|
2016-02-11 01:46:25 +01:00
|
|
|
template = self.host_app.domains[template]
|
2016-02-10 17:11:50 +01:00
|
|
|
|
2017-06-20 16:15:16 +02:00
|
|
|
self.app.default_template = str(template)
|
2016-02-10 17:11:50 +01:00
|
|
|
|
2016-03-07 01:19:10 +01:00
|
|
|
    def init_networking(self):
        """Configure the test collection's default netvm based on the host
        configuration; skip the test when no suitable running netvm exists.
        """
        if not self.app.default_template:
            self.skipTest('Default template required for testing networking')
        default_netvm = self.host_app.default_netvm
        # if testing Whonix Workstation based VMs, try to use sys-whonix instead
        if self.app.default_template.name.startswith('whonix-ws'):
            if 'sys-whonix' in self.host_app.domains:
                default_netvm = self.host_app.domains['sys-whonix']
        if default_netvm is None:
            self.skipTest('Default netvm required')
        if not default_netvm.is_running():
            self.skipTest('VM {} required to be running'.format(
                default_netvm.name))

        self.app.default_netvm = str(default_netvm)
|
2016-03-07 01:19:10 +01:00
|
|
|
|
2016-10-19 01:58:43 +02:00
|
|
|
|
|
|
|
def _find_pool(self, volume_group, thin_pool):
|
|
|
|
''' Returns the pool matching the specified ``volume_group`` &
|
|
|
|
``thin_pool``, or None.
|
|
|
|
'''
|
|
|
|
pools = [p for p in self.app.pools
|
|
|
|
if issubclass(p.__class__, qubes.storage.lvm.ThinPool)]
|
|
|
|
for pool in pools:
|
|
|
|
if pool.volume_group == volume_group \
|
|
|
|
and pool.thin_pool == thin_pool:
|
|
|
|
return pool
|
|
|
|
return None
|
|
|
|
|
|
|
|
    def init_lvm_pool(self):
        """Ensure an LVM thin pool (DEFAULT_LVM_POOL) is registered in the
        test collection as ``self.pool``; skip the test when the pool does
        not exist on the host.  Sets ``self.created_pool`` when the pool
        had to be added (so teardown knows to remove it).
        """
        volume_group, thin_pool = DEFAULT_LVM_POOL.split('/', 1)
        path = "/dev/mapper/{!s}-{!s}".format(volume_group, thin_pool)
        if not os.path.exists(path):
            self.skipTest('LVM thin pool {!r} does not exist'.
                format(DEFAULT_LVM_POOL))
        self.pool = self._find_pool(volume_group, thin_pool)
        if not self.pool:
            self.pool = self.loop.run_until_complete(
                self.app.add_pool(**POOL_CONF))
            self.created_pool = True
|
|
|
|
|
2017-07-25 23:15:24 +02:00
|
|
|
    def _remove_vm_qubes(self, vm):
        """Remove one VM: drop it from the collection, wipe its storage,
        then double-check with libvirt that the domain is really gone.

        All steps are best-effort (bare excepts are deliberate) - partial
        failures must not abort test cleanup.
        """
        vmname = vm.name
        app = vm.app

        try:
            del app.domains[vm.qid]
        except KeyError:
            # already gone from the collection
            pass

        try:
            self.loop.run_until_complete(vm.remove_from_disk())
        except: # pylint: disable=bare-except
            pass

        vm.close()
        del vm

        app.save()
        del app

        # Now ensure it really went away. This may not have happened,
        # for example if vm.libvirt_domain malfunctioned.
        try:
            conn = libvirt.open(qubes.config.defaults['libvirt_uri'])
        except: # pylint: disable=bare-except
            pass
        else:
            try:
                dom = conn.lookupByName(vmname)
            except: # pylint: disable=bare-except
                pass
            else:
                self._remove_vm_libvirt(dom)
            conn.close()

        self._remove_vm_disk(vmname)
|
2015-10-17 00:10:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
    @staticmethod
    def _remove_vm_libvirt(dom):
        """Forcefully stop (if running) and undefine a libvirt domain."""
        try:
            dom.destroy()
        except libvirt.libvirtError: # not running
            pass
        dom.undefine()
|
|
|
|
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def _remove_vm_disk(vmname):
|
|
|
|
for dirspec in (
|
|
|
|
'qubes_appvms_dir',
|
|
|
|
'qubes_servicevms_dir',
|
|
|
|
'qubes_templates_dir'):
|
2017-07-01 23:25:47 +02:00
|
|
|
dirpath = os.path.join(qubes.config.qubes_base_dir,
|
2015-10-17 00:10:15 +02:00
|
|
|
qubes.config.system_path[dirspec], vmname)
|
|
|
|
if os.path.exists(dirpath):
|
|
|
|
if os.path.isdir(dirpath):
|
|
|
|
shutil.rmtree(dirpath)
|
|
|
|
else:
|
|
|
|
os.unlink(dirpath)
|
|
|
|
|
2016-09-29 01:57:37 +02:00
|
|
|
    @staticmethod
    def _remove_vm_disk_lvm(prefix=VMPREFIX):
        ''' Remove LVM volumes with given prefix

        This is "a bit" drastic, as it removes volumes regardless of volume
        group, thin pool etc. But we assume no important data on test system.
        '''
        try:
            volumes = subprocess.check_output(
                ['lvs', '--noheadings', '-o', 'vg_name,name',
                    '--separator', '/']).decode()
            # volume names are reported as "vg/vm-<name>..."
            if ('/vm-' + prefix) not in volumes:
                return
            subprocess.check_call(['sudo', 'lvremove', '-f'] +
                [vol.strip() for vol in volumes.splitlines()
                    if ('/vm-' + prefix) in vol],
                stdout=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            # lvs/lvremove unavailable or failed - best-effort cleanup only
            pass
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2017-07-25 23:15:24 +02:00
|
|
|
    def remove_vms(self, vms):
        """Safely remove the given VMs and their interdependencies.

        Order matters: take startup locks, kill running domains, break
        netvm/dispvm references, then remove in reverse topological order
        with respect to the template relation.
        """
        vms = list(vms)
        if not vms:
            return
        # workaround for https://phabricator.whonix.org/T930
        # unregister all the VMs from sys-whonix, otherwise it will start them
        # again (possibly in further test)
        if 'whonix' in self.app.default_netvm.name:
            for vm in vms:
                try:
                    self.loop.run_until_complete(
                        self.app.default_netvm.run_service_for_stdio(
                            'whonix.NewStatus+{}_shutdown'.format(vm.name)))
                except:
                    pass

        locked_vms = set()
        # first take startup lock
        for vm in vms:
            self.loop.run_until_complete(vm.startup_lock.acquire())
            locked_vms.add(vm)

        # first kill all the domains, to avoid side effects of changing netvm
        for vm in vms:
            try:
                # XXX .is_running() may throw libvirtError if undefined
                if vm.is_running():
                    self.loop.run_until_complete(vm._kill_locked())
            except: # pylint: disable=bare-except
                pass
        # break dependencies
        for vm in vms:
            vm.default_dispvm = None
            vm.netvm = None
        # take app instance from any VM to be removed
        app = vms[0].app
        if app.default_dispvm in vms:
            app.default_dispvm = None
        if app.default_netvm in vms:
            app.default_netvm = None
        del app
        # then remove in reverse topological order (wrt template), using naive
        # algorithm
        # this heavily depends on lack of template loops, but those are
        # impossible
        while vms:
            vm = vms.pop(0)
            # make sure that all connected VMs are going to be removed,
            # otherwise this will loop forever
            child_vms = list(getattr(vm, 'appvms', []))
            assert all(x in vms for x in child_vms)
            if child_vms:
                # if still something use this VM, put it at the end of queue
                # and try next one
                vms.append(vm)
                continue
            self._remove_vm_qubes(vm)

        # release startup_lock, if anything was waiting at vm.start(),
        # it will detect the VM is gone
        for vm in locked_vms:
            vm.startup_lock.release()
|
|
|
|
|
2017-07-25 23:15:24 +02:00
|
|
|
    def remove_test_vms(self, xmlpath=XMLPATH, prefix=VMPREFIX):
        '''Aggressively remove any domain that has name in testing namespace.

        Cleanup happens at three levels: the qubes collection, libvirt,
        and finally raw files / LVM volumes left on disk.

        :param prefix: name prefix of VMs to remove, can be a list of prefixes
        '''

        if isinstance(prefix, str):
            prefixes = [prefix]
        else:
            prefixes = prefix
        del prefix
        # first, remove them Qubes-way
        if os.path.exists(xmlpath):
            try:
                # reuse already-open collections when available, to avoid
                # reopening (and re-locking) qubes.xml
                try:
                    app = self.app
                except AttributeError:
                    app = qubes.Qubes(xmlpath)
                try:
                    host_app = self.host_app
                except AttributeError:
                    host_app = qubes.Qubes()
                # also remove orphaned DispVMs not known to the host collection
                self.remove_vms([vm for vm in app.domains
                    if any(vm.name.startswith(prefix) for prefix in prefixes) or
                        (isinstance(vm, qubes.vm.dispvm.DispVM) and vm.name
                            not in host_app.domains)])
                if not hasattr(self, 'host_app'):
                    host_app.close()
                del host_app
                if not hasattr(self, 'app'):
                    app.close()
                del app
            except qubes.exc.QubesException:
                pass
            os.unlink(xmlpath)

        # now remove what was only in libvirt
        conn = libvirt.open(qubes.config.defaults['libvirt_uri'])
        for dom in conn.listAllDomains():
            if any(dom.name().startswith(prefix) for prefix in prefixes):
                self._remove_vm_libvirt(dom)
        conn.close()

        # finally remove anything that is left on disk
        vmnames = set()
        for dirspec in (
                'qubes_appvms_dir',
                'qubes_servicevms_dir',
                'qubes_templates_dir'):
            dirpath = os.path.join(qubes.config.qubes_base_dir,
                qubes.config.system_path[dirspec])
            if not os.path.exists(dirpath):
                continue
            for name in os.listdir(dirpath):
                if any(name.startswith(prefix) for prefix in prefixes):
                    vmnames.add(name)
        for vmname in vmnames:
            self._remove_vm_disk(vmname)
        for prefix in prefixes:
            self._remove_vm_disk_lvm(prefix)
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2017-10-03 11:56:55 +02:00
|
|
|
def qrexec_policy(self, service, source, destination, allow=True,
|
|
|
|
action=None):
|
2016-03-03 01:07:23 +01:00
|
|
|
"""
|
|
|
|
Allow qrexec calls for duration of the test
|
|
|
|
:param service: service name
|
|
|
|
:param source: source VM name
|
|
|
|
:param destination: destination VM name
|
2017-10-03 11:56:55 +02:00
|
|
|
:param allow: add rule with 'allow' action, otherwise 'deny'
|
|
|
|
:param action: custom action, if specified *allow* argument is ignored
|
2016-03-03 01:07:23 +01:00
|
|
|
:return:
|
|
|
|
"""
|
|
|
|
|
2017-10-03 11:56:55 +02:00
|
|
|
return _QrexecPolicyContext(service, source, destination,
|
|
|
|
allow=allow, action=action)
|
2016-03-03 01:07:23 +01:00
|
|
|
|
2018-10-15 05:08:25 +02:00
|
|
|
@asyncio.coroutine
|
|
|
|
def wait_for_window_hide_coro(self, title, winid, timeout=30):
|
|
|
|
"""
|
|
|
|
Wait for window do disappear
|
|
|
|
:param winid: window id
|
|
|
|
:return:
|
|
|
|
"""
|
|
|
|
wait_count = 0
|
|
|
|
while subprocess.call(['xdotool', 'getwindowname', str(winid)],
|
|
|
|
stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) == 0:
|
|
|
|
wait_count += 1
|
|
|
|
if wait_count > timeout * 10:
|
|
|
|
self.fail("Timeout while waiting for {}({}) window to "
|
|
|
|
"disappear".format(title, winid))
|
|
|
|
yield from asyncio.sleep(0.1)
|
|
|
|
|
|
|
|
@asyncio.coroutine
|
|
|
|
def wait_for_window_coro(self, title, search_class=False, timeout=30,
|
|
|
|
show=True):
|
2016-02-10 17:18:27 +01:00
|
|
|
"""
|
|
|
|
Wait for a window with a given title. Depending on show parameter,
|
|
|
|
it will wait for either window to show or to disappear.
|
|
|
|
|
|
|
|
:param title: title of the window to wait for
|
|
|
|
:param timeout: timeout of the operation, in seconds
|
|
|
|
:param show: if True - wait for the window to be visible,
|
|
|
|
otherwise - to not be visible
|
2018-10-15 05:08:25 +02:00
|
|
|
:param search_class: search based on window class instead of title
|
|
|
|
:return: window id of found window, if show=True
|
2016-02-10 17:18:27 +01:00
|
|
|
"""
|
|
|
|
|
2018-10-15 05:08:25 +02:00
|
|
|
xdotool_search = ['xdotool', 'search', '--onlyvisible']
|
|
|
|
if search_class:
|
|
|
|
xdotool_search.append('--class')
|
|
|
|
else:
|
|
|
|
xdotool_search.append('--name')
|
|
|
|
if show:
|
|
|
|
xdotool_search.append('--sync')
|
|
|
|
if not show:
|
|
|
|
try:
|
|
|
|
winid = subprocess.check_output(xdotool_search + [title],
|
|
|
|
stderr=subprocess.DEVNULL).decode()
|
|
|
|
except subprocess.CalledProcessError:
|
|
|
|
# already gone
|
|
|
|
return
|
|
|
|
yield from self.wait_for_window_hide_coro(winid, title,
|
|
|
|
timeout=timeout)
|
|
|
|
return
|
|
|
|
|
|
|
|
winid = None
|
|
|
|
while not winid:
|
|
|
|
p = yield from asyncio.create_subprocess_exec(
|
|
|
|
*xdotool_search, title,
|
|
|
|
stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
|
|
|
|
try:
|
|
|
|
(winid, _) = yield from asyncio.wait_for(
|
|
|
|
p.communicate(), timeout)
|
|
|
|
# don't check exit code, getting winid on stdout is enough
|
|
|
|
# indicator of success; specifically ignore xdotool failing
|
|
|
|
# with BadWindow or such - when some window appears only for a
|
|
|
|
# moment by xdotool didn't manage to get its properties
|
|
|
|
except asyncio.TimeoutError:
|
|
|
|
self.fail(
|
|
|
|
"Timeout while waiting for {} window to show".format(title))
|
|
|
|
return winid.decode().strip()
|
|
|
|
|
|
|
|
def wait_for_window(self, *args, **kwargs):
|
|
|
|
"""
|
|
|
|
Wait for a window with a given title. Depending on show parameter,
|
|
|
|
it will wait for either window to show or to disappear.
|
|
|
|
|
|
|
|
:param title: title of the window to wait for
|
|
|
|
:param timeout: timeout of the operation, in seconds
|
|
|
|
:param show: if True - wait for the window to be visible,
|
|
|
|
otherwise - to not be visible
|
|
|
|
:param search_class: search based on window class instead of title
|
|
|
|
:return: window id of found window, if show=True
|
|
|
|
"""
|
|
|
|
return self.loop.run_until_complete(
|
|
|
|
self.wait_for_window_coro(*args, **kwargs))
|
2016-02-10 17:18:27 +01:00
|
|
|
|
|
|
|
def enter_keys_in_window(self, title, keys):
|
|
|
|
"""
|
|
|
|
Search for window with given title, then enter listed keys there.
|
|
|
|
The function will wait for said window to appear.
|
|
|
|
|
|
|
|
:param title: title of window
|
|
|
|
:param keys: list of keys to enter, as for `xdotool key`
|
|
|
|
:return: None
|
|
|
|
"""
|
|
|
|
|
|
|
|
# 'xdotool search --sync' sometimes crashes on some race when
|
|
|
|
# accessing window properties
|
|
|
|
self.wait_for_window(title)
|
|
|
|
command = ['xdotool', 'search', '--name', title,
|
2016-06-29 23:50:52 +02:00
|
|
|
'windowactivate', '--sync',
|
2016-02-10 17:18:27 +01:00
|
|
|
'key'] + keys
|
|
|
|
subprocess.check_call(command)
|
|
|
|
|
|
|
|
def shutdown_and_wait(self, vm, timeout=60):
|
2018-10-21 05:11:24 +02:00
|
|
|
try:
|
|
|
|
self.loop.run_until_complete(vm.shutdown(wait=True, timeout=timeout))
|
|
|
|
except qubes.exc.QubesException:
|
|
|
|
name = vm.name
|
|
|
|
del vm
|
|
|
|
self.fail("Timeout while waiting for VM {} shutdown".format(name))
|
2016-02-10 17:18:27 +01:00
|
|
|
|
2016-02-26 10:59:20 +01:00
|
|
|
    def prepare_hvm_system_linux(self, vm, init_script, extra_files=None):
        """Build a minimal bootable Linux system inside vm's root image.

        Partitions the root image, installs grub2, copies dom0's running
        kernel and generates a dracut initramfs that runs *init_script*
        as a pre-pivot hook.  Skips the test when grub2/dracut tooling is
        not available in dom0.

        :param vm: HVM whose root_img gets (re)initialized
        :param init_script: shell script content executed at boot
        :param extra_files: optional list of extra files for the initramfs
        """
        if not os.path.exists('/usr/lib/grub/i386-pc'):
            self.skipTest('grub2 not installed')
        if not spawn.find_executable('grub2-install'):
            self.skipTest('grub2-tools not installed')
        if not spawn.find_executable('dracut'):
            self.skipTest('dracut not installed')
        # create a single partition
        p = subprocess.Popen(['sfdisk', '-q', '-L', vm.storage.root_img],
            stdin=subprocess.PIPE,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT)
        p.communicate('2048,\n')
        assert p.returncode == 0, 'sfdisk failed'
        # TODO: check if root_img is really file, not already block device
        p = subprocess.Popen(['sudo', 'losetup', '-f', '-P', '--show',
            vm.storage.root_img], stdout=subprocess.PIPE)
        (loopdev, _) = p.communicate()
        loopdev = loopdev.strip()
        looppart = loopdev + 'p1'
        assert p.returncode == 0, 'losetup failed'
        subprocess.check_call(['sudo', 'mkfs.ext2', '-q', '-F', looppart])
        mountpoint = tempfile.mkdtemp()
        subprocess.check_call(['sudo', 'mount', looppart, mountpoint])
        try:
            subprocess.check_call(['sudo', 'grub2-install',
                '--target', 'i386-pc',
                '--modules', 'part_msdos ext2',
                '--boot-directory', mountpoint, loopdev],
                stderr=subprocess.DEVNULL
            )
            grub_cfg = '{}/grub2/grub.cfg'.format(mountpoint)
            # hand the mountpoint back to the test user so files can be
            # written without sudo
            subprocess.check_call(
                ['sudo', 'chown', '-R', os.getlogin(), mountpoint])
            with open(grub_cfg, 'w') as f:
                f.write(
                    "set timeout=1\n"
                    "menuentry 'Default' {\n"
                    " linux /vmlinuz root=/dev/xvda1 "
                    "rd.driver.blacklist=bochs_drm "
                    "rd.driver.blacklist=uhci_hcd console=hvc0\n"
                    " initrd /initrd\n"
                    "}"
                )
            p = subprocess.Popen(['uname', '-r'], stdout=subprocess.PIPE)
            (kernel_version, _) = p.communicate()
            kernel_version = kernel_version.strip()
            kernel = '/boot/vmlinuz-{}'.format(kernel_version)
            shutil.copy(kernel, os.path.join(mountpoint, 'vmlinuz'))
            init_path = os.path.join(mountpoint, 'init')
            with open(init_path, 'w') as f:
                f.write(init_script)
            os.chmod(init_path, 0o755)
            dracut_args = [
                '--kver', kernel_version,
                '--include', init_path,
                '/usr/lib/dracut/hooks/pre-pivot/initscript.sh',
                '--no-hostonly', '--nolvmconf', '--nomdadmconf',
            ]
            if extra_files:
                dracut_args += ['--install', ' '.join(extra_files)]
            subprocess.check_call(
                ['dracut'] + dracut_args + [os.path.join(mountpoint,
                    'initrd')],
                stderr=subprocess.DEVNULL
            )
        finally:
            # always undo the mount and the loop device, even on failure
            subprocess.check_call(['sudo', 'umount', mountpoint])
            shutil.rmtree(mountpoint)
            subprocess.check_call(['sudo', 'losetup', '-d', loopdev])
|
2016-02-10 17:18:27 +01:00
|
|
|
|
2017-12-14 21:58:56 +01:00
|
|
|
    def create_bootable_iso(self):
        '''Create simple bootable ISO image.
        Type 'poweroff' to it to terminate that VM.

        Returns the path to the generated image; the file is removed at
        test cleanup.  Skips the test if syslinux/genisoimage are missing.
        '''
        isolinux_cfg = (
            'prompt 1\n'
            'label poweroff\n'
            ' kernel poweroff.c32\n'
        )
        output_fd, output_path = tempfile.mkstemp('.iso')
        with tempfile.TemporaryDirectory() as tmp_dir:
            try:
                shutil.copy('/usr/share/syslinux/isolinux.bin', tmp_dir)
                shutil.copy('/usr/share/syslinux/ldlinux.c32', tmp_dir)
                shutil.copy('/usr/share/syslinux/poweroff.c32', tmp_dir)
                with open(os.path.join(tmp_dir, 'isolinux.cfg'), 'w') as cfg:
                    cfg.write(isolinux_cfg)
                subprocess.check_call(['genisoimage', '-o', output_path,
                    '-c', 'boot.cat',
                    '-b', 'isolinux.bin',
                    '-no-emul-boot',
                    '-boot-load-size', '4',
                    '-boot-info-table',
                    '-q',
                    tmp_dir])
            except FileNotFoundError:
                self.skipTest('syslinux or genisoimage not installed')
        os.close(output_fd)
        self.addCleanup(os.unlink, output_path)
        return output_path
|
|
|
|
|
2017-06-20 16:14:16 +02:00
|
|
|
def create_local_file(self, filename, content, mode='w'):
|
|
|
|
with open(filename, mode) as file:
|
|
|
|
file.write(content)
|
|
|
|
self.addCleanup(os.unlink, filename)
|
|
|
|
|
|
|
|
def create_remote_file(self, vm, filename, content):
|
|
|
|
self.loop.run_until_complete(vm.run_for_stdio(
|
|
|
|
'cat > {}'.format(shlex.quote(filename)),
|
|
|
|
user='root', input=content.encode('utf-8')))
|
|
|
|
|
2017-06-21 04:45:46 +02:00
|
|
|
    @asyncio.coroutine
    def wait_for_session(self, vm):
        """Wait until the GUI session inside *vm* is ready, using the
        qubes.WaitForSession service.  Raises asyncio.TimeoutError when
        the session does not come up in time.
        """
        timeout = 30
        if getattr(vm, 'template', None) and 'whonix-ws' in vm.template.name:
            # first boot of whonix-ws takes more time because of /home
            # initialization, including Tor Browser copying
            timeout = 120
        yield from asyncio.wait_for(
            vm.run_service_for_stdio(
                'qubes.WaitForSession', input=vm.default_user.encode()),
            timeout=timeout)
|
2017-06-21 04:45:46 +02:00
|
|
|
|
2015-10-17 00:10:15 +02:00
|
|
|
|
2017-08-28 14:28:17 +02:00
|
|
|
# cached result of list_templates(); None until first call
_templates = None
def list_templates():
    '''Returns tuple of template names available in the system.'''
    global _templates
    if _templates is None:
        # an explicit list in the environment takes precedence over
        # inspecting the real qubes.xml
        if 'QUBES_TEST_TEMPLATES' in os.environ:
            _templates = os.environ['QUBES_TEST_TEMPLATES'].split()
    if _templates is None:
        try:
            app = qubes.Qubes()
            # Windows templates are excluded - the tests assume Linux tools
            _templates = tuple(vm.name for vm in app.domains
                if isinstance(vm, qubes.vm.templatevm.TemplateVM) and
                    vm.features.get('os', None) != 'Windows')
            app.close()
            del app
        except OSError:
            # no qubes.xml available (e.g. running outside dom0)
            _templates = ()
    return _templates
|
|
|
|
|
2018-04-24 18:30:55 +02:00
|
|
|
def create_testcases_for_templates(name, *bases, module, **kwds):
    '''Do-it-all helper for generating per-template tests via load_tests proto

    This does several things:
        - creates per-template classes
        - adds them to module's :py:func:`globals`
        - returns an iterable suitable for passing to loader.loadTestsFromNames

    TestCase classes created by this function have implicit `.template`
    attribute, which contains name of the respective template. They are also
    named with given prefix, underscore and template name. If template name
    contains characters not valid as part of Python identifier, they are
    impossible to get via standard ``.`` operator, though :py:func:`getattr` is
    still usable.

    >>> class MyTestsMixIn:
    ...     def test_000_my_test(self):
    ...         assert self.template.startswith('debian')
    >>> def load_tests(loader, tests, pattern):
    ...     tests.addTests(loader.loadTestsFromNames(
    ...         qubes.tests.create_testcases_for_templates(
    ...             'TC_00_MyTests', MyTestsMixIn, qubes.tests.SystemTestCase,
    ...             module=sys.modules[__name__])))

    *NOTE* adding ``module=sys.modules[__name__]`` is *mandatory*, and to allow
    enforcing this, it uses keyword-only argument syntax, which is only in
    Python 3.
    '''
    # Do not attempt to grab the module from traceback, since we are actually
    # a generator and loadTestsFromNames may also be a generator, so it's not
    # possible to correctly guess frame from stack. Explicit is better than
    # implicit!

    for template in list_templates():
        clsname = name + '_' + template
        if hasattr(module, clsname):
            # class already generated (e.g. by an earlier load_tests call)
            continue
        cls = type(clsname, bases, {'template': template, **kwds})
        cls.__module__ = module.__name__
        # XXX I wonder what other __dunder__ attrs did I miss
        setattr(module, clsname, cls)
        yield '.'.join((module.__name__, clsname))
|
2018-03-30 03:04:15 +02:00
|
|
|
|
2018-10-07 19:44:48 +02:00
|
|
|
def maybe_create_testcases_on_import(create_testcases_gen):
    '''If certain conditions are met, call *create_testcases_gen* to create
    testcases for templates tests. The purpose is to use it on integration
    tests module(s) import, so the test runner could discover tests without
    using load tests protocol.

    The conditions - any of:
        - QUBES_TEST_TEMPLATES present in the environment (it's possible to
          create test cases without opening qubes.xml)
        - QUBES_TEST_LOAD_ALL present in the environment
    '''
    env = os.environ
    should_create = ('QUBES_TEST_TEMPLATES' in env
                     or 'QUBES_TEST_LOAD_ALL' in env)
    if should_create:
        # exhaust the generator so the classes actually get registered
        list(create_testcases_gen())
|
|
|
|
|
2017-09-19 16:59:09 +02:00
|
|
|
def extra_info(obj):
    '''Return short info identifying object.

    For example, if obj is a qube, return its name. This is for use with
    :py:mod:`objgraph` package.

    Returns an empty string for objects with no special handling.
    '''
    # Feel free to extend to other cases.

    if isinstance(obj, qubes.vm.qubesvm.QubesVM):
        try:
            return obj.name
        except AttributeError:
            # name may be unavailable on a partially constructed qube
            pass
    if isinstance(obj, unittest.TestCase):
        return obj.id()

    return ''
|
2017-08-28 14:28:17 +02:00
|
|
|
|
2015-10-17 00:10:15 +02:00
|
|
|
def load_tests(loader, tests, pattern):  # pylint: disable=unused-argument
    '''Load-tests-protocol entry point for the whole test suite.

    Discards any tests collected from this module (it hosts only base
    classes) and builds a fresh suite of unit tests, tool tests and - when
    running in dom0 - integration tests.
    '''
    suite = unittest.TestSuite()

    unit_test_modules = (
        'qubes.tests.events',
        'qubes.tests.devices',
        'qubes.tests.devices_block',
        'qubes.tests.firewall',
        'qubes.tests.init',
        'qubes.tests.vm.init',
        'qubes.tests.storage',
        'qubes.tests.storage_file',
        'qubes.tests.storage_reflink',
        'qubes.tests.storage_lvm',
        'qubes.tests.storage_kernels',
        'qubes.tests.ext',
        'qubes.tests.vm.qubesvm',
        'qubes.tests.vm.mix.net',
        'qubes.tests.vm.adminvm',
        'qubes.tests.vm.appvm',
        'qubes.tests.vm.dispvm',
        'qubes.tests.app',
        'qubes.tests.tarwriter',
        'qubes.tests.api',
        'qubes.tests.api_admin',
        'qubes.tests.api_misc',
        'qubes.tests.api_internal',
    )
    for modname in unit_test_modules:
        suite.addTests(loader.loadTestsFromName(modname))

    suite.addTests(loader.discover(
        os.path.join(os.path.dirname(__file__), 'tools')))

    # Integration tests require a real dom0 environment.
    if not in_dom0:
        return suite

    dom0_test_modules = (
        'qrexec.tests',
        'qrexec.tests.cli',
        'qrexec.tests.gtkhelpers',
        'qrexec.tests.rpcconfirmation',
        # integration tests
        'qubes.tests.integ.basic',
        'qubes.tests.integ.storage',
        'qubes.tests.integ.grub',
        'qubes.tests.integ.devices_block',
        'qubes.tests.integ.devices_pci',
        'qubes.tests.integ.dom0_update',
        'qubes.tests.integ.network',
        'qubes.tests.integ.dispvm',
        'qubes.tests.integ.vm_qrexec_gui',
        'qubes.tests.integ.mime',
        'qubes.tests.integ.salt',
        'qubes.tests.integ.backup',
        'qubes.tests.integ.backupcompatibility',
        # 'qubes.tests.regressions',

        # external modules
        'qubes.tests.extra',
    )
    for modname in dom0_test_modules:
        suite.addTests(loader.loadTestsFromName(modname))

    return suite
|