Merge branch 'tests-storage'
* tests-storage:
  tests: register libvirt events
  tests: even more agressive cleanup in tearDown
  app: do not wrap libvirt_conn.close() in auto-reconnect wrapper
  api: keep track of established connections
  tests: drop VM cleanup from tearDownClass, fix asyncio usage in tearDown
  storage: fix Storage.clone and Storage.clone_volume
  tests: more tests fixes
  firewall: raise ValueError on invalid hostname in dsthost=
  qmemman: don't load qubes.xml
  tests: fix AdminVM test
  tests: create temporary files in /tmp
  tests: remove renaming test - it isn't supported anymore
  tests: various fixes for storage tests
  tests: fix removing LVM volumes
  tests: fix asyncio usage in some tests
  tests: minor fixes to api/admin tests
  storage/file: create -cow.img only when needed
  storage: move volume_config['source'] filling to one place
  app: do not create 'default' storage pool
  app: add missing setters for default_pool* global properties
commit 76640df091
qubes/api.py

@@ -200,6 +200,9 @@ class AbstractQubesAPI(object):
 class QubesDaemonProtocol(asyncio.Protocol):
     buffer_size = 65536
     header = struct.Struct('Bx')
+    # keep track of connections, to gracefully close them at server exit
+    # (including cleanup of integration test)
+    connections = set()

     def __init__(self, handler, *args, app, debug=False, **kwargs):
         super().__init__(*args, **kwargs)
@@ -214,6 +217,7 @@ class QubesDaemonProtocol(asyncio.Protocol):

     def connection_made(self, transport):
         self.transport = transport
+        self.connections.add(self)

     def connection_lost(self, exc):
         self.untrusted_buffer.close()
@@ -221,6 +225,7 @@ class QubesDaemonProtocol(asyncio.Protocol):
         if self.mgmt is not None:
             self.mgmt.cancel()
         self.transport = None
+        self.connections.remove(self)

     def data_received(self, untrusted_data):  # pylint: disable=arguments-differ
         if self.len_untrusted_buffer + len(untrusted_data) > self.buffer_size:
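The connection registry added above is a generic asyncio pattern: each protocol instance adds itself to a class-level set in connection_made() and removes itself in connection_lost(), so the server can abort whatever is still open at shutdown. A minimal, self-contained sketch of the pattern (illustrative names only, not the actual qubesd code):

    import asyncio

    class TrackedProtocol(asyncio.Protocol):
        # class-level registry, shared by every instance
        connections = set()

        def connection_made(self, transport):
            self.transport = transport
            self.connections.add(self)

        def connection_lost(self, exc):
            self.transport = None
            self.connections.remove(self)

    @asyncio.coroutine
    def shutdown(server):
        # iterate over a copy: connection_lost() mutates the set
        for conn in list(TrackedProtocol.connections):
            if conn.transport:
                conn.transport.abort()
        server.close()
        yield from server.wait_closed()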
qubes/app.py

@@ -120,6 +120,8 @@ class VirConnectWrapper(object):
         attr = getattr(self._conn, attrname)
         if not isinstance(attr, collections.Callable):
             return attr
+        if attrname == 'close':
+            return attr

         @functools.wraps(attr)
         def wrapper(*args, **kwargs):
@@ -577,7 +579,7 @@ def _default_pool(app):
     for pool in app.pools.values():
         if pool.config.get('driver', None) != 'file':
             continue
-        if pool.config['dir_path'] == '/var/lib/qubes':
+        if pool.config['dir_path'] == qubes.config.qubes_base_dir:
             return pool
     raise AttributeError('Cannot determine default storage pool')

@@ -672,23 +674,28 @@ class Qubes(qubes.PropertyHolder):

     default_pool = qubes.property('default_pool', load_stage=3,
         default=_default_pool,
+        setter=_setter_pool,
         doc='Default storage pool')

     default_pool_private = qubes.property('default_pool_private', load_stage=3,
         default=lambda app: app.default_pool,
+        setter=_setter_pool,
         doc='Default storage pool for private volumes')

     default_pool_root = qubes.property('default_pool_root', load_stage=3,
         default=lambda app: app.default_pool,
+        setter=_setter_pool,
         doc='Default storage pool for root volumes')

     default_pool_volatile = qubes.property('default_pool_volatile',
         load_stage=3,
         default=lambda app: app.default_pool,
+        setter=_setter_pool,
         doc='Default storage pool for volatile volumes')

     default_pool_kernel = qubes.property('default_pool_kernel', load_stage=3,
         default=lambda app: app.default_pool,
+        setter=_setter_pool,
         doc='Default storage pool for kernel volumes')

     # TODO #1637 #892
@@ -961,11 +968,8 @@ class Qubes(qubes.PropertyHolder):
         # check if the default LVM Thin pool qubes_dom0/pool00 exists
         if os.path.exists('/dev/mapper/qubes_dom0-pool00-tpool'):
             self.add_pool(volume_group='qubes_dom0', thin_pool='pool00',
-                          name='default', driver='lvm_thin')
-        else:
-            self.pools['default'] = self._get_pool(
-                dir_path=qubes.config.qubes_base_dir,
-                name='default', driver='file')
+                          name='lvm', driver='lvm_thin')
+        # pool based on /var/lib/qubes will be created here:
         for name, config in qubes.config.defaults['pool_configs'].items():
             self.pools[name] = self._get_pool(**config)

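The default_pool_* properties use `default=lambda app: app.default_pool`, so each per-volume pool falls back to the global default until set explicitly; the added `setter=_setter_pool` is what makes them assignable. A simplified sketch of that fallback behaviour, using a plain descriptor as a hypothetical stand-in for qubes.property:

    class prop:
        '''Very simplified stand-in for qubes.property (illustration only).'''
        def __init__(self, name, default=None, setter=None):
            self._name, self._default, self._setter = '_' + name, default, setter

        def __get__(self, obj, cls=None):
            if obj is None:
                return self
            try:
                return getattr(obj, self._name)
            except AttributeError:
                # a callable default receives the owning object
                return self._default(obj) if callable(self._default) \
                    else self._default

        def __set__(self, obj, value):
            if self._setter is not None:
                value = self._setter(obj, value)
            setattr(obj, self._name, value)

    class App:
        default_pool = prop('default_pool', default='varlibqubes')
        default_pool_root = prop('default_pool_root',
            default=lambda app: app.default_pool)

    app = App()
    assert app.default_pool_root == 'varlibqubes'  # falls back to default_pool
    app.default_pool = 'lvm'
    assert app.default_pool_root == 'lvm'          # fallback tracks the default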
qubes/firewall.py

@@ -123,7 +123,8 @@ class DstHost(RuleOption):
             self.type = 'dsthost'
             self.prefixlen = 0
             safe_set = string.ascii_lowercase + string.digits + '-._'
-            assert all(c in safe_set for c in untrusted_value)
+            if not all(c in safe_set for c in untrusted_value):
+                raise ValueError('Invalid hostname')
             value = untrusted_value
         else:
             untrusted_host, untrusted_prefixlen = untrusted_value.split('/', 1)
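The firewall change swaps an `assert` for an explicit `ValueError`: assert statements disappear entirely when Python runs with -O, so they must never guard untrusted input. The check in isolation, as a standalone sketch (assuming the same character whitelist as above):

    import string

    def validate_hostname(untrusted_value):
        # a whitelist check on untrusted input must raise a real exception;
        # 'python -O' strips assert statements, silently disabling them
        safe_set = string.ascii_lowercase + string.digits + '-._'
        if not all(c in safe_set for c in untrusted_value):
            raise ValueError('Invalid hostname')
        return untrusted_value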
qubes/qmemman/__init__.py

@@ -30,7 +30,6 @@ import functools
 import xen.lowlevel.xc
 import xen.lowlevel.xs

-import qubes
 import qubes.qmemman.algo


@@ -141,24 +140,16 @@ class SystemState(object):
                 self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
             dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
             if dom_name is not None:
-                try:
-                    qubes.Qubes().domains[str(dom_name)].fire_event(
-                        'status:no-error', status='no-error',
-                        msg=slow_memset_react_msg)
-                except LookupError:
-                    pass
+                # TODO: report it somewhere, qubesd or elsewhere
+                pass
             self.domdict[i].slow_memset_react = False

         if self.domdict[i].no_progress and \
                 self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
             dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
             if dom_name is not None:
-                try:
-                    qubes.Qubes().domains[str(dom_name)].fire_event(
-                        'status:no-error', status='no-error',
-                        msg=no_progress_msg)
-                except LookupError:
-                    pass
+                # TODO: report it somewhere, qubesd or elsewhere
+                pass
             self.domdict[i].no_progress = False

         #the below works (and is fast), but then 'xm list' shows unchanged memory value
@@ -343,13 +334,8 @@ class SystemState(object):
                     self.domdict[dom2].no_progress = True
                     dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
                     if dom_name is not None:
-                        try:
-                            qubes.Qubes().domains[str(
-                                dom_name)].fire_event(
-                                'status:error', status='error',
-                                msg=no_progress_msg)
-                        except LookupError:
-                            pass
+                        # TODO: report it somewhere, qubesd or elsewhere
+                        pass
                 else:
                     self.log.warning('dom {!r} still hold more'
                         ' memory than have assigned ({} > {})'
@@ -359,13 +345,8 @@ class SystemState(object):
                     self.domdict[dom2].slow_memset_react = True
                     dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
                     if dom_name is not None:
-                        try:
-                            qubes.Qubes().domains[str(
-                                dom_name)].fire_event(
-                                'status:error', status='error',
-                                msg=slow_memset_react_msg)
-                        except LookupError:
-                            pass
+                        # TODO: report it somewhere, qubesd or elsewhere
+                        pass
         self.mem_set(dom, self.get_free_xen_memory() + self.domdict[dom].memory_actual - self.XEN_FREE_MEM_LEFT)
         return

qubes/storage/__init__.py

@@ -349,39 +349,49 @@ class Storage(object):

         if hasattr(vm, 'volume_config'):
             for name, conf in self.vm.volume_config.items():
                 conf = conf.copy()
-                if 'source' in conf:
-                    template = getattr(vm, 'template', None)
-                    # recursively lookup source volume - templates may be
-                    # chained (TemplateVM -> AppVM -> DispVM, where the
-                    # actual source should be used from TemplateVM)
-                    while template:
-                        # we have no control over VM load order,
-                        # so initialize storage recursively if needed
-                        if template.storage is None:
-                            template.storage = Storage(template)
-                        # FIXME: this effectively ignore 'source' value;
-                        # maybe we don't need it at all if it's always from
-                        # VM's template?
-                        conf['source'] = template.volumes[name]
-                        if conf['source'].source is not None:
-                            template = getattr(template, 'template', None)
-                        else:
-                            break
-
                 self.init_volume(name, conf)

+    def _update_volume_config_source(self, name, volume_config):
+        '''Retrieve 'source' volume from VM's template'''
+        template = getattr(self.vm, 'template', None)
+        # recursively lookup source volume - templates may be
+        # chained (TemplateVM -> AppVM -> DispVM, where the
+        # actual source should be used from TemplateVM)
+        while template:
+            source = template.volumes[name]
+            volume_config['source'] = source
+            volume_config['pool'] = source.pool
+            volume_config['size'] = source.size
+            if source.source is not None:
+                template = getattr(template, 'template', None)
+            else:
+                break

     def init_volume(self, name, volume_config):
         ''' Initialize Volume instance attached to this domain '''
-        assert 'pool' in volume_config, "Pool missing in volume_config " + str(
-            volume_config)

         if 'name' not in volume_config:
             volume_config['name'] = name

+        if 'source' in volume_config:
+            # we have no control over VM load order,
+            # so initialize storage recursively if needed
+            template = getattr(self.vm, 'template', None)
+            if template and template.storage is None:
+                template.storage = Storage(template)
+
+            if volume_config['source'] is None:
+                self._update_volume_config_source(name, volume_config)
+            else:
+                # if source is already specified, pool needs to be too
+                pool = self.vm.app.get_pool(volume_config['pool'])
+                volume_config['source'] = pool.volumes[volume_config['source']]
+
+        # if pool still unknown, load default
         if 'pool' not in volume_config:
-            pool = getattr(self.vm.app, 'default_pool_' + name)
-        else:
-            pool = self.vm.app.get_pool(volume_config['pool'])
+            volume_config['pool'] = \
+                getattr(self.vm.app, 'default_pool_' + name)
+        pool = self.vm.app.get_pool(volume_config['pool'])
         if 'internal' in volume_config:
             # migrate old config
             del volume_config['internal']
@@ -504,13 +514,22 @@ class Storage(object):
         src_volume = src_vm.volumes[name]
         msg = "Importing volume {!s} from vm {!s}"
         self.vm.log.info(msg.format(src_volume.name, src_vm.name))
-        clone_op_ret = dst.import_volume(src_volume)

+        # First create the destination volume
+        create_op_ret = dst.create()
+        # clone/import functions may be either synchronous or asynchronous
+        # in the later case, we need to wait for them to finish
+        if asyncio.iscoroutine(create_op_ret):
+            yield from create_op_ret
+
+        # Then import data from source volume
+        clone_op_ret = dst.import_volume(src_volume)
+
         # clone/import functions may be either synchronous or asynchronous
         # in the later case, we need to wait for them to finish
         if asyncio.iscoroutine(clone_op_ret):
-            clone_op_ret = yield from clone_op_ret
-        self.vm.volumes[name] = clone_op_ret
+            yield from clone_op_ret
+        self.vm.volumes[name] = dst
         return self.vm.volumes[name]

     @asyncio.coroutine
@@ -519,8 +538,8 @@ class Storage(object):

         self.vm.volumes = {}
         with VmCreationManager(self.vm):
-            yield from asyncio.wait(self.clone_volume(src_vm, vol_name)
-                for vol_name in self.vm.volume_config.keys())
+            yield from asyncio.wait([self.clone_volume(src_vm, vol_name)
+                for vol_name in self.vm.volume_config.keys()])

     @property
     def outdated_volumes(self):
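Storage.clone_volume now hedges on the pool API being either synchronous or coroutine-based: call the operation, check asyncio.iscoroutine(), and only then wait. That convention can be factored into a small helper; a sketch under the same Python 3.5-era style (the helper name is illustrative, not part of the qubes API):

    import asyncio

    @asyncio.coroutine
    def maybe_yield(ret):
        '''Wait on ret only if it is a coroutine; pass plain values through.'''
        if asyncio.iscoroutine(ret):
            ret = yield from ret
        return ret

    # usage inside a coroutine, for a volume whose create() may be async:
    #     yield from maybe_yield(volume.create())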
qubes/storage/file.py

@@ -164,9 +164,6 @@ class FileVolume(qubes.storage.Volume):
             'Volume size must be > 0'
         if not self.snap_on_start:
             create_sparse_file(self.path, self.size)
-        # path_cow not needed only in volatile volume
-        if self.save_on_stop or self.snap_on_start:
-            create_sparse_file(self.path_cow, self.size)

     def remove(self):
         if not self.snap_on_start:
qubes/tests/__init__.py

@@ -48,6 +48,7 @@ import unittest
 import warnings
 from distutils import spawn

+import gc
 import lxml.etree
 import pkg_resources

@@ -93,6 +94,9 @@ try:
 except libvirt.libvirtError:
     pass

+if in_dom0:
+    import libvirtaio
+
 try:
     in_git = subprocess.check_output(
         ['git', 'rev-parse', '--show-toplevel']).decode().strip()
@@ -581,12 +585,15 @@ class SystemTestCase(QubesTestCase):
     be used to create Qubes(CLASS_XMLPATH) object and create/import required
     stuff there. VMs created in :py:meth:`TestCase.setUpClass` should
     use self.make_vm_name('...', class_teardown=True) for name creation.
+    Such (group of) test need to take care about
+    :py:meth:`TestCase.tearDownClass` implementation itself.
     """
     # noinspection PyAttributeOutsideInit
     def setUp(self):
         if not in_dom0:
             self.skipTest('outside dom0')
         super(SystemTestCase, self).setUp()
+        libvirtaio.virEventRegisterAsyncIOImpl(loop=self.loop)
         self.remove_test_vms()

         # need some information from the real qubes.xml - at least installed
@@ -600,6 +607,7 @@ class SystemTestCase(QubesTestCase):
         shutil.copy(self.host_app.store, XMLPATH)
         self.app = qubes.Qubes(XMLPATH)
         os.environ['QUBES_XML_PATH'] = XMLPATH
+        self.app.vmm.register_event_handlers(self.app)

         self.qubesd = self.loop.run_until_complete(
             qubes.api.create_servers(
@@ -659,46 +667,56 @@ class SystemTestCase(QubesTestCase):
         self.remove_test_vms()

         # close the servers before super(), because that might close the loop
+        server = None
         for server in self.qubesd:
             for sock in server.sockets:
                 os.unlink(sock.getsockname())
             server.close()
+        del server
+
+        # close all existing connections, especially this will interrupt
+        # running admin.Events calls, which do keep reference to Qubes() and
+        # libvirt connection
+        conn = None
+        for conn in qubes.api.QubesDaemonProtocol.connections:
+            if conn.transport:
+                conn.transport.abort()
+        del conn
+
         self.loop.run_until_complete(asyncio.wait([
             server.wait_closed() for server in self.qubesd]))
         del self.qubesd

-        super(SystemTestCase, self).tearDown()
-        # remove all references to VM objects, to release resources - most
-        # importantly file descriptors; this object will live
+        # remove all references to any complex qubes objects, to release
+        # resources - most importantly file descriptors; this object will live
         # during the whole test run, but all the file descriptors would be
         # depleted earlier
+        self.app.vmm._libvirt_conn = None
         del self.app
         del self.host_app
         for attr in dir(self):
-            if isinstance(getattr(self, attr), qubes.vm.BaseVM):
+            obj_type = type(getattr(self, attr))
+            if obj_type.__module__.startswith('qubes'):
                 delattr(self, attr)

-    @classmethod
-    def tearDownClass(cls):
-        super(SystemTestCase, cls).tearDownClass()
-        if not in_dom0:
-            return
-        cls.remove_test_vms(xmlpath=CLASS_XMLPATH, prefix=CLSVMPREFIX)
+        # then trigger garbage collector to really destroy those objects
+        gc.collect()

-    @classmethod
-    def _remove_vm_qubes(cls, vm):
+        super(SystemTestCase, self).tearDown()
+
+    def _remove_vm_qubes(self, vm):
         vmname = vm.name
         app = vm.app

         try:
             # XXX .is_running() may throw libvirtError if undefined
             if vm.is_running():
-                vm.kill()
+                self.loop.run_until_complete(vm.kill())
         except:  # pylint: disable=bare-except
             pass

         try:
-            vm.remove_from_disk()
+            self.loop.run_until_complete(vm.remove_from_disk())
         except:  # pylint: disable=bare-except
             pass

@@ -712,14 +730,18 @@ class SystemTestCase(QubesTestCase):
         # for example if vm.libvirt_domain malfunctioned.
         try:
             conn = libvirt.open(qubes.config.defaults['libvirt_uri'])
-            dom = conn.lookupByName(vmname)
         except:  # pylint: disable=bare-except
             pass
         else:
-            cls._remove_vm_libvirt(dom)
+            try:
+                dom = conn.lookupByName(vmname)
+            except:  # pylint: disable=bare-except
+                pass
+            else:
+                self._remove_vm_libvirt(dom)
+            conn.close()

-        cls._remove_vm_disk(vmname)
+        self._remove_vm_disk(vmname)


     @staticmethod
@@ -756,35 +778,35 @@ class SystemTestCase(QubesTestCase):
             volumes = subprocess.check_output(
                 ['sudo', 'lvs', '--noheadings', '-o', 'vg_name,name',
                     '--separator', '/']).decode()
-            if ('/' + prefix) not in volumes:
+            if ('/vm-' + prefix) not in volumes:
                 return
             subprocess.check_call(['sudo', 'lvremove', '-f'] +
                 [vol.strip() for vol in volumes.splitlines()
-                    if ('/' + prefix) in vol],
+                    if ('/vm-' + prefix) in vol],
                 stdout=subprocess.DEVNULL)
         except subprocess.CalledProcessError:
             pass

-    @classmethod
-    def remove_vms(cls, vms):
+    def remove_vms(self, vms):
         for vm in vms:
-            cls._remove_vm_qubes(vm)
+            self._remove_vm_qubes(vm)


-    @classmethod
-    def remove_test_vms(cls, xmlpath=XMLPATH, prefix=VMPREFIX):
+    def remove_test_vms(self, xmlpath=XMLPATH, prefix=VMPREFIX):
         '''Aggresively remove any domain that has name in testing namespace.
         '''

         # first, remove them Qubes-way
         if os.path.exists(xmlpath):
             try:
-                cls.remove_vms(vm for vm in qubes.Qubes(xmlpath).domains
+                try:
+                    app = self.app
+                except AttributeError:
+                    app = qubes.Qubes(xmlpath)
+                self.remove_vms(vm for vm in app.domains
                     if vm.name.startswith(prefix))
-            except qubes.exc.QubesException:
+                del app
+            except (qubes.exc.QubesException, lxml.etree.XMLSyntaxError):
+                # If qubes-test.xml is broken that much it doesn't even load,
+                # simply remove it. VMs will be cleaned up the hard way.
+                # TODO logging?
                 pass
             os.unlink(xmlpath)

@@ -792,7 +814,7 @@ class SystemTestCase(QubesTestCase):
         conn = libvirt.open(qubes.config.defaults['libvirt_uri'])
         for dom in conn.listAllDomains():
             if dom.name().startswith(prefix):
-                cls._remove_vm_libvirt(dom)
+                self._remove_vm_libvirt(dom)
         conn.close()

         # finally remove anything that is left on disk
@@ -807,8 +829,8 @@ class SystemTestCase(QubesTestCase):
             if name.startswith(prefix):
                 vmnames.add(name)
         for vmname in vmnames:
-            cls._remove_vm_disk(vmname)
-        cls._remove_vm_disk_lvm(prefix)
+            self._remove_vm_disk(vmname)
+        self._remove_vm_disk_lvm(prefix)

     def qrexec_policy(self, service, source, destination, allow=True):
         """
@@ -867,7 +889,7 @@ class SystemTestCase(QubesTestCase):
         while timeout > 0:
             if not vm.is_running():
                 return
-            time.sleep(1)
+            self.loop.run_until_complete(asyncio.sleep(1))
             timeout -= 1
         self.fail("Timeout while waiting for VM {} shutdown".format(vm.name))

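Replacing time.sleep() with `self.loop.run_until_complete(asyncio.sleep(1))` keeps the event loop turning while a test polls, so libvirt events registered on that loop are still delivered during the wait. The polling shape in isolation, as a sketch (assumed helper, not an existing test API):

    import asyncio

    def wait_until(loop, predicate, timeout=60):
        # poll once a second without starving the event loop
        while timeout > 0:
            if predicate():
                return True
            loop.run_until_complete(asyncio.sleep(1))
            timeout -= 1
        return False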
qubes/tests/api_admin.py

@@ -50,8 +50,12 @@ class AdminAPITestCase(qubes.tests.QubesTestCase):
             {'qubes_base_dir': self.test_base_dir})
         self.base_dir_patch2 = unittest.mock.patch(
             'qubes.config.qubes_base_dir', self.test_base_dir)
+        self.base_dir_patch3 = unittest.mock.patch.dict(
+            qubes.config.defaults['pool_configs']['varlibqubes'],
+            {'dir_path': self.test_base_dir})
         self.base_dir_patch.start()
         self.base_dir_patch2.start()
+        self.base_dir_patch3.start()
         app = qubes.Qubes('/tmp/qubes-test.xml', load=False)
         app.vmm = unittest.mock.Mock(spec=qubes.app.VMMConnection)
         app.load_initial_values()
@@ -63,6 +67,7 @@ class AdminAPITestCase(qubes.tests.QubesTestCase):
         with qubes.tests.substitute_entry_points('qubes.storage',
                 'qubes.tests.storage'):
             app.add_pool('test', driver='test')
+        app.default_pool = 'varlibqubes'
         app.save = unittest.mock.Mock()
         self.vm = app.add_new_vm('AppVM', label='red', name='test-vm1',
             template='test-template')
@@ -79,6 +84,7 @@ class AdminAPITestCase(qubes.tests.QubesTestCase):
         self.app.domains[0].fire_event = self.emitter.fire_event

     def tearDown(self):
+        self.base_dir_patch3.stop()
         self.base_dir_patch2.stop()
         self.base_dir_patch.stop()
         if os.path.exists(self.test_base_dir):
@@ -417,7 +423,6 @@ class TC_00_VMs(AdminAPITestCase):
         value = self.call_mgmt_func(b'admin.vm.volume.Revert',
             b'test-vm1', b'private', b'rev1')
         self.assertIsNone(value)
-        print(repr(self.vm.volumes.mock_calls))
         self.assertEqual(self.vm.volumes.mock_calls, [
             ('__getitem__', ('private', ), {}),
             ('__getitem__().revert', ('rev1', ), {}),
@@ -1156,7 +1161,8 @@ class TC_00_VMs(AdminAPITestCase):
         self.assertEqual(vm.template, self.app.domains['test-template'])
         # setting pool= affect only volumes actually created for this VM,
         # not used from a template or so
-        self.assertEqual(vm.volume_config['root']['pool'], 'default')
+        self.assertEqual(vm.volume_config['root']['pool'],
+            self.template.volumes['root'].pool)
         self.assertEqual(vm.volume_config['private']['pool'], 'test')
         self.assertEqual(vm.volume_config['volatile']['pool'], 'test')
         self.assertEqual(vm.volume_config['kernel']['pool'], 'linux-kernel')
@@ -1178,9 +1184,11 @@ class TC_00_VMs(AdminAPITestCase):
         vm = self.app.domains['test-vm2']
         self.assertEqual(vm.label, self.app.get_label('red'))
         self.assertEqual(vm.template, self.app.domains['test-template'])
-        self.assertEqual(vm.volume_config['root']['pool'], 'default')
+        self.assertEqual(vm.volume_config['root']['pool'],
+            self.template.volumes['root'].pool)
         self.assertEqual(vm.volume_config['private']['pool'], 'test')
-        self.assertEqual(vm.volume_config['volatile']['pool'], 'default')
+        self.assertEqual(vm.volume_config['volatile']['pool'],
+            self.app.default_pool_volatile)
         self.assertEqual(vm.volume_config['kernel']['pool'], 'linux-kernel')
         self.assertEqual(storage_mock.mock_calls,
             [unittest.mock.call(self.app.domains['test-vm2']).create()])
@@ -1524,6 +1532,7 @@ class TC_00_VMs(AdminAPITestCase):
     @unittest.mock.patch('qubes.storage.Storage.remove')
     @unittest.mock.patch('shutil.rmtree')
     def test_500_vm_remove(self, mock_rmtree, mock_remove):
+        mock_remove.side_effect = self.dummy_coro
         value = self.call_mgmt_func(b'admin.vm.Remove', b'test-vm1')
         self.assertIsNone(value)
         mock_rmtree.assert_called_once_with(
@@ -1534,6 +1543,7 @@ class TC_00_VMs(AdminAPITestCase):
     @unittest.mock.patch('qubes.storage.Storage.remove')
     @unittest.mock.patch('shutil.rmtree')
     def test_501_vm_remove_running(self, mock_rmtree, mock_remove):
+        mock_remove.side_effect = self.dummy_coro
         with unittest.mock.patch.object(
                 self.vm, 'get_power_state', lambda: 'Running'):
             with self.assertRaises(qubes.exc.QubesVMNotHaltedError):
qubes/tests/firewall.py

@@ -202,7 +202,6 @@ class TC_02_DstHost(qubes.tests.QubesTestCase):
         with self.assertRaises(ValueError):
             qubes.firewall.DstHost('2001:abcd:efab::3/64')

-    @unittest.expectedFailure
     def test_020_invalid_hostname(self):
         with self.assertRaises(ValueError):
             qubes.firewall.DstHost('www qubes-os.org')
qubes/tests/init.py

@@ -229,12 +229,10 @@ class TC_20_PropertyHolder(qubes.tests.QubesTestCase):
         self.assertIs(self.holder.property_get_def(TestHolder.testprop1),
             TestHolder.testprop1)

-    @unittest.expectedFailure
     def test_002_load_properties(self):
         self.holder.load_properties()

         self.assertEventFired(self.holder, 'property-loaded')
-        self.assertEventNotFired(self.holder, 'property-set:testprop1')
+        self.assertEventFired(self.holder, 'property-set:testprop1')

         self.assertEqual(self.holder.testprop1, 'testvalue1')
         self.assertEqual(self.holder.testprop2, 'testref2')
qubes/tests/integ/basic.py

@@ -60,10 +60,10 @@ class TC_00_Basic(qubes.tests.SystemTestCase):
         self.assertIsNotNone(vm)
         self.assertEqual(vm.name, vmname)
         self.assertEqual(vm.template, self.app.default_template)
-        vm.create_on_disk()
+        self.loop.run_until_complete(vm.create_on_disk())

         with self.assertNotRaises(qubes.exc.QubesException):
-            vm.storage.verify()
+            self.loop.run_until_complete(vm.storage.verify())

     def test_040_qdb_watch(self):
         flag = set()
@@ -89,6 +89,7 @@ class TC_01_Properties(qubes.tests.SystemTestCase):
         self.vm = self.app.add_new_vm(qubes.vm.appvm.AppVM, name=self.vmname,
             template=self.app.default_template,
             label='red')
+        self.loop.run_until_complete(self.vm.create_on_disk())

     @unittest.expectedFailure
     def test_030_clone(self):
@@ -104,7 +105,7 @@ class TC_01_Properties(qubes.tests.SystemTestCase):
             label='red')
         testvm2.clone_properties(testvm1)
         self.loop.run_until_complete(testvm2.clone_disk_files(testvm1))
-        self.assertTrue(testvm1.storage.verify())
+        self.assertTrue(self.loop.run_until_complete(testvm1.storage.verify()))
         self.assertIn('source', testvm1.volumes['root'].config)
         self.assertNotEquals(testvm2, None)
         self.assertNotEquals(testvm2.volumes, {})
@@ -140,7 +141,6 @@ class TC_01_Properties(qubes.tests.SystemTestCase):
         testvm1.label = 'orange'
         testvm1.memory = 512
         firewall = testvm1.firewall
-        firewall.policy = 'drop'
         firewall.rules = [
             qubes.firewall.Rule(None, action='accept', dsthost='1.2.3.0/24',
                 proto='tcp', dstports=22)]
@@ -193,15 +193,6 @@ class TC_01_Properties(qubes.tests.SystemTestCase):
             name=self.vmname, label='red')
         self.loop.run_until_complete(self.vm2.create_on_disk())

-    def test_030_rename_conflict_app(self):
-        vm2name = self.make_vm_name('newname')
-
-        self.vm2 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
-            name=vm2name, template=self.app.default_template, label='red')
-
-        with self.assertNotRaises(OSError):
-            with self.assertRaises(qubes.exc.QubesException):
-                self.vm2.name = self.vmname

 class TC_02_QvmPrefs(qubes.tests.SystemTestCase):
     # pylint: disable=attribute-defined-outside-init
@@ -229,13 +220,18 @@ class TC_02_QvmPrefs(qubes.tests.SystemTestCase):
         self.app.save()

     def pref_set(self, name, value, valid=True):
-        p = subprocess.Popen(
-            ['qvm-prefs'] + self.sharedopts +
-            (['--'] if value != '-D' else []) + [self.testvm.name, name, value],
+        self.loop.run_until_complete(self._pref_set(name, value, valid))
+
+    @asyncio.coroutine
+    def _pref_set(self, name, value, valid=True):
+        cmd = ['qvm-prefs']
+        if value != '-D':
+            cmd.append('--')
+        cmd.extend((self.testvm.name, name, value))
+        p = yield from asyncio.create_subprocess_exec(*cmd,
             stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            )
-        (stdout, stderr) = p.communicate()
+            stderr=subprocess.PIPE)
+        (stdout, stderr) = yield from p.communicate()
         if valid:
             self.assertEqual(p.returncode, 0,
                 "qvm-prefs .. '{}' '{}' failed: {}{}".format(
@@ -247,9 +243,14 @@ class TC_02_QvmPrefs(qubes.tests.SystemTestCase):
                 "property '{}'".format(value, name))

     def pref_get(self, name):
-        p = subprocess.Popen(['qvm-prefs'] + self.sharedopts +
-            ['--', self.testvm.name, name], stdout=subprocess.PIPE)
-        (stdout, _) = p.communicate()
+        self.loop.run_until_complete(self._pref_get(name))
+
+    @asyncio.coroutine
+    def _pref_get(self, name):
+        p = yield from asyncio.create_subprocess_exec(
+            'qvm-prefs', *self.sharedopts, '--', self.testvm.name, name,
+            stdout=subprocess.PIPE)
+        (stdout, _) = yield from p.communicate()
         self.assertEqual(p.returncode, 0)
         return stdout.strip()

@@ -357,10 +358,13 @@ class TC_03_QvmRevertTemplateChanges(qubes.tests.SystemTestCase):
             self.log.warning("template not modified, test result will be "
                              "unreliable")
-        with self.assertNotRaises(subprocess.CalledProcessError):
-            pool_vid = repr(self.test_template.volumes['root']).strip("'")
-            revert_cmd = ['qvm-block', 'revert', pool_vid]
-            subprocess.check_call(revert_cmd)
+        self.assertNotEqual(self.test_template.volumes['root'].revisions, {})
+        pool_vid = repr(self.test_template.volumes['root']).strip("'")
+        revert_cmd = ['qvm-block', 'revert', pool_vid]
+        p = self.loop.run_until_complete(asyncio.create_subprocess_exec(
+            *revert_cmd))
+        self.loop.run_until_complete(p.wait())
+        self.assertEqual(p.returncode, 0)


         checksum_after = self.get_rootimg_checksum()
         self.assertEqual(checksum_before, checksum_after)
@@ -437,7 +441,7 @@ class TC_30_Gui_daemon(qubes.tests.SystemTestCase):
         # Then paste it to the other window
         window_title = 'user@{}'.format(testvm2.name)
         p = self.loop.run_until_complete(testvm2.run(
-            'zenity --entry --title={} > test.txt'.format(window_title)))
+            'zenity --entry --title={} > /tmp/test.txt'.format(window_title)))
         self.wait_for_window(window_title)

         subprocess.check_call(['xdotool', 'key', '--delay', '100',
@@ -446,7 +450,7 @@ class TC_30_Gui_daemon(qubes.tests.SystemTestCase):

         # And compare the result
         (test_output, _) = self.loop.run_until_complete(
-            testvm2.run_for_stdio('cat test.txt'))
+            testvm2.run_for_stdio('cat /tmp/test.txt'))
         self.assertEqual(test_string, test_output.strip().decode('ascii'))

         clipboard_content = \
@@ -464,20 +468,20 @@ class TC_05_StandaloneVM(qubes.tests.SystemTestCase):
         super(TC_05_StandaloneVM, self).setUp()
         self.init_default_template()

-    @unittest.expectedFailure
     def test_000_create_start(self):
         testvm1 = self.app.add_new_vm(qubes.vm.standalonevm.StandaloneVM,
                                       name=self.make_vm_name('vm1'), label='red')
+        testvm1.features['qrexec'] = True
         self.loop.run_until_complete(
             testvm1.clone_disk_files(self.app.default_template))
         self.app.save()
         self.loop.run_until_complete(testvm1.start())
         self.assertEqual(testvm1.get_power_state(), "Running")

-    @unittest.expectedFailure
     def test_100_resize_root_img(self):
         testvm1 = self.app.add_new_vm(qubes.vm.standalonevm.StandaloneVM,
                                       name=self.make_vm_name('vm1'), label='red')
+        testvm1.features['qrexec'] = True
         self.loop.run_until_complete(
             testvm1.clone_disk_files(self.app.default_template))
         self.app.save()
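pref_set/pref_get above move from blocking subprocess.Popen to asyncio.create_subprocess_exec, wrapped in loop.run_until_complete() so the synchronous test API stays unchanged. The conversion in isolation, as a sketch (helper name and shortened arguments are illustrative):

    import asyncio
    import subprocess

    @asyncio.coroutine
    def run_cmd(*cmd):
        # spawn on the event loop instead of blocking the whole process
        p = yield from asyncio.create_subprocess_exec(*cmd,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = yield from p.communicate()
        return p.returncode, stdout, stderr

    # synchronous wrapper, same shape as pref_get above:
    #     returncode, stdout, _ = loop.run_until_complete(
    #         run_cmd('qvm-prefs', '--', vmname, prop))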
qubes/tests/integ/storage.py

@@ -22,6 +22,7 @@
 import asyncio
 import os
 import shutil
+import subprocess

 import qubes.storage.lvm
 import qubes.tests
@@ -36,11 +37,11 @@ class StorageTestMixin(object):
         self.vm1 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
             name=self.make_vm_name('vm1'),
             label='red')
-        self.vm1.create_on_disk()
+        self.loop.run_until_complete(self.vm1.create_on_disk())
         self.vm2 = self.app.add_new_vm(qubes.vm.appvm.AppVM,
             name=self.make_vm_name('vm2'),
             label='red')
-        self.vm2.create_on_disk()
+        self.loop.run_until_complete(self.vm2.create_on_disk())
         self.pool = None
         self.init_pool()
         self.app.save()
@@ -63,7 +64,9 @@ class StorageTestMixin(object):
             'rw': True,
         }
         testvol = self.vm1.storage.init_volume('testvol', volume_config)
-        yield from self.vm1.storage.get_pool(testvol).create(testvol)
+        coro_maybe = testvol.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         self.app.save()
         yield from (self.vm1.start())

@@ -93,9 +96,10 @@ class StorageTestMixin(object):
             'save_on_stop': True,
             'rw': True,
         }
-        testvol = yield from self.vm1.storage.init_volume(
-            'testvol', volume_config)
-        yield from self.vm1.storage.get_pool(testvol).create(testvol)
+        testvol = self.vm1.storage.init_volume('testvol', volume_config)
+        coro_maybe = testvol.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         self.app.save()
         yield from self.vm1.start()
         # non-volatile image not clean
@@ -128,7 +132,9 @@ class StorageTestMixin(object):
             'rw': False,
         }
         testvol = self.vm1.storage.init_volume('testvol', volume_config)
-        yield from self.vm1.storage.get_pool(testvol).create(testvol)
+        coro_maybe = testvol.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         self.app.save()
         yield from self.vm1.start()
         # non-volatile image not clean
@@ -158,7 +164,9 @@ class StorageTestMixin(object):
             'rw': True,
         }
         testvol = self.vm1.storage.init_volume('testvol', volume_config)
-        yield from self.vm1.storage.get_pool(testvol).create(testvol)
+        coro_maybe = testvol.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         volume_config = {
             'pool': self.pool.name,
             'size': size,
@@ -167,44 +175,61 @@ class StorageTestMixin(object):
             'rw': True,
         }
         testvol_snap = self.vm2.storage.init_volume('testvol', volume_config)
-        yield from self.vm2.storage.get_pool(testvol_snap).create(testvol_snap)
+        coro_maybe = testvol_snap.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         self.app.save()
         yield from self.vm1.start()
         yield from self.vm2.start()
-        # origin image not clean
-        yield from self.vm1.run_for_stdio(
-            'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.format(size),
-            user='root')
-
-        # snapshot image not clean
-        yield from self.vm2.run_for_stdio(
-            'head -c {} /dev/zero | diff -q /dev/xvde -'.format(size),
-            user='root')
+        try:
+            yield from self.vm1.run_for_stdio(
+                'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.
+                format(size),
+                user='root')
+        except subprocess.CalledProcessError:
+            self.fail('origin image not clean')

-        # Write to read-write volume failed
-        yield from self.vm1.run_for_stdio('echo test123 > /dev/xvde && sync',
-            user='root')
-        # origin changes propagated to snapshot too early
-        yield from self.vm2.run_for_stdio(
-            'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.format(size),
-            user='root')
+        try:
+            yield from self.vm2.run_for_stdio(
+                'head -c {} /dev/zero | diff -q /dev/xvde -'.format(size),
+                user='root')
+        except subprocess.CalledProcessError:
+            self.fail('snapshot image not clean')
+
+        try:
+            yield from self.vm1.run_for_stdio(
+                'echo test123 > /dev/xvde && sync',
+                user='root')
+        except subprocess.CalledProcessError:
+            self.fail('Write to read-write volume failed')
+        try:
+            yield from self.vm2.run_for_stdio(
+                'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.
+                format(size),
+                user='root')
+        except subprocess.CalledProcessError:
+            self.fail('origin changes propagated to snapshot too early')
         yield from self.vm1.shutdown(wait=True)

         # after origin shutdown there should be still no change

-        # origin changes propagated to snapshot too early2
-        yield from self.vm2.run_for_stdio(
-            'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.format(size),
-            user='root')
+        try:
+            yield from self.vm2.run_for_stdio(
+                'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.
+                format(size),
+                user='root')
+        except subprocess.CalledProcessError:
+            self.fail('origin changes propagated to snapshot too early2')

         yield from self.vm2.shutdown(wait=True)
         yield from self.vm2.start()

         # only after target VM restart changes should be visible

-        # origin changes not visible in snapshot
-        with self.assertRaises(subprocess.CalledProcessError):
-            yield from self.vm2.run(
+        with self.assertRaises(subprocess.CalledProcessError,
+                msg='origin changes not visible in snapshot'):
+            yield from self.vm2.run_for_stdio(
                 'head -c {} /dev/zero 2>&1 | diff -q /dev/xvde - 2>&1'.format(
                     size),
                 user='root')
@@ -224,7 +249,9 @@ class StorageTestMixin(object):
             'rw': True,
         }
         testvol = self.vm1.storage.init_volume('testvol', volume_config)
-        yield from self.vm1.storage.get_pool(testvol).create(testvol)
+        coro_maybe = testvol.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         volume_config = {
             'pool': self.pool.name,
             'size': size,
@@ -233,7 +260,9 @@ class StorageTestMixin(object):
             'rw': True,
         }
         testvol_snap = self.vm2.storage.init_volume('testvol', volume_config)
-        yield from self.vm2.storage.get_pool(testvol_snap).create(testvol_snap)
+        coro_maybe = testvol_snap.create()
+        if asyncio.iscoroutine(coro_maybe):
+            yield from coro_maybe
         self.app.save()
         yield from self.vm2.start()

qubes/tests/storage.py

@@ -19,6 +19,7 @@
 #
+import unittest.mock
 import qubes.log
 import qubes.storage
 from qubes.exc import QubesException
 from qubes.storage import pool_drivers
 from qubes.storage.file import FilePool
@@ -30,10 +31,21 @@ from qubes.tests import SystemTestCase
 class TestPool(unittest.mock.Mock):
+    def __init__(self, *args, **kwargs):
+        super(TestPool, self).__init__(*args, spec=qubes.storage.Pool, **kwargs)
+        try:
+            self.name = kwargs['name']
+        except KeyError:
+            pass
+
     def __str__(self):
         return 'test'

     def init_volume(self, vm, volume_config):
+        vol = unittest.mock.Mock(spec=qubes.storage.Volume)
+        vol.configure_mock(**volume_config)
+        vol.pool = self
+        vol.import_data.return_value = '/tmp/test-' + vm.name
+        return vol


 class TestVM(object):
     def __init__(self, test, template=None):
@@ -95,12 +107,12 @@ class TC_00_Pool(SystemTestCase):
     def test_002_get_pool_klass(self):
         """ Expect the default pool to be `FilePool` """
         # :pylint: disable=protected-access
-        result = self.app.get_pool('default')
+        result = self.app.get_pool('varlibqubes')
         self.assertIsInstance(result, FilePool)

     def test_003_pool_exists_default(self):
         """ Expect the default pool to exists """
-        self.assertPoolExists('default')
+        self.assertPoolExists('varlibqubes')

     def test_004_add_remove_pool(self):
         """ Tries to adding and removing a pool. """
qubes/tests/storage_file.py

@@ -24,6 +24,7 @@ import os
 import shutil

 import asyncio
+import unittest.mock

 import qubes.storage
 import qubes.tests.storage
@@ -50,6 +51,8 @@ class TestApp(qubes.Qubes):
     def cleanup(self):
         ''' Remove temporary directories '''
         shutil.rmtree(self.pools['linux-kernel'].dir_path)
+        if os.path.exists(self.store):
+            os.unlink(self.store)

     def create_dummy_template(self):
         ''' Initalizes a dummy TemplateVM as the `default_template` '''
@@ -80,7 +83,7 @@ class TC_00_FilePool(qubes.tests.QubesTestCase):
        .. sealso::
           Data :data:``qubes.qubes.defaults['pool_config']``.
        """
-        result = self.app.get_pool("default").dir_path
+        result = self.app.get_pool("varlibqubes").dir_path
         expected = '/var/lib/qubes'
         self.assertEqual(result, expected)

@@ -306,21 +309,32 @@ class TC_03_FilePool(qubes.tests.QubesTestCase):
     def setUp(self):
         """ Add a test file based storage pool """
         super(TC_03_FilePool, self).setUp()
-        self._orig_qubes_base_dir = qubes.config.qubes_base_dir
-        qubes.config.qubes_base_dir = '/tmp/qubes-test'
+        self.test_base_dir = '/tmp/qubes-test-dir'
+        self.base_dir_patch = unittest.mock.patch.dict(qubes.config.system_path,
+            {'qubes_base_dir': self.test_base_dir})
+        self.base_dir_patch2 = unittest.mock.patch(
+            'qubes.config.qubes_base_dir', self.test_base_dir)
+        self.base_dir_patch3 = unittest.mock.patch.dict(
+            qubes.config.defaults['pool_configs']['varlibqubes'],
+            {'dir_path': self.test_base_dir})
+        self.base_dir_patch.start()
+        self.base_dir_patch2.start()
+        self.base_dir_patch3.start()
         self.app = TestApp()
-        self.app.create_dummy_template()
         self.app.add_pool(**self.POOL_CONFIG)
+        self.app.create_dummy_template()

     def tearDown(self):
         """ Remove the file based storage pool after testing """
         self.app.remove_pool("test-pool")
         self.app.cleanup()
+        self.base_dir_patch3.stop()
+        self.base_dir_patch2.stop()
+        self.base_dir_patch.stop()
         super(TC_03_FilePool, self).tearDown()
         shutil.rmtree(self.POOL_DIR, ignore_errors=True)
-        if os.path.exists('/tmp/qubes-test'):
-            shutil.rmtree('/tmp/qubes-test')
-        qubes.config.qubes_base_dir = self._orig_qubes_base_dir
+        if os.path.exists('/tmp/qubes-test-dir'):
+            shutil.rmtree('/tmp/qubes-test-dir')

     def test_001_pool_exists(self):
         """ Check if the storage pool was added to the storage pool config """
@@ -406,8 +420,7 @@ class TC_03_FilePool(qubes.tests.QubesTestCase):
                                   expected_private_path)

         expected_rootcow_path = os.path.join(expected_vmdir, 'root-cow.img')
-        self.assertEqualAndExists(vm.volumes['root'].path_cow,
-                                  expected_rootcow_path)
+        self.assertEqual(vm.volumes['root'].path_cow, expected_rootcow_path)

     def assertEqualAndExists(self, result_path, expected_path):
         """ Check if the ``result_path``, matches ``expected_path`` and exists.
qubes/tests/storage_lvm.py

@@ -160,7 +160,7 @@ class TC_00_ThinPool(ThinPoolBase):
         volume.remove()

 @skipUnlessLvmPoolExists
-class TC_01_ThinPool(qubes.tests.SystemTestCase, ThinPoolBase):
+class TC_01_ThinPool(ThinPoolBase, qubes.tests.SystemTestCase):
     ''' Sanity tests for :py:class:`qubes.storage.lvm.ThinPool` '''

     def setUp(self):
@@ -176,7 +176,7 @@ class TC_01_ThinPool(ThinPoolBase, qubes.tests.SystemTestCase):
         vm.clone_disk_files(template_vm, pool='test-lvm')
         for v_name, volume in vm.volumes.items():
             if volume.save_on_stop:
-                expected = "/dev/{!s}/{!s}-{!s}".format(
+                expected = "/dev/{!s}/vm-{!s}-{!s}".format(
                     DEFAULT_LVM_POOL.split('/')[0], vm.name, v_name)
                 self.assertEqual(volume.path, expected)
         with self.assertNotRaises(qubes.exc.QubesException):
@@ -188,7 +188,7 @@ class TC_01_ThinPool(ThinPoolBase, qubes.tests.SystemTestCase):
         vm.create_on_disk(pool='test-lvm')
         for v_name, volume in vm.volumes.items():
             if volume.save_on_stop:
-                expected = "/dev/{!s}/{!s}-{!s}".format(
+                expected = "/dev/{!s}/vm-{!s}-{!s}".format(
                     DEFAULT_LVM_POOL.split('/')[0], vm.name, v_name)
                 self.assertEqual(volume.path, expected)
         with self.assertNotRaises(qubes.exc.QubesException):
qubes/tests/vm/__init__.py

@@ -60,6 +60,10 @@ class TestApp(qubes.tests.TestEmitter):
         self.vmm = TestVMM()
         self.host = TestHost()
         self.pools = {}
+        self.default_pool_volatile = 'default'
+        self.default_pool_root = 'default'
+        self.default_pool_private = 'default'
+        self.default_pool_kernel = 'linux-kernel'
         self.domains = {}
         #: jinja2 environment for libvirt XML templates
         self.env = jinja2.Environment(
qubes/tests/vm/adminvm.py

@@ -20,6 +20,7 @@
 #

 import unittest
+import unittest.mock

 import qubes
 import qubes.exc
@@ -28,14 +29,13 @@ import qubes.vm.adminvm

 import qubes.tests

-@qubes.tests.skipUnlessDom0
 class TC_00_AdminVM(qubes.tests.QubesTestCase):
     def setUp(self):
         super().setUp()
         try:
             self.app = qubes.tests.vm.TestApp()
             self.vm = qubes.vm.adminvm.AdminVM(self.app,
-                xml=None, qid=0, name='dom0')
+                xml=None)
         except:  # pylint: disable=bare-except
             if self.id().endswith('.test_000_init'):
                 raise
@@ -48,10 +48,11 @@ class TC_00_AdminVM(qubes.tests.QubesTestCase):
         self.assertEqual(self.vm.xid, 0)

     def test_101_libvirt_domain(self):
-        self.assertIs(self.vm.libvirt_domain, None)
-
-    def test_200_libvirt_netvm(self):
-        self.assertIs(self.vm.netvm, None)
+        with unittest.mock.patch.object(self.app, 'vmm') as mock_vmm:
+            self.assertIsNotNone(self.vm.libvirt_domain)
+            self.assertEqual(mock_vmm.mock_calls, [
+                ('libvirt_conn.lookupByID', (0,), {}),
+            ])

     def test_300_is_running(self):
         self.assertTrue(self.vm.is_running())
@@ -66,21 +67,6 @@ class TC_00_AdminVM(qubes.tests.QubesTestCase):
     def test_303_get_mem_static_max(self):
         self.assertGreater(self.vm.get_mem_static_max(), 0)

-    def test_304_get_disk_utilization(self):
-        self.assertEqual(self.vm.storage.get_disk_utilization(), 0)
-
-    def test_305_has_no_private_volume(self):
-        with self.assertRaises(KeyError):
-            self.vm.volumes['private']
-
-    def test_306_has_no_root_volume(self):
-        with self.assertRaises(KeyError):
-            self.vm.volumes['root']
-
-    def test_307_has_no_volatile_volume(self):
-        with self.assertRaises(KeyError):
-            self.vm.volumes['volatile']
-
     def test_310_start(self):
         with self.assertRaises(qubes.exc.QubesException):
             self.vm.start()
qubes/tests/vm/appvm.py

@@ -110,7 +110,7 @@ class TC_90_AppVM(qubes.tests.vm.qubesvm.QubesVMTestsMixin,
         self.assertNotEqual(vm.volume_config['root'].get('source', None),
             self.template.volumes['root'].source)
         self.assertEqual(vm.volume_config['root'].get('source', None),
-            template2.volumes['root'].source)
+            template2.volumes['root'])

     def test_003_template_change_running(self):
         vm = self.get_vm()
qubes/vm/appvm.py

@@ -44,7 +44,6 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
     default_volume_config = {
         'root': {
             'name': 'root',
-            'pool': 'default',
             'snap_on_start': True,
             'save_on_stop': False,
             'rw': False,
@@ -52,7 +51,6 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
         },
         'private': {
             'name': 'private',
-            'pool': 'default',
             'snap_on_start': False,
             'save_on_stop': True,
             'rw': True,
@@ -60,7 +58,6 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
         },
         'volatile': {
             'name': 'volatile',
-            'pool': 'default',
             'snap_on_start': False,
             'save_on_stop': False,
             'size': defaults['root_img_size'],
@@ -68,7 +65,6 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
         },
         'kernel': {
             'name': 'kernel',
-            'pool': 'linux-kernel',
             'snap_on_start': False,
             'save_on_stop': False,
             'rw': False,
@@ -83,12 +79,6 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
         # template is only passed if the AppVM is created, in other cases we
         # don't need to patch the volume_config because the config is
         # coming from XML, already as we need it
-
-        for name, conf in self.volume_config.items():
-            tpl_volume = template.volumes[name]
-
-            self.config_volume_from_source(conf, tpl_volume)
-
         for name, config in template.volume_config.items():
             # in case the template vm has more volumes add them to own
             # config
@@ -124,9 +114,5 @@ class AppVM(qubes.vm.qubesvm.QubesVM):
             if conf.get('snap_on_start', False) and \
                     conf.get('source', None) is None:
                 config = conf.copy()
-                template_volume = newvalue.volumes[volume_name]
-                self.volume_config[volume_name] = \
-                    self.config_volume_from_source(
-                        config,
-                        template_volume)
+                self.volume_config[volume_name] = config
                 self.storage.init_volume(volume_name, config)
qubes/vm/dispvm.py

@@ -43,7 +43,6 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
         self.volume_config = {
             'root': {
                 'name': 'root',
-                'pool': 'default',
                 'snap_on_start': True,
                 'save_on_stop': False,
                 'rw': False,
@@ -51,7 +50,6 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
             },
             'private': {
                 'name': 'private',
-                'pool': 'default',
                 'snap_on_start': True,
                 'save_on_stop': False,
                 'rw': True,
@@ -59,7 +57,6 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
             },
             'volatile': {
                 'name': 'volatile',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': False,
                 'rw': True,
@@ -68,7 +65,6 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
             },
             'kernel': {
                 'name': 'kernel',
-                'pool': 'linux-kernel',
                 'snap_on_start': False,
                 'save_on_stop': False,
                 'rw': False,
@@ -82,11 +78,6 @@ class DispVM(qubes.vm.qubesvm.QubesVM):
         # template is only passed if the AppVM is created, in other cases we
         # don't need to patch the volume_config because the config is
         # coming from XML, already as we need it
-
-        for name, conf in self.volume_config.items():
-            tpl_volume = template.volumes[name]
-            self.config_volume_from_source(conf, tpl_volume)
-
         for name, config in template.volume_config.items():
             # in case the template vm has more volumes add them to own
             # config
qubes/vm/qubesvm.py

@@ -1696,24 +1696,6 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
     # helper methods
     #

-    @staticmethod
-    def config_volume_from_source(volume_config, source):
-        '''Adjust storage volume config to use given volume as a source'''
-
-        volume_config['size'] = source.size
-        volume_config['pool'] = source.pool
-
-        needs_source = (
-            'source' in volume_config)
-        is_snapshot = 'snap_on_start' in volume_config and volume_config[
-            'snap_on_start']
-        if is_snapshot and needs_source:
-            if source.source is not None:
-                volume_config['source'] = source.source
-            else:
-                volume_config['source'] = source
-        return volume_config
-
     def relative_path(self, path):
         '''Return path relative to py:attr:`dir_path`.

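config_volume_from_source can be dropped here because the source lookup now lives in one place, Storage._update_volume_config_source (see qubes/storage/__init__.py above): walk the template chain until a volume with no further source is found. The walk on its own, as a sketch (illustrative function name, not the qubes API):

    def resolve_source_volume(vm, name):
        '''Follow TemplateVM -> AppVM -> DispVM chaining to the volume that
        really backs a snapshot; a volume whose .source is None is the root
        of the chain.'''
        template = getattr(vm, 'template', None)
        source = None
        while template is not None:
            source = template.volumes[name]
            if source.source is None:
                break
            template = getattr(template, 'template', None)
        return source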
qubes/vm/standalonevm.py

@@ -30,7 +30,6 @@ class StandaloneVM(qubes.vm.qubesvm.QubesVM):
         self.volume_config = {
             'root': {
                 'name': 'root',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': True,
                 'rw': True,
@@ -39,7 +38,6 @@ class StandaloneVM(qubes.vm.qubesvm.QubesVM):
             },
             'private': {
                 'name': 'private',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': True,
                 'rw': True,
@@ -48,7 +46,6 @@ class StandaloneVM(qubes.vm.qubesvm.QubesVM):
             },
             'volatile': {
                 'name': 'volatile',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': False,
                 'rw': True,
@@ -56,7 +53,6 @@ class StandaloneVM(qubes.vm.qubesvm.QubesVM):
             },
             'kernel': {
                 'name': 'kernel',
-                'pool': 'linux-kernel',
                 'snap_on_start': False,
                 'save_on_stop': False,
                 'rw': False,
qubes/vm/templatevm.py

@@ -65,7 +65,6 @@ class TemplateVM(QubesVM):
         self.volume_config = {
             'root': {
                 'name': 'root',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': True,
                 'rw': True,
@@ -74,7 +73,6 @@ class TemplateVM(QubesVM):
             },
             'private': {
                 'name': 'private',
-                'pool': 'default',
                 'snap_on_start': False,
                 'save_on_stop': True,
                 'rw': True,
@@ -84,7 +82,6 @@ class TemplateVM(QubesVM):
             },
             'volatile': {
                 'name': 'volatile',
-                'pool': 'default',
                 'size': defaults['root_img_size'],
                 'snap_on_start': False,
                 'save_on_stop': False,
@@ -92,7 +89,6 @@ class TemplateVM(QubesVM):
             },
             'kernel': {
                 'name': 'kernel',
-                'pool': 'linux-kernel',
                 'snap_on_start': False,
                 'save_on_stop': False,
                 'rw': False