Merge remote-tracking branch 'qubesos/pr/231'
* qubesos/pr/231: (30 commits)
  tests/app: test varlibqubes pool driver selection
  tests/storage_reflink: test some file-reflink helpers
  tests/integ/storage: add file-reflink integration tests
  tests/integ/basic: use export() in get_rootimg_checksum()
  tests/integ/backupcompatibility: Storage.verify() is a coro
  tests: delete orphaned Makefile
  app: create /var/lib/qubes as file-reflink if supported
  app: uncouple pool setup from loading initial configuration
  tools/qubes-create: fix docstring
  storage: factor out _wait_and_reraise(); fix clone/create
  storage: fix search_pool_containing_dir()
  storage: remove broken default parameter from isodate()
  storage: insert missing NotImplementedError in Volume.stop()
  storage: fix docstrings
  storage/reflink: is_reflink_supported() -> is_supported()
  storage/reflink: run synchronous volume methods in executor
  storage/reflink: native FICLONE in _copy_file() happy path
  storage/reflink: factor out _ficlone()
  storage/reflink: inline and simplify _cmd()
  storage/reflink: _update_loopdev_sizes() without losetup
  ...
commit 4dab769934
Makefile (2 changed lines)

@@ -141,7 +141,6 @@ rpms-dom0:
 all:
 	$(PYTHON) setup.py build
 	$(MAKE) -C qubes-rpc all
-#	make all -C tests
 # Currently supported only on xen

 install:
@@ -158,7 +157,6 @@ endif
 	ln -s qvm-device.1.gz $(DESTDIR)/usr/share/man/man1/qvm-block.1.gz
 	ln -s qvm-device.1.gz $(DESTDIR)/usr/share/man/man1/qvm-pci.1.gz
 	ln -s qvm-device.1.gz $(DESTDIR)/usr/share/man/man1/qvm-usb.1.gz
-#	$(MAKE) install -C tests
 	$(MAKE) install -C relaxng
 	mkdir -p $(DESTDIR)/etc/qubes
 ifeq ($(BACKEND_VMM),xen)
qubes/app.py (35 changed lines)

@@ -21,6 +21,7 @@
 #

 import collections
+import copy
 import errno
 import functools
 import grp
@@ -60,6 +61,7 @@ import qubes
 import qubes.ext
 import qubes.utils
 import qubes.storage
+import qubes.storage.reflink
 import qubes.vm
 import qubes.vm.adminvm
 import qubes.vm.qubesvm
@@ -552,7 +554,7 @@ def _default_pool(app):

     1. If there is one named 'default', use it.
     2. Check if root fs is on LVM thin - use that
-    3. Look for file-based pool pointing /var/lib/qubes
+    3. Look for file(-reflink)-based pool pointing to /var/lib/qubes
     4. Fail
     '''
     if 'default' in app.pools:
@@ -1064,15 +1066,29 @@ class Qubes(qubes.PropertyHolder):
             }
             assert max(self.labels.keys()) == qubes.config.max_default_label

+            pool_configs = copy.deepcopy(qubes.config.defaults['pool_configs'])
+
             root_volume_group, root_thin_pool = \
                 qubes.storage.DirectoryThinPool.thin_pool('/')
             if root_thin_pool:
-                self.add_pool(
-                    volume_group=root_volume_group, thin_pool=root_thin_pool,
-                    name='lvm', driver='lvm_thin')
-            # pool based on /var/lib/qubes will be created here:
-            for name, config in qubes.config.defaults['pool_configs'].items():
+                lvm_config = {
+                    'name': 'lvm',
+                    'driver': 'lvm_thin',
+                    'volume_group': root_volume_group,
+                    'thin_pool': root_thin_pool
+                }
+                pool_configs[lvm_config['name']] = lvm_config
+
+            for name, config in pool_configs.items():
+                if 'driver' not in config and 'dir_path' in config:
+                    config['driver'] = 'file'
+                    try:
+                        os.makedirs(config['dir_path'], exist_ok=True)
+                        if qubes.storage.reflink.is_supported(config['dir_path']):
+                            config['driver'] = 'file-reflink'
+                            config['setup_check'] = 'no'  # don't check twice
+                    except PermissionError:  # looks like a testing environment
+                        pass  # stay with 'file'
                 self.pools[name] = self._get_pool(**config)

             self.default_pool_kernel = 'linux-kernel'
@@ -1170,6 +1186,11 @@ class Qubes(qubes.PropertyHolder):

         raise KeyError(label)

+    def setup_pools(self):
+        """ Run implementation specific setup for each storage pool. """
+        for pool in self.pools.values():
+            pool.setup()
+
     def add_pool(self, name, **kwargs):
         """ Add a storage pool to config."""

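Illustrative sketch (not part of the patch): the driver auto-detection added above boils down to probing the pool directory with qubes.storage.reflink.is_supported() and falling back to the plain 'file' driver when probing fails. The probe_driver() helper and the example dict below are hypothetical; only is_supported() is the real call used in load_initial_values().

import os
import qubes.storage.reflink

def probe_driver(config):
    # Mirror of the selection logic above: a dir_path-only pool config
    # defaults to 'file' and is upgraded to 'file-reflink' when the
    # directory's filesystem supports the FICLONE ioctl.
    if 'driver' not in config and 'dir_path' in config:
        config['driver'] = 'file'
        try:
            os.makedirs(config['dir_path'], exist_ok=True)
            if qubes.storage.reflink.is_supported(config['dir_path']):
                config['driver'] = 'file-reflink'
                config['setup_check'] = 'no'  # already verified here
        except PermissionError:  # e.g. an unprivileged test environment
            pass  # keep 'file'
    return config['driver']

print(probe_driver({'name': 'varlibqubes', 'dir_path': '/var/lib/qubes'}))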
qubes/config.py

@@ -76,9 +76,8 @@ defaults = {
     'root_img_size': 10*1024*1024*1024,

     'pool_configs': {
-        # create file pool even when the default one is LVM
+        # create file(-reflink) pool even when the default one is LVM
         'varlibqubes': {'dir_path': qubes_base_dir,
-                        'driver': 'file',
                         'name': 'varlibqubes'},
         'linux-kernel': {
             'dir_path': os.path.join(qubes_base_dir,
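A hedged illustration of the config contract changed above: an entry that gives only dir_path (as 'varlibqubes' now does) leaves driver selection to Qubes.load_initial_values(), while an entry may still pin a driver explicitly. These dicts are examples, not the shipped defaults.

pool_configs_example = {
    'varlibqubes': {
        'name': 'varlibqubes',
        'dir_path': '/var/lib/qubes',  # no 'driver': file vs file-reflink is auto-detected
    },
    'pinned-file-pool': {  # hypothetical entry that forces the classic driver
        'name': 'pinned-file-pool',
        'dir_path': '/var/lib/qubes-file',
        'driver': 'file',
    },
}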
qubes/storage/__init__.py

@@ -252,6 +252,8 @@ class Volume:
     def revert(self, revision=None):
         ''' Revert volume to previous revision

+        This can be implemented as a coroutine.
+
         :param revision: revision to revert volume to, see :py:attr:`revisions`
         '''
         # pylint: disable=unused-argument
@@ -272,6 +274,7 @@ class Volume:
         This include committing data if :py:attr:`save_on_stop` is set.

         This can be implemented as a coroutine.'''
+        raise self._not_implemented("stop")

     def verify(self):
         ''' Verifies the volume.
@@ -506,8 +509,7 @@ class Storage:
             ret = volume.create()
             if asyncio.iscoroutine(ret):
                 coros.append(ret)
-        if coros:
-            yield from asyncio.wait(coros)
+        yield from _wait_and_reraise(coros)

         os.umask(old_umask)

@@ -549,7 +551,7 @@ class Storage:

         self.vm.volumes = {}
         with VmCreationManager(self.vm):
-            yield from asyncio.wait([self.clone_volume(src_vm, vol_name)
+            yield from _wait_and_reraise([self.clone_volume(src_vm, vol_name)
                 for vol_name in self.vm.volume_config.keys()])

     @property
@@ -581,11 +583,7 @@ class Storage:
             ret = volume.verify()
             if asyncio.iscoroutine(ret):
                 futures.append(ret)
-        if futures:
-            done, _ = yield from asyncio.wait(futures)
-            for task in done:
-                # re-raise any exception from async task
-                task.result()
+        yield from _wait_and_reraise(futures)
         self.vm.fire_event('domain-verify-files')
         return True

@@ -605,44 +603,32 @@ class Storage:
             except (IOError, OSError) as e:
                 self.vm.log.exception("Failed to remove volume %s", name, e)

-        if futures:
-            try:
-                done, _ = yield from asyncio.wait(futures)
-                for task in done:
-                    # re-raise any exception from async task
-                    task.result()
-            except (IOError, OSError) as e:
-                self.vm.log.exception("Failed to remove some volume", e)
+        try:
+            yield from _wait_and_reraise(futures)
+        except (IOError, OSError) as e:
+            self.vm.log.exception("Failed to remove some volume", e)

     @asyncio.coroutine
     def start(self):
-        ''' Execute the start method on each pool '''
+        ''' Execute the start method on each volume '''
         futures = []
         for volume in self.vm.volumes.values():
             ret = volume.start()
             if asyncio.iscoroutine(ret):
                 futures.append(ret)

-        if futures:
-            done, _ = yield from asyncio.wait(futures)
-            for task in done:
-                # re-raise any exception from async task
-                task.result()
+        yield from _wait_and_reraise(futures)

     @asyncio.coroutine
     def stop(self):
-        ''' Execute the start method on each pool '''
+        ''' Execute the stop method on each volume '''
         futures = []
         for volume in self.vm.volumes.values():
             ret = volume.stop()
             if asyncio.iscoroutine(ret):
                 futures.append(ret)

-        if futures:
-            done, _ = yield from asyncio.wait(futures)
-            for task in done:
-                # re-raise any exception from async task
-                task.result()
+        yield from _wait_and_reraise(futures)

     def unused_frontend(self):
         ''' Find an unused device name '''
@@ -842,6 +828,14 @@ class Pool:
         return NotImplementedError(msg)


+@asyncio.coroutine
+def _wait_and_reraise(futures):
+    if futures:
+        done, _ = yield from asyncio.wait(futures)
+        for task in done:  # (re-)raise first exception in line
+            task.result()
+
+
 def _sanitize_config(config):
     ''' Helper function to convert types to appropriate strings
     ''' # FIXME: find another solution for serializing basic types
@@ -871,7 +865,7 @@ def driver_parameters(name):
     return [p for p in params if p not in ignored_params]


-def isodate(seconds=time.time()):
+def isodate(seconds):
     ''' Helper method which returns an iso date '''
     return datetime.utcfromtimestamp(seconds).isoformat("T")

@@ -881,17 +875,21 @@ def search_pool_containing_dir(pools, dir_path):
     This is useful for implementing Pool.included_in method
     '''

+    real_dir_path = os.path.realpath(dir_path)
+
     # prefer filesystem pools
     for pool in pools:
         if hasattr(pool, 'dir_path'):
-            if dir_path.startswith(pool.dir_path):
+            pool_real_dir_path = os.path.realpath(pool.dir_path)
+            if os.path.commonpath([pool_real_dir_path, real_dir_path]) == \
+                    pool_real_dir_path:
                 return pool

     # then look for lvm
     for pool in pools:
         if hasattr(pool, 'thin_pool') and hasattr(pool, 'volume_group'):
             if (pool.volume_group, pool.thin_pool) == \
-                    DirectoryThinPool.thin_pool(dir_path):
+                    DirectoryThinPool.thin_pool(real_dir_path):
                 return pool

     return None
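The _wait_and_reraise() helper introduced above waits for all volume futures and then calls result() on every finished task, so the first stored exception propagates instead of being silently dropped by a bare asyncio.wait(). A self-contained sketch of the same pattern, written in modern async/await form rather than the @asyncio.coroutine style of the diff (names below are illustrative):

import asyncio

async def wait_and_reraise(futures):
    # Wait for everything, then surface the first exception in line by
    # calling result() on each completed task (mirrors _wait_and_reraise()).
    if futures:
        done, _ = await asyncio.wait([asyncio.ensure_future(f) for f in futures])
        for task in done:
            task.result()

async def fake_volume_create(name, fail=False):
    await asyncio.sleep(0)
    if fail:
        raise OSError('simulated failure in volume ' + name)
    return name

async def main():
    try:
        await wait_and_reraise([fake_volume_create('root'),
                                fake_volume_create('private', fail=True)])
    except OSError as exc:
        print('re-raised:', exc)

asyncio.run(main())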
qubes/storage/reflink.py

@@ -22,13 +22,14 @@
     but not required.
 '''

+import asyncio
 import collections
 import errno
 import fcntl
+import functools
 import glob
 import logging
 import os
-import re
 import subprocess
 import tempfile
 from contextlib import contextmanager, suppress
@@ -36,7 +37,8 @@ from contextlib import contextmanager, suppress
 import qubes.storage

 BLKSIZE = 512
-FICLONE = 1074041865  # see ioctl_ficlone manpage
+FICLONE = 1074041865  # defined in <linux/fs.h>
+LOOP_SET_CAPACITY = 0x4C07  # defined in <linux/loop.h>
 LOGGER = logging.getLogger('qubes.storage.reflink')


@@ -53,7 +55,7 @@ class ReflinkPool(qubes.storage.Pool):

     def setup(self):
         created = _make_dir(self.dir_path)
-        if self.setup_check and not is_reflink_supported(self.dir_path):
+        if self.setup_check and not is_supported(self.dir_path):
             if created:
                 _remove_empty_dir(self.dir_path)
             raise qubes.storage.StoragePoolException(
@@ -115,12 +117,37 @@ class ReflinkPool(qubes.storage.Pool):
             [pool for pool in app.pools.values() if pool is not self],
             self.dir_path)


+def _unblock(method):
+    ''' Decorator transforming a synchronous volume method into a
+        coroutine that runs the original method in the event loop's
+        thread-based default executor, under a per-volume lock.
+    '''
+    @asyncio.coroutine
+    @functools.wraps(method)
+    def wrapper(self, *args, **kwargs):
+        with (yield from self._lock):  # pylint: disable=protected-access
+            return (yield from asyncio.get_event_loop().run_in_executor(
+                None, functools.partial(method, self, *args, **kwargs)))
+    return wrapper
+
 class ReflinkVolume(qubes.storage.Volume):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._lock = asyncio.Lock()
+        self._path_vid = os.path.join(self.pool.dir_path, self.vid)
+        self._path_clean = self._path_vid + '.img'
+        self._path_dirty = self._path_vid + '-dirty.img'
+        self._path_import = self._path_vid + '-import.img'
+        self.path = self._path_dirty
+
+    @_unblock
     def create(self):
         if self.save_on_stop and not self.snap_on_start:
             _create_sparse_file(self._path_clean, self.size)
         return self

+    @_unblock
     def verify(self):
         if self.snap_on_start:
             img = self.source._path_clean  # pylint: disable=protected-access
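The _unblock decorator added in the hunk above is what keeps the file-reflink driver from blocking the event loop: the wrapped synchronous method is pushed into the default thread-pool executor and serialized per volume by an asyncio.Lock. A rough standalone sketch of the same idea in modern async/await form (the names here are illustrative, not the driver's API):

import asyncio
import functools
import time

def unblock(method):
    # Run a blocking method in the default executor, one call at a time
    # per object - roughly what _unblock() does with yield from.
    @functools.wraps(method)
    async def wrapper(self, *args, **kwargs):
        async with self._lock:
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(
                None, functools.partial(method, self, *args, **kwargs))
    return wrapper

class SlowVolume:
    def __init__(self):
        self._lock = asyncio.Lock()

    @unblock
    def create(self):
        time.sleep(0.1)  # stands in for sparse-file creation on disk
        return 'created'

async def main():
    vol = SlowVolume()
    print(await asyncio.gather(vol.create(), vol.create()))

asyncio.run(main())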
@@ -132,19 +159,26 @@ class ReflinkVolume(qubes.storage.Volume):
         if img is None or os.path.exists(img):
             return True
         raise qubes.storage.StoragePoolException(
-            'Missing image file {!r} for volume {!s}'.format(img, self.vid))
+            'Missing image file {!r} for volume {}'.format(img, self.vid))

+    @_unblock
     def remove(self):
         ''' Drop volume object from pool; remove volume images from
             oldest to newest; remove empty VM directory.
         '''
         self.pool._volumes.pop(self, None)  # pylint: disable=protected-access
+        self._cleanup()
         self._prune_revisions(keep=0)
         _remove_file(self._path_clean)
         _remove_file(self._path_dirty)
         _remove_empty_dir(os.path.dirname(self._path_dirty))
         return self

+    def _cleanup(self):
+        for tmp in glob.iglob(glob.escape(self._path_vid) + '*.img*~*'):
+            _remove_file(tmp)
+        _remove_file(self._path_import)
+
     def is_outdated(self):
         if self.snap_on_start:
             with suppress(FileNotFoundError):
@@ -156,7 +190,9 @@ class ReflinkVolume(qubes.storage.Volume):
     def is_dirty(self):
         return self.save_on_stop and os.path.exists(self._path_dirty)

+    @_unblock
     def start(self):
+        self._cleanup()
         if self.is_dirty():  # implies self.save_on_stop
             return self
         if self.snap_on_start:
@@ -168,24 +204,23 @@ class ReflinkVolume(qubes.storage.Volume):
             _create_sparse_file(self._path_dirty, self.size)
         return self

+    @_unblock
     def stop(self):
         if self.save_on_stop:
-            self._commit()
+            self._commit(self._path_dirty)
         else:
             _remove_file(self._path_dirty)
             _remove_file(self._path_clean)
         return self

-    def _commit(self):
+    def _commit(self, path_from):
         self._add_revision()
         self._prune_revisions()
-        _rename_file(self._path_dirty, self._path_clean)
+        _rename_file(path_from, self._path_clean)

     def _add_revision(self):
         if self.revisions_to_keep == 0:
             return
         if _get_file_disk_usage(self._path_clean) == 0:
             return
         ctime = os.path.getctime(self._path_clean)
         timestamp = qubes.storage.isodate(int(ctime))
         _copy_file(self._path_clean,
@@ -198,7 +233,11 @@ class ReflinkVolume(qubes.storage.Volume):
         for number, timestamp in list(self.revisions.items())[:-keep or None]:
             _remove_file(self._path_revision(number, timestamp))

+    @_unblock
     def revert(self, revision=None):
+        if self.is_dirty():
+            raise qubes.storage.StoragePoolException(
+                'Cannot revert: {} is not cleanly stopped'.format(self.vid))
         if revision is None:
             number, timestamp = list(self.revisions.items())[-1]
         else:
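_add_revision() above snapshots the committed image under a name built from the clean path, a revision number and an ISO 8601 timestamp with a 'Z' suffix, which the revisions property later parses back into an ordered mapping. A small illustration of that naming scheme; the helper names and the example path are made up, only the '<clean path>.<number>@<timestamp>Z' pattern comes from the driver:

import collections
from datetime import datetime

def revision_path(path_clean, number, timestamp):
    # e.g. .../root.img.3@2018-04-18T12:00:00Z
    return path_clean + '.' + number + '@' + timestamp + 'Z'

def parse_revisions(path_clean, paths):
    # Rebuild {number: timestamp}, numerically sorted, like the revisions property.
    prefix = path_clean + '.'
    items = (path[len(prefix):-1].split('@') for path in paths)
    return collections.OrderedDict(sorted(items, key=lambda item: int(item[0])))

clean = '/var/lib/qubes/appvms/work/root.img'
stamp = datetime(2018, 4, 18, 12, 0, 0).isoformat('T')
paths = [revision_path(clean, '10', stamp), revision_path(clean, '9', stamp)]
print(parse_revisions(clean, paths))  # '9' sorts before '10'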
@@ -208,61 +247,58 @@ class ReflinkVolume(qubes.storage.Volume):
         _rename_file(path_revision, self._path_clean)
         return self

+    @_unblock
     def resize(self, size):
         ''' Expand a read-write volume image; notify any corresponding
             loop devices of the size change.
         '''
         if not self.rw:
             raise qubes.storage.StoragePoolException(
-                'Cannot resize: {!s} is read-only'.format(self.vid))
+                'Cannot resize: {} is read-only'.format(self.vid))

         if size < self.size:
             raise qubes.storage.StoragePoolException(
-                'For your own safety, shrinking of {!s} is disabled'
-                ' ({:d} < {:d}). If you really know what you are doing,'
+                'For your own safety, shrinking of {} is disabled'
+                ' ({} < {}). If you really know what you are doing,'
                 ' use "truncate" manually.'.format(self.vid, size, self.size))

         try:  # assume volume is not (cleanly) stopped ...
             _resize_file(self._path_dirty, size)
-            self.size = size
         except FileNotFoundError:  # ... but it actually is.
             _resize_file(self._path_clean, size)
-            self.size = size
-            return self

+        self.size = size

-        # resize any corresponding loop devices
-        out = _cmd('losetup', '--associated', self._path_dirty)
-        for match in re.finditer(br'^(/dev/loop[0-9]+): ', out, re.MULTILINE):
-            loop_dev = match.group(1).decode('ascii')
-            _cmd('losetup', '--set-capacity', loop_dev)
-
+        _update_loopdev_sizes(self._path_dirty)
         return self

-    def _require_save_on_stop(self, method_name):
+    def export(self):
         if not self.save_on_stop:
             raise NotImplementedError(
-                'Cannot {!s}: {!s} is not save_on_stop'.format(
-                    method_name, self.vid))
-
-    def export(self):
-        self._require_save_on_stop('export')
+                'Cannot export: {} is not save_on_stop'.format(self.vid))
         return self._path_clean

     def import_data(self):
-        self._require_save_on_stop('import_data')
-        _create_sparse_file(self._path_dirty, self.size)
-        return self._path_dirty
+        if not self.save_on_stop:
+            raise NotImplementedError(
+                'Cannot import_data: {} is not save_on_stop'.format(self.vid))
+        _create_sparse_file(self._path_import, self.size)
+        return self._path_import

     def import_data_end(self, success):
         if success:
-            self._commit()
+            self._commit(self._path_import)
         else:
-            _remove_file(self._path_dirty)
+            _remove_file(self._path_import)
         return self

+    @_unblock
     def import_volume(self, src_volume):
-        self._require_save_on_stop('import_volume')
+        if not self.save_on_stop:
+            return self
         try:
-            _copy_file(src_volume.export(), self._path_dirty)
+            _copy_file(src_volume.export(), self._path_import)
         except:
             self.import_data_end(False)
             raise
@@ -274,18 +310,6 @@ class ReflinkVolume(qubes.storage.Volume):
         timestamp = self.revisions[number]
         return self._path_clean + '.' + number + '@' + timestamp + 'Z'

-    @property
-    def _path_clean(self):
-        return os.path.join(self.pool.dir_path, self.vid + '.img')
-
-    @property
-    def _path_dirty(self):
-        return os.path.join(self.pool.dir_path, self.vid + '-dirty.img')
-
-    @property
-    def path(self):
-        return self._path_dirty
-
     @property
     def _next_revision_number(self):
         numbers = self.revisions.keys()
@@ -296,10 +320,10 @@ class ReflinkVolume(qubes.storage.Volume):
     @property
     def revisions(self):
         prefix = self._path_clean + '.'
-        paths = glob.glob(glob.escape(prefix) + '*@*Z')
-        items = sorted((path[len(prefix):-1].split('@') for path in paths),
-                       key=lambda item: int(item[0]))
-        return collections.OrderedDict(items)
+        paths = glob.iglob(glob.escape(prefix) + '*@*Z')
+        items = (path[len(prefix):-1].split('@') for path in paths)
+        return collections.OrderedDict(sorted(items,
+                                               key=lambda item: int(item[0])))

     @property
     def usage(self):
@@ -391,27 +415,41 @@ def _create_sparse_file(path, size):
         tmp.truncate(size)
         LOGGER.info('Created sparse file: %s', tmp.name)

+def _update_loopdev_sizes(img):
+    ''' Resolve img; update the size of loop devices backed by it. '''
+    needle = os.fsencode(os.path.realpath(img)) + b'\n'
+    for sys_path in glob.iglob('/sys/block/loop[0-9]*/loop/backing_file'):
+        try:
+            with open(sys_path, 'rb') as sys_io:
+                if sys_io.read() != needle:
+                    continue
+        except FileNotFoundError:
+            continue
+        with open('/dev/' + sys_path.split('/')[3]) as dev_io:
+            fcntl.ioctl(dev_io.fileno(), LOOP_SET_CAPACITY)
+
+def _attempt_ficlone(src, dst):
+    try:
+        fcntl.ioctl(dst.fileno(), FICLONE, src.fileno())
+        return True
+    except OSError:
+        return False
+
 def _copy_file(src, dst):
     ''' Copy src to dst as a reflink if possible, sparse if not. '''
     if not os.path.exists(src):
         raise FileNotFoundError(src)
-    with _replace_file(dst) as tmp:
-        LOGGER.info('Copying file: %s -> %s', src, tmp.name)
-        _cmd('cp', '--sparse=always', '--reflink=auto', src, tmp.name)
+    with _replace_file(dst) as tmp_io:
+        with open(src, 'rb') as src_io:
+            if _attempt_ficlone(src_io, tmp_io):
+                LOGGER.info('Reflinked file: %s -> %s', src, tmp_io.name)
+                return True
+        LOGGER.info('Copying file: %s -> %s', src, tmp_io.name)
+        cmd = 'cp', '--sparse=always', src, tmp_io.name
+        p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        if p.returncode != 0:
+            raise qubes.storage.StoragePoolException(str(p))
+    return False

-def _cmd(*args):
-    ''' Run command until finished; return stdout (as bytes) if it
-        exited 0. Otherwise, raise a detailed StoragePoolException.
-    '''
-    try:
-        return subprocess.run(args, check=True,
-                              stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE).stdout
-    except subprocess.CalledProcessError as ex:
-        msg = '{!s} err={!r} out={!r}'.format(ex, ex.stderr, ex.stdout)
-        raise qubes.storage.StoragePoolException(msg) from ex
-
-def is_reflink_supported(dst_dir, src_dir=None):
+def is_supported(dst_dir, src_dir=None):
     ''' Return whether destination directory supports reflink copies
         from source directory. (A temporary file is created in each
         directory, using O_TMPFILE if possible.)
@@ -421,9 +459,4 @@ def is_reflink_supported(dst_dir, src_dir=None):
     dst = tempfile.TemporaryFile(dir=dst_dir)
     src = tempfile.TemporaryFile(dir=src_dir)
     src.write(b'foo')  # don't let any filesystem get clever with empty files
-
-    try:
-        fcntl.ioctl(dst.fileno(), FICLONE, src.fileno())
-        return True
-    except OSError:
-        return False
+    return _attempt_ficlone(src, dst)
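_copy_file() above now tries the FICLONE ioctl itself and only falls back to cp --sparse=always when cloning is refused. The same happy path can be reproduced outside the driver in a few lines; this clone_or_copy() sketch is hypothetical (it is not a Qubes function), and the FICLONE value is the one defined in the diff:

import fcntl
import shutil

FICLONE = 1074041865  # ioctl request number from <linux/fs.h>

def clone_or_copy(src, dst):
    # Try to reflink dst from src; on filesystems without FICLONE support
    # (e.g. ext4) fall back to an ordinary copy. Returns True if reflinked.
    with open(src, 'rb') as src_io, open(dst, 'wb') as dst_io:
        try:
            fcntl.ioctl(dst_io.fileno(), FICLONE, src_io.fileno())
            return True  # data blocks are now shared copy-on-write
        except OSError:
            pass
    shutil.copy2(src, dst)  # plain byte-for-byte fallback
    return False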
qubes/tests/__init__.py

@@ -1223,6 +1223,7 @@ def load_tests(loader, tests, pattern):  # pylint: disable=unused-argument
         'qubes.tests.vm.init',
         'qubes.tests.storage',
         'qubes.tests.storage_file',
+        'qubes.tests.storage_reflink',
         'qubes.tests.storage_lvm',
         'qubes.tests.storage_kernels',
         'qubes.tests.ext',
qubes/tests/api_admin.py

@@ -60,6 +60,7 @@ class AdminAPITestCase(qubes.tests.QubesTestCase):
         app = qubes.Qubes('/tmp/qubes-test.xml', load=False)
         app.vmm = unittest.mock.Mock(spec=qubes.app.VMMConnection)
         app.load_initial_values()
+        app.setup_pools()
         app.default_kernel = '1.0'
         app.default_netvm = None
         self.template = app.add_new_vm('TemplateVM', label='black',
qubes/tests/app.py

@@ -30,6 +30,7 @@ import qubes.events

 import qubes.tests
 import qubes.tests.init
+import qubes.tests.storage_reflink

 class TestApp(qubes.tests.TestEmitter):
     pass
@@ -264,6 +265,44 @@ class TC_30_VMCollection(qubes.tests.QubesTestCase):
     #     pass


+class TC_80_QubesInitialPools(qubes.tests.QubesTestCase):
+    def setUp(self):
+        super().setUp()
+        self.app = qubes.Qubes('/tmp/qubestest.xml', load=False,
+                               offline_mode=True)
+        self.test_dir = '/var/tmp/test-varlibqubes'
+        self.test_patch = mock.patch.dict(
+            qubes.config.defaults['pool_configs']['varlibqubes'],
+            {'dir_path': self.test_dir})
+        self.test_patch.start()
+
+    def tearDown(self):
+        self.test_patch.stop()
+        self.app.close()
+        del self.app
+
+    def get_driver(self, fs_type, accessible):
+        qubes.tests.storage_reflink.mkdir_fs(self.test_dir, fs_type,
+            accessible=accessible, cleanup_via=self.addCleanup)
+        self.app.load_initial_values()
+
+        varlibqubes = self.app.pools['varlibqubes']
+        self.assertEqual(varlibqubes.dir_path, self.test_dir)
+        return varlibqubes.driver
+
+    def test_100_varlibqubes_btrfs_accessible(self):
+        self.assertEqual(self.get_driver('btrfs', True), 'file-reflink')
+
+    def test_101_varlibqubes_btrfs_inaccessible(self):
+        self.assertEqual(self.get_driver('btrfs', False), 'file')
+
+    def test_102_varlibqubes_ext4_accessible(self):
+        self.assertEqual(self.get_driver('ext4', True), 'file')
+
+    def test_103_varlibqubes_ext4_inaccessible(self):
+        self.assertEqual(self.get_driver('ext4', False), 'file')
+
+
 class TC_89_QubesEmpty(qubes.tests.QubesTestCase):
     def tearDown(self):
         try:
qubes/tests/integ/backupcompatibility.py

@@ -19,6 +19,7 @@

 from multiprocessing import Queue

+import asyncio
 import os
 import shutil
 import subprocess
@@ -382,7 +383,7 @@ class TC_00_BackupCompatibility(
     def assertRestored(self, name, **kwargs):
         with self.assertNotRaises((KeyError, qubes.exc.QubesException)):
             vm = self.app.domains[name]
-            vm.storage.verify()
+            asyncio.get_event_loop().run_until_complete(vm.storage.verify())
         for prop, value in kwargs.items():
             if prop == 'klass':
                 self.assertIsInstance(vm, value)
qubes/tests/integ/basic.py

@@ -572,7 +572,7 @@ class TC_03_QvmRevertTemplateChanges(qubes.tests.SystemTestCase):

     def get_rootimg_checksum(self):
         return subprocess.check_output(
-            ['sha1sum', self.test_template.volumes['root'].path]).\
+            ['sha1sum', self.test_template.volumes['root'].export()]).\
             decode().split(' ')[0]

     def _do_test(self):
qubes/tests/integ/storage.py

@@ -26,6 +26,7 @@ import subprocess
 import qubes.storage.lvm
 import qubes.tests
 import qubes.tests.storage_lvm
+import qubes.tests.storage_reflink
 import qubes.vm.appvm


@@ -318,6 +319,28 @@ class StorageFile(StorageTestMixin, qubes.tests.SystemTestCase):
         super(StorageFile, self).tearDown()


+class StorageReflinkMixin(StorageTestMixin):
+    def tearDown(self):
+        self.app.remove_pool(self.pool.name)
+        super().tearDown()
+
+    def init_pool(self, fs_type, **kwargs):
+        name = 'test-reflink-integration-on-' + fs_type
+        dir_path = os.path.join('/var/tmp', name)
+        qubes.tests.storage_reflink.mkdir_fs(dir_path, fs_type,
+                                             cleanup_via=self.addCleanup)
+        self.pool = self.app.add_pool(name=name, dir_path=dir_path,
+                                      driver='file-reflink', **kwargs)
+
+class StorageReflinkOnBtrfs(StorageReflinkMixin, qubes.tests.SystemTestCase):
+    def init_pool(self):
+        super().init_pool('btrfs')
+
+class StorageReflinkOnExt4(StorageReflinkMixin, qubes.tests.SystemTestCase):
+    def init_pool(self):
+        super().init_pool('ext4', setup_check='no')
+
+
 @qubes.tests.storage_lvm.skipUnlessLvmPoolExists
 class StorageLVM(StorageTestMixin, qubes.tests.SystemTestCase):
     def init_pool(self):
qubes/tests/storage.py

@@ -22,6 +22,7 @@ import qubes.storage
 from qubes.exc import QubesException
 from qubes.storage import pool_drivers
 from qubes.storage.file import FilePool
+from qubes.storage.reflink import ReflinkPool
 from qubes.tests import SystemTestCase

 # :pylint: disable=invalid-name
@@ -107,10 +108,11 @@ class TC_00_Pool(SystemTestCase):
                       pool_drivers())

     def test_002_get_pool_klass(self):
-        """ Expect the default pool to be `FilePool` """
+        """ Expect the default pool to be `FilePool` or `ReflinkPool` """
         # :pylint: disable=protected-access
         result = self.app.get_pool('varlibqubes')
-        self.assertIsInstance(result, FilePool)
+        self.assertTrue(isinstance(result, FilePool)
+                        or isinstance(result, ReflinkPool))

     def test_003_pool_exists_default(self):
         """ Expect the default pool to exists """
qubes/tests/storage_reflink.py (new file, 154 lines)

@@ -0,0 +1,154 @@
+#
+# The Qubes OS Project, https://www.qubes-os.org
+#
+# Copyright (C) 2018 Rusty Bird <rustybird@net-c.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <https://www.gnu.org/licenses/>.
+#
+
+''' Tests for the file-reflink storage driver '''
+
+# pylint: disable=protected-access
+# pylint: disable=invalid-name
+
+import os
+import shutil
+import subprocess
+import sys
+
+import qubes.tests
+from qubes.storage import reflink
+
+
+class ReflinkMixin:
+    def setUp(self, fs_type='btrfs'):  # pylint: disable=arguments-differ
+        super().setUp()
+        self.test_dir = '/var/tmp/test-reflink-units-on-' + fs_type
+        mkdir_fs(self.test_dir, fs_type, cleanup_via=self.addCleanup)
+
+    def test_000_copy_file(self):
+        source = os.path.join(self.test_dir, 'source-file')
+        dest = os.path.join(self.test_dir, 'new-directory', 'dest-file')
+        content = os.urandom(1024**2)
+
+        with open(source, 'wb') as source_io:
+            source_io.write(content)
+
+        ficlone_succeeded = reflink._copy_file(source, dest)
+        self.assertEqual(ficlone_succeeded, self.ficlone_supported)
+
+        self.assertNotEqual(os.stat(source).st_ino, os.stat(dest).st_ino)
+        with open(source, 'rb') as source_io:
+            self.assertEqual(source_io.read(), content)
+        with open(dest, 'rb') as dest_io:
+            self.assertEqual(dest_io.read(), content)
+
+    def test_001_create_and_resize_files_and_update_loopdevs(self):
+        img_real = os.path.join(self.test_dir, 'img-real')
+        img_sym = os.path.join(self.test_dir, 'img-sym')
+        size_initial = 111 * 1024**2
+        size_resized = 222 * 1024**2
+
+        os.symlink(img_real, img_sym)
+        reflink._create_sparse_file(img_real, size_initial)
+        self.assertEqual(reflink._get_file_disk_usage(img_real), 0)
+        self.assertEqual(os.stat(img_real).st_size, size_initial)
+
+        dev_from_real = setup_loopdev(img_real, cleanup_via=self.addCleanup)
+        dev_from_sym = setup_loopdev(img_sym, cleanup_via=self.addCleanup)
+
+        reflink._resize_file(img_real, size_resized)
+        self.assertEqual(reflink._get_file_disk_usage(img_real), 0)
+        self.assertEqual(os.stat(img_real).st_size, size_resized)
+
+        reflink_update_loopdev_sizes(os.path.join(self.test_dir, 'unrelated'))
+
+        for dev in (dev_from_real, dev_from_sym):
+            self.assertEqual(get_blockdev_size(dev), size_initial)
+
+        reflink_update_loopdev_sizes(img_sym)
+
+        for dev in (dev_from_real, dev_from_sym):
+            self.assertEqual(get_blockdev_size(dev), size_resized)
+
+class TC_00_ReflinkOnBtrfs(ReflinkMixin, qubes.tests.QubesTestCase):
+    def setUp(self):  # pylint: disable=arguments-differ
+        super().setUp('btrfs')
+        self.ficlone_supported = True
+
+class TC_01_ReflinkOnExt4(ReflinkMixin, qubes.tests.QubesTestCase):
+    def setUp(self):  # pylint: disable=arguments-differ
+        super().setUp('ext4')
+        self.ficlone_supported = False
+
+
+def setup_loopdev(img, cleanup_via=None):
+    dev = str.strip(cmd('sudo', 'losetup', '-f', '--show', img).decode())
+    if cleanup_via is not None:
+        cleanup_via(detach_loopdev, dev)
+    return dev
+
+def detach_loopdev(dev):
+    cmd('sudo', 'losetup', '-d', dev)
+
+def get_fs_type(directory):
+    # 'stat -f -c %T' would identify ext4 as 'ext2/ext3'
+    return cmd('df', '--output=fstype', directory).decode().splitlines()[1]
+
+def mkdir_fs(directory, fs_type,
+             accessible=True, max_size=100*1024**3, cleanup_via=None):
+    os.mkdir(directory)
+
+    if get_fs_type(directory) != fs_type:
+        img = os.path.join(directory, 'img')
+        with open(img, 'xb') as img_io:
+            img_io.truncate(max_size)
+        cmd('mkfs.' + fs_type, img)
+        dev = setup_loopdev(img)
+        os.remove(img)
+        cmd('sudo', 'mount', dev, directory)
+        detach_loopdev(dev)
+
+    if accessible:
+        cmd('sudo', 'chmod', '777', directory)
+    else:
+        cmd('sudo', 'chmod', '000', directory)
+        cmd('sudo', 'chattr', '+i', directory)  # cause EPERM on write as root
+
+    if cleanup_via is not None:
+        cleanup_via(rmtree_fs, directory)
+
+def rmtree_fs(directory):
+    if os.path.ismount(directory):
+        cmd('sudo', 'umount', '-l', directory)
+        # loop device and backing file are garbage collected automatically
+    cmd('sudo', 'chattr', '-i', directory)
+    cmd('sudo', 'chmod', '777', directory)
+    shutil.rmtree(directory)
+
+def get_blockdev_size(dev):
+    return int(cmd('sudo', 'blockdev', '--getsize64', dev))
+
+def reflink_update_loopdev_sizes(img):
+    env = [k + '=' + v for k, v in os.environ.items()  # 'sudo -E' alone would
+           if k.startswith('PYTHON')]                  # drop some of these
+    code = ('from qubes.storage import reflink\n'
+            'reflink._update_loopdev_sizes(%r)' % img)
+    cmd('sudo', '-E', 'env', *env, sys.executable, '-c', code)
+
+def cmd(*argv):
+    p = subprocess.run(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if p.returncode != 0:
+        raise Exception(str(p))  # this will show stdout and stderr
+    return p.stdout
qubes/tools/qubes_create.py

@@ -18,7 +18,7 @@
 # License along with this library; if not, see <https://www.gnu.org/licenses/>.
 #

-'''qvm-create - Create new Qubes OS store'''
+'''qubes-create - Create new Qubes OS store'''

 import sys
 import qubes
@@ -38,7 +38,7 @@ def main(args=None):

     args = parser.parse_args(args)
     qubes.Qubes.create_empty_store(args.app,
-        offline_mode=args.offline_mode)
+        offline_mode=args.offline_mode).setup_pools()
     return 0

@@ -305,6 +305,7 @@ fi
 %{python3_sitelib}/qubes/tests/init.py
 %{python3_sitelib}/qubes/tests/storage.py
 %{python3_sitelib}/qubes/tests/storage_file.py
+%{python3_sitelib}/qubes/tests/storage_reflink.py
 %{python3_sitelib}/qubes/tests/storage_kernels.py
 %{python3_sitelib}/qubes/tests/storage_lvm.py
 %{python3_sitelib}/qubes/tests/tarwriter.py
@@ -1,49 +0,0 @@
-PYTHON_TESTSPATH = $(PYTHON_SITEPATH)/qubes/tests
-
-all:
-	python -m compileall .
-	python -O -m compileall .
-
-install:
-ifndef PYTHON_SITEPATH
-	$(error PYTHON_SITEPATH not defined)
-endif
-	mkdir -p $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp __init__.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp __init__.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp backup.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp backup.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp backupcompatibility.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp backupcompatibility.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp basic.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp basic.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp block.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp block.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp dispvm.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp dispvm.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp dom0_update.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp dom0_update.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp extra.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp extra.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp hardware.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp hardware.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp hvm.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp hvm.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp mime.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp mime.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp network.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp network.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp pvgrub.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp pvgrub.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp regressions.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp regressions.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp run.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp run.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage_file.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage_file.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage_xen.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp storage_xen.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp vm_qrexec_gui.py $(DESTDIR)$(PYTHON_TESTSPATH)
-	cp vm_qrexec_gui.py[co] $(DESTDIR)$(PYTHON_TESTSPATH)