#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2013-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013-2015 Marek Marczykowski-Górecki
#                         <marmarek@invisiblethingslab.com>
# Copyright (C) 2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#

""" Qubes storage system"""

from __future__ import absolute_import

import inspect
import os
import os.path
import string  # pylint: disable=deprecated-module
import subprocess
import time
from datetime import datetime

import asyncio
import lxml.etree
import pkg_resources

import qubes
import qubes.exc
import qubes.utils

STORAGE_ENTRY_POINT = 'qubes.storage'


class StoragePoolException(qubes.exc.QubesException):
    ''' A general storage exception '''


class BlockDevice:
    ''' Represents a storage block device. '''
    # pylint: disable=too-few-public-methods

    def __init__(self, path, name, script=None, rw=True, domain=None,
                 devtype='disk'):
        assert name, 'Missing device name'
        assert path, 'Missing device path'
        self.path = path
        self.name = name
        self.rw = rw
        self.script = script
        self.domain = domain
        self.devtype = devtype


class Volume:
    ''' Encapsulates all data about a volume for serialization to qubes.xml and
    libvirt config.

    Keep in mind!
    volatile = not snap_on_start and not save_on_stop
    snapshot = snap_on_start and not save_on_stop
    origin = not snap_on_start and save_on_stop
    origin_snapshot = snap_on_start and save_on_stop
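
    For example: in a typical template-based AppVM, the ``root`` volume is a
    *snapshot* (``snap_on_start=True, save_on_stop=False``), the ``private``
    volume is an *origin* (``save_on_stop=True, snap_on_start=False``), and
    the ``volatile`` volume has both flags unset.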
    '''

    devtype = 'disk'
    domain = None
    path = None
    script = None
    #: disk space used by this volume, can be smaller than :py:attr:`size`
    #: for sparse volumes
    usage = 0

    def __init__(self, name, pool, vid,
            revisions_to_keep=0, rw=False, save_on_stop=False, size=0,
            snap_on_start=False, source=None, **kwargs):
        ''' Initialize a volume.

        :param str name: The name of the volume inside the owning domain
        :param Pool pool: The pool object
        :param str vid: Volume identifier; needs to be unique within the pool
        :param int revisions_to_keep: Number of revisions to keep around
        :param bool rw: If True, the volume will be mounted read-write
        :param bool snap_on_start: Create a snapshot from the source on
            start, instead of using the volume's own data
        :param bool save_on_stop: Write changes to the volume in
            vm.stop(), otherwise discard them
        :param Volume source: other volume in the same pool to make a
            snapshot from; required if *snap_on_start* is `True`
        :param str/int size: Size of the volume, in bytes

        '''

        super(Volume, self).__init__(**kwargs)
        assert isinstance(pool, Pool)
        assert source is None or (isinstance(source, Volume)
                                  and source.pool == pool)

        if snap_on_start and source is None:
            msg = "snap_on_start specified on {!r} but no volume source set"
            msg = msg.format(name)
            raise StoragePoolException(msg)
        if not snap_on_start and source is not None:
            msg = "source specified on {!r} but no snap_on_start set"
            msg = msg.format(name)
            raise StoragePoolException(msg)

        #: Name of the volume in a domain it's attached to (like `root` or
        #: `private`).
        self.name = str(name)
        #: :py:class:`Pool` instance owning this volume
        self.pool = pool
        #: How many revisions of the volume to keep. Each revision is created
        #: at :py:meth:`stop`, if :py:attr:`save_on_stop` is True
        self.revisions_to_keep = int(revisions_to_keep)
        #: Should this volume be writable by the domain.
        self.rw = rw
        #: Should volume state be saved or discarded at :py:meth:`stop`
        self.save_on_stop = save_on_stop
        self._size = int(size)
        #: Should the volume state be initialized with a snapshot of the
        #: same-named volume of the domain's template.
        self.snap_on_start = snap_on_start
        #: source volume for :py:attr:`snap_on_start` volumes
        self.source = source
        #: Volume unique (inside given pool) identifier
        self.vid = vid

    def __eq__(self, other):
        if isinstance(other, Volume):
            return other.pool == self.pool and other.vid == self.vid
        return NotImplemented

    def __hash__(self):
        return hash('%s:%s' % (self.pool, self.vid))

    def __neq__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '{!r}'.format(str(self.pool) + ':' + self.vid)

    def __str__(self):
        return str(self.vid)

    def __xml__(self):
        config = _sanitize_config(self.config)
        return lxml.etree.Element('volume', **config)

    def create(self):
        ''' Create the given volume on disk.

        This method is called only once in the volume lifetime. Before
        calling this method, no data on disk should be touched (in
        the context of this volume).

        This can be implemented as a coroutine.
        '''
        raise self._not_implemented("create")

    def remove(self):
        ''' Remove volume.

        This can be implemented as a coroutine.'''
        raise self._not_implemented("remove")

    def export(self):
        ''' Returns a path to read the volume data from.

        Reading from this path when the domain owning this volume is
        running (i.e. when :py:meth:`is_dirty` is True) should return the
        data from before domain startup.

        Reading from the path returned by this method should return the
        volume data. If extracting volume data requires something more
        than just reading from a file (for example connecting to some other
        domain, or decompressing the data), the returned path may be a pipe.
        '''
        raise self._not_implemented("export")

    def import_data(self, size):
        ''' Returns a path to overwrite volume data.

        This method is called after the volume was already
        :py:meth:`create`-ed.

        Writing to this path should overwrite volume data. If importing
        volume data requires something more than just writing to a file
        (for example connecting to some other domain, or converting data
        on the fly), the returned path may be a pipe.

        This can be implemented as a coroutine.

        :param int size: size of new data in bytes
        '''
        raise self._not_implemented("import_data")

    def import_data_end(self, success):
        ''' End the data import operation. This may be used by the pool
        implementation to commit changes, clean up temporary files, etc.

        This method is called regardless of whether the operation was
        successful or not.

        This can be implemented as a coroutine.

        :param success: True if data import was successful, otherwise False
        '''
        # by default do nothing

    def import_volume(self, src_volume):
        ''' Imports data from a different volume (possibly in a different
        pool).

        The volume needs to be create()d first.

        This can be implemented as a coroutine. '''
        # pylint: disable=unused-argument
        raise self._not_implemented("import_volume")

    def is_dirty(self):
        ''' Return `True` if the volume was not properly shut down and
        committed.

        This includes the situation when the domain owning the volume is
        still running.
        '''
        raise self._not_implemented("is_dirty")

    def is_outdated(self):
        ''' Returns `True` if this snapshot of a source volume (for
        `snap_on_start`=True) is outdated.
        '''
        raise self._not_implemented("is_outdated")

    def resize(self, size):
        ''' Expands the volume; throws
        :py:class:`qubes.storage.StoragePoolException` if the
        given size is less than the current size.

        This can be implemented as a coroutine.

        :param int size: new size in bytes
        '''
        # pylint: disable=unused-argument
        raise self._not_implemented("resize")

    def revert(self, revision=None):
        ''' Revert volume to previous revision

        This can be implemented as a coroutine.

        :param revision: revision to revert volume to, see :py:attr:`revisions`
        '''
        # pylint: disable=unused-argument
        raise self._not_implemented("revert")

    def start(self):
        ''' Do whatever is needed on start.

        This includes making a snapshot of the template's volume if
        :py:attr:`snap_on_start` is set.

        This can be implemented as a coroutine.'''
        raise self._not_implemented("start")

    def stop(self):
        ''' Do whatever is needed on stop.

        This includes committing data if :py:attr:`save_on_stop` is set.

        This can be implemented as a coroutine.'''
        raise self._not_implemented("stop")

    def verify(self):
        ''' Verifies the volume.

        This function is supposed to either return :py:obj:`True`, or raise
        an exception.

        This can be implemented as a coroutine.'''
        raise self._not_implemented("verify")

    def block_device(self):
        ''' Return :py:class:`BlockDevice` for serialization in
        the libvirt XML template as <disk>.
        '''
        return BlockDevice(self.path, self.name, self.script,
                           self.rw, self.domain, self.devtype)

    @property
    def revisions(self):
        ''' Returns a dict containing revision identifiers and time of their
        creation '''
        msg = "{!s} has revisions not implemented".format(self.__class__)
        raise NotImplementedError(msg)
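
    # An illustrative sketch of the expected shape (identifiers and timestamp
    # format are pool-specific; the value below is purely hypothetical):
    #   {'1524207368-back': '2018-04-20T06:16:08'}
    # i.e. a revision identifier mapped to an ISO-formatted creation time
    # (see the isodate() helper near the end of this module).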

    @property
    def size(self):
        ''' Volume size in bytes '''
        return self._size

    @size.setter
    def size(self, size):
        # pylint: disable=attribute-defined-outside-init
        self._size = int(size)

    @property
    def config(self):
        ''' return config data for serialization to qubes.xml '''
        result = {
            'name': self.name,
            'pool': str(self.pool),
            'vid': self.vid,
            'revisions_to_keep': self.revisions_to_keep,
            'rw': self.rw,
            'save_on_stop': self.save_on_stop,
            'snap_on_start': self.snap_on_start,
        }

        if self.size:
            result['size'] = self.size

        if self.source:
            result['source'] = str(self.source)

        return result
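
    # Illustrative sketch of the serialized form (the values are hypothetical):
    # for an AppVM's private volume, ``config`` may yield something like
    #   {'name': 'private', 'pool': 'default', 'vid': 'appvms/work/private',
    #    'revisions_to_keep': 1, 'rw': True, 'save_on_stop': True,
    #    'snap_on_start': False, 'size': 2147483648}
    # which _sanitize_config() then converts to strings for the <volume>
    # element in qubes.xml.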

    def _not_implemented(self, method_name):
        ''' Helper for emitting helpful `NotImplementedError` exceptions '''
        msg = "Volume {!s} has {!s}() not implemented"
        msg = msg.format(str(self.__class__.__name__), method_name)
        return NotImplementedError(msg)


class Storage:
    ''' Class for handling VM virtual disks.

    This is the base class for all other implementations, mostly with Xen on
    Linux in mind.
    '''

    AVAILABLE_FRONTENDS = {'xvd' + c for c in string.ascii_lowercase}

    def __init__(self, vm):
        #: Domain for which we manage storage
        self.vm = vm
        self.log = self.vm.log
        #: Additional drive (currently used only by HVM)
        self.drive = None

        if hasattr(vm, 'volume_config'):
            for name, conf in self.vm.volume_config.items():
                self.init_volume(name, conf)

    def _update_volume_config_source(self, name, volume_config):
        '''Retrieve 'source' volume from the VM's template'''
        template = getattr(self.vm, 'template', None)
        # recursively look up the source volume - templates may be
        # chained (TemplateVM -> AppVM -> DispVM, where the
        # actual source should be taken from the TemplateVM)
        while template:
            source = template.volumes[name]
            volume_config['source'] = source
            volume_config['pool'] = source.pool
            volume_config['size'] = source.size
            if source.source is not None:
                template = getattr(template, 'template', None)
            else:
                break

    def init_volume(self, name, volume_config):
        ''' Initialize Volume instance attached to this domain '''

        if 'name' not in volume_config:
            volume_config['name'] = name

        if 'source' in volume_config:
            # we have no control over VM load order,
            # so initialize storage recursively if needed
            template = getattr(self.vm, 'template', None)
            if template and template.storage is None:
                template.storage = Storage(template)

            if volume_config['source'] is None:
                self._update_volume_config_source(name, volume_config)
            else:
                # if source is already specified, pool needs to be too
                pool = self.vm.app.get_pool(volume_config['pool'])
                volume_config['source'] = pool.volumes[volume_config['source']]

        # if pool still unknown, load default
        if 'pool' not in volume_config:
            volume_config['pool'] = \
                getattr(self.vm.app, 'default_pool_' + name)
        pool = self.vm.app.get_pool(volume_config['pool'])
        if 'internal' in volume_config:
            # migrate old config
            del volume_config['internal']
        volume = pool.init_volume(self.vm, volume_config)
        self.vm.volumes[name] = volume
        return volume
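
    # Illustrative sketch (names and sizes are hypothetical): a typical
    # volume_config entry passed to init_volume() looks roughly like
    #   {'name': 'private', 'pool': 'default', 'size': 2 * 1024 ** 3,
    #    'save_on_stop': True, 'rw': True, 'revisions_to_keep': 1}
    # The selected pool driver then returns a concrete Volume subclass
    # instance, which gets recorded in self.vm.volumes['private'].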

    def attach(self, volume, rw=False):
        ''' Attach a volume to the domain '''
        assert self.vm.is_running()

        if self._is_already_attached(volume):
            self.vm.log.info("{!r} already attached".format(volume))
            return

        try:
            frontend = self.unused_frontend()
        except IndexError:
            raise StoragePoolException("No unused frontend found")
        disk = lxml.etree.Element("disk")
        disk.set('type', 'block')
        disk.set('device', 'disk')
        lxml.etree.SubElement(disk, 'driver').set('name', 'phy')
        lxml.etree.SubElement(disk, 'source').set('dev', '/dev/%s' % volume.vid)
        lxml.etree.SubElement(disk, 'target').set('dev', frontend)
        if not rw:
            lxml.etree.SubElement(disk, 'readonly')

        if volume.domain is not None:
            lxml.etree.SubElement(disk, 'backenddomain').set(
                'name', volume.domain.name)

        xml_string = lxml.etree.tostring(disk, encoding='utf-8')
        self.vm.libvirt_domain.attachDevice(xml_string)
        # trigger watches to update device status
        # FIXME: this should be removed once libvirt will report such
        # events itself
        # self.vm.untrusted_qdb.write('/qubes-block-devices', '')
        # ← do we need this?
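
    # For reference, the element built above serializes to something like
    # (the vid and frontend name here are hypothetical):
    #   <disk type="block" device="disk">
    #     <driver name="phy"/>
    #     <source dev="/dev/qubes_dom0/vm-work-private"/>
    #     <target dev="xvdi"/>
    #     <readonly/>
    #   </disk>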

    def _is_already_attached(self, volume):
        ''' Checks if the given volume is already attached '''
        parsed_xml = lxml.etree.fromstring(self.vm.libvirt_domain.XMLDesc())
        disk_sources = parsed_xml.xpath("//domain/devices/disk/source")
        for source in disk_sources:
            if source.get('dev') == '/dev/%s' % volume.vid:
                return True
        return False

    def detach(self, volume):
        ''' Detach a volume from domain '''
        parsed_xml = lxml.etree.fromstring(self.vm.libvirt_domain.XMLDesc())
        disks = parsed_xml.xpath("//domain/devices/disk")
        for disk in disks:
            source = disk.xpath('source')[0]
            if source.get('dev') == '/dev/%s' % volume.vid:
                disk_xml = lxml.etree.tostring(disk, encoding='utf-8')
                self.vm.libvirt_domain.detachDevice(disk_xml)
                return
        raise StoragePoolException('Volume {!r} is not attached'.format(volume))

    @property
    def kernels_dir(self):
        '''Directory where the kernel resides.

        If :py:attr:`self.vm.kernel` is :py:obj:`None`, then this points
        inside :py:attr:`self.vm.dir_path`.
        '''
        if not self.vm.kernel:
            return None
        if 'kernel' in self.vm.volumes:
            return self.vm.volumes['kernel'].kernels_dir
        return os.path.join(
            qubes.config.qubes_base_dir,
            qubes.config.system_path['qubes_kernels_base_dir'],
            self.vm.kernel)

    def get_disk_utilization(self):
        ''' Returns summed-up disk utilization for all domain volumes '''
        result = 0
        for volume in self.vm.volumes.values():
            result += volume.usage
        return result

    @asyncio.coroutine
    def resize(self, volume, size):
        ''' Resize a read-writable volume '''
        if isinstance(volume, str):
            volume = self.vm.volumes[volume]
        yield from qubes.utils.coro_maybe(volume.resize(size))
        if self.vm.is_running():
            try:
                yield from self.vm.run_service_for_stdio('qubes.ResizeDisk',
                    input=volume.name.encode(),
                    user='root')
            except subprocess.CalledProcessError as e:
                service_error = e.stderr.decode('ascii', errors='ignore')
                service_error = service_error.replace('%', '')
                raise StoragePoolException(
                    'Online resize of volume {} failed (you need to resize '
                    'filesystem manually): {}'.format(volume, service_error))

    @asyncio.coroutine
    def create(self):
        ''' Creates volumes on disk '''
        old_umask = os.umask(0o002)
        yield from qubes.utils.void_coros_maybe(
            vol.create() for vol in self.vm.volumes.values())
        os.umask(old_umask)

    @asyncio.coroutine
    def clone_volume(self, src_vm, name):
        ''' Clone a single volume from the specified VM

        :param QubesVM src_vm: source VM
        :param str name: name of the volume to clone ('root', 'private' etc.)
        :return: cloned volume object
        '''
        config = self.vm.volume_config[name]
        dst_pool = self.vm.app.get_pool(config['pool'])
        dst = dst_pool.init_volume(self.vm, config)
        src_volume = src_vm.volumes[name]
        msg = "Importing volume {!s} from vm {!s}"
        self.vm.log.info(msg.format(src_volume.name, src_vm.name))
        yield from qubes.utils.coro_maybe(dst.create())
        yield from qubes.utils.coro_maybe(dst.import_volume(src_volume))
        self.vm.volumes[name] = dst
        return self.vm.volumes[name]

    @asyncio.coroutine
    def clone(self, src_vm):
        ''' Clone volumes from the specified VM '''

        self.vm.volumes = {}
        with VmCreationManager(self.vm):
            yield from qubes.utils.void_coros_maybe(
                self.clone_volume(src_vm, vol_name)
                for vol_name in self.vm.volume_config.keys())

    @property
    def outdated_volumes(self):
        ''' Returns a list of outdated volumes '''
        result = []
        if self.vm.is_halted():
            return result

        volumes = self.vm.volumes
        for volume in volumes.values():
            if volume.is_outdated():
                result += [volume]

        return result

    @asyncio.coroutine
    def verify(self):
        '''Verify that the storage is sane.

        On success, returns normally. On failure, raises an exception.
        '''
        if not os.path.exists(self.vm.dir_path):
            raise qubes.exc.QubesVMError(
                self.vm,
                'VM directory does not exist: {}'.format(self.vm.dir_path))
        yield from qubes.utils.void_coros_maybe(
            vol.verify() for vol in self.vm.volumes.values())
        self.vm.fire_event('domain-verify-files')
        return True

    @asyncio.coroutine
    def remove(self):
        ''' Remove all the volumes.

        Errors on removal are caught and logged.
        '''
        results = []
        for vol in self.vm.volumes.values():
            self.log.info('Removing volume %s: %s' % (vol.name, vol.vid))
            try:
                results.append(vol.remove())
            except (IOError, OSError) as e:
                self.vm.log.exception("Failed to remove volume %s", vol.name, e)
        try:
            yield from qubes.utils.void_coros_maybe(results)
        except (IOError, OSError) as e:
            self.vm.log.exception("Failed to remove some volume", e)

    @asyncio.coroutine
    def start(self):
        ''' Execute the start method on each volume '''
        yield from qubes.utils.void_coros_maybe(
            vol.start() for vol in self.vm.volumes.values())

    @asyncio.coroutine
    def stop(self):
        ''' Execute the stop method on each volume '''
        yield from qubes.utils.void_coros_maybe(
            vol.stop() for vol in self.vm.volumes.values())

    def unused_frontend(self):
        ''' Find an unused device name '''
        unused_frontends = self.AVAILABLE_FRONTENDS.difference(
            self.used_frontends)
        return sorted(unused_frontends)[0]
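
    # Illustrative example: if the domain already uses xvda, xvdb, xvdc and
    # xvdd, the lexicographically first free name returned here is 'xvde'.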

    @property
    def used_frontends(self):
        ''' Used device names '''
        xml = self.vm.libvirt_domain.XMLDesc()
        parsed_xml = lxml.etree.fromstring(xml)
        return {target.get('dev', None)
                for target in parsed_xml.xpath(
                    "//domain/devices/disk/target")}

    def export(self, volume):
        ''' Helper function to export a volume (pool.export(volume)) '''
        assert isinstance(volume, (Volume, str)), \
            "You need to pass a Volume or pool name as str"
        if isinstance(volume, Volume):
            return volume.export()

        return self.vm.volumes[volume].export()

    @asyncio.coroutine
    def import_data(self, volume, size):
        '''
        Helper function to import volume data (pool.import_data(volume)).

        :param size: new size in bytes, or None to keep the old size
        '''

        assert isinstance(volume, (Volume, str)), \
            "You need to pass a Volume or pool name as str"
        if isinstance(volume, str):
            volume = self.vm.volumes[volume]

        if size is None:
            size = volume.size

        ret = volume.import_data(size)
        return (yield from qubes.utils.coro_maybe(ret))

    @asyncio.coroutine
    def import_data_end(self, volume, success):
        ''' Helper function to finish/clean up data import
        (pool.import_data_end(volume)) '''
        assert isinstance(volume, (Volume, str)), \
            "You need to pass a Volume or pool name as str"
        if isinstance(volume, Volume):
            ret = volume.import_data_end(success=success)
        else:
            ret = self.vm.volumes[volume].import_data_end(success=success)
        return (yield from qubes.utils.coro_maybe(ret))


class VolumesCollection:
    '''Convenient collection wrapper for pool.get_volume and
    pool.list_volumes
    '''
    def __init__(self, pool):
        self._pool = pool

    def __getitem__(self, item):
        ''' Get a single volume with the given Volume ID.

        You can also pass a Volume instance to get the same Volume back,
        or KeyError if the Volume no longer exists.

        :param item: a Volume ID (str) or a Volume instance
        '''
        if isinstance(item, Volume):
            if item.pool == self._pool:
                return self[item.vid]
            raise KeyError(item)
        try:
            return self._pool.get_volume(item)
        except NotImplementedError:
            for vol in self:
                if vol.vid == item:
                    return vol
            # if list_volumes is not implemented either, the loop above
            # would already have raised NotImplementedError
            raise KeyError(item)

    def __iter__(self):
        ''' Get an iterator over the pool's volumes '''
        return iter(self._pool.list_volumes())

    def __contains__(self, item):
        ''' Check if the given volume (either Volume ID or Volume instance)
        is present in the pool
        '''
        try:
            return self[item] is not None
        except KeyError:
            return False

    def keys(self):
        ''' Return a list of volume IDs '''
        return [vol.vid for vol in self]

    def values(self):
        ''' Return a list of Volumes '''
        return list(self)
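
    # Illustrative usage (the pool and vid names are hypothetical):
    #   pool.volumes['vm-work-private']      # look up a single Volume
    #   'vm-work-private' in pool.volumes    # membership test
    #   list(pool.volumes)                   # all volumes in the pool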


class Pool:
    ''' A Pool is used to manage different kinds of volumes (file-based/
    LVM/Btrfs/...).

    Third parties providing their own storage implementations will need to
    extend this class.
    '''  # pylint: disable=unused-argument
    private_img_size = qubes.config.defaults['private_img_size']
    root_img_size = qubes.config.defaults['root_img_size']

    def __init__(self, name, revisions_to_keep=1, **kwargs):
        super(Pool, self).__init__(**kwargs)
        self._volumes_collection = VolumesCollection(self)
        self.name = name
        self.revisions_to_keep = revisions_to_keep
        kwargs['name'] = self.name

    def __eq__(self, other):
        if isinstance(other, Pool):
            return self.name == other.name
        if isinstance(other, str):
            return self.name == other
        return NotImplemented

    def __neq__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(self.name)

    def __xml__(self):
        config = _sanitize_config(self.config)
        return lxml.etree.Element('pool', **config)

    @property
    def config(self):
        ''' Returns the pool config to be written to qubes.xml '''
        raise self._not_implemented("config")

    def destroy(self):
        ''' Called when removing the pool. Use this for implementation-specific
        clean-up.

        This can be implemented as a coroutine.
        '''
        raise self._not_implemented("destroy")

    def init_volume(self, vm, volume_config):
        ''' Initialize a :py:class:`qubes.storage.Volume` from `volume_config`.
        '''
        raise self._not_implemented("init_volume")

    def setup(self):
        ''' Called when adding a pool to the system. Use this for
        implementation-specific set-up.

        This can be implemented as a coroutine.
        '''
        raise self._not_implemented("setup")

    @property
    def volumes(self):
        ''' Return a collection of volumes managed by this pool '''
        return self._volumes_collection

    def list_volumes(self):
        ''' Return a list of volumes managed by this pool '''
        raise self._not_implemented("list_volumes")

    def get_volume(self, vid):
        ''' Return a volume with *vid* from this pool

        :raise KeyError: if no volume is found
        '''
        raise self._not_implemented("get_volume")

    def included_in(self, app):
        ''' Check if this pool is physically included in another one.

        This works on a best-effort basis, because one pool driver may not
        know all the other drivers.

        :param app: Qubes() object to look up other pools in
        :returns: pool or None
        '''

    @property
    def size(self):
        ''' Storage pool size in bytes, or None if unknown '''

    @property
    def usage(self):
        ''' Space used in the pool in bytes, or None if unknown '''

    @property
    def usage_details(self):
        """Detailed information about pool usage as a dictionary

        Contains data_usage for usage in bytes and data_size for pool
        size; other implementations may add more implementation-specific
        detail"""
        result = {}
        if self.usage is not None:
            result['data_usage'] = self.usage
        if self.size is not None:
            result['data_size'] = self.size

        return result

    def _not_implemented(self, method_name):
        ''' Helper for emitting helpful `NotImplementedError` exceptions '''
        msg = "Pool driver {!s} has {!s}() not implemented"
        msg = msg.format(str(self.__class__.__name__), method_name)
        return NotImplementedError(msg)


def _sanitize_config(config):
    ''' Helper function to convert types to appropriate strings
    '''  # FIXME: find another solution for serializing basic types
    result = {}
    for key, value in config.items():
        if isinstance(value, bool):
            if value:
                result[key] = 'True'
        else:
            result[key] = str(value)
    return result
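
# Illustrative behaviour (hypothetical input):
#   _sanitize_config({'name': 'default', 'rw': True, 'size': 1024,
#                     'snap_on_start': False})
#   == {'name': 'default', 'rw': 'True', 'size': '1024'}
# i.e. True becomes the string 'True', False keys are dropped entirely, and
# everything else is str()-ed, so the values can be used as XML attributes.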


def pool_drivers():
    """ Return a list of EntryPoints names """
    return [ep.name
            for ep in pkg_resources.iter_entry_points(STORAGE_ENTRY_POINT)]
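
# Illustrative result (the exact set depends on which storage packages are
# installed; these driver names are examples, not a guaranteed list):
#   pool_drivers()  ->  ['file', 'file-reflink', 'linux-kernel', 'lvm_thin']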


def driver_parameters(name):
    ''' Get __init__ parameters from a driver, without `self` & `name`. '''
    init_function = qubes.utils.get_entry_point_one(
        qubes.storage.STORAGE_ENTRY_POINT, name).__init__
    signature = inspect.signature(init_function)
    params = signature.parameters.keys()
    ignored_params = ['self', 'name', 'kwargs']
    return [p for p in params if p not in ignored_params]


def isodate(seconds):
    ''' Helper function which returns an ISO 8601-formatted date '''
    return datetime.utcfromtimestamp(seconds).isoformat("T")
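
# Example: isodate(0) == '1970-01-01T00:00:00' (the timestamp is interpreted
# as UTC and formatted without a timezone suffix).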


def search_pool_containing_dir(pools, dir_path):
    ''' Helper function looking for a pool containing the given directory.

    This is useful for implementing the Pool.included_in method.
    '''

    real_dir_path = os.path.realpath(dir_path)

    # prefer filesystem pools
    for pool in pools:
        if hasattr(pool, 'dir_path'):
            pool_real_dir_path = os.path.realpath(pool.dir_path)
            if os.path.commonpath([pool_real_dir_path, real_dir_path]) == \
                    pool_real_dir_path:
                return pool

    # then look for lvm
    for pool in pools:
        if hasattr(pool, 'thin_pool') and hasattr(pool, 'volume_group'):
            if (pool.volume_group, pool.thin_pool) == \
                    DirectoryThinPool.thin_pool(real_dir_path):
                return pool

    return None


class VmCreationManager:
    ''' A `ContextManager` which cleans up if volume creation fails.
    '''  # pylint: disable=too-few-public-methods
    def __init__(self, vm):
        self.vm = vm

    def __enter__(self):
        pass

    def __exit__(self, type, value, tb):  # pylint: disable=redefined-builtin
        if type is not None and value is not None and tb is not None:
            for volume in self.vm.volumes.values():
                try:
                    volume.remove()
                except Exception:  # pylint: disable=broad-except
                    pass
            os.rmdir(self.vm.dir_path)


# pylint: disable=too-few-public-methods
class DirectoryThinPool:
    '''The thin pool containing the device of a given filesystem'''
    _thin_pool = {}

    @classmethod
    def _init(cls, dir_path):
        '''Find out the thin pool containing the given filesystem'''
        if dir_path not in cls._thin_pool:
            cls._thin_pool[dir_path] = None, None

            try:
                fs_stat = os.stat(dir_path)
                fs_major = (fs_stat.st_dev & 0xff00) >> 8
                fs_minor = fs_stat.st_dev & 0xff

                sudo = []
                if os.getuid():
                    sudo = ['sudo']
                root_table = subprocess.check_output(sudo + ["dmsetup",
                    "-j", str(fs_major), "-m", str(fs_minor),
                    "table"], stderr=subprocess.DEVNULL)

                _start, _sectors, target_type, target_args = \
                    root_table.decode().split(" ", 3)
                if target_type == "thin":
                    thin_pool_devnum, _thin_pool_id = target_args.split(" ")
                    with open("/sys/dev/block/{}/dm/name"
                            .format(thin_pool_devnum), "r") as thin_pool_tpool_f:
                        thin_pool_tpool = thin_pool_tpool_f.read().rstrip('\n')
                    if thin_pool_tpool.endswith("-tpool"):
                        volume_group, thin_pool, _tpool = \
                            thin_pool_tpool.rsplit("-", 2)
                        cls._thin_pool[dir_path] = volume_group, thin_pool
            except:  # pylint: disable=bare-except
                pass

    @classmethod
    def thin_pool(cls, dir_path):
        '''The (volume group, thin pool name) tuple containing the given
        filesystem'''
        cls._init(dir_path)
        return cls._thin_pool[dir_path]