2016-04-28 16:00:29 +02:00
|
|
|
#
|
|
|
|
# The Qubes OS Project, https://www.qubes-os.org/
|
|
|
|
#
|
|
|
|
# Copyright (C) 2010-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
|
|
|
|
# Copyright (C) 2011-2015 Marek Marczykowski-Górecki
|
|
|
|
# <marmarek@invisiblethingslab.com>
|
|
|
|
# Copyright (C) 2014-2015 Wojtek Porczyk <woju@invisiblethingslab.com>
|
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# This library is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU Lesser General Public
|
|
|
|
# License as published by the Free Software Foundation; either
|
|
|
|
# version 2.1 of the License, or (at your option) any later version.
|
2016-04-28 16:00:29 +02:00
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# This library is distributed in the hope that it will be useful,
|
2016-04-28 16:00:29 +02:00
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2017-10-12 00:11:50 +02:00
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
# Lesser General Public License for more details.
|
2016-04-28 16:00:29 +02:00
|
|
|
#
|
2017-10-12 00:11:50 +02:00
|
|
|
# You should have received a copy of the GNU Lesser General Public
|
|
|
|
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
|
2016-04-28 16:00:29 +02:00
|
|
|
#
|
|
|
|
|
2020-04-07 23:30:21 +02:00
|
|
|
import collections.abc
|
2018-09-12 01:50:25 +02:00
|
|
|
import copy
|
2016-05-05 17:19:48 +02:00
|
|
|
import functools
|
2016-04-28 16:00:29 +02:00
|
|
|
import grp
|
2018-03-19 22:26:54 +01:00
|
|
|
import itertools
|
2016-04-28 16:00:29 +02:00
|
|
|
import logging
|
|
|
|
import os
|
2016-06-02 17:20:13 +02:00
|
|
|
import random
|
2016-04-28 16:00:29 +02:00
|
|
|
import sys
|
|
|
|
import tempfile
|
|
|
|
import time
|
2017-08-28 14:24:48 +02:00
|
|
|
import traceback
|
2016-06-02 17:20:13 +02:00
|
|
|
import uuid
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-02-18 21:13:24 +01:00
|
|
|
import asyncio
|
2016-07-13 20:38:46 +02:00
|
|
|
import jinja2
|
2016-06-02 22:02:06 +02:00
|
|
|
import libvirt
|
2018-03-19 22:26:54 +01:00
|
|
|
import lxml.etree
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
try:
|
2016-07-13 20:38:46 +02:00
|
|
|
import xen.lowlevel.xs # pylint: disable=wrong-import-order
|
|
|
|
import xen.lowlevel.xc # pylint: disable=wrong-import-order
|
2016-04-28 16:00:29 +02:00
|
|
|
except ImportError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
if os.name == 'posix':
|
2016-06-02 22:02:06 +02:00
|
|
|
# pylint: disable=wrong-import-order
|
2016-04-28 16:00:29 +02:00
|
|
|
import fcntl
|
|
|
|
elif os.name == 'nt':
|
|
|
|
# pylint: disable=import-error
|
|
|
|
import win32con
|
|
|
|
import win32file
|
|
|
|
import pywintypes
|
|
|
|
else:
|
|
|
|
raise RuntimeError("Qubes works only on POSIX or WinNT systems")
|
|
|
|
|
2017-04-03 13:11:34 +02:00
|
|
|
# pylint: disable=wrong-import-position
|
|
|
|
import qubes
|
|
|
|
import qubes.ext
|
|
|
|
import qubes.utils
|
2017-06-09 04:46:46 +02:00
|
|
|
import qubes.storage
|
2018-09-12 01:50:26 +02:00
|
|
|
import qubes.storage.reflink
|
2017-04-03 13:11:34 +02:00
|
|
|
import qubes.vm
|
|
|
|
import qubes.vm.adminvm
|
|
|
|
import qubes.vm.qubesvm
|
|
|
|
import qubes.vm.templatevm
|
2019-10-20 12:21:09 +02:00
|
|
|
|
|
|
|
|
2017-04-03 13:11:34 +02:00
|
|
|
# pylint: enable=wrong-import-position
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
class VirDomainWrapper:
|
2016-06-02 22:02:06 +02:00
|
|
|
# pylint: disable=too-few-public-methods
|
|
|
|
|
2016-05-05 17:19:48 +02:00
|
|
|
def __init__(self, connection, vm):
|
|
|
|
self._connection = connection
|
|
|
|
self._vm = vm
|
|
|
|
|
|
|
|
def _reconnect_if_dead(self):
|
2019-09-22 04:41:43 +02:00
|
|
|
try:
|
|
|
|
is_dead = not self._vm.connect().isAlive()
|
|
|
|
except libvirt.libvirtError as ex:
|
|
|
|
if ex.get_error_code() == libvirt.VIR_ERR_INVALID_CONN:
|
|
|
|
# connection to libvirt was re-established in the meantime
|
|
|
|
is_dead = True
|
|
|
|
else:
|
|
|
|
raise
|
2016-05-05 17:19:48 +02:00
|
|
|
if is_dead:
|
2016-06-02 22:02:06 +02:00
|
|
|
# pylint: disable=protected-access
|
2016-05-05 17:19:48 +02:00
|
|
|
self._connection._reconnect_if_dead()
|
2016-08-08 00:13:26 +02:00
|
|
|
self._vm = self._connection._conn.lookupByUUID(self._vm.UUID())
|
2016-05-05 17:19:48 +02:00
|
|
|
return is_dead
|
|
|
|
|
|
|
|
def __getattr__(self, attrname):
|
|
|
|
attr = getattr(self._vm, attrname)
|
2020-04-07 23:30:21 +02:00
|
|
|
if not isinstance(attr, collections.abc.Callable):
|
2016-05-05 17:19:48 +02:00
|
|
|
return attr
|
|
|
|
|
|
|
|
@functools.wraps(attr)
|
|
|
|
def wrapper(*args, **kwargs):
|
|
|
|
try:
|
|
|
|
return attr(*args, **kwargs)
|
2016-06-02 22:02:06 +02:00
|
|
|
except libvirt.libvirtError:
|
2016-05-05 17:19:48 +02:00
|
|
|
if self._reconnect_if_dead():
|
|
|
|
return getattr(self._vm, attrname)(*args, **kwargs)
|
|
|
|
raise
|
2019-10-20 12:21:09 +02:00
|
|
|
|
2016-05-05 17:19:48 +02:00
|
|
|
return wrapper
|
|
|
|
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
class VirConnectWrapper:
|
2016-06-02 22:02:06 +02:00
|
|
|
# pylint: disable=too-few-public-methods
|
|
|
|
|
2019-09-22 04:41:43 +02:00
|
|
|
def __init__(self, uri, reconnect_cb=None):
|
2016-05-05 17:19:48 +02:00
|
|
|
self._conn = libvirt.open(uri)
|
2019-09-22 04:41:43 +02:00
|
|
|
self._reconnect_cb = reconnect_cb
|
2016-05-05 17:19:48 +02:00
|
|
|
|
|
|
|
def _reconnect_if_dead(self):
|
|
|
|
is_dead = not self._conn.isAlive()
|
|
|
|
if is_dead:
|
2019-09-22 04:41:43 +02:00
|
|
|
uri = self._conn.getURI()
|
|
|
|
old_conn = self._conn
|
|
|
|
self._conn = libvirt.open(uri)
|
|
|
|
if callable(self._reconnect_cb):
|
|
|
|
self._reconnect_cb(old_conn)
|
|
|
|
old_conn.close()
|
2016-05-05 17:19:48 +02:00
|
|
|
return is_dead
|
|
|
|
|
|
|
|
def _wrap_domain(self, ret):
|
|
|
|
if isinstance(ret, libvirt.virDomain):
|
|
|
|
ret = VirDomainWrapper(self, ret)
|
|
|
|
return ret
|
|
|
|
|
|
|
|
def __getattr__(self, attrname):
|
|
|
|
attr = getattr(self._conn, attrname)
|
2020-04-07 23:30:21 +02:00
|
|
|
if not isinstance(attr, collections.abc.Callable):
|
2016-05-05 17:19:48 +02:00
|
|
|
return attr
|
2017-07-26 02:57:38 +02:00
|
|
|
if attrname == 'close':
|
|
|
|
return attr
|
2016-05-05 17:19:48 +02:00
|
|
|
|
|
|
|
@functools.wraps(attr)
|
|
|
|
def wrapper(*args, **kwargs):
|
|
|
|
try:
|
|
|
|
return self._wrap_domain(attr(*args, **kwargs))
|
2016-06-02 22:02:06 +02:00
|
|
|
except libvirt.libvirtError:
|
2016-05-05 17:19:48 +02:00
|
|
|
if self._reconnect_if_dead():
|
|
|
|
return self._wrap_domain(
|
|
|
|
getattr(self._conn, attrname)(*args, **kwargs))
|
|
|
|
raise
|
2019-10-20 12:21:09 +02:00
|
|
|
|
2016-05-05 17:19:48 +02:00
|
|
|
return wrapper
|
|
|
|
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
class VMMConnection:
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Connection to Virtual Machine Manager (libvirt)"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-09-22 04:41:43 +02:00
|
|
|
def __init__(self, offline_mode=None, libvirt_reconnect_cb=None):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-11-02 23:31:47 +01:00
|
|
|
|
|
|
|
:param offline_mode: enable/disable offline mode; default is to
|
|
|
|
enable when running in chroot as root, otherwise disable
|
2019-09-22 04:41:43 +02:00
|
|
|
:param libvirt_reconnect_cb: callable to be called when connection to
|
|
|
|
libvirt is re-established; the callback is called with old connection
|
|
|
|
as argument
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-11-02 23:31:47 +01:00
|
|
|
if offline_mode is None:
|
|
|
|
offline_mode = bool(os.getuid() == 0 and
|
2019-10-20 12:21:09 +02:00
|
|
|
os.stat('/') != os.stat('/proc/1/root/.'))
|
2016-05-21 03:26:39 +02:00
|
|
|
self._offline_mode = offline_mode
|
2019-09-22 04:41:43 +02:00
|
|
|
self._libvirt_reconnect_cb = libvirt_reconnect_cb
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-08-28 14:24:48 +02:00
|
|
|
self._libvirt_conn = None
|
|
|
|
self._xs = None
|
|
|
|
self._xc = None
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
@property
|
|
|
|
def offline_mode(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Check or enable offline mode (do not actually connect to vmm)"""
|
2016-04-28 16:00:29 +02:00
|
|
|
return self._offline_mode
|
|
|
|
|
|
|
|
def _libvirt_error_handler(self, ctx, error):
|
|
|
|
pass
|
|
|
|
|
|
|
|
def init_vmm_connection(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Initialise connection
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
This method is automatically called when getting"""
|
2016-04-28 16:00:29 +02:00
|
|
|
if self._libvirt_conn is not None:
|
|
|
|
# Already initialized
|
|
|
|
return
|
|
|
|
if self._offline_mode:
|
|
|
|
# Do not initialize in offline mode
|
|
|
|
raise qubes.exc.QubesException(
|
|
|
|
'VMM operations disabled in offline mode')
|
|
|
|
|
|
|
|
if 'xen.lowlevel.xs' in sys.modules:
|
|
|
|
self._xs = xen.lowlevel.xs.xs()
|
2017-07-27 22:16:03 +02:00
|
|
|
if 'xen.lowlevel.xc' in sys.modules:
|
2016-04-28 16:00:29 +02:00
|
|
|
self._xc = xen.lowlevel.xc.xc()
|
2016-05-05 17:19:48 +02:00
|
|
|
self._libvirt_conn = VirConnectWrapper(
|
2019-09-22 04:41:43 +02:00
|
|
|
qubes.config.defaults['libvirt_uri'],
|
|
|
|
reconnect_cb=self._libvirt_reconnect_cb)
|
2016-04-28 16:00:29 +02:00
|
|
|
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def libvirt_conn(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Connection to libvirt"""
|
2016-04-28 16:00:29 +02:00
|
|
|
self.init_vmm_connection()
|
|
|
|
return self._libvirt_conn
|
|
|
|
|
|
|
|
@property
|
|
|
|
def xs(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Connection to Xen Store
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
This property in available only when running on Xen.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# XXX what about the case when we run under KVM,
|
|
|
|
# but xen modules are importable?
|
|
|
|
if 'xen.lowlevel.xs' not in sys.modules:
|
|
|
|
raise AttributeError(
|
|
|
|
'xs object is available under Xen hypervisor only')
|
|
|
|
|
|
|
|
self.init_vmm_connection()
|
|
|
|
return self._xs
|
|
|
|
|
|
|
|
@property
|
|
|
|
def xc(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Connection to Xen
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
This property in available only when running on Xen.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# XXX what about the case when we run under KVM,
|
|
|
|
# but xen modules are importable?
|
|
|
|
if 'xen.lowlevel.xc' not in sys.modules:
|
|
|
|
raise AttributeError(
|
|
|
|
'xc object is available under Xen hypervisor only')
|
|
|
|
|
|
|
|
self.init_vmm_connection()
|
2017-07-27 22:16:03 +02:00
|
|
|
return self._xc
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-08-28 14:24:48 +02:00
|
|
|
def close(self):
|
|
|
|
libvirt.registerErrorHandler(None, None)
|
|
|
|
if self._xs:
|
|
|
|
self._xs.close()
|
|
|
|
self._xs = None
|
2016-04-28 16:00:29 +02:00
|
|
|
if self._libvirt_conn:
|
|
|
|
self._libvirt_conn.close()
|
2017-08-28 14:24:48 +02:00
|
|
|
self._libvirt_conn = None
|
|
|
|
self._xc = None # and pray it will get garbage-collected
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
class QubesHost:
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Basic information about host machine
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:param qubes.Qubes app: Qubes application context (must have \
|
|
|
|
:py:attr:`Qubes.vmm` attribute defined)
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
def __init__(self, app):
|
|
|
|
self.app = app
|
|
|
|
self._no_cpus = None
|
|
|
|
self._total_mem = None
|
|
|
|
self._physinfo = None
|
2020-06-10 05:47:53 +02:00
|
|
|
self._cpu_family = None
|
|
|
|
self._cpu_model = None
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
def _fetch(self):
|
|
|
|
if self._no_cpus is not None:
|
|
|
|
return
|
|
|
|
|
|
|
|
# pylint: disable=unused-variable
|
|
|
|
(model, memory, cpus, mhz, nodes, socket, cores, threads) = \
|
|
|
|
self.app.vmm.libvirt_conn.getInfo()
|
2017-01-18 22:16:46 +01:00
|
|
|
self._total_mem = int(memory) * 1024
|
2016-04-28 16:00:29 +02:00
|
|
|
self._no_cpus = cpus
|
|
|
|
|
|
|
|
self.app.log.debug('QubesHost: no_cpus={} memory_total={}'.format(
|
|
|
|
self.no_cpus, self.memory_total))
|
|
|
|
try:
|
|
|
|
self.app.log.debug('QubesHost: xen_free_memory={}'.format(
|
|
|
|
self.get_free_xen_memory()))
|
|
|
|
except NotImplementedError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
@property
|
|
|
|
def memory_total(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Total memory, in kbytes"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-06-16 21:08:44 +02:00
|
|
|
if self.app.vmm.offline_mode:
|
2019-10-20 12:21:09 +02:00
|
|
|
return 2 ** 64 - 1
|
2016-04-28 16:00:29 +02:00
|
|
|
self._fetch()
|
|
|
|
return self._total_mem
|
|
|
|
|
|
|
|
@property
|
|
|
|
def no_cpus(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Number of CPUs"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-06-16 21:08:44 +02:00
|
|
|
if self.app.vmm.offline_mode:
|
|
|
|
return 42
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
self._fetch()
|
|
|
|
return self._no_cpus
|
|
|
|
|
2020-06-10 05:47:53 +02:00
|
|
|
@property
|
|
|
|
def cpu_family_model(self):
|
|
|
|
"""Get CPU family and model"""
|
|
|
|
if self._cpu_family is None or self._cpu_model is None:
|
|
|
|
family = None
|
|
|
|
model = None
|
|
|
|
with open('/proc/cpuinfo') as cpuinfo:
|
|
|
|
for line in cpuinfo.readlines():
|
|
|
|
line = line.strip()
|
|
|
|
if not line:
|
|
|
|
# take info from the first core
|
|
|
|
break
|
|
|
|
field, value = line.split(':', 1)
|
|
|
|
if field.strip() == 'model':
|
|
|
|
model = int(value.strip())
|
|
|
|
elif field.strip() == 'cpu family':
|
|
|
|
family = int(value.strip())
|
|
|
|
self._cpu_family = family
|
|
|
|
self._cpu_model = model
|
|
|
|
return self._cpu_family, self._cpu_model
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
def get_free_xen_memory(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Get free memory from Xen's physinfo.
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:raises NotImplementedError: when not under Xen
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
try:
|
2019-10-30 15:46:11 +01:00
|
|
|
self._physinfo = self.app.vmm.xc.physinfo()
|
2016-04-28 16:00:29 +02:00
|
|
|
except AttributeError:
|
|
|
|
raise NotImplementedError('This function requires Xen hypervisor')
|
2017-01-18 22:16:46 +01:00
|
|
|
return int(self._physinfo['free_memory'])
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-10-29 03:13:49 +01:00
|
|
|
def is_iommu_supported(self):
|
|
|
|
"""Check if IOMMU is supported on this platform"""
|
|
|
|
if self._physinfo is None:
|
|
|
|
try:
|
|
|
|
self._physinfo = self.app.vmm.xc.physinfo()
|
|
|
|
except AttributeError:
|
|
|
|
raise NotImplementedError(
|
|
|
|
'This function requires Xen hypervisor')
|
|
|
|
return 'hvm_directio' in self._physinfo['virt_caps']
|
|
|
|
|
2017-07-27 22:16:03 +02:00
|
|
|
def get_vm_stats(self, previous_time=None, previous=None, only_vm=None):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Measure cpu usage for all domains at once.
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-07-27 22:16:03 +02:00
|
|
|
If previous measurements are given, CPU usage will be given in
|
|
|
|
percents of time. Otherwise only absolute value (seconds).
|
|
|
|
|
|
|
|
Return a tuple of (measurements_time, measurements),
|
|
|
|
where measurements is a dictionary with key: domid, value: dict:
|
|
|
|
- cpu_time - absolute CPU usage (seconds since its startup)
|
2019-07-31 17:56:36 +02:00
|
|
|
- cpu_usage_raw - CPU usage in %
|
|
|
|
- cpu_usage - CPU usage in % (normalized to number of vcpus)
|
2017-07-27 22:16:03 +02:00
|
|
|
- memory_kb - current memory assigned, in kb
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
This function requires Xen hypervisor.
|
|
|
|
|
2017-07-27 22:16:03 +02:00
|
|
|
..warning:
|
|
|
|
|
|
|
|
This function may return info about implementation-specific VMs,
|
|
|
|
like stubdomains for HVM
|
|
|
|
|
|
|
|
:param previous: previous measurement
|
|
|
|
:param previous_time: time of previous measurement
|
|
|
|
:param only_vm: get measurements only for this VM
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:raises NotImplementedError: when not under Xen
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-07-27 22:16:03 +02:00
|
|
|
if (previous_time is None) != (previous is None):
|
|
|
|
raise ValueError(
|
|
|
|
'previous and previous_time must be given together (or none)')
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
if previous is None:
|
|
|
|
previous = {}
|
|
|
|
|
|
|
|
current_time = time.time()
|
|
|
|
current = {}
|
|
|
|
try:
|
2017-07-27 22:16:03 +02:00
|
|
|
if only_vm:
|
|
|
|
xid = only_vm.xid
|
|
|
|
if xid < 0:
|
|
|
|
raise qubes.exc.QubesVMNotRunningError(only_vm)
|
|
|
|
info = self.app.vmm.xc.domain_getinfo(xid, 1)
|
|
|
|
if info[0]['domid'] != xid:
|
|
|
|
raise qubes.exc.QubesVMNotRunningError(only_vm)
|
|
|
|
else:
|
|
|
|
info = self.app.vmm.xc.domain_getinfo(0, 1024)
|
2016-04-28 16:00:29 +02:00
|
|
|
except AttributeError:
|
|
|
|
raise NotImplementedError(
|
|
|
|
'This function requires Xen hypervisor')
|
2017-07-27 22:16:03 +02:00
|
|
|
# TODO: add stubdomain stats to actual VMs
|
2016-04-28 16:00:29 +02:00
|
|
|
for vm in info:
|
2017-07-27 22:16:03 +02:00
|
|
|
domid = vm['domid']
|
|
|
|
current[domid] = {}
|
|
|
|
current[domid]['memory_kb'] = vm['mem_kb']
|
2019-07-31 17:56:36 +02:00
|
|
|
current[domid]['cpu_time'] = int(vm['cpu_time'])
|
|
|
|
vcpus = max(vm['online_vcpus'], 1)
|
2017-07-27 22:16:03 +02:00
|
|
|
if domid in previous:
|
2019-07-31 17:56:36 +02:00
|
|
|
current[domid]['cpu_usage_raw'] = int(
|
2017-07-27 22:16:03 +02:00
|
|
|
(current[domid]['cpu_time'] - previous[domid]['cpu_time'])
|
|
|
|
/ 1000 ** 3 * 100 / (current_time - previous_time))
|
2019-07-31 17:56:36 +02:00
|
|
|
if current[domid]['cpu_usage_raw'] < 0:
|
2016-04-28 16:00:29 +02:00
|
|
|
# VM has been rebooted
|
2019-07-31 17:56:36 +02:00
|
|
|
current[domid]['cpu_usage_raw'] = 0
|
2016-04-28 16:00:29 +02:00
|
|
|
else:
|
2019-07-31 17:56:36 +02:00
|
|
|
current[domid]['cpu_usage_raw'] = 0
|
|
|
|
current[domid]['cpu_usage'] = \
|
|
|
|
int(current[domid]['cpu_usage_raw'] / vcpus)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
return current_time, current
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
class VMCollection:
|
2019-10-20 12:21:09 +02:00
|
|
|
"""A collection of Qubes VMs
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
VMCollection supports ``in`` operator. You may test for ``qid``, ``name``
|
|
|
|
and whole VM object's presence.
|
|
|
|
|
|
|
|
Iterating over VMCollection will yield machine objects.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
def __init__(self, app):
|
|
|
|
self.app = app
|
|
|
|
self._dict = dict()
|
|
|
|
|
2017-08-28 14:24:48 +02:00
|
|
|
def close(self):
|
|
|
|
del self.app
|
|
|
|
self._dict.clear()
|
|
|
|
del self._dict
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
def __repr__(self):
|
|
|
|
return '<{} {!r}>'.format(
|
|
|
|
self.__class__.__name__, list(sorted(self.keys())))
|
|
|
|
|
|
|
|
def items(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Iterate over ``(qid, vm)`` pairs"""
|
2016-04-28 16:00:29 +02:00
|
|
|
for qid in self.qids():
|
|
|
|
yield (qid, self[qid])
|
|
|
|
|
|
|
|
def qids(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Iterate over all qids
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
qids are sorted by numerical order.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return iter(sorted(self._dict.keys()))
|
|
|
|
|
|
|
|
keys = qids
|
|
|
|
|
|
|
|
def names(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Iterate over all names
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
names are sorted by lexical order.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return iter(sorted(vm.name for vm in self._dict.values()))
|
|
|
|
|
|
|
|
def vms(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Iterate over all machines
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
vms are sorted by qid.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return iter(sorted(self._dict.values()))
|
|
|
|
|
|
|
|
__iter__ = vms
|
|
|
|
values = vms
|
|
|
|
|
2016-05-20 02:52:57 +02:00
|
|
|
def add(self, value, _enable_events=True):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Add VM to collection
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:param qubes.vm.BaseVM value: VM to add
|
2019-10-20 12:21:09 +02:00
|
|
|
:param _enable_events:
|
2016-04-28 16:00:29 +02:00
|
|
|
:raises TypeError: when value is of wrong type
|
|
|
|
:raises ValueError: when there is already VM which has equal ``qid``
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# this violates duck typing, but is needed
|
|
|
|
# for VMProperty to function correctly
|
|
|
|
if not isinstance(value, qubes.vm.BaseVM):
|
|
|
|
raise TypeError('{} holds only BaseVM instances'.format(
|
|
|
|
self.__class__.__name__))
|
|
|
|
|
|
|
|
if value.qid in self:
|
|
|
|
raise ValueError('This collection already holds VM that has '
|
2019-10-20 12:21:09 +02:00
|
|
|
'qid={!r} ({!r})'.format(value.qid,
|
|
|
|
self[value.qid]))
|
2016-04-28 16:00:29 +02:00
|
|
|
if value.name in self:
|
2016-07-13 00:01:58 +02:00
|
|
|
raise ValueError('A VM named {!s} already exists'
|
2019-10-20 12:21:09 +02:00
|
|
|
.format(value.name))
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
self._dict[value.qid] = value
|
2016-05-20 02:52:57 +02:00
|
|
|
if _enable_events:
|
|
|
|
value.events_enabled = True
|
2017-02-21 14:09:06 +01:00
|
|
|
self.app.fire_event('domain-add', vm=value)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return value
|
|
|
|
|
|
|
|
def __getitem__(self, key):
|
|
|
|
if isinstance(key, int):
|
|
|
|
return self._dict[key]
|
|
|
|
|
2017-01-18 22:16:46 +01:00
|
|
|
if isinstance(key, str):
|
2016-04-28 16:00:29 +02:00
|
|
|
for vm in self:
|
|
|
|
if vm.name == key:
|
|
|
|
return vm
|
|
|
|
raise KeyError(key)
|
|
|
|
|
|
|
|
if isinstance(key, qubes.vm.BaseVM):
|
2016-06-02 17:18:33 +02:00
|
|
|
key = key.uuid
|
|
|
|
|
|
|
|
if isinstance(key, uuid.UUID):
|
|
|
|
for vm in self:
|
|
|
|
if vm.uuid == key:
|
|
|
|
return vm
|
2016-04-28 16:00:29 +02:00
|
|
|
raise KeyError(key)
|
|
|
|
|
|
|
|
raise KeyError(key)
|
|
|
|
|
|
|
|
def __delitem__(self, key):
|
|
|
|
vm = self[key]
|
2016-07-13 18:24:29 +02:00
|
|
|
if not vm.is_halted():
|
2016-08-17 02:13:59 +02:00
|
|
|
raise qubes.exc.QubesVMNotHaltedError(vm)
|
2017-06-23 17:29:09 +02:00
|
|
|
self.app.fire_event('domain-pre-delete', pre_event=True, vm=vm)
|
2016-08-08 00:15:46 +02:00
|
|
|
try:
|
2018-01-17 15:23:39 +01:00
|
|
|
if vm.libvirt_domain:
|
|
|
|
vm.libvirt_domain.undefine()
|
2017-11-06 01:23:14 +01:00
|
|
|
# pylint: disable=protected-access
|
|
|
|
vm._libvirt_domain = None
|
2016-08-08 00:15:46 +02:00
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
|
|
|
|
# already undefined
|
|
|
|
pass
|
2016-04-28 16:00:29 +02:00
|
|
|
del self._dict[vm.qid]
|
2017-02-21 14:09:06 +01:00
|
|
|
self.app.fire_event('domain-delete', vm=vm)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
def __contains__(self, key):
|
2018-07-15 23:08:23 +02:00
|
|
|
return any((key in (vm, vm.qid, vm.name))
|
2016-04-28 16:00:29 +02:00
|
|
|
for vm in self)
|
|
|
|
|
|
|
|
def __len__(self):
|
|
|
|
return len(self._dict)
|
|
|
|
|
|
|
|
def get_vms_based_on(self, template):
|
|
|
|
template = self[template]
|
|
|
|
return set(vm for vm in self
|
2019-10-20 12:21:09 +02:00
|
|
|
if hasattr(vm, 'template') and vm.template == template)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
def get_vms_connected_to(self, netvm):
|
2019-10-20 12:21:09 +02:00
|
|
|
new_vms = {self[netvm]}
|
2016-04-28 16:00:29 +02:00
|
|
|
dependent_vms = set()
|
|
|
|
|
|
|
|
# Dependency resolving only makes sense on NetVM (or derivative)
|
2019-10-20 12:21:09 +02:00
|
|
|
# if not self[netvm_qid].is_netvm():
|
|
|
|
# return set([])
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-04-21 15:43:46 +02:00
|
|
|
while new_vms:
|
2016-04-28 16:00:29 +02:00
|
|
|
cur_vm = new_vms.pop()
|
2016-04-20 13:41:33 +02:00
|
|
|
for vm in cur_vm.connected_vms:
|
2016-04-28 16:00:29 +02:00
|
|
|
if vm in dependent_vms:
|
|
|
|
continue
|
2016-11-03 00:59:29 +01:00
|
|
|
dependent_vms.add(vm)
|
2019-10-20 12:21:09 +02:00
|
|
|
# if vm.is_netvm():
|
2016-11-03 00:59:29 +01:00
|
|
|
new_vms.add(vm)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return dependent_vms
|
|
|
|
|
|
|
|
# XXX with Qubes Admin Api this will probably lead to race condition
|
|
|
|
# whole process of creating and adding should be synchronised
|
|
|
|
def get_new_unused_qid(self):
|
|
|
|
used_ids = set(self.qids())
|
|
|
|
for i in range(1, qubes.config.max_qid):
|
|
|
|
if i not in used_ids:
|
|
|
|
return i
|
|
|
|
raise LookupError("Cannot find unused qid!")
|
|
|
|
|
2016-06-02 17:20:13 +02:00
|
|
|
def get_new_unused_dispid(self):
|
2016-08-17 00:42:17 +02:00
|
|
|
for _ in range(int(qubes.config.max_dispid ** 0.5)):
|
2016-06-02 17:20:13 +02:00
|
|
|
dispid = random.SystemRandom().randrange(qubes.config.max_dispid)
|
|
|
|
if not any(getattr(vm, 'dispid', None) == dispid for vm in self):
|
|
|
|
return dispid
|
|
|
|
raise LookupError((
|
2019-10-20 12:21:09 +02:00
|
|
|
'https://xkcd.com/221/',
|
|
|
|
'http://dilbert.com/strip/2001-10-25')[
|
|
|
|
random.randint(0, 1)])
|
2016-06-02 17:20:13 +02:00
|
|
|
|
2017-11-10 02:40:38 +01:00
|
|
|
|
2017-07-01 21:29:47 +02:00
|
|
|
def _default_pool(app):
|
2019-10-20 12:21:09 +02:00
|
|
|
""" Default storage pool.
|
2017-07-01 21:29:47 +02:00
|
|
|
|
|
|
|
1. If there is one named 'default', use it.
|
|
|
|
2. Check if root fs is on LVM thin - use that
|
2018-09-12 01:50:26 +02:00
|
|
|
3. Look for file(-reflink)-based pool pointing to /var/lib/qubes
|
2017-07-01 21:29:47 +02:00
|
|
|
4. Fail
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2017-07-01 21:29:47 +02:00
|
|
|
if 'default' in app.pools:
|
|
|
|
return app.pools['default']
|
2017-11-10 02:40:38 +01:00
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
if 'DEFAULT_LVM_POOL' in os.environ:
|
|
|
|
thin_pool = os.environ['DEFAULT_LVM_POOL']
|
2017-07-01 21:29:47 +02:00
|
|
|
for pool in app.pools.values():
|
2018-07-15 23:08:23 +02:00
|
|
|
if pool.config.get('driver', None) != 'lvm_thin':
|
2017-07-01 21:29:47 +02:00
|
|
|
continue
|
2018-07-15 23:08:23 +02:00
|
|
|
if pool.config['thin_pool'] == thin_pool:
|
2017-07-01 21:29:47 +02:00
|
|
|
return pool
|
2018-07-15 23:08:23 +02:00
|
|
|
# no DEFAULT_LVM_POOL, or pool not defined
|
|
|
|
root_volume_group, root_thin_pool = \
|
|
|
|
qubes.storage.DirectoryThinPool.thin_pool('/')
|
|
|
|
if root_thin_pool:
|
|
|
|
for pool in app.pools.values():
|
|
|
|
if pool.config.get('driver', None) != 'lvm_thin':
|
|
|
|
continue
|
|
|
|
if (pool.config['volume_group'] == root_volume_group and
|
2019-10-20 12:21:09 +02:00
|
|
|
pool.config['thin_pool'] == root_thin_pool):
|
2018-07-15 23:08:23 +02:00
|
|
|
return pool
|
|
|
|
|
|
|
|
# not a thin volume? look for file pools
|
|
|
|
for pool in app.pools.values():
|
|
|
|
if pool.config.get('driver', None) not in ('file', 'file-reflink'):
|
|
|
|
continue
|
|
|
|
if pool.config['dir_path'] == qubes.config.qubes_base_dir:
|
|
|
|
return pool
|
|
|
|
raise AttributeError('Cannot determine default storage pool')
|
2017-07-01 21:29:47 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
|
2017-07-01 21:29:47 +02:00
|
|
|
def _setter_pool(app, prop, value):
|
|
|
|
if isinstance(value, qubes.storage.Pool):
|
|
|
|
return value
|
|
|
|
try:
|
|
|
|
return app.pools[value]
|
|
|
|
except KeyError:
|
|
|
|
raise qubes.exc.QubesPropertyValueError(app, prop, value,
|
2019-10-20 12:21:09 +02:00
|
|
|
'No such storage pool')
|
|
|
|
|
2016-06-02 17:20:13 +02:00
|
|
|
|
2017-12-01 02:59:17 +01:00
|
|
|
def _setter_default_netvm(app, prop, value):
|
|
|
|
# skip netvm loop check while loading qubes.xml, to avoid tricky loading
|
|
|
|
# order
|
|
|
|
if not app.events_enabled:
|
|
|
|
return value
|
|
|
|
|
|
|
|
if value is None:
|
|
|
|
return value
|
|
|
|
# forbid setting to a value that would result in netvm loop
|
|
|
|
for vm in app.domains:
|
|
|
|
if not hasattr(vm, 'netvm'):
|
|
|
|
continue
|
|
|
|
if not vm.property_is_default('netvm'):
|
|
|
|
continue
|
|
|
|
if value == vm \
|
|
|
|
or value in app.domains.get_vms_connected_to(vm):
|
2019-10-20 12:21:09 +02:00
|
|
|
raise qubes.exc.QubesPropertyValueError(
|
|
|
|
app, prop, value, 'Network loop on \'{!s}\''.format(vm))
|
2017-12-01 02:59:17 +01:00
|
|
|
return value
|
|
|
|
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
class Qubes(qubes.PropertyHolder):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Main Qubes application
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:param str store: path to ``qubes.xml``
|
|
|
|
|
|
|
|
The store is loaded in stages:
|
|
|
|
|
|
|
|
1. In the first stage there are loaded some basic features from store
|
|
|
|
(currently labels).
|
|
|
|
|
|
|
|
2. In the second stage stubs for all VMs are loaded. They are filled
|
|
|
|
with their basic properties, like ``qid`` and ``name``.
|
|
|
|
|
|
|
|
3. In the third stage all global properties are loaded. They often
|
|
|
|
reference VMs, like default netvm, so they should be filled after
|
|
|
|
loading VMs.
|
|
|
|
|
|
|
|
4. In the fourth stage all remaining VM properties are loaded. They
|
|
|
|
also need all VMs loaded, because they represent dependencies
|
|
|
|
between VMs like aforementioned netvm.
|
|
|
|
|
|
|
|
5. In the fifth stage there are some fixups to ensure sane system
|
|
|
|
operation.
|
|
|
|
|
|
|
|
This class emits following events:
|
|
|
|
|
|
|
|
.. event:: domain-add (subject, event, vm)
|
|
|
|
|
|
|
|
When domain is added.
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'domain-add'``)
|
|
|
|
:param vm: Domain object
|
|
|
|
|
2016-10-25 17:11:38 +02:00
|
|
|
.. event:: domain-pre-delete (subject, event, vm)
|
|
|
|
|
|
|
|
When domain is deleted. VM still has reference to ``app`` object,
|
|
|
|
and is contained within VMCollection. You may prevent removal by
|
|
|
|
raising an exception.
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'domain-pre-delete'``)
|
|
|
|
:param vm: Domain object
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
.. event:: domain-delete (subject, event, vm)
|
|
|
|
|
|
|
|
When domain is deleted. VM still has reference to ``app`` object,
|
|
|
|
but is not contained within VMCollection.
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'domain-delete'``)
|
|
|
|
:param vm: Domain object
|
|
|
|
|
2019-02-18 21:20:30 +01:00
|
|
|
.. event:: pool-add (subject, event, pool)
|
|
|
|
|
|
|
|
When storage pool is added.
|
|
|
|
|
|
|
|
Handler for this event can be asynchronous (a coroutine).
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'pool-add'``)
|
|
|
|
:param pool: Pool object
|
|
|
|
|
|
|
|
.. event:: pool-pre-delete (subject, event, pool)
|
|
|
|
|
|
|
|
When pool is deleted. Pool is still contained within app.pools
|
|
|
|
dictionary. You may prevent removal by raising an exception.
|
|
|
|
|
|
|
|
Handler for this event can be asynchronous (a coroutine).
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'pool-pre-delete'``)
|
|
|
|
:param pool: Pool object
|
|
|
|
|
|
|
|
.. event:: pool-delete (subject, event, pool)
|
|
|
|
|
|
|
|
When storage pool is deleted. The pool is already removed at this
|
|
|
|
point.
|
|
|
|
|
|
|
|
Handler for this event can be asynchronous (a coroutine).
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'pool-delete'``)
|
|
|
|
:param pool: Pool object
|
|
|
|
|
2020-01-09 01:42:46 +01:00
|
|
|
.. event:: qubes-close (subject, event)
|
|
|
|
|
|
|
|
Fired when this Qubes() object instance is going to be closed
|
|
|
|
and destroyed. In practice it is called only during tests, to
|
|
|
|
cleanup objects from one test, before another.
|
|
|
|
It is _not_ called when qubesd daemon is stopped.
|
|
|
|
|
|
|
|
:param subject: Event emitter
|
|
|
|
:param event: Event name (``'qubes-close'``)
|
|
|
|
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
Methods and attributes:
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2019-10-22 09:26:25 +02:00
|
|
|
default_guivm = qubes.VMProperty(
|
|
|
|
'default_guivm',
|
|
|
|
load_stage=3,
|
2019-11-16 13:04:42 +01:00
|
|
|
default=lambda app: app.domains['dom0'], allow_none=True,
|
2019-10-22 09:26:25 +02:00
|
|
|
doc='Default GuiVM for VMs.')
|
|
|
|
|
2020-02-27 10:31:27 +01:00
|
|
|
default_audiovm = qubes.VMProperty(
|
|
|
|
'default_audiovm',
|
|
|
|
load_stage=3,
|
|
|
|
default=lambda app: app.domains['dom0'], allow_none=True,
|
|
|
|
doc='Default AudioVM for VMs.')
|
|
|
|
|
2019-10-22 09:26:25 +02:00
|
|
|
default_netvm = qubes.VMProperty(
|
|
|
|
'default_netvm',
|
|
|
|
load_stage=3,
|
|
|
|
default=None, allow_none=True,
|
|
|
|
setter=_setter_default_netvm,
|
|
|
|
doc="""Default NetVM for AppVMs. Initial state is `None`, which means
|
|
|
|
that AppVMs are not connected to the Internet.""")
|
|
|
|
default_template = qubes.VMProperty(
|
|
|
|
'default_template', load_stage=3,
|
|
|
|
vmclass=qubes.vm.templatevm.TemplateVM,
|
|
|
|
doc='Default template for new AppVMs',
|
|
|
|
allow_none=True)
|
|
|
|
updatevm = qubes.VMProperty(
|
|
|
|
'updatevm', load_stage=3,
|
|
|
|
default=None, allow_none=True,
|
|
|
|
doc="""Which VM to use as `yum` proxy for updating AdminVM and
|
|
|
|
TemplateVMs""")
|
|
|
|
clockvm = qubes.VMProperty(
|
|
|
|
'clockvm', load_stage=3,
|
|
|
|
default=None, allow_none=True,
|
|
|
|
doc='Which VM to use as NTP proxy for updating '
|
|
|
|
'AdminVM')
|
|
|
|
default_kernel = qubes.property(
|
|
|
|
'default_kernel', load_stage=3,
|
|
|
|
doc='Which kernel to use when not overriden in VM')
|
|
|
|
default_dispvm = qubes.VMProperty(
|
|
|
|
'default_dispvm',
|
|
|
|
load_stage=3,
|
|
|
|
default=None,
|
|
|
|
doc='Default DispVM base for service calls',
|
|
|
|
allow_none=True)
|
|
|
|
|
|
|
|
management_dispvm = qubes.VMProperty(
|
|
|
|
'management_dispvm',
|
|
|
|
load_stage=3,
|
|
|
|
default=None,
|
|
|
|
doc='Default DispVM base for managing VMs',
|
|
|
|
allow_none=True)
|
|
|
|
|
|
|
|
default_pool = qubes.property(
|
|
|
|
'default_pool',
|
|
|
|
load_stage=3,
|
|
|
|
default=_default_pool,
|
|
|
|
setter=_setter_pool,
|
|
|
|
doc='Default storage pool')
|
|
|
|
|
|
|
|
default_pool_private = qubes.property(
|
|
|
|
'default_pool_private',
|
|
|
|
load_stage=3,
|
|
|
|
default=lambda app: app.default_pool,
|
|
|
|
setter=_setter_pool,
|
|
|
|
doc='Default storage pool for private volumes')
|
|
|
|
|
|
|
|
default_pool_root = qubes.property(
|
|
|
|
'default_pool_root',
|
|
|
|
load_stage=3,
|
|
|
|
default=lambda app: app.default_pool,
|
|
|
|
setter=_setter_pool,
|
|
|
|
doc='Default storage pool for root volumes')
|
|
|
|
|
|
|
|
default_pool_volatile = qubes.property(
|
|
|
|
'default_pool_volatile',
|
|
|
|
load_stage=3,
|
|
|
|
default=lambda app: app.default_pool,
|
|
|
|
setter=_setter_pool,
|
|
|
|
doc='Default storage pool for volatile volumes')
|
|
|
|
|
|
|
|
default_pool_kernel = qubes.property(
|
|
|
|
'default_pool_kernel',
|
|
|
|
load_stage=3,
|
|
|
|
default=lambda app: app.default_pool,
|
|
|
|
setter=_setter_pool,
|
|
|
|
doc='Default storage pool for kernel volumes')
|
|
|
|
|
|
|
|
default_qrexec_timeout = qubes.property(
|
|
|
|
'default_qrexec_timeout',
|
|
|
|
load_stage=3,
|
|
|
|
default=60,
|
|
|
|
type=int,
|
|
|
|
doc="""Default time in seconds after which qrexec connection attempt
|
|
|
|
is deemed failed""")
|
|
|
|
|
|
|
|
default_shutdown_timeout = qubes.property(
|
|
|
|
'default_shutdown_timeout',
|
|
|
|
load_stage=3,
|
|
|
|
default=60,
|
|
|
|
type=int,
|
|
|
|
doc="""Default time in seconds for VM shutdown to complete""")
|
|
|
|
|
|
|
|
stats_interval = qubes.property(
|
|
|
|
'stats_interval',
|
|
|
|
load_stage=3,
|
|
|
|
default=3,
|
|
|
|
type=int,
|
|
|
|
doc='Interval in seconds for VM stats reporting (memory, CPU usage)')
|
2017-07-27 22:20:12 +02:00
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
# TODO #1637 #892
|
2019-10-22 09:26:25 +02:00
|
|
|
check_updates_vm = qubes.property(
|
|
|
|
'check_updates_vm',
|
|
|
|
type=bool,
|
|
|
|
setter=qubes.property.bool,
|
|
|
|
load_stage=3,
|
|
|
|
default=True,
|
|
|
|
doc='Check for updates inside qubes')
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-11-02 23:31:47 +01:00
|
|
|
def __init__(self, store=None, load=True, offline_mode=None, lock=False,
|
2019-10-20 12:21:09 +02:00
|
|
|
**kwargs):
|
2016-04-28 16:00:29 +02:00
|
|
|
#: logger instance for logging global messages
|
|
|
|
self.log = logging.getLogger('app')
|
2017-08-28 14:24:48 +02:00
|
|
|
self.log.debug('init() -> %#x', id(self))
|
|
|
|
self.log.debug('stack:')
|
|
|
|
for frame in traceback.extract_stack():
|
|
|
|
self.log.debug('%s', frame)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
self._extensions = qubes.ext.get_extensions()
|
|
|
|
|
|
|
|
#: collection of all VMs managed by this Qubes instance
|
|
|
|
self.domains = VMCollection(self)
|
|
|
|
|
|
|
|
#: collection of all available labels for VMs
|
|
|
|
self.labels = {}
|
|
|
|
|
|
|
|
#: collection of all pools
|
|
|
|
self.pools = {}
|
|
|
|
|
|
|
|
#: Connection to VMM
|
2019-10-20 12:21:09 +02:00
|
|
|
self.vmm = VMMConnection(
|
|
|
|
offline_mode=offline_mode,
|
2019-09-22 04:41:43 +02:00
|
|
|
libvirt_reconnect_cb=self.register_event_handlers)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
#: Information about host system
|
|
|
|
self.host = QubesHost(self)
|
|
|
|
|
|
|
|
if store is not None:
|
|
|
|
self._store = store
|
|
|
|
else:
|
|
|
|
self._store = os.environ.get('QUBES_XML_PATH',
|
2019-10-20 12:21:09 +02:00
|
|
|
os.path.join(
|
|
|
|
qubes.config.qubes_base_dir,
|
|
|
|
qubes.config.system_path[
|
|
|
|
'qubes_store_filename']))
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
super(Qubes, self).__init__(xml=None, **kwargs)
|
|
|
|
|
|
|
|
self.__load_timestamp = None
|
2016-10-27 17:30:06 +02:00
|
|
|
self.__locked_fh = None
|
2017-08-28 14:24:48 +02:00
|
|
|
self._domain_event_callback_id = None
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
#: jinja2 environment for libvirt XML templates
|
|
|
|
self.env = jinja2.Environment(
|
2016-10-04 11:30:29 +02:00
|
|
|
loader=jinja2.FileSystemLoader([
|
|
|
|
'/etc/qubes/templates',
|
|
|
|
'/usr/share/qubes/templates',
|
|
|
|
]),
|
2016-04-28 16:00:29 +02:00
|
|
|
undefined=jinja2.StrictUndefined)
|
|
|
|
|
|
|
|
if load:
|
2016-10-27 17:30:06 +02:00
|
|
|
self.load(lock=lock)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
self.events_enabled = True
|
|
|
|
|
|
|
|
@property
|
|
|
|
def store(self):
|
|
|
|
return self._store
|
|
|
|
|
2017-12-01 03:05:57 +01:00
|
|
|
def _migrate_global_properties(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Migrate renamed/dropped properties"""
|
2017-12-01 03:05:57 +01:00
|
|
|
if self.xml is None:
|
|
|
|
return
|
|
|
|
|
|
|
|
# drop default_fw_netvm
|
|
|
|
node_default_fw_netvm = self.xml.find(
|
|
|
|
'./properties/property[@name=\'default_fw_netvm\']')
|
|
|
|
if node_default_fw_netvm is not None:
|
|
|
|
node_default_netvm = self.xml.find(
|
|
|
|
'./properties/property[@name=\'default_netvm\']')
|
|
|
|
try:
|
|
|
|
default_fw_netvm = self.domains[node_default_fw_netvm.text]
|
|
|
|
if node_default_netvm is None:
|
|
|
|
default_netvm = None
|
|
|
|
else:
|
|
|
|
default_netvm = self.domains[node_default_netvm.text]
|
|
|
|
if default_netvm != default_fw_netvm:
|
|
|
|
for vm in self.domains:
|
|
|
|
if not hasattr(vm, 'netvm'):
|
|
|
|
continue
|
|
|
|
if not getattr(vm, 'provides_network', False):
|
|
|
|
continue
|
|
|
|
node_netvm = vm.xml.find(
|
|
|
|
'./properties/property[@name=\'netvm\']')
|
|
|
|
if node_netvm is not None:
|
|
|
|
# non-default netvm
|
|
|
|
continue
|
|
|
|
# this will unfortunately break "being default"
|
|
|
|
# property state, but the alternative (changing
|
|
|
|
# value behind user's back) is worse
|
|
|
|
properties = vm.xml.find('./properties')
|
|
|
|
element = lxml.etree.Element('property',
|
2019-10-20 12:21:09 +02:00
|
|
|
name='netvm')
|
2017-12-01 03:05:57 +01:00
|
|
|
element.text = default_fw_netvm.name
|
|
|
|
# manipulate xml directly, before loading netvm
|
|
|
|
# property, to avoid hitting netvm loop detection
|
|
|
|
properties.append(element)
|
|
|
|
except KeyError:
|
|
|
|
# if default_fw_netvm was set to invalid value, simply
|
|
|
|
# drop it
|
|
|
|
pass
|
|
|
|
node_default_fw_netvm.getparent().remove(node_default_fw_netvm)
|
|
|
|
|
2020-08-13 19:16:52 +02:00
|
|
|
def _migrate_labels(self):
|
|
|
|
"""Migrate changed labels"""
|
|
|
|
if self.xml is None:
|
|
|
|
return
|
|
|
|
|
|
|
|
# fix grey being green
|
|
|
|
grey_label = self.xml.find('./labels/label[@color=\'0x555753\']')
|
|
|
|
if grey_label is not None:
|
|
|
|
grey_label.set('color', '0x555555')
|
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
def load(self, lock=False):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Open qubes.xml
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:throws EnvironmentError: failure on parsing store
|
|
|
|
:throws xml.parsers.expat.ExpatError: failure on parsing store
|
|
|
|
:raises lxml.etree.XMLSyntaxError: on syntax error in qubes.xml
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
fh = self._acquire_lock()
|
2016-04-28 16:00:29 +02:00
|
|
|
self.xml = lxml.etree.parse(fh)
|
|
|
|
|
2020-08-13 19:16:52 +02:00
|
|
|
self._migrate_labels()
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
# stage 1: load labels and pools
|
|
|
|
for node in self.xml.xpath('./labels/label'):
|
|
|
|
label = qubes.Label.fromxml(node)
|
|
|
|
self.labels[label.index] = label
|
|
|
|
|
|
|
|
for node in self.xml.xpath('./pools/pool'):
|
|
|
|
name = node.get('name')
|
|
|
|
assert name, "Pool name '%s' is invalid " % name
|
|
|
|
try:
|
|
|
|
self.pools[name] = self._get_pool(**node.attrib)
|
|
|
|
except qubes.exc.QubesException as e:
|
2017-01-18 22:16:46 +01:00
|
|
|
self.log.error(str(e))
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# stage 2: load VMs
|
|
|
|
for node in self.xml.xpath('./domains/domain'):
|
|
|
|
# pylint: disable=no-member
|
|
|
|
cls = self.get_vm_class(node.get('class'))
|
|
|
|
vm = cls(self, node)
|
|
|
|
vm.load_properties(load_stage=2)
|
|
|
|
vm.init_log()
|
2016-05-20 02:52:57 +02:00
|
|
|
self.domains.add(vm, _enable_events=False)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
if 0 not in self.domains:
|
2016-05-20 02:52:57 +02:00
|
|
|
self.domains.add(
|
2017-12-01 03:03:44 +01:00
|
|
|
qubes.vm.adminvm.AdminVM(self, None),
|
2016-05-20 02:52:57 +02:00
|
|
|
_enable_events=False)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-12-01 03:05:57 +01:00
|
|
|
self._migrate_global_properties()
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
# stage 3: load global properties
|
|
|
|
self.load_properties(load_stage=3)
|
|
|
|
|
|
|
|
# stage 4: fill all remaining VM properties
|
|
|
|
for vm in self.domains:
|
|
|
|
vm.load_properties(load_stage=4)
|
2016-06-26 02:18:13 +02:00
|
|
|
vm.load_extras()
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# stage 5: misc fixups
|
|
|
|
|
2019-10-20 12:44:27 +02:00
|
|
|
self.property_require('default_guivm', allow_none=True)
|
2016-04-28 16:00:29 +02:00
|
|
|
self.property_require('default_netvm', allow_none=True)
|
2019-09-19 20:20:36 +02:00
|
|
|
self.property_require('default_template', allow_none=True)
|
2016-04-28 16:00:29 +02:00
|
|
|
self.property_require('clockvm', allow_none=True)
|
|
|
|
self.property_require('updatevm', allow_none=True)
|
|
|
|
|
|
|
|
for vm in self.domains:
|
|
|
|
vm.events_enabled = True
|
|
|
|
vm.fire_event('domain-load')
|
|
|
|
|
|
|
|
# get a file timestamp (before closing it - still holding the lock!),
|
|
|
|
# to detect whether anyone else have modified it in the meantime
|
|
|
|
self.__load_timestamp = os.path.getmtime(self._store)
|
2016-10-27 17:30:06 +02:00
|
|
|
|
|
|
|
if not lock:
|
|
|
|
self._release_lock()
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
def __xml__(self):
|
|
|
|
element = lxml.etree.Element('qubes')
|
|
|
|
|
|
|
|
element.append(self.xml_labels())
|
|
|
|
|
|
|
|
pools_xml = lxml.etree.Element('pools')
|
|
|
|
for pool in self.pools.values():
|
2016-04-27 19:39:02 +02:00
|
|
|
xml = pool.__xml__()
|
|
|
|
if xml is not None:
|
|
|
|
pools_xml.append(xml)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
element.append(pools_xml)
|
|
|
|
|
|
|
|
element.append(self.xml_properties())
|
|
|
|
|
|
|
|
domains = lxml.etree.Element('domains')
|
|
|
|
for vm in self.domains:
|
|
|
|
domains.append(vm.__xml__())
|
|
|
|
element.append(domains)
|
|
|
|
|
|
|
|
return element
|
|
|
|
|
2017-09-26 14:55:45 +02:00
|
|
|
def __str__(self):
|
|
|
|
return type(self).__name__
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
def save(self, lock=True):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Save all data to qubes.xml
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
There are several problems with saving :file:`qubes.xml` which must be
|
|
|
|
mitigated:
|
|
|
|
|
|
|
|
- Running out of disk space. No space left should not result in empty
|
|
|
|
file. This is done by writing to temporary file and then renaming.
|
|
|
|
- Attempts to write two or more files concurrently. This is done by
|
|
|
|
sophisticated locking.
|
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
:param bool lock: keep file locked after saving
|
2016-04-28 16:00:29 +02:00
|
|
|
:throws EnvironmentError: failure on saving
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
if not self.__locked_fh:
|
|
|
|
self._acquire_lock(for_save=True)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
fh_new = tempfile.NamedTemporaryFile(
|
|
|
|
prefix=self._store, delete=False)
|
2016-04-28 16:00:29 +02:00
|
|
|
lxml.etree.ElementTree(self.__xml__()).write(
|
|
|
|
fh_new, encoding='utf-8', pretty_print=True)
|
|
|
|
fh_new.flush()
|
2016-11-25 00:19:27 +01:00
|
|
|
try:
|
|
|
|
os.chown(fh_new.name, -1, grp.getgrnam('qubes').gr_gid)
|
|
|
|
os.chmod(fh_new.name, 0o660)
|
|
|
|
except KeyError: # group 'qubes' not found
|
|
|
|
# don't change mode if no 'qubes' group in the system
|
|
|
|
pass
|
2016-04-28 16:00:29 +02:00
|
|
|
os.rename(fh_new.name, self._store)
|
|
|
|
|
|
|
|
# update stored mtime, in case of multiple save() calls without
|
|
|
|
# loading qubes.xml again
|
|
|
|
self.__load_timestamp = os.path.getmtime(self._store)
|
2016-10-27 17:30:06 +02:00
|
|
|
|
|
|
|
# this releases lock for all other processes,
|
|
|
|
# but they should instantly block on the new descriptor
|
|
|
|
self.__locked_fh.close()
|
|
|
|
self.__locked_fh = fh_new
|
|
|
|
|
|
|
|
if not lock:
|
|
|
|
self._release_lock()
|
|
|
|
|
2017-08-28 14:24:48 +02:00
|
|
|
def close(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Deconstruct the object and break circular references
|
2017-08-28 14:24:48 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
After calling this the object is unusable, not even for saving."""
|
2017-08-28 14:24:48 +02:00
|
|
|
|
|
|
|
self.log.debug('close() <- %#x', id(self))
|
|
|
|
for frame in traceback.extract_stack():
|
|
|
|
self.log.debug('%s', frame)
|
|
|
|
|
2020-01-09 01:42:46 +01:00
|
|
|
# let all the extension cleanup things
|
|
|
|
self.fire_event('qubes-close')
|
|
|
|
|
2017-08-28 14:24:48 +02:00
|
|
|
super().close()
|
|
|
|
|
|
|
|
if self._domain_event_callback_id is not None:
|
|
|
|
self.vmm.libvirt_conn.domainEventDeregisterAny(
|
|
|
|
self._domain_event_callback_id)
|
|
|
|
self._domain_event_callback_id = None
|
|
|
|
|
|
|
|
# Only our Lord, The God Almighty, knows what references
|
|
|
|
# are kept in extensions.
|
2020-01-09 01:42:46 +01:00
|
|
|
# NOTE: this doesn't really delete extension objects - Extension class
|
|
|
|
# saves reference to instance, and also various registered (class level)
|
|
|
|
# event handlers do that too
|
2017-08-28 14:24:48 +02:00
|
|
|
del self._extensions
|
|
|
|
|
|
|
|
for vm in self.domains:
|
|
|
|
vm.close()
|
|
|
|
self.domains.close()
|
|
|
|
del self.domains
|
|
|
|
|
|
|
|
self.vmm.close()
|
|
|
|
del self.vmm
|
|
|
|
|
|
|
|
del self.host
|
|
|
|
|
|
|
|
if self.__locked_fh:
|
|
|
|
self._release_lock()
|
|
|
|
|
2016-10-27 17:30:06 +02:00
|
|
|
def _acquire_lock(self, for_save=False):
|
|
|
|
assert self.__locked_fh is None, 'double lock'
|
|
|
|
|
|
|
|
while True:
|
|
|
|
try:
|
|
|
|
fd = os.open(self._store,
|
2019-10-20 12:21:09 +02:00
|
|
|
os.O_RDWR | (os.O_CREAT * int(for_save)))
|
2018-09-13 21:46:45 +02:00
|
|
|
except FileNotFoundError:
|
|
|
|
if not for_save:
|
2016-10-27 17:30:06 +02:00
|
|
|
raise qubes.exc.QubesException(
|
|
|
|
'Qubes XML store {!r} is missing; '
|
|
|
|
'use qubes-create tool'.format(self._store))
|
|
|
|
raise
|
|
|
|
|
|
|
|
# While we were waiting for lock, someone could have unlink()ed
|
|
|
|
# (or rename()d) our file out of the filesystem. We have to
|
|
|
|
# ensure we got lock on something linked to filesystem.
|
|
|
|
# If not, try again.
|
|
|
|
if os.fstat(fd) != os.stat(self._store):
|
|
|
|
os.close(fd)
|
|
|
|
continue
|
|
|
|
|
|
|
|
if self.__load_timestamp and \
|
|
|
|
os.path.getmtime(self._store) != self.__load_timestamp:
|
|
|
|
os.close(fd)
|
|
|
|
raise qubes.exc.QubesException(
|
|
|
|
'Someone else modified qubes.xml in the meantime')
|
|
|
|
|
|
|
|
break
|
|
|
|
|
|
|
|
if os.name == 'posix':
|
|
|
|
fcntl.lockf(fd, fcntl.LOCK_EX)
|
|
|
|
elif os.name == 'nt':
|
|
|
|
# pylint: disable=protected-access
|
|
|
|
overlapped = pywintypes.OVERLAPPED()
|
|
|
|
win32file.LockFileEx(
|
|
|
|
win32file._get_osfhandle(fd),
|
|
|
|
win32con.LOCKFILE_EXCLUSIVE_LOCK, 0, -0x10000, overlapped)
|
|
|
|
|
|
|
|
self.__locked_fh = os.fdopen(fd, 'r+b')
|
|
|
|
return self.__locked_fh
|
|
|
|
|
|
|
|
def _release_lock(self):
|
|
|
|
assert self.__locked_fh is not None, 'double release'
|
|
|
|
|
|
|
|
# intentionally do not call explicit unlock to not unlock the file
|
|
|
|
# before all buffers are flushed
|
|
|
|
self.__locked_fh.close()
|
|
|
|
self.__locked_fh = None
|
|
|
|
|
2016-05-21 03:32:13 +02:00
|
|
|
def load_initial_values(self):
|
2016-04-28 16:00:29 +02:00
|
|
|
self.labels = {
|
|
|
|
1: qubes.Label(1, '0xcc0000', 'red'),
|
|
|
|
2: qubes.Label(2, '0xf57900', 'orange'),
|
|
|
|
3: qubes.Label(3, '0xedd400', 'yellow'),
|
|
|
|
4: qubes.Label(4, '0x73d216', 'green'),
|
2020-08-13 19:16:52 +02:00
|
|
|
5: qubes.Label(5, '0x555555', 'gray'),
|
2016-04-28 16:00:29 +02:00
|
|
|
6: qubes.Label(6, '0x3465a4', 'blue'),
|
|
|
|
7: qubes.Label(7, '0x75507b', 'purple'),
|
|
|
|
8: qubes.Label(8, '0x000000', 'black'),
|
|
|
|
}
|
2017-03-13 15:13:20 +01:00
|
|
|
assert max(self.labels.keys()) == qubes.config.max_default_label
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2018-09-12 01:50:25 +02:00
|
|
|
pool_configs = copy.deepcopy(qubes.config.defaults['pool_configs'])
|
|
|
|
|
|
|
|
for name, config in pool_configs.items():
|
2018-09-12 01:50:26 +02:00
|
|
|
if 'driver' not in config and 'dir_path' in config:
|
|
|
|
config['driver'] = 'file'
|
|
|
|
try:
|
|
|
|
os.makedirs(config['dir_path'], exist_ok=True)
|
|
|
|
if qubes.storage.reflink.is_supported(config['dir_path']):
|
|
|
|
config['driver'] = 'file-reflink'
|
2020-06-22 16:03:19 +02:00
|
|
|
config['setup_check'] = False # don't check twice
|
2018-09-12 01:50:26 +02:00
|
|
|
except PermissionError: # looks like a testing environment
|
|
|
|
pass # stay with 'file'
|
2016-11-02 06:34:10 +01:00
|
|
|
self.pools[name] = self._get_pool(**config)
|
2016-07-14 14:54:11 +02:00
|
|
|
|
2017-07-01 21:29:47 +02:00
|
|
|
self.default_pool_kernel = 'linux-kernel'
|
|
|
|
|
2016-04-28 16:00:29 +02:00
|
|
|
self.domains.add(
|
2017-04-01 01:25:57 +02:00
|
|
|
qubes.vm.adminvm.AdminVM(self, None, label='black'))
|
2016-05-21 03:32:13 +02:00
|
|
|
|
|
|
|
@classmethod
|
2016-11-30 18:34:11 +01:00
|
|
|
def create_empty_store(cls, *args, **kwargs):
|
2016-05-21 03:32:13 +02:00
|
|
|
self = cls(*args, load=False, **kwargs)
|
2016-11-02 06:10:54 +01:00
|
|
|
if os.path.exists(self.store):
|
|
|
|
raise qubes.exc.QubesException(
|
|
|
|
'{} already exists, aborting'.format(self.store))
|
2016-05-21 03:32:13 +02:00
|
|
|
self.load_initial_values()
|
2016-11-30 18:34:11 +01:00
|
|
|
# TODO py3 get lock= as keyword-only arg
|
|
|
|
self.save(kwargs.get('lock'))
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return self
|
|
|
|
|
|
|
|
def xml_labels(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Serialise labels
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:rtype: lxml.etree._Element
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
labels = lxml.etree.Element('labels')
|
|
|
|
for label in sorted(self.labels.values(), key=lambda labl: labl.index):
|
|
|
|
labels.append(label.__xml__())
|
|
|
|
return labels
|
|
|
|
|
2016-06-02 22:02:06 +02:00
|
|
|
@staticmethod
|
|
|
|
def get_vm_class(clsname):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Find the class for a domain.
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2017-02-20 22:35:55 +01:00
|
|
|
Classes are registered as setuptools' entry points in ``qubes.vm``
|
|
|
|
group. Any package may supply their own classes.
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:param str clsname: name of the class
|
|
|
|
:return type: class
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
try:
|
2017-04-03 13:11:34 +02:00
|
|
|
return qubes.utils.get_entry_point_one(
|
|
|
|
qubes.vm.VM_ENTRY_POINT, clsname)
|
2016-04-28 16:00:29 +02:00
|
|
|
except KeyError:
|
|
|
|
raise qubes.exc.QubesException(
|
|
|
|
'no such VM class: {!r}'.format(clsname))
|
|
|
|
# don't catch TypeError
|
|
|
|
|
|
|
|
def add_new_vm(self, cls, qid=None, **kwargs):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Add new Virtual Machine to collection
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
if qid is None:
|
|
|
|
qid = self.domains.get_new_unused_qid()
|
|
|
|
|
2017-02-21 00:50:26 +01:00
|
|
|
if isinstance(cls, str):
|
|
|
|
cls = self.get_vm_class(cls)
|
2016-04-28 16:00:29 +02:00
|
|
|
# handle default template; specifically allow template=None (do not
|
|
|
|
# override it with default template)
|
|
|
|
if 'template' not in kwargs and hasattr(cls, 'template'):
|
2018-01-03 00:04:48 +01:00
|
|
|
if cls == self.get_vm_class('DispVM'):
|
|
|
|
kwargs['template'] = self.default_dispvm
|
|
|
|
else:
|
|
|
|
kwargs['template'] = self.default_template
|
2019-02-19 00:39:41 +01:00
|
|
|
if kwargs['template'] is None:
|
|
|
|
raise qubes.exc.QubesValueError(
|
|
|
|
'Template for the qube not specified, nor default '
|
|
|
|
'template set.')
|
2016-07-12 18:03:14 +02:00
|
|
|
elif 'template' in kwargs and isinstance(kwargs['template'], str):
|
|
|
|
kwargs['template'] = self.domains[kwargs['template']]
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
return self.domains.add(cls(self, None, qid=qid, **kwargs))
|
|
|
|
|
|
|
|
def get_label(self, label):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Get label as identified by index or name
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
:throws KeyError: when label is not found
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-04-28 16:00:29 +02:00
|
|
|
|
|
|
|
# first search for index, verbatim
|
|
|
|
try:
|
|
|
|
return self.labels[label]
|
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
# then search for name
|
|
|
|
for i in self.labels.values():
|
|
|
|
if i.name == label:
|
|
|
|
return i
|
|
|
|
|
|
|
|
# last call, if label is a number represented as str, search in indices
|
|
|
|
try:
|
|
|
|
return self.labels[int(label)]
|
|
|
|
except (KeyError, ValueError):
|
|
|
|
pass
|
|
|
|
|
2020-08-13 20:03:47 +02:00
|
|
|
raise qubes.exc.QubesLabelNotFoundError(label)
|
2016-04-28 16:00:29 +02:00
|
|
|
|
2019-06-28 12:29:26 +02:00
|
|
|
@asyncio.coroutine
|
2018-09-12 01:50:25 +02:00
|
|
|
def setup_pools(self):
|
|
|
|
""" Run implementation specific setup for each storage pool. """
|
2019-06-28 12:29:26 +02:00
|
|
|
yield from qubes.utils.void_coros_maybe(
|
|
|
|
pool.setup() for pool in self.pools.values())
|
2018-09-12 01:50:25 +02:00
|
|
|
|
2019-02-18 21:13:24 +01:00
|
|
|
    @asyncio.coroutine
    def add_pool(self, name, **kwargs):
        """ Add a storage pool to config. """

        if name in self.pools.keys():
            raise qubes.exc.QubesException(
                'pool named %s already exists' % name)

        kwargs['name'] = name
        pool = self._get_pool(**kwargs)
        yield from qubes.utils.coro_maybe(pool.setup())
        self.pools[name] = pool
        yield from self.fire_event_async('pool-add', pool=pool)
        return pool

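    # A sketch of using the coroutine above; the pool name, driver and
    # directory are made-up examples, and the drivers actually available
    # depend on the qubes.storage entry points installed on the system:
    #
    #     pool = yield from app.add_pool('my-pool', driver='file',
    #                                    dir_path='/var/lib/qubes/my-pool')
    #
    # Adding a pool fires the 'pool-add' event once setup has completed.
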
    @asyncio.coroutine
    def remove_pool(self, name):
        """ Remove a storage pool from config file. """
        try:
            pool = self.pools[name]
            volumes = [(vm, volume) for vm in self.domains
                       for volume in vm.volumes.values()
                       if volume.pool is pool]
            if volumes:
                raise qubes.exc.QubesPoolInUseError(pool)
            prop_suffixes = ['', '_kernel', '_private', '_root', '_volatile']
            for suffix in prop_suffixes:
                if getattr(self, 'default_pool' + suffix, None) is pool:
                    raise qubes.exc.QubesPoolInUseError(
                        pool,
                        'Storage pool is in use: '
                        'set as {}'.format('default_pool' + suffix))
            yield from self.fire_event_async('pool-pre-delete',
                                             pre_event=True, pool=pool)
            del self.pools[name]
            yield from qubes.utils.coro_maybe(pool.destroy())
            yield from self.fire_event_async('pool-delete', pool=pool)
        except KeyError:
            return

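    # Note on remove_pool() above: a pool is only removed when no volume of
    # any domain lives in it and it is not referenced by default_pool or any
    # of its *_kernel/_private/_root/_volatile variants; otherwise
    # QubesPoolInUseError is raised before anything is torn down.
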
    def get_pool(self, pool):
        """ Returns a :py:class:`qubes.storage.Pool` instance """
        if isinstance(pool, qubes.storage.Pool):
            return pool
        try:
            return self.pools[pool]
        except KeyError:
            raise qubes.exc.QubesException('Unknown storage pool ' + pool)

    @staticmethod
    def _get_pool(**kwargs):
        try:
            name = kwargs['name']
            assert name, 'Name needs to be a non-empty string'
        except KeyError:
            raise qubes.exc.QubesException('No pool name for pool')

        try:
            driver = kwargs['driver']
        except KeyError:
            raise qubes.exc.QubesException('No driver specified for pool ' +
                                           name)
        try:
            klass = qubes.utils.get_entry_point_one(
                qubes.storage.STORAGE_ENTRY_POINT, driver)
            del kwargs['driver']
            return klass(**kwargs)
        except KeyError:
            raise qubes.exc.QubesException('No driver %s for pool %s' %
                                           (driver, name))

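    # _get_pool() above resolves the driver through the
    # qubes.storage.STORAGE_ENTRY_POINT entry-point group. As an illustration
    # (driver name and kwargs are examples only), kwargs such as
    # {'name': 'p', 'driver': 'lvm_thin', 'volume_group': 'qubes_dom0',
    #  'thin_pool': 'pool00'} end up, after 'driver' is popped, as a call like
    # ThinPool(name='p', volume_group='qubes_dom0', thin_pool='pool00').
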
    def register_event_handlers(self, old_connection=None):
        """Register libvirt event handlers, which will translate libvirt
        events into qubes.events. This function should be called only in
        the 'qubesd' process and only after the main loop has been set.
        """
        if old_connection:
            try:
                old_connection.domainEventDeregisterAny(
                    self._domain_event_callback_id)
            except libvirt.libvirtError:
                # the connection is probably in a bad state; but call the
                # above anyway to clean up the client structures
                pass
        self._domain_event_callback_id = (
            self.vmm.libvirt_conn.domainEventRegisterAny(
                None,  # any domain
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._domain_event_callback,
                None))

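    # The old_connection parameter above exists to support libvirt
    # reconnects: a hypothetical reconnect path would deregister the callback
    # from the stale connection and register it again on the fresh one, e.g.:
    #
    #     old = app.vmm.libvirt_conn
    #     app.vmm.reconnect()                      # assumed helper, for
    #                                              # illustration only
    #     app.register_event_handlers(old_connection=old)
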
    def _domain_event_callback(self, _conn, domain, event, _detail, _opaque):
        """Generic libvirt event handler (virConnectDomainEventCallback);
        translates libvirt events into qubes.events.
        """
        if not self.events_enabled:
            return

        try:
            vm = self.domains[domain.name()]
        except KeyError:
            # ignore events for unknown domains
            return

        if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
            vm.on_libvirt_domain_stopped()
        elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
            try:
                vm.fire_event('domain-paused')
            except Exception:  # pylint: disable=broad-except
                self.log.exception(
                    'Uncaught exception from domain-paused handler '
                    'for domain %s', vm.name)
        elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
            try:
                vm.fire_event('domain-unpaused')
            except Exception:  # pylint: disable=broad-except
                self.log.exception(
                    'Uncaught exception from domain-unpaused handler '
                    'for domain %s', vm.name)

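    # Summary of the translation performed above
    # (libvirt lifecycle event -> qubes-side action):
    #
    #     VIR_DOMAIN_EVENT_STOPPED   -> vm.on_libvirt_domain_stopped()
    #     VIR_DOMAIN_EVENT_SUSPENDED -> fire 'domain-paused'
    #     VIR_DOMAIN_EVENT_RESUMED   -> fire 'domain-unpaused'
    #
    # Events for domains unknown to the collection are ignored, and handler
    # exceptions are logged rather than propagated back into libvirt.
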
    @qubes.events.handler('domain-pre-delete')
    def on_domain_pre_deleted(self, event, vm):
        # pylint: disable=unused-argument
        for obj in itertools.chain(self.domains, (self,)):
            if obj is vm:
                # allow removed VM to reference itself
                continue
            for prop in obj.property_list():
                try:
                    if isinstance(prop, qubes.vm.VMProperty) and \
                            getattr(obj, prop.__name__) == vm:
                        self.log.error(
                            'Cannot remove %s, used by %s.%s',
                            vm, obj, prop.__name__)
                        raise qubes.exc.QubesVMInUseError(
                            vm,
                            'Domain is in use: {!r}; '
                            'see /var/log/qubes/qubes.log in dom0 for '
                            'details'.format(vm.name))
                except AttributeError:
                    pass

        assignments = vm.get_provided_assignments()
        if assignments:
            desc = ', '.join(
                assignment.ident for assignment in assignments)
            raise qubes.exc.QubesVMInUseError(
                vm,
                'VM has devices attached persistently to other VMs: ' +
                desc)

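    # In effect, the handler above blocks deletion while any other domain (or
    # the app object itself) still points at the VM through a VMProperty
    # (property names such as netvm or template are illustrative here), or
    # while the VM still has devices assigned to other VMs; both cases
    # surface as QubesVMInUseError.
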
    @qubes.events.handler('domain-delete')
    def on_domain_deleted(self, event, vm):
        # pylint: disable=unused-argument
        for propname in (
                'default_guivm',
                'default_netvm',
                'default_fw_netvm',
                'clockvm',
                'updatevm',
                'default_template',
                ):
            try:
                if getattr(self, propname) == vm:
                    delattr(self, propname)
            except AttributeError:
                pass

    @qubes.events.handler('property-pre-set:clockvm')
    def on_property_pre_set_clockvm(self, event, name, newvalue, oldvalue=None):
        # pylint: disable=unused-argument,no-self-use
        if newvalue is None:
            return
        if 'service.clocksync' not in newvalue.features:
            newvalue.features['service.clocksync'] = True

    @qubes.events.handler('property-set:clockvm')
    def on_property_set_clockvm(self, event, name, newvalue, oldvalue=None):
        # pylint: disable=unused-argument,no-self-use
        if oldvalue == newvalue:
            return
        if oldvalue and oldvalue.features.get('service.clocksync', False):
            del oldvalue.features['service.clocksync']

    @qubes.events.handler('property-pre-set:default_netvm')
    def on_property_pre_set_default_netvm(self, event, name, newvalue,
                                          oldvalue=None):
        # pylint: disable=unused-argument,invalid-name
        if newvalue is not None and oldvalue is not None \
                and oldvalue.is_running() and not newvalue.is_running() \
                and self.domains.get_vms_connected_to(oldvalue):
            raise qubes.exc.QubesVMNotRunningError(
                newvalue,
                'Cannot change {!r} to domain that '
                'is not running ({!r}).'.format(
                    name, newvalue.name))

    @qubes.events.handler('property-set:default_netvm')
    def on_property_set_default_netvm(self, event, name, newvalue,
                                      oldvalue=None):
        # pylint: disable=unused-argument
        for vm in self.domains:
            if hasattr(vm, 'provides_network') and not vm.provides_network and \
                    hasattr(vm, 'netvm') and vm.property_is_default('netvm'):
                # fire property-reset:netvm as it is responsible for resetting
                # netvm to its default value
                vm.fire_event('property-reset:netvm',
                              name='netvm', oldvalue=oldvalue)

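    # The handler above and the default_dispvm handler below follow the same
    # pattern: when a global default changes, every domain still relying on
    # the default value gets a 'property-reset:<name>' event, so anything
    # watching that property can re-read its new effective value. The event
    # name format is taken from the fire_event() calls in these handlers.
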
    @qubes.events.handler('property-set:default_dispvm')
    def on_property_set_default_dispvm(self, event, name, newvalue,
                                       oldvalue=None):
        # pylint: disable=unused-argument
        for vm in self.domains:
            if hasattr(vm, 'default_dispvm') and \
                    vm.property_is_default('default_dispvm'):
                # fire property-reset:default_dispvm as it is responsible for
                # resetting default_dispvm to its default value
                vm.fire_event('property-reset:default_dispvm',
                              name='default_dispvm', oldvalue=oldvalue)