#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013-2015 Marek Marczykowski-Górecki
#                         <marmarek@invisiblethingslab.com>
# Copyright (C) 2014-2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#

import asyncio
import base64
import grp
import os
import os.path
import shutil
import string
import subprocess
import uuid

import libvirt  # pylint: disable=import-error
import lxml

import qubes
import qubes.config
import qubes.exc
import qubes.storage
import qubes.storage.file
import qubes.utils
import qubes.vm
import qubes.vm.mix.net

qmemman_present = False
try:
    import qubes.qmemman.client  # pylint: disable=wrong-import-position
    qmemman_present = True
except ImportError:
    pass

# overhead of per-qube/per-vcpu Xen structures,
# taken from OpenStack nova/virt/xenapi/driver.py
# see https://wiki.openstack.org/wiki/XenServer/Overhead
# add an extra MB because Nova rounds up to MBs
MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
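
# Worked example (added for illustration, not part of the upstream source):
# with the constants above, a hypothetical qube with 2 vcpus is assumed to
# cost roughly MEM_OVERHEAD_BASE + 2 * MEM_OVERHEAD_PER_VCPU
# = 4 MiB + 2 * 1.5 MiB = 7 MiB of fixed Xen bookkeeping memory.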


def _setter_kernel(self, prop, value):
    """ Helper for setting the domain kernel and running sanity checks on it.
    """  # pylint: disable=unused-argument
    if not value:
        return ''
    value = str(value)
    if '/' in value:
        raise qubes.exc.QubesPropertyValueError(
            self, prop, value,
            'Kernel name cannot contain \'/\'')
    return value


def _setter_positive_int(self, prop, value):
    """ Helper for setting a positive int. Checks that the int is > 0 """
    # pylint: disable=unused-argument
    value = int(value)
    if value <= 0:
        raise ValueError('Value must be positive')
    return value
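
# Note (illustrative, not from the original source): these module-level
# setters are not called directly; they are passed as the ``setter=``
# argument of the ``qubes.property`` definitions further down in this module,
# e.g. ``memory`` and ``vcpus`` use ``_setter_positive_int``.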


def _setter_non_negative_int(self, prop, value):
    """ Helper for setting a non-negative int. Checks that the int is >= 0 """
    # pylint: disable=unused-argument
    value = int(value)
    if value < 0:
        raise ValueError('Value must be positive or zero')
    return value


def _setter_default_user(self, prop, value):
    """ Helper for setting default user """
    value = str(value)
    # specifically forbid: ':', ' ', """, '"'
    allowed_chars = string.ascii_letters + string.digits + '_-+,.'
    if not all(c in allowed_chars for c in value):
        raise qubes.exc.QubesPropertyValueError(
            self, prop, value,
            'Username can contain only those characters: ' + allowed_chars)
    return value


def _setter_virt_mode(self, prop, value):
    value = str(value)
    value = value.lower()
    if value not in ('hvm', 'pv', 'pvh'):
        raise qubes.exc.QubesPropertyValueError(
            self, prop, value,
            'Invalid virtualization mode, supported values: hvm, pv, pvh')
    if value == 'pvh' and list(self.devices['pci'].persistent()):
        raise qubes.exc.QubesPropertyValueError(
            self, prop, value,
            "pvh mode can't be set if pci devices are attached")
    return value


def _default_virt_mode(self):
    if self.devices['pci'].persistent():
        return 'hvm'
    try:
        return self.template.virt_mode
    except AttributeError:
        return 'pvh'


def _default_with_template(prop, default):
    """Return a callable for 'default' argument of a property. Use a value
    from a template (if any), otherwise *default*
    """

    def _func(self):
        try:
            return getattr(self.template, prop)
        except AttributeError:
            if callable(default):
                return default(self)
            return default

    return _func
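
# For example (see the property definitions later in this module),
# ``default=_default_with_template('vcpus', 2)`` makes a template-based qube
# inherit ``vcpus`` from its template and fall back to 2 when it has none.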


def _default_maxmem(self):
    # first check for any reason to _not_ enable qmemman
    if not self.is_memory_balancing_possible():
        return 0

    # Linux specific cap: max memory can't scale beyond 10.79*init_mem
    # see https://groups.google.com/forum/#!topic/qubes-devel/VRqkFj1IOtA
    if self.features.get('os', None) == 'Linux':
        default_maxmem = self.memory * 10
    else:
        default_maxmem = 4000

    # don't use default larger than half of physical ram
    default_maxmem = min(default_maxmem,
                         int(self.app.host.memory_total / 1024 / 2))

    return _default_with_template('maxmem', default_maxmem)(self)
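
# Worked example (added for illustration, not part of the upstream source):
# for a Linux qube with memory=400 on a host with 16 GiB of RAM, the default
# would be min(400 * 10, 16384 / 2) = min(4000, 8192) = 4000 MiB, unless the
# qube's template overrides the value.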


def _default_kernelopts(self):
    """
    Return default kernel options for the given kernel. If kernel directory
    contains 'default-kernelopts-{pci,nopci}.txt' file, use that. Otherwise
    use built-in defaults.

    For qubes without PCI devices, kernelopts of qube's template are
    considered (for template-based qubes).
    """
    if not self.kernel:
        return ''
    if 'kernel' in self.volumes:
        kernels_dir = self.storage.kernels_dir
    else:
        kernels_dir = os.path.join(
            qubes.config.system_path['qubes_kernels_base_dir'],
            self.kernel)
    pci = bool(list(self.devices['pci'].persistent()))
    if pci:
        path = os.path.join(kernels_dir, 'default-kernelopts-pci.txt')
    else:
        try:
            return self.template.kernelopts
        except AttributeError:
            pass
        path = os.path.join(kernels_dir, 'default-kernelopts-nopci.txt')
    if os.path.exists(path):
        with open(path) as f_kernelopts:
            return f_kernelopts.read().strip()
    else:
        return (qubes.config.defaults['kernelopts_pcidevs'] if pci else
                qubes.config.defaults['kernelopts'])
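
# Resolution order implemented above (summary added for clarity, not part of
# the upstream source): no kernel -> empty string; PCI devices attached ->
# 'default-kernelopts-pci.txt' or the built-in 'kernelopts_pcidevs' default;
# otherwise the template's kernelopts if there is a template, then
# 'default-kernelopts-nopci.txt', then the built-in 'kernelopts' default.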


class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
    """Base functionality of Qubes VM shared between all VMs.

    The following events are raised on this class or its subclasses:

        .. event:: domain-init (subject, event)

            Fired at the end of class' constructor.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-init'``)

        .. event:: domain-load (subject, event)

            Fired after the qube was loaded from :file:`qubes.xml`

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-loaded'``)

        .. event:: domain-pre-start \
                (subject, event, start_guid, mem_required)

            Fired at the beginning of :py:meth:`start` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-pre-start'``)

            *other arguments are as in :py:meth:`start`*
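
            A minimal handler sketch (illustrative only; the extension and
            handler names below are hypothetical, not part of this module):

            .. code-block:: python

                import qubes.ext

                class ExampleExtension(qubes.ext.Extension):
                    @qubes.ext.handler('domain-pre-start')
                    async def on_domain_pre_start(self, vm, event, **kwargs):
                        # runs before the qube is started; may await
                        vm.log.info('about to start %s', vm.name)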

        .. event:: domain-spawn (subject, event, start_guid)

            Fired after creating libvirt domain.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-spawn'``)

            Handler for this event can be asynchronous (a coroutine).

            *other arguments are as in :py:meth:`start`*

        .. event:: domain-start (subject, event, start_guid)

            Fired at the end of :py:meth:`start` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-start'``)

            *other arguments are as in :py:meth:`start`*

        .. event:: domain-start-failed (subject, event, reason)

            Fired when :py:meth:`start` method fails.
            *reason* argument is a textual error message.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-start-failed'``)

        .. event:: domain-paused (subject, event)

            Fired when the domain has been paused.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-paused'``)

        .. event:: domain-unpaused (subject, event)

            Fired when the domain has been unpaused.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-unpaused'``)

        .. event:: domain-stopped (subject, event)

            Fired when domain has been stopped.

            This event is emitted before ``'domain-shutdown'`` and will trigger
            the cleanup in QubesVM. So if you require that the cleanup has
            already run, use ``'domain-shutdown'``.

            Note that you can receive this event as soon as you received
            ``'domain-pre-start'``. This also can be emitted in case of a
            startup failure, before or after ``'domain-start-failed'``.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-stopped'``)

        .. event:: domain-shutdown (subject, event)

            Fired when domain has been shut down. It is generated after
            ``'domain-stopped'``.

            Note that you can receive this event as soon as you received
            ``'domain-pre-start'``. This also can be emitted in case of a
            startup failure, before or after ``'domain-start-failed'``.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-shutdown'``)

        .. event:: domain-pre-shutdown (subject, event, force)

            Fired at the beginning of :py:meth:`shutdown` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-pre-shutdown'``)
            :param force: If the shutdown is to be forceful

        .. event:: domain-shutdown-failed (subject, event, reason)

            Fired when ``domain-pre-shutdown`` event was sent, but the actual
            shutdown operation failed. It can be caused by another
            ``domain-pre-shutdown`` handler blocking the operation with an
            exception, or by a shutdown timeout.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-shutdown-failed'``)
            :param reason: Error message

        .. event:: domain-cmd-pre-run (subject, event, start_guid)

            Fired at the beginning of :py:meth:`run_service` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-cmd-pre-run'``)
            :param start_guid: If the gui daemon can be started

        .. event:: domain-create-on-disk (subject, event)

            Fired at the end of :py:meth:`create_on_disk` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-create-on-disk'``)

        .. event:: domain-remove-from-disk (subject, event)

            Fired at the beginning of :py:meth:`remove_from_disk` method, before
            the qube directory is removed.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-remove-from-disk'``)

        .. event:: domain-clone-files (subject, event, src)

            Fired at the end of :py:meth:`clone_disk_files` method.

            Handler for this event can be asynchronous (a coroutine).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-clone-files'``)
            :param src: source qube

        .. event:: domain-verify-files (subject, event)

            Fired at the end of :py:meth:`clone_disk_files` method.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-verify-files'``)

            If you think some files are missing or damaged, raise an exception.

        .. event:: domain-is-fully-usable (subject, event)

            Fired at the end of :py:meth:`clone_disk_files` method.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-is-fully-usable'``)

            You may ``yield False`` from the handler if you think the qube is
            not fully usable. This will cause the domain to be in "transient"
            state in the domain lifecycle.

        .. event:: domain-qdb-create (subject, event)

            Fired at the end of :py:meth:`create_qdb_entries` method.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-qdb-create'``)

            This event is a good place to add your custom entries to the qdb.
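
            A handler sketch (illustrative only; the extension name and the
            QubesDB path below are hypothetical):

            .. code-block:: python

                import qubes.ext

                class ExampleQdbExtension(qubes.ext.Extension):
                    @qubes.ext.handler('domain-qdb-create')
                    def on_domain_qdb_create(self, vm, event):
                        # add a custom entry to the qube's QubesDB
                        vm.untrusted_qdb.write('/example-entry', '1')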

        .. event:: domain-qdb-change:watched-path (subject, event, path)

            Fired when watched QubesDB entry is changed. See
            :py:meth:`watch_qdb_path`. *watched-path* part of event name is
            what path was registered for watching, *path* in event argument
            is what actually has changed (which may be different if watching a
            directory, i.e. a path with `/` at the end).

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-qdb-change'``)
            :param path: changed QubesDB path

        .. event:: backup-get-files (subject, event)

            Collects additional files to be included in a backup.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'backup-get-files'``)

            Handlers should yield paths of the files.

        .. event:: domain-restore (subject, event)

            Domain was just restored from backup, although the storage was not
            yet verified and the app object was not yet saved.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-restore'``)

        .. event:: domain-feature-pre-set:feature (subject, event, feature,
            value [, oldvalue])

            A feature will be changed. This event is fired before the value is
            set. If any handler raises an exception, the value will not be set.
            *oldvalue* is present only when there was any.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-feature-pre-set:' feature``)
            :param feature: feature name
            :param value: new value
            :param oldvalue: old value, if any

        .. event:: domain-feature-set:feature (subject, event, feature, value
            [, oldvalue])

            A feature was changed. This event is fired before the bare
            `domain-feature-set` event.
            *oldvalue* is present only when there was any.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-feature-set:' feature``)
            :param feature: feature name
            :param value: new value
            :param oldvalue: old value, if any

        .. event:: domain-feature-delete:feature (subject, event, feature)

            A feature was removed. This event is fired before the bare
            `domain-feature-delete` event.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-feature-delete:' feature``)
            :param feature: feature name

        .. event:: domain-feature-pre-delete:feature (subject, event, feature)

            A feature will be removed. This event is fired before the feature
            is removed. If any handler raises an exception, the feature will
            not be removed.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-feature-pre-delete:' feature``)
            :param feature: feature name

        .. event:: domain-tag-add:tag (subject, event, tag)

            A tag was added.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-tag-add:' tag``)
            :param tag: tag name

        .. event:: domain-tag-delete:tag (subject, event, tag)

            A tag was removed.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'domain-tag-delete:' tag``)
            :param tag: tag name

        .. event:: features-request (subject, event, *, untrusted_features)

            The domain is performing a features request.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'features-request'``)
            :param untrusted_features: :py:class:`dict` containing the feature \
                request

            The content of the `untrusted_features` variable is, as the name
            implies, **UNTRUSTED**. To remind the programmer of this, the
            variable name has to be kept exactly as provided.

            It is up to the extensions to decide what to do with the request,
            ranging from plainly ignoring it to copying it verbatim into
            :py:attr:`features` with only minimal sanitisation.

            Handler for this event can be asynchronous (a coroutine).
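
            A handler sketch (illustrative only; the extension name and the
            feature key below are hypothetical):

            .. code-block:: python

                import qubes.ext

                class ExampleFeaturesExtension(qubes.ext.Extension):
                    @qubes.ext.handler('features-request')
                    async def on_features_request(self, vm, event, *,
                                                  untrusted_features):
                        # accept only a single known key, with sanitisation
                        untrusted_value = untrusted_features.get('example', '')
                        if untrusted_value in ('0', '1'):
                            vm.features['example'] = untrusted_value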

        .. event:: firewall-changed (subject, event)

            Firewall was changed.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'firewall-changed'``)

        .. event:: net-domain-connect (subject, event, vm)

            Fired after connecting a domain to this vm.

            :param subject: Event emitter (the qube object)
            :param event: Event name (``'net-domain-connect'``)
            :param vm: The domain that was just connected.

            On the `vm` object there was probably ``property-set:netvm`` fired
            earlier.

        .. event:: template-postinstall (subject, event)

            Fired on non-template-based domain (TemplateVM, StandaloneVM) when
            it first reports qrexec presence. This happens at the first
            domain startup just after its installation and is suitable for
            performing various post-installation setup.

            Handler for this event can be asynchronous (a coroutine).
    """

    #
    # per-class properties
    #

    #: directory in which domains of this class will reside
    dir_path_prefix = qubes.config.system_path['qubes_appvms_dir']

    #
    # properties loaded from XML
    #

    guivm = qubes.VMProperty('guivm', load_stage=4, allow_none=True,
                             default=(lambda self: self.app.default_guivm),
                             doc='VM used for Gui')

    audiovm = qubes.VMProperty('audiovm', load_stage=4, allow_none=True,
                               default=(lambda self: self.app.default_audiovm),
                               doc='VM used for Audio')

    virt_mode = qubes.property(
        'virt_mode',
        type=str, setter=_setter_virt_mode,
        default=_default_virt_mode,
        doc="""Virtualisation mode: full virtualisation ("HVM"),
            or paravirtualisation ("PV"), or hybrid ("PVH").
            TemplateBasedVMs use its template\'s value by default.""")

    installed_by_rpm = qubes.property(
        'installed_by_rpm',
        type=bool, setter=qubes.property.bool,
        default=False,
        doc="""If this domain's image was installed from package tracked by
            package manager.""")

    memory = qubes.property(
        'memory', type=int,
        setter=_setter_positive_int,
        default=_default_with_template(
            'memory',
            lambda self:
            qubes.config.defaults[
                'hvm_memory' if self.virt_mode == 'hvm' else 'memory']),
        doc='Memory currently available for this VM. TemplateBasedVMs use its '
            'template\'s value by default.')

    maxmem = qubes.property(
        'maxmem', type=int,
        setter=_setter_non_negative_int,
        default=_default_maxmem,
        doc="""Maximum amount of memory available for this VM (for the purpose
            of the memory balancer). Set to 0 to disable memory balancing for
            this qube. TemplateBasedVMs use its template\'s value by default
            (unless memory balancing not supported for this qube).""")

    stubdom_mem = qubes.property(
        'stubdom_mem', type=int,
        setter=_setter_positive_int,
        default=None,
        doc='Memory amount allocated for the stubdom')

    vcpus = qubes.property(
        'vcpus',
        type=int,
        setter=_setter_positive_int,
        default=_default_with_template('vcpus', 2),
        doc='Number of virtual CPUs for a qube. TemplateBasedVMs use its '
            'template\'s value by default.')

    # CORE2: swallowed uses_default_kernel
    kernel = qubes.property(
        'kernel', type=str,
        setter=_setter_kernel,
        default=_default_with_template('kernel',
                                       lambda self: self.app.default_kernel),
        doc='Kernel used by this domain. TemplateBasedVMs use its '
            'template\'s value by default.')

    # CORE2: swallowed uses_default_kernelopts
    # pylint: disable=no-member
    kernelopts = qubes.property(
        'kernelopts', type=str, load_stage=4,
        default=_default_kernelopts,
        doc='Kernel command line passed to domain. TemplateBasedVMs use its '
            'template\'s value by default.')

    debug = qubes.property(
        'debug', type=bool, default=False,
        setter=qubes.property.bool,
        doc='Turns on debugging features.')

    # XXX what this exactly does?
    # XXX shouldn't this go to standalone VM and TemplateVM, and leave here
    #     only plain property?
    default_user = qubes.property(
        'default_user', type=str,
        # pylint: disable=no-member
        default=_default_with_template('default_user',
                                       'user'),
        setter=_setter_default_user,
        doc='Default user to start applications as. TemplateBasedVMs use its '
            'template\'s value by default.')

    qrexec_timeout = qubes.property(
        'qrexec_timeout', type=int,
        default=_default_with_template(
            'qrexec_timeout',
            lambda self: self.app.default_qrexec_timeout),
        setter=_setter_positive_int,
        doc="""Time in seconds after which qrexec connection attempt is deemed
            failed. Operating system inside VM should be able to boot in this
            time.""")

    shutdown_timeout = qubes.property(
        'shutdown_timeout', type=int,
        default=_default_with_template(
            'shutdown_timeout',
            lambda self: self.app.default_shutdown_timeout),
        setter=_setter_positive_int,
        doc="""Time in seconds for shutdown of the VM, after which VM may be
            forcefully powered off. Operating system inside VM should be
            able to fully shutdown in this time.""")

    autostart = qubes.property(
        'autostart', default=False,
        type=bool, setter=qubes.property.bool,
        doc="""Setting this to `True` means that VM should be autostarted on
            dom0 boot.""")

    include_in_backups = qubes.property(
        'include_in_backups',
        default=True,
        type=bool, setter=qubes.property.bool,
        doc='If this domain is to be included in default backup.')

    backup_timestamp = qubes.property(
        'backup_timestamp', default=None,
        type=int,
        doc='Time of last backup of the qube, in seconds since unix epoch')

    default_dispvm = qubes.VMProperty(
        'default_dispvm',
        load_stage=4,
        allow_none=True,
        default=(
            lambda self: self.app.default_dispvm),
        doc='Default VM to be used as Disposable VM for service calls.')

    management_dispvm = qubes.VMProperty(
        'management_dispvm',
        load_stage=4,
        allow_none=True,
        default=_default_with_template(
            'management_dispvm',
            (lambda self: self.app.management_dispvm)),
        doc='Default DVM template for Disposable VM for managing this VM.')

    updateable = qubes.property(
        'updateable',
        default=(lambda self: not hasattr(self, 'template')),
        type=bool,
        setter=qubes.property.forbidden,
        doc='True if this machine may be updated on its own.')

    #
    # static, class-wide properties
    #

    #
    # properties not loaded from XML, calculated at run-time
    #

    def __str__(self):
        return self.name

    # VMM-related

    @qubes.stateless_property
    def xid(self):
        """Xen ID.

        Or not Xen, but ID.
        """

        if self.libvirt_domain is None:
            return -1
        try:
            if self.is_running():
                return self.libvirt_domain.ID()

            return -1
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return -1
            self.log.exception('libvirt error code: {!r}'.format(
                e.get_error_code()))
            raise

    @qubes.stateless_property
    def stubdom_xid(self):
        if not self.is_running():
            return -1

        if self.app.vmm.xs is None:
            return -1

        stubdom_xid_str = self.app.vmm.xs.read(
            '', '/local/domain/{}/image/device-model-domid'.format(
                self.xid))
        if stubdom_xid_str is None or not stubdom_xid_str.isdigit():
            return -1

        return int(stubdom_xid_str)
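
    # Note (added for clarity, not part of the upstream source): the property
    # below inspects the live libvirt XML for <disk/> elements, and for every
    # disk that has a <backenddomain name="..."/> child it looks up the
    # corresponding storage volume in the pool named 'p_<backend name>',
    # matching on the device path taken from <source dev="..."/>.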

    @property
    def attached_volumes(self):
        result = []
        xml_desc = self.libvirt_domain.XMLDesc()
        xml = lxml.etree.fromstring(xml_desc)
        for disk in xml.xpath("//domain/devices/disk"):
            if disk.find('backenddomain') is not None:
                pool_name = 'p_%s' % disk.find('backenddomain').get('name')
                pool = self.app.pools[pool_name]
                vid = disk.find('source').get('dev').split('/dev/')[1]
                for volume in pool.volumes:
                    if volume.vid == vid:
                        result += [volume]
                        break

        return result + list(self.volumes.values())

    @property
    def libvirt_domain(self):
        """Libvirt domain object from libvirt.

        May be :py:obj:`None`, if libvirt knows nothing about this domain.
        """

        if self._libvirt_domain is not None:
            return self._libvirt_domain

        if self.app.vmm.offline_mode:
            return None

        # XXX _update_libvirt_domain?
        try:
            self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
                self.uuid.bytes)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                self._update_libvirt_domain()
            else:
                raise
        return self._libvirt_domain

    @property
    def block_devices(self):
        """ Return all :py:class:`qubes.storage.BlockDevice` for current domain
        for serialization in the libvirt XML template as <disk>.
        """
        for v in self.volumes.values():
            block_dev = v.block_device()
            if block_dev is not None:
                yield block_dev

    @property
    def untrusted_qdb(self):
        """QubesDB handle for this domain."""
        if self._qdb_connection is None:
            if self.is_running():
                import qubesdb  # pylint: disable=import-error
                self._qdb_connection = qubesdb.QubesDB(self.name)
        return self._qdb_connection

    @property
    def dir_path(self):
        """Root directory for files related to this domain"""
        return os.path.join(
            qubes.config.qubes_base_dir,
            self.dir_path_prefix,
            self.name)

    @property
    def icon_path(self):
        return os.path.join(self.dir_path, 'icon.png')

    @property
    def conf_file(self):
        return os.path.join(self.dir_path, 'libvirt.xml')

    # network-related

    #
    # constructor
    #

    def __init__(self, app, xml, volume_config=None, **kwargs):
        # migrate renamed properties
        if xml is not None:
            node_hvm = xml.find('./properties/property[@name=\'hvm\']')
            if node_hvm is not None:
                if qubes.property.bool(None, None, node_hvm.text):
                    kwargs['virt_mode'] = 'hvm'
                else:
                    kwargs['virt_mode'] = 'pv'
                node_hvm.getparent().remove(node_hvm)

        super(QubesVM, self).__init__(app, xml, **kwargs)

        if volume_config is None:
            volume_config = {}

        if hasattr(self, 'volume_config'):
            if xml is not None:
                for node in xml.xpath('volume-config/volume'):
                    name = node.get('name')
                    assert name
                    for key, value in node.items():
                        # pylint: disable=no-member
                        if value == 'True':
                            value = True
                        try:
                            self.volume_config[name][key] = value
                        except KeyError:
                            self.volume_config[name] = {key: value}

            for name, conf in volume_config.items():
                for key, value in conf.items():
                    # pylint: disable=no-member
                    try:
                        self.volume_config[name][key] = value
                    except KeyError:
                        self.volume_config[name] = {key: value}

        elif volume_config:
            raise TypeError(
                'volume_config specified, but {} did not expect that.'.format(
                    self.__class__.__name__))

        # Init private attrs

        self._libvirt_domain = None
        self._qdb_connection = None

        # We assume a fully halted VM here. The 'domain-init' handler will
        # check if the VM is already running.
        self._domain_stopped_event_received = True
        self._domain_stopped_event_handled = True

        self._domain_stopped_future = None

        # Internal lock to ensure ordering between _domain_stopped_coro() and
        # start(). This should not be accessed anywhere else.
        self._domain_stopped_lock = asyncio.Lock()

        if xml is None:
            # we are creating new VM and attributes came through kwargs
            assert hasattr(self, 'qid')
            assert hasattr(self, 'name')

        if xml is None:
            # new qube, disable updates check if requested for new qubes
            # SEE: 1637 when features are done, migrate to plugin
            if not self.app.check_updates_vm:
                self.features['check-updates'] = False

        # will be initialized after loading all the properties

        #: operations which shouldn't happen simultaneously with qube startup
        #  (including another startup of the same qube)
        self.startup_lock = asyncio.Lock()

        # fire hooks
        if xml is None:
            self.events_enabled = True
        self.fire_event('domain-init')

    def close(self):
        if self._qdb_connection is not None:
            self._qdb_connection.close()
            self._qdb_connection = None
        if self._libvirt_domain is not None:
            self._libvirt_domain = None
        super().close()

    def __hash__(self):
        return self.qid

    def __lt__(self, other):
        return self.name < other.name

    def __xml__(self):
        # pylint: disable=no-member
        element = super(QubesVM, self).__xml__()
        # pylint: enable=no-member

        if hasattr(self, 'volumes'):
            volume_config_node = lxml.etree.Element('volume-config')
            for volume in self.volumes.values():
                volume_config_node.append(volume.__xml__())
            element.append(volume_config_node)

        return element
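
    # Note (added for clarity, not part of the upstream source): together with
    # the parsing code in __init__ above, the element produced here carries a
    # <volume-config> child with one <volume name="..."> entry per volume,
    # whose attributes are read back into self.volume_config when qubes.xml
    # is loaded again.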

    #
    # event handlers
    #

    @qubes.events.handler('domain-init', 'domain-load')
    def on_domain_init_loaded(self, event):
        # pylint: disable=unused-argument
        if not hasattr(self, 'uuid'):
            # pylint: disable=attribute-defined-outside-init
            self.uuid = uuid.uuid4()

        # Initialize VM image storage class;
        # it might be already initialized by a recursive call from a child VM
        if self.storage is None:
            self.storage = qubes.storage.Storage(self)

        if not self.app.vmm.offline_mode and self.is_running():
            self.start_qdb_watch()
            self._domain_stopped_event_received = False
            self._domain_stopped_event_handled = False

    @qubes.events.handler('property-set:label')
    def on_property_set_label(self, event, name, newvalue, oldvalue=None):
        # pylint: disable=unused-argument
        if self.icon_path:
            try:
                os.remove(self.icon_path)
            except OSError:
                pass
            if hasattr(os, "symlink"):
                os.symlink(newvalue.icon_path, self.icon_path)
                subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate'])
            else:
                shutil.copy(newvalue.icon_path, self.icon_path)

    @qubes.events.handler('property-pre-set:kernel')
    def on_property_pre_set_kernel(self, event, name, newvalue, oldvalue=None):
        # pylint: disable=unused-argument
        if not newvalue:
            return
        dirname = os.path.join(
            qubes.config.qubes_base_dir,
            qubes.config.system_path['qubes_kernels_base_dir'],
            newvalue)
        if not os.path.exists(dirname):
            raise qubes.exc.QubesPropertyValueError(
                self, self.property_get_def(name), newvalue,
                'Kernel {!r} not installed'.format(
                    newvalue))
        for filename in ('vmlinuz', 'initramfs'):
            if not os.path.exists(os.path.join(dirname, filename)):
                raise qubes.exc.QubesPropertyValueError(
                    self, self.property_get_def(name), newvalue,
                    'Kernel {!r} not properly installed: '
                    'missing {!r} file'.format(
                        newvalue, filename))
|
2017-06-20 17:43:11 +02:00
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
    @qubes.events.handler('property-pre-set:autostart')
    def on_property_pre_set_autostart(self, event, name, newvalue,
            oldvalue=None):
        # pylint: disable=unused-argument
        # workaround https://bugzilla.redhat.com/show_bug.cgi?id=1181922
        if newvalue:
            retcode = subprocess.call(
                ["sudo", "ln", "-sf",
                 "/usr/lib/systemd/system/qubes-vm@.service",
                 "/etc/systemd/system/multi-user.target.wants/qubes-vm@"
                 "{}.service".format(self.name)])
        else:
            retcode = subprocess.call(
                ['sudo', 'systemctl', 'disable',
                 'qubes-vm@{}.service'.format(self.name)])
        if retcode:
            raise qubes.exc.QubesException(
                'Failed to set autostart for VM in systemd')

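    # Illustrative note (not from the original source): for a qube named
    # 'work', setting the property below links
    # /etc/systemd/system/multi-user.target.wants/qubes-vm@work.service to the
    # qubes-vm@.service template unit; setting it back to False disables that
    # unit via systemctl.
    #
    #   vm.autostart = True
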
    @qubes.events.handler('property-pre-del:autostart')
    def on_property_pre_del_autostart(self, event, name, oldvalue=None):
        # pylint: disable=unused-argument
        if oldvalue:
            retcode = subprocess.call(
                ['sudo', 'systemctl', 'disable',
                 'qubes-vm@{}.service'.format(self.name)])
            if retcode:
                raise qubes.exc.QubesException(
                    'Failed to reset autostart for VM in systemd')

    @qubes.events.handler('domain-remove-from-disk')
    def on_remove_from_disk(self, event, **kwargs):
        # pylint: disable=unused-argument
        if self.autostart:
            subprocess.call(
                ['sudo', 'systemctl', 'disable',
                 'qubes-vm@{}.service'.format(self.name)])

    @qubes.events.handler('domain-create-on-disk')
    def on_create_on_disk(self, event, **kwargs):
        # pylint: disable=unused-argument
        if self.autostart:
            subprocess.call(
                ['sudo', 'systemctl', 'enable',
                 'qubes-vm@{}.service'.format(self.name)])

    #
    # methods for changing domain state
    #

    @asyncio.coroutine
    def _ensure_shutdown_handled(self):
        """Make sure previous shutdown is fully handled.
        MUST NOT be called when domain is running.
        """
        with (yield from self._domain_stopped_lock):
            # Don't accept any new stopped events until a new VM has been
            # created. If we haven't received a stopped event, or it hasn't
            # been handled yet, we handle it in the lines below.
            self._domain_stopped_event_received = True

            if self._domain_stopped_future is not None:
                # The libvirt stopped event was already received, so cancel
                # the future. If it didn't generate the Qubes events yet, we
                # will do it below.
                self._domain_stopped_future.cancel()
                self._domain_stopped_future = None

            if not self._domain_stopped_event_handled:
                # No Qubes domain-stopped events have been generated yet,
                # so do this now.

                # Set this immediately so that we don't generate the events
                # twice if an exception gets thrown.
                self._domain_stopped_event_handled = True

                yield from self.fire_event_async('domain-stopped')
                yield from self.fire_event_async('domain-shutdown')

    @asyncio.coroutine
    def start(self, start_guid=True, notify_function=None,
            mem_required=None):
        """Start domain

        :param bool start_guid: FIXME
        :param collections.Callable notify_function: FIXME
        :param int mem_required: FIXME
        """

        with (yield from self.startup_lock):
            # check if domain wasn't removed in the meantime
            if self not in self.app.domains:
                raise qubes.exc.QubesVMNotFoundError(self.name)
            # Intentionally not using is_running(): eliminate also "Paused",
            # "Crashed", "Halting"
            if self.get_power_state() != 'Halted':
                return self

            yield from self._ensure_shutdown_handled()

            self.log.info('Starting {}'.format(self.name))

            try:
                yield from self.fire_event_async('domain-pre-start',
                    pre_event=True,
                    start_guid=start_guid,
                    mem_required=mem_required)
            except Exception as exc:
                self.log.error('Start failed: %s', str(exc))
                yield from self.fire_event_async('domain-start-failed',
                    reason=str(exc))
                raise

            qmemman_client = None
            try:
                for devclass in self.devices:
                    for dev in self.devices[devclass].persistent():
                        if isinstance(dev, qubes.devices.UnknownDevice):
                            raise qubes.exc.QubesException(
                                '{} device {} not available'.format(
                                    devclass, dev))

                if self.virt_mode == 'pvh' and not self.kernel:
                    raise qubes.exc.QubesException(
                        'virt_mode PVH requires kernel to be set')
                yield from self.storage.verify()

                if self.netvm is not None:
                    # pylint: disable = no-member
                    if self.netvm.qid != 0:
                        if not self.netvm.is_running():
                            yield from self.netvm.start(
                                start_guid=start_guid,
                                notify_function=notify_function)

                qmemman_client = yield from asyncio.get_event_loop(). \
                    run_in_executor(None, self.request_memory, mem_required)

                yield from self.storage.start()

            except Exception as exc:
                self.log.error('Start failed: %s', str(exc))
                # let anyone receiving domain-pre-start know that startup failed
                yield from self.fire_event_async('domain-start-failed',
                    reason=str(exc))
                if qmemman_client:
                    qmemman_client.close()
                raise

            try:
                self._update_libvirt_domain()

                self.libvirt_domain.createWithFlags(
                    libvirt.VIR_DOMAIN_START_PAUSED)

            except libvirt.libvirtError as exc:
                # missing IOMMU?
                if self.virt_mode == 'hvm' and \
                        list(self.devices['pci'].persistent()) and \
                        not self.app.host.is_iommu_supported():
                    exc = qubes.exc.QubesException(
                        'Failed to start an HVM qube with PCI devices assigned '
                        '- hardware does not support IOMMU/VT-d/AMD-Vi')
                self.log.error('Start failed: %s', str(exc))
                yield from self.fire_event_async('domain-start-failed',
                    reason=str(exc))
                yield from self.storage.stop()
                raise exc
            except Exception as exc:
                self.log.error('Start failed: %s', str(exc))
                # let anyone receiving domain-pre-start know that startup failed
                yield from self.fire_event_async('domain-start-failed',
                    reason=str(exc))
                yield from self.storage.stop()
                raise

            finally:
                if qmemman_client:
                    qmemman_client.close()

            self._domain_stopped_event_received = False
            self._domain_stopped_event_handled = False

            try:
                yield from self.fire_event_async('domain-spawn',
                    start_guid=start_guid)

                self.log.info('Setting Qubes DB info for the VM')
                yield from self.start_qubesdb()
                self.create_qdb_entries()
                self.start_qdb_watch()

                self.log.warning('Activating the {} VM'.format(self.name))
                self.libvirt_domain.resume()

                yield from self.start_qrexec_daemon()

                yield from self.fire_event_async('domain-start',
                    start_guid=start_guid)

            except Exception as exc:  # pylint: disable=bare-except
                self.log.error('Start failed: %s', str(exc))
                # This avoids losing the exception if an exception is
                # raised in self._kill_locked(), because the vm is not
                # running or paused
                try:
                    yield from self._kill_locked()
                except qubes.exc.QubesVMNotStartedError:
                    pass

                # let anyone receiving domain-pre-start know that startup failed
                yield from self.fire_event_async('domain-start-failed',
                    reason=str(exc))
                raise

        return self

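    # Illustrative usage sketch (not part of the original file): start() is a
    # coroutine and must be awaited from the event loop driving qubesd; 'vm'
    # is assumed to be a QubesVM instance taken from app.domains.
    #
    #   loop = asyncio.get_event_loop()
    #   loop.run_until_complete(vm.start())
    #   # vm.get_power_state() typically reports 'Running' afterwards
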
    def on_libvirt_domain_stopped(self):
        """ Handle VIR_DOMAIN_EVENT_STOPPED events from libvirt.

        This is not a Qubes event handler. Instead we do some sanity checks
        and synchronization with start(), and then emit Qubes events.
        """

        state = self.get_power_state()
        if state not in ['Halted', 'Crashed', 'Dying']:
            self.log.warning('Stopped event from libvirt received,'
                ' but domain is in state {}!'.format(state))
            # ignore this unexpected event
            return

        if self._domain_stopped_event_received:
            # ignore this event - already triggered by shutdown(), kill(),
            # or subsequent start()
            return

        self._domain_stopped_event_received = True
        self._domain_stopped_future = \
            asyncio.ensure_future(self._domain_stopped_coro())

    @asyncio.coroutine
    def _domain_stopped_coro(self):
        with (yield from self._domain_stopped_lock):
            assert not self._domain_stopped_event_handled

            # Set this immediately such that we don't generate events twice if
            # an exception gets thrown.
            self._domain_stopped_event_handled = True

            while self.get_power_state() == 'Dying':
                yield from asyncio.sleep(0.25)
            yield from self.fire_event_async('domain-stopped')
            yield from self.fire_event_async('domain-shutdown')

    @qubes.events.handler('domain-stopped')
    @asyncio.coroutine
    def on_domain_stopped(self, _event, **_kwargs):
        """Cleanup after domain was stopped"""
        try:
            yield from self.storage.stop()
        except qubes.storage.StoragePoolException:
            self.log.exception('Failed to stop storage for domain %s',
                self.name)

    @asyncio.coroutine
    def shutdown(self, force=False, wait=False, timeout=None):
        """Shutdown domain.

        :param force: ignored
        :param wait: wait for shutdown to complete
        :param timeout: shutdown wait timeout (for *wait*=True), defaults to
            :py:attr:`shutdown_timeout`
        :raises qubes.exc.QubesVMNotStartedError: \
            when domain is already shut down.
        """

        if self.is_halted():
            raise qubes.exc.QubesVMNotStartedError(self)

        try:
            yield from self.fire_event_async('domain-pre-shutdown',
                pre_event=True, force=force)

            self.libvirt_domain.shutdown()

            if wait:
                if timeout is None:
                    timeout = self.shutdown_timeout
                while timeout > 0 and not self.is_halted():
                    yield from asyncio.sleep(0.25)
                    timeout -= 0.25
                with (yield from self.startup_lock):
                    if self.is_halted():
                        # make sure all shutdown tasks are completed
                        yield from self._ensure_shutdown_handled()
                    else:
                        raise qubes.exc.QubesVMShutdownTimeoutError(self)
        except Exception as ex:
            yield from self.fire_event_async('domain-shutdown-failed',
                reason=str(ex))
            raise

        return self

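    # Illustrative usage sketch (not from the original source): gracefully
    # shutting a qube down and waiting up to 60 seconds for it to halt; a
    # QubesVMShutdownTimeoutError propagates if it does not halt in time.
    #
    #   yield from vm.shutdown(wait=True, timeout=60)
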
    @asyncio.coroutine
    def kill(self):
        """Forcefully shutdown (destroy) domain.

        :raises qubes.exc.QubesVMNotStartedError: \
            when domain is already shut down.
        """

        if not self.is_running() and not self.is_paused():
            raise qubes.exc.QubesVMNotStartedError(self)

        with (yield from self.startup_lock):
            yield from self._kill_locked()

        return self

    @asyncio.coroutine
    def _kill_locked(self):
        """Forcefully shutdown (destroy) domain.

        This function needs to be called with self.startup_lock held."""
        try:
            self.libvirt_domain.destroy()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
                raise qubes.exc.QubesVMNotStartedError(self)
            raise

        # make sure all shutdown tasks are completed
        yield from self._ensure_shutdown_handled()

    @asyncio.coroutine
    def suspend(self):
        """Suspend (pause) domain.

        :raises qubes.exc.QubesVMNotRunningError: \
            when domain is already shut down.
        """

        if not self.is_running() and not self.is_paused():
            raise qubes.exc.QubesVMNotRunningError(self)

        if list(self.devices['pci'].attached()):
            if self.features.check_with_template('qrexec', False):
                yield from self.run_service_for_stdio('qubes.SuspendPre',
                    user='root')
            self.libvirt_domain.pMSuspendForDuration(
                libvirt.VIR_NODE_SUSPEND_TARGET_MEM, 0, 0)
        else:
            self.libvirt_domain.suspend()

        return self

    @asyncio.coroutine
    def pause(self):
        """Pause (suspend) domain."""

        if not self.is_running():
            raise qubes.exc.QubesVMNotRunningError(self)

        self.libvirt_domain.suspend()

        return self

    @asyncio.coroutine
    def resume(self):
        """Resume suspended domain.

        :raises qubes.exc.QubesVMNotSuspendedError: when machine is not paused
        :raises qubes.exc.QubesVMError: when machine is suspended
        """

        # pylint: disable=not-an-iterable
        if self.get_power_state() == "Suspended":
            self.libvirt_domain.pMWakeup()
            if self.features.check_with_template('qrexec', False):
                yield from self.run_service_for_stdio('qubes.SuspendPost',
                    user='root')
        else:
            yield from self.unpause()

        return self

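    # Illustrative sketch (not part of the original file): suspend() and
    # resume() are coroutines and are normally paired, e.g. around host
    # sleep; 'vm' is assumed to be a running QubesVM instance.
    #
    #   yield from vm.suspend()
    #   # ... host suspend/resume happens here ...
    #   yield from vm.resume()
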
    @asyncio.coroutine
    def unpause(self):
        """Resume (unpause) a domain"""
        if not self.is_paused():
            raise qubes.exc.QubesVMNotPausedError(self)

        self.libvirt_domain.resume()

        return self

    @asyncio.coroutine
    def run_service(self, service, source=None, user=None,
            filter_esc=False, autostart=False, gui=False, **kwargs):
        """Run service on this VM

        :param str service: service name
        :param qubes.vm.qubesvm.QubesVM source: source domain as presented to
            this VM
        :param str user: username to run service as
        :param bool filter_esc: filter escape sequences to protect terminal \
            emulator
        :param bool autostart: if :py:obj:`True`, machine will be started if \
            it is not running
        :param bool gui: when autostarting, also start gui daemon
        :rtype: asyncio.subprocess.Process

        .. note::
            User ``root`` is redefined to ``SYSTEM`` in the Windows agent code
        """

        # UNSUPPORTED from previous incarnation:
        # localcmd, wait, passio*, notify_function, `-e` switch
        #
        # - passio* and friends depend on params to command (like in stdlib)
        # - the filter_esc is orthogonal to passio*
        # - input: see run_service_for_stdio
        # - wait has no purpose since this is asynchronous
        # - notify_function is gone

        source = 'dom0' if source is None else self.app.domains[source].name

        if user is None:
            user = self.default_user

        if self.is_paused():
            # XXX what about autostart?
            raise qubes.exc.QubesVMNotRunningError(
                self, 'Domain {!r} is paused'.format(self.name))
        if not self.is_running():
            if not autostart:
                raise qubes.exc.QubesVMNotRunningError(self)
            yield from self.start(start_guid=gui)

        if not self.is_qrexec_running():
            raise qubes.exc.QubesVMError(
                self, 'Domain {!r}: qrexec not connected'.format(self.name))

        yield from self.fire_event_async('domain-cmd-pre-run', pre_event=True,
            start_guid=gui)

        return (yield from asyncio.create_subprocess_exec(
            qubes.config.system_path['qrexec_client_path'],
            '-d', str(self.name),
            *(('-t', '-T') if filter_esc else ()),
            '{}:QUBESRPC {} {}'.format(user, service, source),
            **kwargs))

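    # Illustrative usage sketch (not from the original source): invoking a
    # qrexec service and reading its output; the service name used here is
    # hypothetical.
    #
    #   proc = yield from vm.run_service('my.ExampleService',
    #       stdout=subprocess.PIPE)
    #   stdout, _ = yield from proc.communicate()
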
    @asyncio.coroutine
    def run_service_for_stdio(self, *args, input=None, **kwargs):
        """Run a service, pass an optional input and return (stdout, stderr).

        Raises an exception if return code != 0.

        *args* and *kwargs* are passed verbatim to :py:meth:`run_service`.

        .. warning::
            There are some combinations of stdio-related *kwargs*, which are
            not filtered for problems originating between the keyboard and the
            chair.
        """  # pylint: disable=redefined-builtin

        kwargs.setdefault('stdin', subprocess.PIPE)
        kwargs.setdefault('stdout', subprocess.PIPE)
        kwargs.setdefault('stderr', subprocess.PIPE)
        if kwargs['stdin'] == subprocess.PIPE and input is None:
            # workaround for https://bugs.python.org/issue39744
            input = b''
        p = yield from self.run_service(*args, **kwargs)

        # this one is actually a tuple, but there is no need to unpack it
        stdouterr = yield from p.communicate(input=input)

        if p.returncode:
            raise subprocess.CalledProcessError(p.returncode,
                args[0], *stdouterr)

        return stdouterr

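    # Illustrative usage sketch (not part of the original file): calling a
    # qrexec service and collecting its output in one step; CalledProcessError
    # propagates on a non-zero exit code. 'qubes.GetDate' is assumed to be
    # available in the target qube.
    #
    #   stdout, stderr = yield from vm.run_service_for_stdio('qubes.GetDate')
    #   date = stdout.decode('ascii').strip()
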
    def run(self, command, user=None, **kwargs):
        """Run a shell command inside the domain using qrexec.

        This method is a coroutine.
        """  # pylint: disable=redefined-builtin

        if user is None:
            user = self.default_user

        return asyncio.create_subprocess_exec(
            qubes.config.system_path['qrexec_client_path'],
            '-d', str(self.name),
            '{}:{}'.format(user, command),
            **kwargs)

    @asyncio.coroutine
    def run_for_stdio(self, *args, input=None, **kwargs):
        """Run a shell command inside the domain using qrexec.

        This method is a coroutine.
        """  # pylint: disable=redefined-builtin

        kwargs.setdefault('stdin', subprocess.PIPE)
        kwargs.setdefault('stdout', subprocess.PIPE)
        kwargs.setdefault('stderr', subprocess.PIPE)
        if kwargs['stdin'] == subprocess.PIPE and input is None:
            # workaround for https://bugs.python.org/issue39744
            input = b''
        p = yield from self.run(*args, **kwargs)
        stdouterr = yield from p.communicate(input=input)

        if p.returncode:
            raise subprocess.CalledProcessError(p.returncode,
                args[0], *stdouterr)

        return stdouterr

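    # Illustrative usage sketch (not from the original source): running a
    # one-off shell command in the qube and feeding it stdin.
    #
    #   stdout, stderr = yield from vm.run_for_stdio('cat', input=b'hello\n')
    #   assert stdout == b'hello\n'
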
    def is_memory_balancing_possible(self):
        """Check if memory balancing can be enabled.

        Reasons to not enable it:
         - have PCI devices
         - balloon driver not present

        We don't have a reliable way to detect the second point, but a good
        heuristic is HVM virt_mode (PV and PVH require OS support and that
        support includes the balloon driver) combined with lack of
        qrexec/meminfo-writer service support (no Qubes tools installed).
        """
        if list(self.devices['pci'].persistent()):
            return False
        if self.virt_mode == 'hvm':
            # check whether the VM announces any supported service
            features_set = set(self.features)
            template = getattr(self, 'template', None)
            while template is not None:
                features_set.update(template.features)
                template = getattr(template, 'template', None)
            supported_services = any(f.startswith('supported-service.')
                for f in features_set)
            if (not self.features.check_with_template('qrexec', False) or
                    (supported_services and
                     not self.features.check_with_template(
                         'supported-service.meminfo-writer', False))):
                return False
        return True

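    # Illustrative note (not part of the original file): the result of
    # is_memory_balancing_possible() is what decides whether it is safe to
    # give a qube maxmem > memory; for example, a qube with persistent PCI
    # devices always gets False here, so its memory stays fixed at the
    # initial value.
    #
    #   vm.is_memory_balancing_possible()   # -> False for a qube with PCI
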
    def request_memory(self, mem_required=None):
        if not qmemman_present:
            return None

        if mem_required is None:
            if self.virt_mode == 'hvm':
                if self.stubdom_mem:
                    stubdom_mem = self.stubdom_mem
                else:
                    if self.features.check_with_template('linux-stubdom', True):
                        stubdom_mem = 128  # from libxl_create.c
                    else:
                        stubdom_mem = 28  # from libxl_create.c
                    stubdom_mem += 16  # video ram
            else:
                stubdom_mem = 0

            initial_memory = self.memory
            mem_required = int(initial_memory + stubdom_mem) * 1024 * 1024

        qmemman_client = qubes.qmemman.client.QMemmanClient()
        try:
            mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE \
                + self.vcpus * MEM_OVERHEAD_PER_VCPU
            got_memory = qmemman_client.request_memory(
                mem_required_with_overhead)

        except IOError as e:
            raise IOError('Failed to connect to qmemman: {!s}'.format(e))

        if not got_memory:
            qmemman_client.close()
            raise qubes.exc.QubesMemoryError(self)

        return qmemman_client

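    # Worked example (illustrative, not part of the original file): for an
    # HVM qube with memory=400 MiB, a Linux stubdomain (128 + 16 MiB by
    # default) and vcpus=2, and with the per-domain/per-vcpu overhead
    # constants defined earlier in this module (4 MiB base, 1.5 MiB/vcpu):
    #
    #   mem_required  = (400 + 128 + 16) MiB = 544 MiB
    #   overhead      = 4 MiB + 2 * 1.5 MiB  = 7 MiB
    #   total request sent to qmemman       ~= 551 MiB
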
    @staticmethod
    @asyncio.coroutine
    def start_daemon(*command, input=None, **kwargs):
        """Start a daemon for the VM

        This function takes care to run it as the appropriate user.

        :param command: command to run (array for
            :py:meth:`subprocess.check_call`)
        :param kwargs: args for :py:meth:`subprocess.check_call`
        :return: None
        """  # pylint: disable=redefined-builtin

        if os.getuid() == 0:
            # try to always have VM daemons running as normal user, otherwise
            # some files (like clipboard) may be created as root and cause
            # permission problems
            qubes_group = grp.getgrnam('qubes')
            command = ['runuser', '-u', qubes_group.gr_mem[0], '--'] + \
                list(command)
        p = yield from asyncio.create_subprocess_exec(*command, **kwargs)
        stdout, stderr = yield from p.communicate(input=input)
        if p.returncode:
            raise subprocess.CalledProcessError(p.returncode, command,
                output=stdout, stderr=stderr)

    @asyncio.coroutine
    def start_qrexec_daemon(self):
        """Start qrexec daemon.

        :raises OSError: when starting fails.
        """

        self.log.debug('Starting the qrexec daemon')
        qrexec_args = [str(self.xid), self.name, self.default_user]
        if not self.debug:
            qrexec_args.insert(0, "-q")

        qrexec_env = os.environ.copy()
        if not self.features.check_with_template('qrexec', False):
            self.log.debug(
                'Starting the qrexec daemon in background, because of features')
            qrexec_env['QREXEC_STARTUP_NOWAIT'] = '1'
        else:
            qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)

        try:
            yield from self.start_daemon(
                qubes.config.system_path['qrexec_daemon_path'], *qrexec_args,
                env=qrexec_env, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as err:
            if err.returncode == 3:
                raise qubes.exc.QubesVMError(
                    self,
                    'Cannot connect to qrexec agent for {} seconds, '
                    'see /var/log/xen/console/guest-{}.log for details'.format(
                        self.qrexec_timeout, self.name
                    ))
            raise qubes.exc.QubesVMError(
                self, 'qrexec-daemon startup failed: ' + err.stderr.decode())

    @asyncio.coroutine
    def start_qubesdb(self):
        """Start QubesDB daemon.

        :raises OSError: when starting fails.
        """

        # drop old connection to QubesDB, if any
        self._qdb_connection = None

        self.log.info('Starting Qubes DB')
        try:
            yield from self.start_daemon(
                qubes.config.system_path['qubesdb_daemon_path'],
                str(self.xid),
                self.name)
        except subprocess.CalledProcessError:
            raise qubes.exc.QubesException('Cannot execute qubesdb-daemon')

    @asyncio.coroutine
    def create_on_disk(self, pool=None, pools=None):
        """Create files needed for VM.
        """

        self.log.info('Creating directory: {0}'.format(self.dir_path))
        os.makedirs(self.dir_path, mode=0o775, exist_ok=True)

        if pool or pools:
            # pylint: disable=attribute-defined-outside-init
            self.volume_config = _patch_volume_config(self.volume_config, pool,
                                                      pools)
            self.storage = qubes.storage.Storage(self)

        try:
            yield from self.storage.create()
        except:
            try:
                yield from self.storage.remove()
                os.rmdir(self.dir_path)
            except:  # pylint: disable=bare-except
                self.log.exception('failed to cleanup {} after failed VM '
                                   'creation'.format(self.dir_path))
            raise

        if os.path.exists(self.icon_path):
            os.unlink(self.icon_path)
        self.log.info('Creating icon symlink: {} -> {}'.format(
            self.icon_path, self.label.icon_path))
        if hasattr(os, "symlink"):
            os.symlink(self.label.icon_path, self.icon_path)
        else:
            shutil.copy(self.label.icon_path, self.icon_path)

        # fire hooks
        yield from self.fire_event_async('domain-create-on-disk')

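    # Illustrative sketch (not from the original source): create_on_disk() is
    # normally reached through higher-level qube creation and accepts an
    # optional per-volume pool override; the pool name below is hypothetical.
    #
    #   yield from vm.create_on_disk(pools={'private': 'my-lvm-pool'})
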
2017-04-25 23:01:17 +02:00
|
|
|
@asyncio.coroutine
|
2014-12-29 12:46:16 +01:00
|
|
|
def remove_from_disk(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Remove domain remnants from disk."""
|
2016-07-13 18:24:29 +02:00
|
|
|
if not self.is_halted():
|
2016-07-21 19:40:45 +02:00
|
|
|
raise qubes.exc.QubesVMNotHaltedError(
|
2019-06-28 12:29:32 +02:00
|
|
|
"Can't remove VM {!s}, because it's in state {!r}.".format(
|
2016-07-21 19:40:45 +02:00
|
|
|
self, self.get_power_state()))
|
2016-07-13 18:24:29 +02:00
|
|
|
|
vm: call after-shutdown cleanup also from vm.kill and vm.shutdown
Cleaning up after domain shutdown (domain-stopped and domain-shutdown
events) relies on libvirt events, which may be unreliable in some cases
(events may be processed with some delay, or, if libvirt was restarted in
the meantime, may not happen at all). So, instead of only ensuring
proper ordering between shutdown cleanup and the next startup, also trigger
the cleanup whenever we know for sure the domain isn't running:
- at vm.kill() - after libvirt confirms the domain was destroyed
- at vm.shutdown(wait=True) - after successful shutdown
- at vm.remove_from_disk() - after ensuring it isn't running, but just
before actually removing it
This fixes various race conditions:
- qvm-kill && qvm-remove: remove could happen before shutdown cleanup
was done and the storage driver would be confused about that
- qvm-shutdown --wait && qvm-clone: clone could happen before new content was
committed to the original volume, making the clone a copy of the previous
VM state
(and probably more)
Previously this wasn't such a big issue in the default configuration, because
the LVM driver was fully synchronous, effectively blocking the whole qubesd
for the time the cleanup happened.
To avoid code duplication, factor out an _ensure_shutdown_handled function
calling the actual cleanup (and possibly cancelling the one scheduled from
the libvirt event). Note that now the "Duplicated stopped event from libvirt
received!" warning may happen in normal circumstances, not only because of
some bug.
It is very important that post-shutdown cleanup happens when the domain is
not running. To ensure that, take startup_lock and, under it, 1) ensure
it's halted and only then 2) execute the cleanup. This isn't necessary
when removing the domain from disk, because it's already removed from the
collection at that time, which also avoids other calls to it (see also
the "vm/dispvm: fix DispVM cleanup" commit).
Taking the startup_lock in remove_from_disk would instead
cause a deadlock in the DispVM auto-cleanup code:
- vm.kill (or other trigger for the cleanup)
- vm.startup_lock acquire <====
- vm._ensure_shutdown_handled
- domain-shutdown event
- vm._auto_cleanup (in DispVM class)
- vm.remove_from_disk
- cannot take vm.startup_lock again
2018-10-21 04:52:27 +02:00
|
|
|
# make sure shutdown is handled before removing anything, but only if
|
|
|
|
# handling is pending; if not, we may be called from within
|
|
|
|
# domain-shutdown event (DispVM._auto_cleanup), which would deadlock
|
|
|
|
if not self._domain_stopped_event_handled:
|
|
|
|
yield from self._ensure_shutdown_handled()
|
|
|
|
|
2017-06-26 01:56:07 +02:00
|
|
|
yield from self.fire_event_async('domain-remove-from-disk')
|
2016-11-02 06:23:43 +01:00
|
|
|
try:
|
2019-06-28 12:29:34 +02:00
|
|
|
yield from self.storage.remove()
|
|
|
|
finally:
|
|
|
|
try:
|
|
|
|
# TODO: make it async?
|
|
|
|
shutil.rmtree(self.dir_path)
|
|
|
|
except FileNotFoundError:
|
|
|
|
pass
|
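# Removal sketch (assumed names; mirrors the ordering described in the
# commit note above: the qube is dropped from the collection before its
# storage is removed):
#
#     vm = app.domains['work']
#     assert vm.is_halted()
#     del app.domains[vm]
#     loop.run_until_complete(vm.remove_from_disk())
#     app.save()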
2014-12-29 12:46:16 +01:00
|
|
|
|
2017-04-25 23:01:17 +02:00
|
|
|
@asyncio.coroutine
|
2016-07-12 18:24:43 +02:00
|
|
|
def clone_disk_files(self, src, pool=None, pools=None, ):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Clone files from other vm.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-22 19:25:00 +01:00
|
|
|
:param qubes.vm.qubesvm.QubesVM src: source VM
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-06-23 18:41:11 +02:00
|
|
|
# If the current vm name is not a part of `self.app.domains.keys()`,
|
|
|
|
# then the current vm is in the creation process. Calling
|
|
|
|
# `self.is_halted()` at this point would instantiate libvirt, which we want
|
|
|
|
# to avoid.
|
|
|
|
if self.name in self.app.domains.keys() and not self.is_halted():
|
2015-10-14 22:02:11 +02:00
|
|
|
raise qubes.exc.QubesVMNotHaltedError(
|
|
|
|
self, 'Cannot clone a running domain {!r}'.format(self.name))
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2017-04-26 01:07:27 +02:00
|
|
|
msg = "Destination {!s} already exists".format(self.dir_path)
|
|
|
|
assert not os.path.exists(self.dir_path), msg
|
|
|
|
|
|
|
|
self.log.info('Creating directory: {0}'.format(self.dir_path))
|
2018-01-21 23:28:47 +01:00
|
|
|
os.makedirs(self.dir_path, mode=0o775, exist_ok=True)
|
2017-04-26 01:07:27 +02:00
|
|
|
|
2016-07-12 18:24:43 +02:00
|
|
|
if pool or pools:
|
2016-06-16 13:17:24 +02:00
|
|
|
# pylint: disable=attribute-defined-outside-init
|
2016-07-12 18:24:43 +02:00
|
|
|
self.volume_config = _patch_volume_config(self.volume_config, pool,
|
|
|
|
pools)
|
|
|
|
|
2016-04-15 15:16:25 +02:00
|
|
|
self.storage = qubes.storage.Storage(self)
|
2017-04-25 23:01:17 +02:00
|
|
|
yield from self.storage.clone(src)
|
2016-07-12 17:58:30 +02:00
|
|
|
self.storage.verify()
|
|
|
|
assert self.volumes != {}
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-20 14:41:19 +01:00
|
|
|
if src.icon_path is not None \
|
2018-04-06 12:10:50 +02:00
|
|
|
and os.path.exists(src.icon_path) \
|
2014-12-29 12:46:16 +01:00
|
|
|
and self.icon_path is not None:
|
|
|
|
if os.path.islink(src.icon_path):
|
|
|
|
icon_path = os.readlink(src.icon_path)
|
|
|
|
self.log.info(
|
|
|
|
'Creating icon symlink {} -> {}'.format(
|
|
|
|
self.icon_path, icon_path))
|
|
|
|
os.symlink(icon_path, self.icon_path)
|
|
|
|
else:
|
|
|
|
self.log.info(
|
|
|
|
'Copying icon {} -> {}'.format(
|
|
|
|
src.icon_path, self.icon_path))
|
|
|
|
shutil.copy(src.icon_path, self.icon_path)
|
|
|
|
|
|
|
|
# fire hooks
|
2017-06-26 01:56:07 +02:00
|
|
|
yield from self.fire_event_async('domain-clone-files', src=src)
|
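# Cloning sketch: given a halted source qube and a freshly constructed (but
# not yet created-on-disk) destination qube, storage is copied and verified.
# Names and the pool below are illustrative assumptions.
#
#     src_vm = app.domains['work']
#     new_vm = app.add_new_vm(src_vm.__class__, name='work-clone',
#                             label=src_vm.label)
#     loop.run_until_complete(new_vm.clone_disk_files(src_vm, pool='lvm'))
#     app.save()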
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# methods for querying domain state
|
|
|
|
#
|
|
|
|
|
|
|
|
# state of the machine
|
|
|
|
|
|
|
|
def get_power_state(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Return power state description string.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
Return value may be one of the following:
|
|
|
|
|
|
|
|
=============== ========================================================
|
|
|
|
return value meaning
|
|
|
|
=============== ========================================================
|
|
|
|
``'Halted'`` Machine is not active.
|
|
|
|
``'Transient'`` Machine is running, but does not have :program:`guid`
|
|
|
|
or :program:`qrexec` available.
|
|
|
|
``'Running'`` Machine is ready and running.
|
2017-06-02 15:51:02 +02:00
|
|
|
``'Paused'`` Machine is paused.
|
2014-12-29 12:46:16 +01:00
|
|
|
``'Suspended'`` Machine is S3-suspended.
|
|
|
|
``'Halting'`` Machine is in the process of shutting down.
|
2018-02-22 20:53:29 +01:00
|
|
|
``'Dying'`` Machine is still in the process of shutting down.
|
2014-12-29 12:46:16 +01:00
|
|
|
``'Crashed'`` Machine crashed and is unusable, probably because of
|
|
|
|
a bug in dom0.
|
|
|
|
``'NA'`` Machine is in unknown state (most likely libvirt domain
|
|
|
|
is undefined).
|
|
|
|
=============== ========================================================
|
|
|
|
|
|
|
|
FIXME: graph below may be incomplete and wrong. Click on method name to
|
|
|
|
see its documentation.
|
|
|
|
|
|
|
|
.. graphviz::
|
|
|
|
|
|
|
|
digraph {
|
|
|
|
node [fontname="sans-serif"];
|
|
|
|
edge [fontname="mono"];
|
|
|
|
|
|
|
|
|
|
|
|
Halted;
|
|
|
|
NA;
|
|
|
|
Dying;
|
|
|
|
Crashed;
|
|
|
|
Transient;
|
|
|
|
Halting;
|
|
|
|
Running;
|
|
|
|
Paused [color=gray75 fontcolor=gray75];
|
|
|
|
Suspended;
|
|
|
|
|
|
|
|
NA -> Halted;
|
|
|
|
Halted -> NA [constraint=false];
|
|
|
|
|
|
|
|
Halted -> Transient
|
|
|
|
[xlabel="start()" URL="#qubes.vm.qubesvm.QubesVM.start"];
|
|
|
|
Transient -> Running;
|
|
|
|
|
|
|
|
Running -> Halting
|
|
|
|
[xlabel="shutdown()"
|
|
|
|
URL="#qubes.vm.qubesvm.QubesVM.shutdown"
|
|
|
|
constraint=false];
|
|
|
|
Halting -> Dying -> Halted [constraint=false];
|
|
|
|
|
|
|
|
/* cosmetic, invisible edges to put rank constraint */
|
|
|
|
Dying -> Halting [style="invis"];
|
|
|
|
Halting -> Transient [style="invis"];
|
|
|
|
|
|
|
|
Running -> Halted
|
2020-02-15 18:48:32 +01:00
|
|
|
[label="kill()"
|
|
|
|
URL="#qubes.vm.qubesvm.QubesVM.kill"
|
2014-12-29 12:46:16 +01:00
|
|
|
constraint=false];
|
|
|
|
|
|
|
|
Running -> Crashed [constraint=false];
|
|
|
|
Crashed -> Halted [constraint=false];
|
|
|
|
|
|
|
|
Running -> Paused
|
|
|
|
[label="pause()" URL="#qubes.vm.qubesvm.QubesVM.pause"
|
|
|
|
color=gray75 fontcolor=gray75];
|
|
|
|
Running -> Suspended
|
2017-06-02 15:51:02 +02:00
|
|
|
[label="suspend()" URL="#qubes.vm.qubesvm.QubesVM.suspend"
|
2014-12-29 12:46:16 +01:00
|
|
|
color=gray50 fontcolor=gray50];
|
|
|
|
Paused -> Running
|
|
|
|
[label="unpause()" URL="#qubes.vm.qubesvm.QubesVM.unpause"
|
|
|
|
color=gray75 fontcolor=gray75];
|
|
|
|
Suspended -> Running
|
2017-06-02 15:51:02 +02:00
|
|
|
[label="resume()" URL="#qubes.vm.qubesvm.QubesVM.resume"
|
2014-12-29 12:46:16 +01:00
|
|
|
color=gray50 fontcolor=gray50];
|
|
|
|
|
|
|
|
Running -> Suspended
|
|
|
|
[label="suspend()" URL="#qubes.vm.qubesvm.QubesVM.suspend"];
|
|
|
|
Suspended -> Running
|
|
|
|
[label="resume()" URL="#qubes.vm.qubesvm.QubesVM.resume"];
|
|
|
|
|
|
|
|
|
|
|
|
{ rank=source; Halted NA };
|
|
|
|
{ rank=same; Transient Halting };
|
|
|
|
{ rank=same; Crashed Dying };
|
|
|
|
{ rank=sink; Paused Suspended };
|
|
|
|
}
|
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
.. seealso::
|
2015-01-22 19:25:00 +01:00
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
http://wiki.libvirt.org/page/VM_lifecycle
|
|
|
|
Description of VM life cycle from the point of view of libvirt.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainState
|
2015-01-22 19:25:00 +01:00
|
|
|
Libvirt's enum describing precise state of a domain.
|
2019-10-20 12:21:09 +02:00
|
|
|
""" # pylint: disable=too-many-return-statements
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2017-05-22 14:42:01 +02:00
|
|
|
# don't try to define the libvirt domain; if it isn't there, the VM surely
|
|
|
|
# isn't running
|
|
|
|
# reason for this "if": allow vm.is_running() in PCI (or other
|
|
|
|
# device) extension while constructing libvirt XML
|
2017-06-04 01:12:30 +02:00
|
|
|
if self.app.vmm.offline_mode:
|
|
|
|
return 'Halted'
|
2017-05-22 14:42:01 +02:00
|
|
|
if self._libvirt_domain is None:
|
|
|
|
try:
|
|
|
|
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
|
|
|
|
self.uuid.bytes)
|
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
|
|
|
|
return 'Halted'
|
2018-07-15 23:08:23 +02:00
|
|
|
raise
|
2017-05-22 14:42:01 +02:00
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
libvirt_domain = self.libvirt_domain
|
|
|
|
if libvirt_domain is None:
|
|
|
|
return 'Halted'
|
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
try:
|
|
|
|
if libvirt_domain.isActive():
|
2016-01-29 17:56:33 +01:00
|
|
|
# pylint: disable=line-too-long
|
2016-03-03 01:05:23 +01:00
|
|
|
if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
|
|
|
|
return "Paused"
|
2018-07-15 23:08:23 +02:00
|
|
|
if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_CRASHED:
|
2016-03-03 01:05:23 +01:00
|
|
|
return "Crashed"
|
2018-07-15 23:08:23 +02:00
|
|
|
if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTDOWN:
|
2016-03-03 01:05:23 +01:00
|
|
|
return "Halting"
|
2018-07-15 23:08:23 +02:00
|
|
|
if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
|
2016-03-03 01:05:23 +01:00
|
|
|
return "Dying"
|
2019-10-20 12:21:09 +02:00
|
|
|
if libvirt_domain.state()[
|
|
|
|
0] == libvirt.VIR_DOMAIN_PMSUSPENDED: # nopep8
|
2016-03-03 01:05:23 +01:00
|
|
|
return "Suspended"
|
2018-07-15 23:08:23 +02:00
|
|
|
if not self.is_fully_usable():
|
|
|
|
return "Transient"
|
|
|
|
return "Running"
|
2017-04-15 20:04:38 +02:00
|
|
|
|
2017-04-21 15:43:46 +02:00
|
|
|
return 'Halted'
|
2016-03-03 01:05:23 +01:00
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
|
|
|
|
return 'Halted'
|
2017-04-21 15:43:46 +02:00
|
|
|
raise
|
2016-03-03 01:05:23 +01:00
|
|
|
|
|
|
|
assert False
|
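# Example consumer sketch (`vm` is a QubesVM instance; the branches are
# illustrative, not an exhaustive treatment of every state):
#
#     state = vm.get_power_state()
#     if state == 'Running':
#         pass        # qrexec is available, safe to talk to the qube
#     elif state == 'Transient':
#         pass        # domain exists, but agents are not up yet
#     elif state in ('Halting', 'Dying'):
#         pass        # shutdown in progress, poll until 'Halted'
#     elif state in ('Halted', 'NA'):
#         pass        # not running (or libvirt domain undefined)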
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-06-16 21:06:20 +02:00
|
|
|
def is_halted(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
""" Check whether this domain's state is 'Halted'
|
2016-06-16 21:06:20 +02:00
|
|
|
:returns: :py:obj:`True` if this domain is halted, \
|
|
|
|
:py:obj:`False` otherwise.
|
|
|
|
:rtype: bool
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2016-06-16 21:06:20 +02:00
|
|
|
return self.get_power_state() == 'Halted'
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
def is_running(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Check whether this domain is running.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
:returns: :py:obj:`True` if this domain is started, \
|
|
|
|
:py:obj:`False` otherwise.
|
2014-12-29 12:46:16 +01:00
|
|
|
:rtype: bool
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-05-21 03:26:39 +02:00
|
|
|
if self.app.vmm.offline_mode:
|
|
|
|
return False
|
|
|
|
|
2017-05-22 14:42:01 +02:00
|
|
|
# don't try to define the libvirt domain; if it isn't there, the VM surely
|
|
|
|
# isn't running
|
|
|
|
# reason for this "if": allow vm.is_running() in PCI (or other
|
|
|
|
# device) extension while constructing libvirt XML
|
|
|
|
if self._libvirt_domain is None:
|
|
|
|
try:
|
|
|
|
self._libvirt_domain = self.app.vmm.libvirt_conn.lookupByUUID(
|
|
|
|
self.uuid.bytes)
|
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
|
|
|
|
return False
|
2018-07-15 23:08:23 +02:00
|
|
|
raise
|
2017-05-22 14:42:01 +02:00
|
|
|
|
2018-02-21 04:30:47 +01:00
|
|
|
return bool(self.libvirt_domain.isActive())
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
def is_paused(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Check whether this domain is paused.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
:returns: :py:obj:`True` if this domain is paused, \
|
|
|
|
:py:obj:`False` otherwise.
|
2014-12-29 12:46:16 +01:00
|
|
|
:rtype: bool
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
return self.libvirt_domain \
|
2019-10-20 12:21:09 +02:00
|
|
|
and self.libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
def is_qrexec_running(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Check whether qrexec for this domain is available.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2015-01-19 17:06:30 +01:00
|
|
|
:returns: :py:obj:`True` if qrexec is running, \
|
|
|
|
:py:obj:`False` otherwise.
|
2014-12-29 12:46:16 +01:00
|
|
|
:rtype: bool
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2018-07-15 23:08:23 +02:00
|
|
|
if self.xid < 0: # pylint: disable=comparison-with-callable
|
2014-12-29 12:46:16 +01:00
|
|
|
return False
|
|
|
|
return os.path.exists('/var/run/qubes/qrexec.%s' % self.name)
|
|
|
|
|
|
|
|
def is_fully_usable(self):
|
2016-04-19 15:59:25 +02:00
|
|
|
return all(self.fire_event('domain-is-fully-usable'))
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-04-19 15:59:25 +02:00
|
|
|
@qubes.events.handler('domain-is-fully-usable')
|
|
|
|
def on_domain_is_fully_usable(self, event):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Check whether domain is running and sane.
|
2016-04-19 15:59:25 +02:00
|
|
|
|
|
|
|
Currently this checks for running qrexec.
|
2019-10-20 12:21:09 +02:00
|
|
|
""" # pylint: disable=unused-argument
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2020-01-05 22:13:04 +01:00
|
|
|
if self.features.check_with_template('qrexec', False):
|
|
|
|
# if the qube advertises qrexec, require the agent to actually be running
|
|
|
|
yield self.is_qrexec_running()
|
|
|
|
else:
|
|
|
|
yield True
|
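# Sketch of how an extension could join this check and veto usability; the
# qubes.ext handler API is assumed here and the 'my-agent-ready' feature
# name is made up for the example:
#
#     import qubes.ext
#
#     class MyAgentExtension(qubes.ext.Extension):
#         @qubes.ext.handler('domain-is-fully-usable')
#         def on_domain_is_fully_usable(self, vm, event):
#             # all handlers' results are and-ed together by is_fully_usable()
#             yield bool(vm.features.get('my-agent-ready', True))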
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
# memory and disk
|
|
|
|
|
|
|
|
def get_mem(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Get current memory usage from VM.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
:returns: Memory usage [FIXME unit].
|
|
|
|
:rtype: FIXME
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
if self.libvirt_domain is None:
|
|
|
|
return 0
|
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
try:
|
|
|
|
if not self.libvirt_domain.isActive():
|
|
|
|
return 0
|
|
|
|
return self.libvirt_domain.info()[1]
|
|
|
|
|
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() in (
|
|
|
|
# qube no longer exists
|
|
|
|
libvirt.VIR_ERR_NO_DOMAIN,
|
|
|
|
|
|
|
|
# libxl_domain_info failed (race condition from isActive)
|
2016-06-16 21:07:36 +02:00
|
|
|
libvirt.VIR_ERR_INTERNAL_ERROR):
|
2016-03-03 01:05:23 +01:00
|
|
|
return 0
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
self.log.exception(
|
|
|
|
'libvirt error code: {!r}'.format(e.get_error_code()))
|
|
|
|
raise
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
def get_mem_static_max(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Get maximum memory available to VM.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
:returns: Memory limit [FIXME unit].
|
|
|
|
:rtype: FIXME
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
if self.libvirt_domain is None:
|
|
|
|
return 0
|
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
try:
|
|
|
|
return self.libvirt_domain.maxMemory()
|
|
|
|
|
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() in (
|
|
|
|
# qube no longer exists
|
|
|
|
libvirt.VIR_ERR_NO_DOMAIN,
|
|
|
|
|
|
|
|
# libxl_domain_info failed (race condition from isActive)
|
2016-06-16 21:07:36 +02:00
|
|
|
libvirt.VIR_ERR_INTERNAL_ERROR):
|
2016-03-03 01:05:23 +01:00
|
|
|
return 0
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
self.log.exception(
|
|
|
|
'libvirt error code: {!r}'.format(e.get_error_code()))
|
|
|
|
raise
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
def get_cputime(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Get total CPU time burned by this domain since start.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
:returns: CPU time usage [FIXME unit].
|
|
|
|
:rtype: FIXME
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
if self.libvirt_domain is None:
|
|
|
|
return 0
|
|
|
|
|
2016-03-03 01:05:23 +01:00
|
|
|
try:
|
|
|
|
if not self.libvirt_domain.isActive():
|
|
|
|
return 0
|
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
# this does not work, because libvirt
|
2019-10-20 16:40:40 +02:00
|
|
|
# return self.libvirt_domain.getCPUStats(
|
|
|
|
# libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)[0]['cpu_time']/10**9
|
2016-03-03 01:05:23 +01:00
|
|
|
|
|
|
|
return self.libvirt_domain.info()[4]
|
|
|
|
|
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() in (
|
|
|
|
# qube no longer exists
|
|
|
|
libvirt.VIR_ERR_NO_DOMAIN,
|
|
|
|
|
|
|
|
# libxl_domain_info failed (race condition from isActive)
|
2016-06-16 21:07:36 +02:00
|
|
|
libvirt.VIR_ERR_INTERNAL_ERROR):
|
2016-03-03 01:05:23 +01:00
|
|
|
return 0
|
|
|
|
|
2018-07-15 23:08:23 +02:00
|
|
|
self.log.exception(
|
|
|
|
'libvirt error code: {!r}'.format(e.get_error_code()))
|
|
|
|
raise
|
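# Read-out sketch for the three getters above; units follow libvirt's
# virDomainGetInfo()/maxMemory(): memory in KiB, CPU time in nanoseconds.
#
#     for vm in app.domains:
#         if vm.is_running():
#             print(vm.name,
#                   vm.get_mem(),             # KiB
#                   vm.get_mem_static_max(),  # KiB
#                   vm.get_cputime())         # ns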
2016-03-03 01:05:23 +01:00
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
# miscellaneous
|
|
|
|
|
2018-01-11 03:50:24 +01:00
|
|
|
@qubes.stateless_property
|
|
|
|
def start_time(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Tell when machine was started.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2018-01-11 03:50:24 +01:00
|
|
|
:rtype: float or None
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
if not self.is_running():
|
|
|
|
return None
|
|
|
|
|
|
|
|
# TODO shouldn't this be qubesdb?
|
2015-01-19 17:06:30 +01:00
|
|
|
start_time = self.app.vmm.xs.read('',
|
2019-10-20 12:21:09 +02:00
|
|
|
'/vm/{}/start_time'.format(self.uuid))
|
2014-12-29 12:46:16 +01:00
|
|
|
if start_time != '':
|
2018-01-11 03:50:24 +01:00
|
|
|
return float(start_time)
|
2017-04-15 20:04:38 +02:00
|
|
|
|
2017-04-21 15:43:46 +02:00
|
|
|
return None
|
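# Uptime sketch, assuming the xenstore value is a (fractional) Unix
# timestamp in seconds since the epoch:
#
#     import time
#     started = vm.start_time
#     if started is not None:
#         uptime = time.time() - started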
2014-12-29 12:46:16 +01:00
|
|
|
|
2019-02-25 04:59:46 +01:00
|
|
|
@property
|
|
|
|
def kernelopts_common(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Kernel options which should be used in addition to *kernelopts*
|
2019-02-25 04:59:46 +01:00
|
|
|
property.
|
|
|
|
|
|
|
|
This is specific to the kernel (and the initrd, if any).
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2019-02-25 04:59:46 +01:00
|
|
|
if not self.kernel:
|
|
|
|
return ''
|
|
|
|
kernels_dir = self.storage.kernels_dir
|
|
|
|
|
|
|
|
kernelopts_path = os.path.join(kernels_dir,
|
2019-10-20 12:21:09 +02:00
|
|
|
'default-kernelopts-common.txt')
|
2019-02-25 04:59:46 +01:00
|
|
|
if os.path.exists(kernelopts_path):
|
|
|
|
with open(kernelopts_path) as f_kernelopts:
|
|
|
|
return f_kernelopts.read().rstrip('\n\r')
|
|
|
|
else:
|
|
|
|
return qubes.config.defaults['kernelopts_common']
|
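# The file is read from the kernel's directory in the kernels pool; a
# hypothetical layout (paths and contents are illustrative only):
#
#     /var/lib/qubes/vm-kernels/5.10.90-1/
#         vmlinuz
#         initramfs
#         default-kernelopts-common.txt   # e.g. "console=hvc0 root=/dev/xvda ..."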
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
#
|
|
|
|
# helper methods
|
|
|
|
#
|
|
|
|
|
|
|
|
def relative_path(self, path):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Return path relative to py:attr:`dir_path`.
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
:param str path: Path in question.
|
|
|
|
:returns: Relative path.
|
2019-10-20 12:21:09 +02:00
|
|
|
"""
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
return os.path.relpath(path, self.dir_path)
|
|
|
|
|
|
|
|
def create_qdb_entries(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Create entries in Qubes DB.
|
|
|
|
"""
|
2015-10-05 23:46:25 +02:00
|
|
|
# pylint: disable=no-member
|
|
|
|
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/name', self.name)
|
|
|
|
self.untrusted_qdb.write('/type', self.__class__.__name__)
|
2018-02-01 01:50:42 +01:00
|
|
|
self.untrusted_qdb.write('/default-user', self.default_user)
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-vm-updateable', str(self.updateable))
|
|
|
|
self.untrusted_qdb.write('/qubes-vm-persistence',
|
2019-10-20 12:21:09 +02:00
|
|
|
'full' if self.updateable else 'rw-only')
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-debug-mode', str(int(self.debug)))
|
2016-03-03 01:05:23 +01:00
|
|
|
try:
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-base-template', self.template.name)
|
2016-03-03 01:05:23 +01:00
|
|
|
except AttributeError:
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-base-template', '')
|
2016-03-03 01:05:23 +01:00
|
|
|
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-random-seed',
|
2019-10-20 12:21:09 +02:00
|
|
|
base64.b64encode(qubes.utils.urandom(64)))
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
if self.provides_network:
|
2016-09-07 03:12:03 +02:00
|
|
|
# '/qubes-netvm-network' value is only checked for being non-empty
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-netvm-network', str(self.gateway))
|
|
|
|
self.untrusted_qdb.write('/qubes-netvm-gateway', str(self.gateway))
|
2019-09-25 01:18:09 +02:00
|
|
|
if self.gateway6: # pylint: disable=using-constant-test
|
2017-12-07 01:32:57 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-netvm-gateway6',
|
2019-10-20 12:21:09 +02:00
|
|
|
str(self.gateway6))
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-netvm-netmask', str(self.netmask))
|
2016-01-21 13:08:56 +01:00
|
|
|
|
2016-09-07 03:12:03 +02:00
|
|
|
for i, addr in zip(('primary', 'secondary'), self.dns):
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-netvm-{}-dns'.format(i), addr)
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
if self.netvm is not None:
|
2019-05-16 17:23:47 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-mac', str(self.mac))
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-ip', str(self.visible_ip))
|
|
|
|
self.untrusted_qdb.write('/qubes-netmask',
|
2019-10-20 12:21:09 +02:00
|
|
|
str(self.visible_netmask))
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-gateway',
|
2019-10-20 12:21:09 +02:00
|
|
|
str(self.visible_gateway))
|
2016-01-21 13:08:56 +01:00
|
|
|
|
2016-09-07 03:12:03 +02:00
|
|
|
for i, addr in zip(('primary', 'secondary'), self.dns):
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-{}-dns'.format(i), str(addr))
|
2016-01-21 13:08:56 +01:00
|
|
|
|
2019-09-25 01:18:09 +02:00
|
|
|
if self.visible_ip6: # pylint: disable=using-constant-test
|
2017-12-06 15:23:47 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-ip6', str(self.visible_ip6))
|
2019-09-25 01:18:09 +02:00
|
|
|
if self.visible_gateway6: # pylint: disable=using-constant-test
|
2017-12-01 03:24:34 +01:00
|
|
|
self.untrusted_qdb.write('/qubes-gateway6',
|
2019-10-20 12:21:09 +02:00
|
|
|
str(self.visible_gateway6))
|
2016-09-07 03:05:49 +02:00
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
tzname = qubes.utils.get_timezone()
|
|
|
|
if tzname:
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-timezone', tzname)
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2017-07-21 23:11:24 +02:00
|
|
|
self.untrusted_qdb.write('/qubes-block-devices', '')
|
|
|
|
self.untrusted_qdb.write('/qubes-usb-devices', '')
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
# TODO: Currently the whole qmemman is quite Xen-specific, so stay with
|
|
|
|
# xenstore for it until decided otherwise
|
|
|
|
if qmemman_present:
|
2015-01-20 14:09:47 +01:00
|
|
|
self.app.vmm.xs.set_permissions('',
|
2019-10-20 12:21:09 +02:00
|
|
|
'/local/domain/{}/memory'.format(
|
|
|
|
self.xid),
|
|
|
|
[{'dom': self.xid}])
|
2014-12-29 12:46:16 +01:00
|
|
|
|
2016-03-04 18:04:39 +01:00
|
|
|
self.fire_event('domain-qdb-create')
|
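# Resulting QubesDB layout for a networked qube, as written above (the
# values are illustrative):
#
#     /name                 = "work"
#     /qubes-vm-persistence = "rw-only"
#     /qubes-ip             = "10.137.0.12"
#     /qubes-gateway        = "10.138.24.248"
#     /qubes-primary-dns    = "10.139.1.1"
#     /qubes-timezone       = "Europe/Warsaw"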
2014-12-29 12:46:16 +01:00
|
|
|
|
2017-03-29 17:11:24 +02:00
|
|
|
# TODO async; update this in constructor
|
2014-12-29 12:46:16 +01:00
|
|
|
def _update_libvirt_domain(self):
|
2019-10-20 12:21:09 +02:00
|
|
|
"""Re-initialise :py:attr:`libvirt_domain`."""
|
2014-12-29 12:46:16 +01:00
|
|
|
domain_config = self.create_config_file()
|
|
|
|
try:
|
2015-01-20 14:09:47 +01:00
|
|
|
self._libvirt_domain = self.app.vmm.libvirt_conn.defineXML(
|
|
|
|
domain_config)
|
2016-03-03 01:05:23 +01:00
|
|
|
except libvirt.libvirtError as e:
|
|
|
|
if e.get_error_code() == libvirt.VIR_ERR_OS_TYPE \
|
|
|
|
and e.get_str2() == 'hvm':
|
2019-10-20 12:21:09 +02:00
|
|
|
raise qubes.exc.QubesVMError(
|
|
|
|
self,
|
2016-03-03 01:05:23 +01:00
|
|
|
'HVM qubes are not supported on this machine. '
|
|
|
|
'Check BIOS settings for VT-x/AMD-V extensions.')
|
2019-02-27 16:19:29 +01:00
|
|
|
raise
|
2014-12-29 12:46:16 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# workshop -- those are to be reworked later
|
|
|
|
#
|
|
|
|
|
|
|
|
def get_prefmem(self):
|
|
|
|
# TODO: qmemman is still xen specific
|
2019-10-20 12:21:09 +02:00
|
|
|
untrusted_meminfo_key = self.app.vmm.xs.read(
|
|
|
|
'', '/local/domain/{}/memory/meminfo'.format(self.xid))
|
2015-10-05 23:46:25 +02:00
|
|
|
|
2014-12-29 12:46:16 +01:00
|
|
|
if untrusted_meminfo_key is None or untrusted_meminfo_key == '':
|
|
|
|
return 0
|
2015-10-05 23:46:25 +02:00
|
|
|
|
|
|
|
domain = qubes.qmemman.DomainState(self.xid)
|
|
|
|
qubes.qmemman.algo.refresh_meminfo_for_domain(
|
|
|
|
domain, untrusted_meminfo_key)
|
2016-09-07 03:43:46 +02:00
|
|
|
if domain.mem_used is None:
|
|
|
|
# apparently invalid xenstore content
|
|
|
|
return 0
|
2015-10-05 23:46:25 +02:00
|
|
|
domain.memory_maximum = self.get_mem_static_max() * 1024
|
|
|
|
|
|
|
|
return qubes.qmemman.algo.prefmem(domain) / 1024
|
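# Units sketch: get_mem_static_max() reports KiB (libvirt maxMemory()),
# qmemman's DomainState appears to work in bytes, and the result is handed
# back in KiB -- hence the * 1024 and / 1024 above:
#
#     prefmem_kib = vm.get_prefmem()   # 0 if meminfo isn't available yet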
2016-07-12 18:24:43 +02:00
|
|
|
|
|
|
|
|
|
|
|
def _clean_volume_config(config):
|
2017-07-02 01:07:48 +02:00
|
|
|
common_attributes = ['name', 'pool', 'size',
|
|
|
|
'revisions_to_keep', 'rw', 'snap_on_start',
|
|
|
|
'save_on_stop', 'source']
|
2017-07-12 10:43:48 +02:00
|
|
|
return {k: v for k, v in config.items() if k in common_attributes}
|
2016-07-12 18:24:43 +02:00
|
|
|
|
|
|
|
|
|
|
|
def _patch_pool_config(config, pool=None, pools=None):
|
|
|
|
assert pool is not None or pools is not None
|
2017-07-01 20:47:08 +02:00
|
|
|
is_snapshot = config['snap_on_start']
|
|
|
|
is_rw = config['rw']
|
2016-07-12 18:24:43 +02:00
|
|
|
|
|
|
|
name = config['name']
|
|
|
|
|
2017-07-01 20:47:08 +02:00
|
|
|
if pool and not is_snapshot and is_rw:
|
2016-07-12 18:24:43 +02:00
|
|
|
config['pool'] = str(pool)
|
2017-07-01 20:47:08 +02:00
|
|
|
elif pool:
|
2016-07-12 18:24:43 +02:00
|
|
|
pass
|
|
|
|
elif pools and name in pools.keys():
|
2017-07-01 20:47:08 +02:00
|
|
|
if not is_snapshot:
|
2016-07-12 18:24:43 +02:00
|
|
|
config['pool'] = str(pools[name])
|
|
|
|
else:
|
2018-10-18 00:03:05 +02:00
|
|
|
msg = "Snapshot volume {0!s} must be in the same pool as its " \
|
|
|
|
"origin ({0!s} volume of template)," \
|
|
|
|
"cannot move to pool {1!s} " \
|
2016-07-12 18:24:43 +02:00
|
|
|
.format(name, pools[name])
|
|
|
|
raise qubes.exc.QubesException(msg)
|
|
|
|
return config
|
|
|
|
|
2019-10-20 12:21:09 +02:00
|
|
|
|
2016-07-12 18:24:43 +02:00
|
|
|
def _patch_volume_config(volume_config, pool=None, pools=None):
|
|
|
|
assert not (pool and pools), \
|
|
|
|
'You cannot pass both pool and pools parameters at the same time'
|
|
|
|
assert pool or pools
|
|
|
|
|
|
|
|
result = {}
|
|
|
|
|
|
|
|
for name, config in volume_config.items():
|
|
|
|
# copy only the subset of volume_config key/values
|
|
|
|
dst_config = _clean_volume_config(config)
|
|
|
|
|
|
|
|
if pool is not None or pools is not None:
|
|
|
|
dst_config = _patch_pool_config(dst_config, pool, pools)
|
|
|
|
|
|
|
|
result[name] = dst_config
|
|
|
|
|
|
|
|
return result
|
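# Worked example for the helpers above (volume and pool names are
# illustrative); only the keys the helpers actually inspect are shown:
#
#     volume_config = {
#         'root':    {'name': 'root',    'snap_on_start': True,  'rw': False},
#         'private': {'name': 'private', 'snap_on_start': False, 'rw': True},
#     }
#     _patch_volume_config(volume_config, pool='fast-lvm')
#     # -> only 'private' (rw, not a snapshot) gets 'pool': 'fast-lvm';
#     #    the snapshot-based 'root' keeps following its template's pool
#     _patch_volume_config(volume_config, pools={'private': 'fast-lvm'})
#     # -> only the explicitly listed, non-snapshot volume is moved;
#     #    asking to move 'root' this way would raise QubesException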