#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2010  Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013  Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#

import datetime
import base64
import hashlib
import logging
import lxml.etree
import os
import os.path
import re
import shutil
import subprocess
import sys
import time
import uuid
import xml.parsers.expat
import signal

from qubes import qmemman
from qubes import qmemman_algo
import libvirt

from qubes.qubes import dry_run,vmm
from qubes.qubes import register_qubes_vm_class
from qubes.qubes import QubesVmCollection,QubesException,QubesHost,QubesVmLabels
from qubes.qubes import defaults,system_path,vm_files,qubes_max_qid
from qubes.storage import get_pool

qmemman_present = False
try:
    from qubes.qmemman_client import QMemmanClient
    qmemman_present = True
except ImportError:
    pass

import qubes.qubesutils

xid_to_name_cache = {}


class QubesVm(object):
    """
    A representation of one Qubes VM.

    Only persistent information is stored here; all runtime information,
    e.g. the Xen domain id, is to be retrieved via the Xen API.
    Note that qid is not the same as Xen's domid!
    """

    # The order in which to load this VM type from qubes.xml
    load_order = 100

    # hooks for plugins (modules) which want to influence existing classes,
    # without introducing new ones
    hooks_clone_disk_files = []
    hooks_create_on_disk = []
    hooks_create_qubesdb_entries = []
    hooks_get_attrs_config = []
    hooks_get_clone_attrs = []
    hooks_get_config_params = []
    hooks_init = []
    hooks_label_setter = []
    hooks_netvm_setter = []
    hooks_post_rename = []
    hooks_pre_rename = []
    hooks_remove_from_disk = []
    hooks_start = []
    hooks_verify_files = []
    hooks_set_attr = []

    def get_attrs_config(self):
        """ Object attributes for serialization/deserialization.

        Inner dict keys:
         - order: initialization order (to keep dependencies intact);
                  attrs without an order will be evaluated at the end
         - default: default value used when the attr is not given to the
                  object constructor
         - attr: set the value on this attribute instead of the parameter name
         - eval: (DEPRECATED) assign the result of this expression instead of
                  the value directly; the local variable 'value' contains the
                  attribute value (or the default if it was not given)
         - func: callable used to parse the value retrieved from XML
         - save: use the evaluation result as the value for XML serialization;
                  only attrs with a 'save' key will be saved in XML
         - save_skip: if present and evaluates to true, the attr will be
                  omitted in XML
         - save_attr: save to this XML attribute instead of the parameter name
        """

        attrs = {
            # __qid cannot be accessed by setattr, so must be set manually in __init__
            "qid": { "attr": "_qid", "order": 0 },
            "name": { "order": 1 },
            "uuid": { "order": 0, "eval": 'uuid.UUID(value) if value else None' },
            "dir_path": { "default": None, "order": 2 },
            "pool_name": { "default": "default" },
            "conf_file": {
                "func": lambda value: self.absolute_path(value, self.name +
                                                         ".conf"),
                "order": 3 },
            ### order >= 10: have base attrs set
            "firewall_conf": {
                "func": self._absolute_path_gen(vm_files["firewall_conf"]),
                "order": 10 },
            "installed_by_rpm": { "default": False, 'order': 10 },
            "template": { "default": None, "attr": '_template', 'order': 10 },
            ### order >= 20: have template set
            "uses_default_netvm": { "default": True, 'order': 20 },
            "netvm": { "default": None, "attr": "_netvm", 'order': 20 },
            "label": { "attr": "_label", "default": defaults["appvm_label"], 'order': 20,
                'xml_deserialize': lambda _x: QubesVmLabels[_x] },
            "memory": { "default": defaults["memory"], 'order': 20 },
            "maxmem": { "default": None, 'order': 25 },
            "pcidevs": {
                "default": '[]',
                "order": 25,
                "func": lambda value: [] if value in ["none", None] else
                    eval(value) if value.find("[") >= 0 else
                    eval("[" + value + "]") },
            "pci_strictreset": {"default": True},
            # Internal VM (not shown in qubes-manager, doesn't create appmenus entries)
            "internal": { "default": False, 'attr': '_internal' },
            "vcpus": { "default": 2 },
            "uses_default_kernel": { "default": True, 'order': 30 },
            "uses_default_kernelopts": { "default": True, 'order': 30 },
            "kernel": {
                "attr": "_kernel",
                "default": None,
                "order": 31,
                "func": lambda value: self._collection.get_default_kernel() if
                    self.uses_default_kernel else value },
            "kernelopts": {
                "default": "",
                "order": 31,
                "func": lambda value: value if not self.uses_default_kernelopts
                    else defaults["kernelopts_pcidevs"] if len(self.pcidevs) > 0
                    else self.template.kernelopts if self.template
                    else defaults["kernelopts"] },
            "mac": { "attr": "_mac", "default": None },
            "include_in_backups": {
                "func": lambda x: x if x is not None
                    else not self.installed_by_rpm },
            "services": {
                "default": {},
                "func": lambda value: eval(str(value)) },
            "debug": { "default": False },
            "default_user": { "default": "user", "attr": "_default_user" },
            "qrexec_timeout": { "default": 60 },
            "autostart": { "default": False, "attr": "_autostart" },
            "uses_default_dispvm_netvm": {"default": True, "order": 30},
            "dispvm_netvm": {"attr": "_dispvm_netvm", "default": None},
            "backup_content": { 'default': False },
            "backup_size": {
                "default": 0,
                "func": int },
            "backup_path": { 'default': "" },
            "backup_timestamp": {
                "func": lambda value:
                    datetime.datetime.fromtimestamp(int(value)) if value
                    else None },
            ##### Internal attributes - will be overridden in __init__ regardless of args
            "config_file_template": {
                "func": lambda x: system_path["config_template_pv"] },
            "icon_path": {
                "func": lambda x: os.path.join(self.dir_path, "icon.png") if
                    self.dir_path is not None else None },
            # used to suppress side effects of clone_attrs
            "_do_not_reset_firewall": { "func": lambda x: False },
            "kernels_dir": {
                # for backward compatibility (or another rare case): kernel=None -> kernel in VM dir
                "func": lambda x:
                    os.path.join(system_path["qubes_kernels_base_dir"],
                                 self.kernel) if self.kernel is not None
                    else os.path.join(self.dir_path,
                                      vm_files["kernels_subdir"]) },
        }

        ### Mark attrs for XML inclusion
        # Simple string attrs
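        # (each lambda binds prop via a default argument so that every saver
        # captures its own attribute name rather than the loop's last value)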
        for prop in ['qid', 'uuid', 'name', 'dir_path', 'memory', 'maxmem',
                     'pcidevs', 'pci_strictreset', 'vcpus', 'internal',
                     'uses_default_kernel', 'kernel', 'uses_default_kernelopts',
                     'kernelopts', 'services', 'installed_by_rpm',
                     'uses_default_netvm', 'include_in_backups', 'debug',
                     'qrexec_timeout', 'autostart', 'uses_default_dispvm_netvm',
                     'backup_content', 'backup_size', 'backup_path', 'pool_name']:
            attrs[prop]['save'] = lambda prop=prop: str(getattr(self, prop))
        # Simple paths
        for prop in ['conf_file', 'firewall_conf']:
            attrs[prop]['save'] = \
                lambda prop=prop: self.relative_path(getattr(self, prop))
            attrs[prop]['save_skip'] = \
                lambda prop=prop: getattr(self, prop) is None

        # Can happen only if VM created in offline mode
        attrs['maxmem']['save_skip'] = lambda: self.maxmem is None
        attrs['vcpus']['save_skip'] = lambda: self.vcpus is None

        attrs['uuid']['save_skip'] = lambda: self.uuid is None
        attrs['mac']['save'] = lambda: str(self._mac)
        attrs['mac']['save_skip'] = lambda: self._mac is None

        attrs['default_user']['save'] = lambda: str(self._default_user)

        attrs['backup_timestamp']['save'] = \
            lambda: self.backup_timestamp.strftime("%s")
        attrs['backup_timestamp']['save_skip'] = \
            lambda: self.backup_timestamp is None

        attrs['netvm']['save'] = \
            lambda: str(self.netvm.qid) if self.netvm is not None else "none"
        attrs['netvm']['save_attr'] = "netvm_qid"
        attrs['dispvm_netvm']['save'] = \
            lambda: str(self.dispvm_netvm.qid) \
                if self.dispvm_netvm is not None \
                else "none"
        attrs['template']['save'] = \
            lambda: str(self.template.qid) if self.template else "none"
        attrs['template']['save_attr'] = "template_qid"
        attrs['label']['save'] = lambda: self.label.name

        # fire hooks
        for hook in self.hooks_get_attrs_config:
            attrs = hook(self, attrs)

        return attrs

    def post_set_attr(self, attr, newvalue, oldvalue):
        for hook in self.hooks_set_attr:
            hook(self, attr, newvalue, oldvalue)

    def __basic_parse_xml_attr(self, value):
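        # Best-effort typing of qubes.xml attribute strings, e.g.
        # "true" -> True, "42" -> 42, "none" -> None; anything else is
        # returned unchanged as a string.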
        if value is None:
            return None
        if value.lower() == "none":
            return None
        if value.lower() == "true":
            return True
        if value.lower() == "false":
            return False
        if value.isdigit():
            return int(value)
        return value

    def __init__(self, **kwargs):
        self._collection = None
        if 'collection' in kwargs:
            self._collection = kwargs['collection']
        else:
            raise ValueError("No collection given to QubesVM constructor")

        # Special case for template b/c it is given in "template_qid" property
        if "xml_element" in kwargs and kwargs["xml_element"].get("template_qid"):
            template_qid = kwargs["xml_element"].get("template_qid")
            if template_qid.lower() != "none":
                if int(template_qid) in self._collection:
                    kwargs["template"] = self._collection[int(template_qid)]
                else:
                    raise ValueError("Unknown template with QID %s" % template_qid)
        attrs = self.get_attrs_config()
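        # Initialize attributes in dependency ('order') order: explicit
        # kwargs win, then values deserialized from the XML element, then the
        # declared defaults.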
        for attr_name in sorted(attrs, key=lambda _x: attrs[_x]['order'] if 'order' in attrs[_x] else 1000):
            attr_config = attrs[attr_name]
            attr = attr_name
            if 'attr' in attr_config:
                attr = attr_config['attr']
            value = None
            if attr_name in kwargs:
                value = kwargs[attr_name]
            elif 'xml_element' in kwargs and kwargs['xml_element'].get(attr_name) is not None:
                if 'xml_deserialize' in attr_config and callable(attr_config['xml_deserialize']):
                    value = attr_config['xml_deserialize'](kwargs['xml_element'].get(attr_name))
                else:
                    value = self.__basic_parse_xml_attr(kwargs['xml_element'].get(attr_name))
            else:
                if 'default' in attr_config:
                    value = attr_config['default']
            if 'func' in attr_config:
                setattr(self, attr, attr_config['func'](value))
            elif 'eval' in attr_config:
                setattr(self, attr, eval(attr_config['eval']))
            else:
                #print "setting %s to %s" % (attr, value)
                setattr(self, attr, value)

        # Init private attrs
        self.__qid = self._qid

        self._libvirt_domain = None
        self._qdb_connection = None

        assert self.__qid < qubes_max_qid, "VM id out of bounds!"
        assert self.name is not None

        if not self.verify_name(self.name):
            msg = ("'%s' is an invalid VM name (invalid characters, over 31 chars long, "
                   "or one of 'none', 'true', 'false')") % self.name
            if 'xml_element' in kwargs:
                print >>sys.stderr, "WARNING: %s" % msg
            else:
                raise QubesException(msg)

        if self.netvm is not None:
            self.netvm.connected_vms[self.qid] = self

        # Not done in a generic way, to avoid creating QubesHost() too frequently
        if self.maxmem is None and not vmm.offline_mode:
            qubes_host = QubesHost()
            total_mem_mb = qubes_host.memory_total/1024
            self.maxmem = total_mem_mb/2

        # Linux specific cap: max memory can't scale beyond 10.79*init_mem
        if self.maxmem > self.memory * 10:
            self.maxmem = self.memory * 10

        # Always set whether meminfo-writer should be active or not
        if 'meminfo-writer' not in self.services:
            self.services['meminfo-writer'] = not (len(self.pcidevs) > 0)

        # Additionally force meminfo-writer disabled when the VM has PCI devices
        if len(self.pcidevs) > 0:
            self.services['meminfo-writer'] = False

        if 'xml_element' not in kwargs:
            # New VM, disable updates check if requested for new VMs
            if os.path.exists(qubes.qubesutils.UPDATES_DEFAULT_VM_DISABLE_FLAG):
                self.services['qubes-update-check'] = False

        # Initialize VM image storage class
        self.storage = get_pool(self.pool_name, self).getStorage()
        self.dir_path = self.storage.vmdir
        self.icon_path = os.path.join(self.storage.vmdir, 'icon.png')
        self.conf_file = os.path.join(self.storage.vmdir, self.name + '.conf')

        if hasattr(self, 'kernels_dir'):
            modules_path = os.path.join(self.kernels_dir,
                                        "modules.img")
            if os.path.exists(modules_path):
                self.storage.modules_img = modules_path
                self.storage.modules_img_rw = self.kernel is None

        # Some additional checks for template based VM
        if self.template is not None:
            if not self.template.is_template():
                print >> sys.stderr, "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
                    format(self.template.qid)
                return
            self.template.appvms[self.qid] = self
        else:
            assert self.root_img is not None, "Missing root_img for standalone VM!"

        self.log = logging.getLogger('qubes.vm.{}'.format(self.qid))
        self.log.debug('instantiated name={!r} class={}'.format(
            self.name, self.__class__.__name__))

        # fire hooks
        for hook in self.hooks_init:
            hook(self)

    def __repr__(self):
        return '<{} at {:#0x} qid={!r} name={!r}>'.format(
            self.__class__.__name__,
            id(self),
            self.qid,
            self.name)

    def absolute_path(self, arg, default):
        if arg is not None and os.path.isabs(arg):
            return arg
        elif self.dir_path is not None:
            return os.path.join(self.dir_path, (arg if arg is not None else default))
        else:
            # cannot provide any meaningful value without dir_path; this is
            # only to import some older format of `qubes.xml` (for example
            # during migration from an older release)
            return None

    def _absolute_path_gen(self, default):
        return lambda value: self.absolute_path(value, default)

    def relative_path(self, arg):
        return arg.replace(self.dir_path + '/', '')

    @property
    def qid(self):
        return self.__qid

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, new_label):
        self._label = new_label
        if self.icon_path:
            try:
                os.remove(self.icon_path)
            except:
                pass
            if hasattr(os, "symlink"):
                os.symlink(new_label.icon_path, self.icon_path)
                # FIXME: some os-independent wrapper?
                subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate'])
            else:
                shutil.copy(new_label.icon_path, self.icon_path)

        # fire hooks
        for hook in self.hooks_label_setter:
            hook(self, new_label)

    @property
    def netvm(self):
        return self._netvm

    # Don't know how to properly call the setter from a base class, so work
    # around it...
    @netvm.setter
    def netvm(self, new_netvm):
        self._set_netvm(new_netvm)
        # fire hooks
        for hook in self.hooks_netvm_setter:
            hook(self, new_netvm)

    def _set_netvm(self, new_netvm):
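        # Detach from the old NetVM first, then attach to the new one; for a
        # running VM this also refreshes the QubesDB network entries below.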
        self.log.debug('netvm = {!r}'.format(new_netvm))
        if new_netvm and not new_netvm.is_netvm():
            raise ValueError("Vm {!r} does not provide network".format(
                new_netvm))
        if self.is_running() and new_netvm is not None and not new_netvm.is_running():
            raise QubesException("Cannot dynamically attach to stopped NetVM")
        if self.netvm is not None:
            self.netvm.connected_vms.pop(self.qid)
            if self.is_running():
                self.detach_network()

                if hasattr(self.netvm, 'post_vm_net_detach'):
                    self.netvm.post_vm_net_detach(self)

        if new_netvm is not None:
            new_netvm.connected_vms[self.qid] = self

        self._netvm = new_netvm

        if new_netvm is None:
            return

        if self.is_running():
            # refresh IP, DNS etc
            self.create_qubesdb_entries()
            self.attach_network()
            if hasattr(self.netvm, 'post_vm_net_attach'):
                self.netvm.post_vm_net_attach(self)

    @property
    def ip(self):
        if self.netvm is not None:
            return self.netvm.get_ip_for_vm(self.qid)
        else:
            return None

    @property
    def netmask(self):
        if self.netvm is not None:
            return self.netvm.netmask
        else:
            return None

    @property
    def gateway(self):
        # This is the gateway IP for _other_ VMs, so it makes sense only in
        # NetVMs
        return None

    @property
    def secondary_dns(self):
        if self.netvm is not None:
            return self.netvm.secondary_dns
        else:
            return None

    @property
    def vif(self):
        if self.xid < 0:
            return None
        if self.netvm is None:
            return None
        return "vif{0}.+".format(self.xid)

    @property
    def mac(self):
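        # If no MAC was set explicitly, derive a stable one from the qid;
        # 00:16:3E is the Xen Project OUI.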
        if self._mac is not None:
            return self._mac
        else:
            return "00:16:3E:5E:6C:{qid:02X}".format(qid=self.qid)

    @mac.setter
    def mac(self, new_mac):
        self._mac = new_mac

    @property
    def kernel(self):
        return self._kernel

    @kernel.setter
    def kernel(self, new_value):
        if new_value is not None:
            if not os.path.exists(os.path.join(system_path[
                    'qubes_kernels_base_dir'], new_value)):
                raise QubesException("Kernel '%s' not installed" % new_value)
            for f in ('vmlinuz', 'initramfs'):
                if not os.path.exists(os.path.join(
                        system_path['qubes_kernels_base_dir'], new_value, f)):
                    raise QubesException(
                        "Kernel '%s' not properly installed: missing %s "
                        "file" % (new_value, f))
        self._kernel = new_value
        self.uses_default_kernel = False

    @property
    def updateable(self):
        return self.template is None

    # Left for compatibility
    def is_updateable(self):
        return self.updateable

    @property
    def default_user(self):
        if self.template is not None:
            return self.template.default_user
        else:
            return self._default_user

    @default_user.setter
    def default_user(self, value):
        self._default_user = value

    def is_networked(self):
        if self.is_netvm():
            return True

        if self.netvm is not None:
            return True
        else:
            return False

    def verify_name(self, name):
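        # A valid name is a plain string (nothing that parses as a bool, int
        # or none), at most 31 characters, not 'lost+found', and must match
        # the pattern checked at the end of this method.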
        if not isinstance(self.__basic_parse_xml_attr(name), str):
            return False
        if len(name) > 31:
            return False
        if name == 'lost+found':
            # avoid conflict when /var/lib/qubes/appvms is mounted on a
            # separate partition
            return False
        return re.match(r"^[a-zA-Z][a-zA-Z0-9_.-]*$", name) is not None

    def pre_rename(self, new_name):
        if self.autostart:
            subprocess.check_call(['sudo', 'systemctl', '-q', 'disable',
                                   'qubes-vm@{}.service'.format(self.name)])
        # fire hooks
        for hook in self.hooks_pre_rename:
            hook(self, new_name)

    def set_name(self, name):
        self.log.debug('name = {!r}'.format(name))
        if self.is_running():
            raise QubesException("Cannot change name of running VM!")

        if not self.verify_name(name):
            raise QubesException("Invalid characters in VM name")

        if self.installed_by_rpm:
            raise QubesException("Cannot rename VM installed by RPM -- first clone the VM and then use yum to remove the package.")

        assert self._collection is not None
        if self._collection.get_vm_by_name(name):
            raise QubesException("VM with this name already exists")

        self.pre_rename(name)
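        # Undefine the old libvirt domain; it is re-defined under the new
        # name by _update_libvirt_domain() at the end of this method.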
        try:
            self.libvirt_domain.undefine()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                pass
            else:
                raise
        if self._qdb_connection:
            self._qdb_connection.close()
            self._qdb_connection = None

        new_conf = os.path.join(self.dir_path, name + '.conf')
        if os.path.exists(self.conf_file):
            os.rename(self.conf_file, new_conf)
        old_dirpath = self.dir_path
        self.storage.rename(self.name, name)
        new_dirpath = self.storage.vmdir
        self.dir_path = new_dirpath
        old_name = self.name
        self.name = name
        if self.conf_file is not None:
            self.conf_file = new_conf.replace(old_dirpath, new_dirpath)
        if self.icon_path is not None:
            self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath)
        if hasattr(self, 'kernels_dir') and self.kernels_dir is not None:
            self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath)
        if self.firewall_conf is not None:
            self.firewall_conf = self.firewall_conf.replace(old_dirpath,
                                                            new_dirpath)

        self._update_libvirt_domain()
        self.post_rename(old_name)

    def post_rename(self, old_name):
        if self.autostart:
            # force setter to be called again
            self.autostart = self.autostart
        # fire hooks
        for hook in self.hooks_post_rename:
            hook(self, old_name)

    @property
    def internal(self):
        return self._internal

    @internal.setter
    def internal(self, value):
        oldvalue = self._internal
        self._internal = value
        self.post_set_attr('internal', value, oldvalue)

    @property
    def dispvm_netvm(self):
        if self.uses_default_dispvm_netvm:
            return self.netvm
        else:
            if isinstance(self._dispvm_netvm, int):
                return self._collection[self._dispvm_netvm]
            else:
                return self._dispvm_netvm

    @dispvm_netvm.setter
    def dispvm_netvm(self, value):
        if value and not value.is_netvm():
            raise ValueError("Vm {!r} does not provide network".format(
                value))
        self._dispvm_netvm = value

    @property
    def autostart(self):
        return self._autostart

    @autostart.setter
    def autostart(self, value):
        if value:
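            # Enable by symlinking the templated unit straight into
            # multi-user.target.wants rather than via 'systemctl enable'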
            retcode = subprocess.call(["sudo", "ln", "-sf",
                "/usr/lib/systemd/system/qubes-vm@.service",
                "/etc/systemd/system/multi-user.target.wants/qubes-vm@%s.service" % self.name])
        else:
            retcode = subprocess.call(["sudo", "systemctl", "disable", "qubes-vm@%s.service" % self.name])
        if retcode != 0:
            raise QubesException("Failed to set autostart for VM via systemctl")
        self._autostart = bool(value)

    @classmethod
    def is_template_compatible(cls, template):
        """Check if the given VM can be a template for this VM"""
        # FIXME: check if the value is an instance of QubesTemplateVM, not the
        # VM type. The problem is that while this file is loaded,
        # QubesTemplateVM is not defined yet.
        if template and (not template.is_template() or template.type != "TemplateVM"):
            return False
        return True

    @property
    def template(self):
        return self._template

    @template.setter
    def template(self, value):
        if self._template is None and value is not None:
            raise QubesException("Cannot set template for standalone VM")
        if value and not self.is_template_compatible(value):
            raise QubesException("Incompatible template type %s with VM of type %s" % (value.type, self.type))
        self._template = value

    def is_template(self):
        return False

    def is_appvm(self):
        return False

    def is_netvm(self):
        return False

    def is_proxyvm(self):
        return False

    def is_disposablevm(self):
        return False

    @property
    def qdb(self):
        if self._qdb_connection is None:
            from qubes.qdb import QubesDB
            self._qdb_connection = QubesDB(self.name)
        return self._qdb_connection

    @property
    def xid(self):
        try:
            return self.libvirt_domain.ID()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return -1
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def get_xid(self):
        # obsoleted
        return self.xid

    def _update_libvirt_domain(self):
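        # (Re)define the domain in libvirt from the generated XML config and
        # record the UUID libvirt assigned to it.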
        domain_config = self.create_config_file()
        try:
            self._libvirt_domain = vmm.libvirt_conn.defineXML(domain_config)
        except libvirt.libvirtError as e:
            # shouldn't this be in the QubesHVm implementation?
            if e.get_error_code() == libvirt.VIR_ERR_OS_TYPE and \
                    e.get_str2() == 'hvm':
                raise QubesException("HVM domains not supported on this "
                                     "machine. Check BIOS settings for "
                                     "VT-x/AMD-V extensions.")
            else:
                raise e
        self.uuid = uuid.UUID(bytes=self._libvirt_domain.UUID())

    @property
    def libvirt_domain(self):
        if self._libvirt_domain is None:
            if self.uuid is not None:
                self._libvirt_domain = vmm.libvirt_conn.lookupByUUID(self.uuid.bytes)
            else:
                self._libvirt_domain = vmm.libvirt_conn.lookupByName(self.name)
                self.uuid = uuid.UUID(bytes=self._libvirt_domain.UUID())
        return self._libvirt_domain

    def get_uuid(self):
        # obsoleted
        return self.uuid

    def refresh(self):
        self._libvirt_domain = None
        self._qdb_connection = None

    def get_mem(self):
        if dry_run:
            return 666

        try:
            if not self.libvirt_domain.isActive():
                return 0
            return self.libvirt_domain.info()[1]
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return 0
            # libxl_domain_info failed - domain no longer exists
            elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
                return 0
            elif e.get_error_code() is None:  # unknown...
                return 0
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def get_cputime(self):
        if dry_run:
            return 666

        try:
            if not self.libvirt_domain.isActive():
                return 0
            return self.libvirt_domain.info()[4]
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return 0
            # libxl_domain_info failed - domain no longer exists
            elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
                return 0
            elif e.get_error_code() is None:  # unknown...
                return 0
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def get_mem_static_max(self):
        if dry_run:
            return 666

        try:
            return self.libvirt_domain.maxMemory()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return 0
            else:
                raise

    def get_prefmem(self):
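        # Unit note (an assumption from the conversions below): libvirt's
        # maxMemory() reports KiB while qmemman's DomainState works in bytes,
        # hence the *1024 on the way in and /1024 on the result.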
        # TODO: qmemman is still Xen-specific
        untrusted_meminfo_key = vmm.xs.read('',
                                            '/local/domain/%s/memory/meminfo'
                                            % self.xid)
        if untrusted_meminfo_key is None or untrusted_meminfo_key == '':
            return 0
        domain = qmemman.DomainState(self.xid)
        qmemman_algo.refresh_meminfo_for_domain(domain, untrusted_meminfo_key)
        domain.memory_maximum = self.get_mem_static_max()*1024
        return qmemman_algo.prefmem(domain)/1024

    def get_per_cpu_time(self):
        if dry_run:
            import random
            return random.random() * 100

        try:
            if self.libvirt_domain.isActive():
                return self.libvirt_domain.getCPUStats(
                    libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)[0]['cpu_time']/10**9
            else:
                return 0
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return 0
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def get_disk_utilization_root_img(self):
        return qubes.qubesutils.get_disk_usage(self.root_img)

    def get_root_img_sz(self):
        if not os.path.exists(self.root_img):
            return 0

        return os.path.getsize(self.root_img)

    def get_power_state(self):
        if dry_run:
            return "NA"

        try:
            libvirt_domain = self.libvirt_domain
            if libvirt_domain.isActive():
                if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
                    return "Paused"
                elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_CRASHED:
                    return "Crashed"
                elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTDOWN:
                    return "Halting"
                elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
                    return "Dying"
                elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PMSUSPENDED:
                    return "Suspended"
                else:
                    if not self.is_fully_usable():
                        return "Transient"
                    else:
                        return "Running"
            else:
                return 'Halted'
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return "Halted"
            else:
                raise

    def is_guid_running(self):
        xid = self.xid
        if xid < 0:
            return False
        if not os.path.exists('/var/run/qubes/guid-running.%d' % xid):
            return False
        return True

    def is_qrexec_running(self):
        if self.xid < 0:
            return False
        return os.path.exists('/var/run/qubes/qrexec.%s' % self.name)

    def is_fully_usable(self):
        # A running gui-daemon also implies that the VM is running
        if not self.is_guid_running():
            return False
        if not self.is_qrexec_running():
            return False
        return True

    def is_running(self):
        if vmm.offline_mode:
            return False
        try:
            if self.libvirt_domain.isActive():
                return True
            else:
                return False
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return False
            # libxl_domain_info failed - domain no longer exists
            elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
                return False
            elif e.get_error_code() is None:  # unknown...
                return False
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def is_paused(self):
        try:
            if self.libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
                return True
            else:
                return False
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return False
            # libxl_domain_info failed - domain no longer exists
            elif e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
                return False
            elif e.get_error_code() is None:  # unknown...
                return False
            else:
                print >>sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

    def get_start_time(self):
        if not self.is_running():
            return None

        # TODO
        uuid = self.uuid

        start_time = vmm.xs.read('', "/vm/%s/start_time" % str(uuid))
        if start_time != '':
            return datetime.datetime.fromtimestamp(float(start_time))
        else:
            return None

    def is_outdated(self):
        # Makes sense only for a VM based on a template
        if self.template is None:
            return False

        if not self.is_running():
            return False

        if not hasattr(self.template, 'rootcow_img'):
            return False

        rootimg_inode = os.stat(self.template.root_img)
        try:
            rootcow_inode = os.stat(self.template.rootcow_img)
        except OSError:
            # The only case when rootcow_img doesn't exist is in the middle of
            # commit_changes, so the VM is outdated right now
            return True
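
        # The device-mapper snapshot device name encodes the (device, inode)
        # pairs of root.img and root-cow.img; comparing it with the device the
        # VM actually uses tells whether the template root image has changed
        # underneath the running VM.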
        current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format(
            rootimg_inode[2], rootimg_inode[1],
            rootcow_inode[2], rootcow_inode[1])

        # FIXME
        # 51712 (0xCA00) is xvda
        # backend node name not available through xenapi :(
        used_dmdev = vmm.xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.xid))

        return used_dmdev != current_dmdev

    @property
    def private_img(self):
        return self.storage.private_img

    @property
    def root_img(self):
        return self.storage.root_img

    @property
    def volatile_img(self):
        return self.storage.volatile_img

    def get_disk_utilization(self):
        return qubes.qubesutils.get_disk_usage(self.dir_path)

    def get_disk_utilization_private_img(self):
        return qubes.qubesutils.get_disk_usage(self.private_img)

    def get_private_img_sz(self):
        return self.storage.get_private_img_sz()

    def resize_private_img(self, size):
        assert size >= self.get_private_img_sz(), "Cannot shrink private.img"

        # resize the image
        self.storage.resize_private_img(size)

        # and then the filesystem
        retcode = 0
        if self.is_running():
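            # Poll until the in-VM block device reports the new size (the
            # resize takes a moment to propagate), then grow the filesystem.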
            retcode = self.run("while [ \"`blockdev --getsize64 /dev/xvdb`\" -lt {0} ]; do ".format(size) +
                "head /dev/xvdb > /dev/null; sleep 0.2; done; resize2fs /dev/xvdb", user="root", wait=True)
        if retcode != 0:
            raise QubesException("resize2fs failed")

    # FIXME: should be outside of QubesVm?
    def get_timezone(self):
        # fc18
        if os.path.islink('/etc/localtime'):
            return '/'.join(os.readlink('/etc/localtime').split('/')[-2:])
        # <=fc17
        elif os.path.exists('/etc/sysconfig/clock'):
            clock_config = open('/etc/sysconfig/clock', "r")
            clock_config_lines = clock_config.readlines()
            clock_config.close()
            zone_re = re.compile(r'^ZONE="(.*)"')
            for line in clock_config_lines:
                line_match = zone_re.match(line)
                if line_match:
                    return line_match.group(1)
        else:
            # last resort: some applications make /etc/localtime a hardlink
            # instead of a symlink...
            tz_info = os.stat('/etc/localtime')
            if not tz_info:
                return None
            if tz_info.st_nlink > 1:
                p = subprocess.Popen(['find', '/usr/share/zoneinfo',
                                      '-inum', str(tz_info.st_ino),
                                      '-print', '-quit'],
                                     stdout=subprocess.PIPE)
                tz_path = p.communicate()[0].strip()
                return tz_path.replace('/usr/share/zoneinfo/', '')
        return None

    def cleanup_vifs(self):
        """
        Xend does not remove the vif when the backend domain is down, so we
        must do it manually
        """

        # FIXME: remove this?
        if not self.is_running():
            return

        dev_basepath = '/local/domain/%d/device/vif' % self.xid
        for dev in (vmm.xs.ls('', dev_basepath) or []):
            # check if the backend domain is alive
            backend_xid = int(vmm.xs.read('', '%s/%s/backend-id' % (dev_basepath, dev)))
            if backend_xid in vmm.libvirt_conn.listDomainsID():
                # check if the device is still active
                if vmm.xs.read('', '%s/%s/state' % (dev_basepath, dev)) == '4':
                    continue
            # remove the dead device
            vmm.xs.rm('', '%s/%s' % (dev_basepath, dev))

    def create_qubesdb_entries(self):
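        # Publish this VM's identity and network configuration to its QubesDB,
        # where in-VM scripts can read them (keys under /qubes-*).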
        if dry_run:
            return

        self.qdb.write("/name", self.name)
        self.qdb.write("/qubes-vm-type", self.type)
        self.qdb.write("/qubes-vm-updateable", str(self.updateable))
        self.qdb.write("/qubes-vm-persistence",
                       "full" if self.updateable else "rw-only")
        self.qdb.write("/qubes-base-template",
                       self.template.name if self.template else '')

        if self.is_netvm():
            self.qdb.write("/qubes-netvm-gateway", self.gateway)
            self.qdb.write("/qubes-netvm-primary-dns", self.gateway)
            self.qdb.write("/qubes-netvm-secondary-dns", self.secondary_dns)
            self.qdb.write("/qubes-netvm-netmask", self.netmask)
            self.qdb.write("/qubes-netvm-network", self.network)

        if self.netvm is not None:
            self.qdb.write("/qubes-ip", self.ip)
            self.qdb.write("/qubes-netmask", self.netvm.netmask)
            self.qdb.write("/qubes-gateway", self.netvm.gateway)
            self.qdb.write("/qubes-primary-dns", self.netvm.gateway)
            self.qdb.write("/qubes-secondary-dns", self.netvm.secondary_dns)

        tzname = self.get_timezone()
        if tzname:
            self.qdb.write("/qubes-timezone", tzname)

        for srv in self.services.keys():
            # convert True/False to "1"/"0"
            self.qdb.write("/qubes-service/{0}".format(srv),
                           str(int(self.services[srv])))

        self.qdb.write("/qubes-block-devices", '')
        self.qdb.write("/qubes-usb-devices", '')
        self.qdb.write("/qubes-debug-mode", str(int(self.debug)))

        self.provide_random_seed_to_vm()

        # TODO: Currently the whole qmemman is quite Xen-specific, so stay
        # with xenstore for it until decided otherwise
        if qmemman_present:
            vmm.xs.set_permissions('', '/local/domain/{0}/memory'.format(self.xid),
                                   [{ 'dom': self.xid }])

        # fire hooks
        for hook in self.hooks_create_qubesdb_entries:
            hook(self)

    def provide_random_seed_to_vm(self):
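        # Hand the VM 64 bytes of entropy from dom0, whitened through SHA-512
        # and base64-encoded for the QubesDB write.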
        f = open('/dev/urandom', 'r')
        s = f.read(64)
        if len(s) != 64:
            raise IOError("failed to read seed from /dev/urandom")
        f.close()
        self.qdb.write("/qubes-random-seed", base64.b64encode(hashlib.sha512(s).digest()))

    def _format_net_dev(self, ip, mac, backend):
        template = "    <interface type='ethernet'>\n" \
                   "        <mac address='{mac}'/>\n" \
                   "        <ip address='{ip}'/>\n" \
                   "        <script path='vif-route-qubes'/>\n" \
                   "        <backenddomain name='{backend}'/>\n" \
                   "    </interface>\n"
        return template.format(ip=ip, mac=mac, backend=backend)

    def _format_pci_dev(self, address):
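        # 'address' is expected in short BDF form, e.g. "00:1a.0"
        # (bus:slot.function, hexadecimal).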
        template = "    <hostdev type='pci' managed='yes'{strictreset}>\n" \
                   "        <source>\n" \
                   "            <address bus='0x{bus}' slot='0x{slot}' function='0x{fun}'/>\n" \
                   "        </source>\n" \
                   "    </hostdev>\n"
        dev_match = re.match(r'([0-9a-f]+):([0-9a-f]+)\.([0-9a-f]+)', address)
        if not dev_match:
            raise QubesException("Invalid PCI device address: %s" % address)
        return template.format(
            bus=dev_match.group(1),
            slot=dev_match.group(2),
            fun=dev_match.group(3),
            strictreset=("" if self.pci_strictreset else
                         " nostrictreset='yes'"),
        )

    def get_config_params(self):
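        # Collect the substitution values for the libvirt XML config template;
        # create_config_file() feeds this dict to str.format().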
        args = {}
        args['name'] = self.name
        if hasattr(self, 'kernels_dir'):
            args['kerneldir'] = self.kernels_dir
        args['uuidnode'] = "<uuid>%s</uuid>" % str(self.uuid) if self.uuid else ""
        args['vmdir'] = self.dir_path
        args['pcidevs'] = ''.join(map(self._format_pci_dev, self.pcidevs))
        args['mem'] = str(self.memory)
        if self.maxmem < self.memory:
            args['mem'] = str(self.maxmem)
        args['maxmem'] = str(self.maxmem)
        if 'meminfo-writer' in self.services and not self.services['meminfo-writer']:
            # If dynamic memory management is disabled, set maxmem=mem
            args['maxmem'] = args['mem']
        args['vcpus'] = str(self.vcpus)
        if self.netvm is not None:
            args['ip'] = self.ip
            args['mac'] = self.mac
            args['gateway'] = self.netvm.gateway
            args['dns1'] = self.netvm.gateway
            args['dns2'] = self.secondary_dns
            args['netmask'] = self.netmask
            args['netdev'] = self._format_net_dev(self.ip, self.mac, self.netvm.name)
            args['network_begin'] = ''
            args['network_end'] = ''
            args['no_network_begin'] = '<!--'
            args['no_network_end'] = '-->'
        else:
            args['ip'] = ''
            args['mac'] = ''
            args['gateway'] = ''
            args['dns1'] = ''
            args['dns2'] = ''
            args['netmask'] = ''
            args['netdev'] = ''
            args['network_begin'] = '<!--'
            args['network_end'] = '-->'
            args['no_network_begin'] = ''
            args['no_network_end'] = ''
        args.update(self.storage.get_config_params())
        if hasattr(self, 'kernelopts'):
            args['kernelopts'] = self.kernelopts
            if self.debug:
                print >> sys.stderr, "--> Debug mode: adding 'earlyprintk=xen' to kernel opts"
                args['kernelopts'] += ' earlyprintk=xen'

        # fire hooks
        for hook in self.hooks_get_config_params:
            args = hook(self, args)

        return args

    @property
    def uses_custom_config(self):
        return self.conf_file != self.absolute_path(self.name + ".conf", None)

    def create_config_file(self, file_path = None, prepare_dvm = False):
        if file_path is None:
            file_path = self.conf_file
        if self.uses_custom_config:
            conf_appvm = open(file_path, "r")
            domain_config = conf_appvm.read()
            conf_appvm.close()
            return domain_config

        f_conf_template = open(self.config_file_template, 'r')
        conf_template = f_conf_template.read()
        f_conf_template.close()

        template_params = self.get_config_params()
        if prepare_dvm:
            template_params['name'] = '%NAME%'
            template_params['privatedev'] = ''
            template_params['netdev'] = re.sub(r"address='[0-9.]*'",
                "address='%IP%'", template_params['netdev'])
        domain_config = conf_template.format(**template_params)

        # FIXME: This is only for debugging purposes
        old_umask = os.umask(002)
        try:
            if os.path.exists(file_path):
                os.unlink(file_path)
            conf_appvm = open(file_path, "w")
            conf_appvm.write(domain_config)
            conf_appvm.close()
        except:
            # Ignore errors
            pass
        finally:
            os.umask(old_umask)

        return domain_config
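
    # Hedged usage sketch ('vm' is illustrative):
    #
    #   domain_config = vm.create_config_file()  # writes vm.conf_file
    #   # DispVM template variant, with %NAME%/%IP% placeholders left in:
    #   dvm_config = vm.create_config_file(prepare_dvm=True)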

    def create_on_disk(self, verbose=False, source_template = None):
        self.log.debug('create_on_disk(source_template={!r})'.format(
            source_template))
        if source_template is None:
            source_template = self.template
        assert source_template is not None

        if dry_run:
            return

        self.storage.create_on_disk(verbose, source_template)

        if self.updateable:
            kernels_dir = source_template.kernels_dir
            if verbose:
                print >> sys.stderr, \
                    "--> Copying the kernel (set kernel \"none\" to use it): {0}".\
                    format(kernels_dir)

            os.mkdir(self.dir_path + '/kernels')
            for f in ("vmlinuz", "initramfs", "modules.img"):
                shutil.copy(os.path.join(kernels_dir, f),
                            os.path.join(self.dir_path, vm_files["kernels_subdir"], f))

        if verbose:
            print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(
                self.icon_path, self.label.icon_path)
        if hasattr(os, "symlink"):
            os.symlink(self.label.icon_path, self.icon_path)
        else:
            shutil.copy(self.label.icon_path, self.icon_path)

        # Make sure that we have UUID allocated
        if not vmm.offline_mode:
            self._update_libvirt_domain()
        else:
            self.uuid = uuid.uuid4()

        # fire hooks
        for hook in self.hooks_create_on_disk:
            hook(self, verbose, source_template=source_template)

    def get_clone_attrs(self):
        attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm',
                 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts',
                 'services', 'vcpus', '_mac', 'pcidevs', 'include_in_backups',
                 '_label', 'default_user', 'qrexec_timeout']

        # fire hooks
        for hook in self.hooks_get_clone_attrs:
            attrs = hook(self, attrs)

        return attrs

    def clone_attrs(self, src_vm, fail_on_error=True):
        self._do_not_reset_firewall = True
        for prop in self.get_clone_attrs():
            try:
                val = getattr(src_vm, prop)
                if isinstance(val, dict):
                    val = val.copy()
                setattr(self, prop, val)
            except Exception as e:
                if fail_on_error:
                    self._do_not_reset_firewall = False
                    raise
                else:
                    print >> sys.stderr, "WARNING: %s" % str(e)
        self._do_not_reset_firewall = False
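
    # Hedged usage sketch ('new_vm'/'src_vm' are illustrative):
    #
    #   new_vm.clone_attrs(src_vm)                       # raise on first error
    #   new_vm.clone_attrs(src_vm, fail_on_error=False)  # warn and continue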

    def clone_disk_files(self, src_vm, verbose):
        if dry_run:
            return

        if src_vm.is_running():
            raise QubesException("Attempt to clone a running VM!")

        self.storage.clone_disk_files(src_vm, verbose)

        if src_vm.icon_path is not None and self.icon_path is not None:
            if os.path.exists(src_vm.dir_path):
                if os.path.islink(src_vm.icon_path):
                    icon_path = os.readlink(src_vm.icon_path)
                    if verbose:
                        print >> sys.stderr, \
                            "--> Creating icon symlink: {0} -> {1}".format(
                                self.icon_path, icon_path)
                    os.symlink(icon_path, self.icon_path)
                else:
                    if verbose:
                        print >> sys.stderr, \
                            "--> Copying icon: {0} -> {1}".format(
                                src_vm.icon_path, self.icon_path)
                    shutil.copy(src_vm.icon_path, self.icon_path)

        if src_vm.has_firewall():
            self.write_firewall_conf(src_vm.get_firewall_conf())

        # Make sure that we have UUID allocated
        self._update_libvirt_domain()

        # fire hooks
        for hook in self.hooks_clone_disk_files:
            hook(self, src_vm, verbose)

    def verify_files(self):
        if dry_run:
            return

        self.storage.verify_files()

        if not os.path.exists(os.path.join(self.kernels_dir, 'vmlinuz')):
            raise QubesException(
                "VM kernel does not exist: {0}".format(
                    os.path.join(self.kernels_dir, 'vmlinuz')))

        if not os.path.exists(os.path.join(self.kernels_dir, 'initramfs')):
            raise QubesException(
                "VM initramfs does not exist: {0}".format(
                    os.path.join(self.kernels_dir, 'initramfs')))

        # fire hooks
        for hook in self.hooks_verify_files:
            hook(self)

        return True

    def remove_from_disk(self):
        self.log.debug('remove_from_disk()')
        if dry_run:
            return

        # fire hooks
        for hook in self.hooks_remove_from_disk:
            hook(self)

        try:
            self.libvirt_domain.undefine()
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                # already undefined
                pass
            else:
                print >> sys.stderr, "libvirt error code: {!r}".format(
                    e.get_error_code())
                raise

        if os.path.exists("/etc/systemd/system/multi-user.target.wants/"
                          "qubes-vm@" + self.name + ".service"):
            retcode = subprocess.call(["sudo", "systemctl", "-q", "disable",
                                       "qubes-vm@" + self.name + ".service"])
            if retcode != 0:
                raise QubesException("Failed to delete autostart entry for VM")

        self.storage.remove_from_disk()

    def write_firewall_conf(self, conf):
        defaults = self.get_firewall_defaults()
        expiring_rules_present = False
        for item in defaults.keys():
            if item not in conf:
                conf[item] = defaults[item]

        root = lxml.etree.Element(
                "QubesFirewallRules",
                policy = "allow" if conf["allow"] else "deny",
                dns = "allow" if conf["allowDns"] else "deny",
                icmp = "allow" if conf["allowIcmp"] else "deny",
                yumProxy = "allow" if conf["allowYumProxy"] else "deny"
        )

        for rule in conf["rules"]:
            # For backward compatibility
            if "proto" not in rule:
                if rule["portBegin"] is not None and rule["portBegin"] > 0:
                    rule["proto"] = "tcp"
                else:
                    rule["proto"] = "any"
            element = lxml.etree.Element(
                    "rule",
                    address=rule["address"],
                    proto=str(rule["proto"]),
            )
            if rule["netmask"] is not None and rule["netmask"] != 32:
                element.set("netmask", str(rule["netmask"]))
            if rule.get("portBegin", None) is not None and \
                    rule["portBegin"] > 0:
                element.set("port", str(rule["portBegin"]))
            if rule.get("portEnd", None) is not None and rule["portEnd"] > 0:
                element.set("toport", str(rule["portEnd"]))
            if "expire" in rule:
                element.set("expire", str(rule["expire"]))
                expiring_rules_present = True

            root.append(element)

        tree = lxml.etree.ElementTree(root)

        try:
            old_umask = os.umask(002)
            with open(self.firewall_conf, 'w') as f:
                tree.write(f, encoding="UTF-8", pretty_print=True)
        except EnvironmentError as err:
            print >> sys.stderr, "{0}: save error: {1}".format(
                os.path.basename(sys.argv[0]), err)
            return False
        finally:
            os.umask(old_umask)

        # Automatically enable/disable 'yum-proxy-setup' service based on
        # allowYumProxy
        if conf['allowYumProxy']:
            self.services['yum-proxy-setup'] = True
        else:
            if 'yum-proxy-setup' in self.services:
                self.services.pop('yum-proxy-setup')

        if expiring_rules_present:
            subprocess.call(["sudo", "systemctl", "start",
                             "qubes-reload-firewall@%s.timer" % self.name])

        return True
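
    # Hedged sketch of the conf dict this method consumes - the same shape
    # get_firewall_conf() below returns; all values here are made up:
    #
    #   conf = {
    #       "allow": False, "allowDns": True,
    #       "allowIcmp": True, "allowYumProxy": False,
    #       "rules": [{"address": "192.0.2.1", "netmask": 32,
    #                  "proto": "tcp", "portBegin": 443, "portEnd": None}],
    #   }
    #   vm.write_firewall_conf(conf)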

    def has_firewall(self):
        return os.path.exists(self.firewall_conf)

    def get_firewall_defaults(self):
        return {
            "rules": list(),
            "allow": True,
            "allowDns": True,
            "allowIcmp": True,
            "allowYumProxy": False
        }

    def get_firewall_conf(self):
        conf = self.get_firewall_defaults()

        try:
            tree = lxml.etree.parse(self.firewall_conf)
            root = tree.getroot()

            conf["allow"] = (root.get("policy") == "allow")
            conf["allowDns"] = (root.get("dns") == "allow")
            conf["allowIcmp"] = (root.get("icmp") == "allow")
            conf["allowYumProxy"] = (root.get("yumProxy") == "allow")

            for element in root:
                rule = {}
                attr_list = ("address", "netmask", "proto", "port", "toport",
                             "expire")

                for attribute in attr_list:
                    rule[attribute] = element.get(attribute)

                if rule["netmask"] is not None:
                    rule["netmask"] = int(rule["netmask"])
                else:
                    rule["netmask"] = 32

                if rule["port"] is not None:
                    rule["portBegin"] = int(rule["port"])
                else:
                    # backward compatibility
                    rule["portBegin"] = 0

                # For backward compatibility
                if rule["proto"] is None:
                    if rule["portBegin"] > 0:
                        rule["proto"] = "tcp"
                    else:
                        rule["proto"] = "any"

                if rule["toport"] is not None:
                    rule["portEnd"] = int(rule["toport"])
                else:
                    rule["portEnd"] = None

                if rule["expire"] is not None:
                    rule["expire"] = int(rule["expire"])
                    if rule["expire"] <= int(datetime.datetime.now().strftime(
                            "%s")):
                        continue
                else:
                    del rule["expire"]

                del rule["port"]
                del rule["toport"]

                conf["rules"].append(rule)

        except EnvironmentError as err:
            return conf
        except (xml.parsers.expat.ExpatError,
                ValueError, LookupError) as err:
            print("{0}: load error: {1}".format(
                os.path.basename(sys.argv[0]), err))
            return None

        return conf
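
    # Hedged sketch of the firewall_conf XML this method parses (as written
    # by write_firewall_conf() above); attribute values are illustrative:
    #
    #   <QubesFirewallRules policy="deny" dns="allow" icmp="allow"
    #                       yumProxy="deny">
    #     <rule address="192.0.2.1" proto="tcp" port="443"/>
    #   </QubesFirewallRules>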

    def pci_add(self, pci):
        self.log.debug('pci_add(pci={!r})'.format(pci))
        if not os.path.exists('/sys/bus/pci/devices/0000:%s' % pci):
            raise QubesException("Invalid PCI device: %s" % pci)
        if self.pcidevs.count(pci):
            # already added
            return
        self.pcidevs.append(pci)
        if self.is_running():
            try:
                subprocess.check_call(['sudo', system_path["qubes_pciback_cmd"], pci])
                subprocess.check_call(['sudo', 'xl', 'pci-attach', str(self.xid), pci])
            except Exception as e:
                print >> sys.stderr, "Failed to attach PCI device on the fly " \
                    "(%s), changes will be seen after VM restart" % str(e)

    def pci_remove(self, pci):
        self.log.debug('pci_remove(pci={!r})'.format(pci))
        if not self.pcidevs.count(pci):
            # not attached
            return
        self.pcidevs.remove(pci)
        if self.is_running():
            p = subprocess.Popen(['xl', 'pci-list', str(self.xid)],
                                 stdout=subprocess.PIPE)
            result = p.communicate()
            m = re.search(r"^(\d+\.\d+)\s+0000:%s$" % pci, result[0],
                          flags=re.MULTILINE)
            if not m:
                print >> sys.stderr, "Device %s already detached" % pci
                return
            vmdev = m.group(1)
            try:
                self.run_service("qubes.DetachPciDevice",
                                 user="root", input="00:%s" % vmdev)
                subprocess.check_call(['sudo', 'xl', 'pci-detach', str(self.xid), pci])
            except Exception as e:
                print >> sys.stderr, "Failed to detach PCI device on the fly " \
                    "(%s), changes will be seen after VM restart" % str(e)

    def run(self, command, user = None, verbose = True, autostart = False,
            notify_function = None,
            passio = False, passio_popen = False, passio_stderr=False,
            ignore_stderr=False, localcmd = None, wait = False, gui = True,
            filter_esc = False):
        """command should be in form 'cmdline'
            When passio_popen=True, a Popen object with stdout connected to a
            pipe is returned.
            When additionally passio_stderr=True, stderr is connected to a
            pipe as well.
            When ignore_stderr=True, stderr is connected to /dev/null.
        """

        self.log.debug(
            'run(command={!r}, user={!r}, passio={!r}, wait={!r})'.format(
                command, user, passio, wait))

        if user is None:
            user = self.default_user
        null = None
        if not self.is_running() and not self.is_paused():
            if not autostart:
                raise QubesException("VM not running")

            try:
                if notify_function is not None:
                    notify_function("info",
                                    "Starting the '{0}' VM...".format(self.name))
                elif verbose:
                    print >> sys.stderr, \
                        "Starting the VM '{0}'...".format(self.name)
                self.start(verbose=verbose, start_guid=gui,
                           notify_function=notify_function)

            except (IOError, OSError, QubesException) as err:
                raise QubesException(
                    "Error while starting the '{0}' VM: {1}".format(
                        self.name, err))
            except MemoryError:
                raise QubesException("Not enough memory to start '{0}' VM! "
                                     "Close one or more running VMs and try "
                                     "again.".format(self.name))

        if self.is_paused():
            raise QubesException("VM is paused")
        if not self.is_qrexec_running():
            raise QubesException(
                "Domain '{}': qrexec not connected.".format(self.name))

        if gui and os.getenv("DISPLAY") is not None and not self.is_guid_running():
            self.start_guid(verbose=verbose, notify_function=notify_function)

        args = [system_path["qrexec_client_path"], "-d", str(self.name),
                "%s:%s" % (user, command)]
        if localcmd is not None:
            args += ["-l", localcmd]
        if filter_esc:
            args += ["-t"]
            if os.isatty(sys.stderr.fileno()):
                args += ["-T"]

        call_kwargs = {}
        if ignore_stderr or not passio:
            null = open("/dev/null", "w+")
            call_kwargs['stderr'] = null
        if not passio:
            call_kwargs['stdin'] = null
            call_kwargs['stdout'] = null

        if passio_popen:
            popen_kwargs = {'stdout': subprocess.PIPE}
            popen_kwargs['stdin'] = subprocess.PIPE
            if passio_stderr:
                popen_kwargs['stderr'] = subprocess.PIPE
            else:
                popen_kwargs['stderr'] = call_kwargs.get('stderr', None)
            p = subprocess.Popen(args, **popen_kwargs)
            if null:
                null.close()
            return p
        if not wait and not passio:
            args += ["-e"]
        retcode = subprocess.call(args, **call_kwargs)
        if null:
            null.close()
        return retcode
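
    # Hedged usage sketch ('vm' and the commands are illustrative):
    #
    #   retcode = vm.run('touch /tmp/foo', wait=True)
    #   p = vm.run('cat', passio_popen=True)  # subprocess.Popen
    #   stdout, _ = p.communicate('some input')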

    def run_service(self, service, source="dom0", user=None,
                    passio_popen=False, input=None, localcmd=None, gui=False,
                    wait=True):
        if bool(input) + bool(passio_popen) + bool(localcmd) > 1:
            raise ValueError("'input', 'passio_popen', 'localcmd' cannot be "
                             "used together")
        if not wait and (localcmd or input):
            raise ValueError("Cannot use wait=False with input or "
                             "localcmd specified")
        if localcmd:
            return self.run("QUBESRPC %s %s" % (service, source),
                            localcmd=localcmd, user=user, wait=wait, gui=gui)
        elif input:
            p = self.run("QUBESRPC %s %s" % (service, source),
                         user=user, wait=wait, gui=gui, passio_popen=True)
            p.communicate(input)
            return p.returncode
        else:
            return self.run("QUBESRPC %s %s" % (service, source),
                            passio_popen=passio_popen, user=user, wait=wait,
                            gui=gui)
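
    # Hedged usage sketch; qubes.DetachPciDevice is the service used by
    # pci_remove() above, the rest is illustrative:
    #
    #   vm.run_service("qubes.DetachPciDevice", user="root", input="00:6.0")
    #   p = vm.run_service("my.Service", passio_popen=True)  # hypothetical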

    def attach_network(self, verbose = False, wait = True, netvm = None):
        self.log.debug('attach_network(netvm={!r})'.format(netvm))
        if dry_run:
            return

        if not self.is_running():
            raise QubesException("VM not running!")

        if netvm is None:
            netvm = self.netvm

        if netvm is None:
            raise QubesException("NetVM not set!")

        if netvm.qid != 0:
            if not netvm.is_running():
                if verbose:
                    print >> sys.stderr, \
                        "--> Starting NetVM {0}...".format(netvm.name)
                netvm.start()

        self.libvirt_domain.attachDevice(
            self._format_net_dev(self.ip, self.mac, self.netvm.name))
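
    # Hedged usage sketch: reconnecting a running VM's network without a
    # restart ('vm' is illustrative):
    #
    #   vm.detach_network()
    #   vm.attach_network(verbose=True)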

    def detach_network(self, verbose = False, netvm = None):
        self.log.debug('detach_network(netvm={!r})'.format(netvm))
        if dry_run:
            return

        if not self.is_running():
            raise QubesException("VM not running!")

        if netvm is None:
            netvm = self.netvm

        if netvm is None:
            raise QubesException("NetVM not set!")

        self.libvirt_domain.detachDevice(self._format_net_dev(
            self.ip, self.mac, self.netvm.name))

    def wait_for_session(self, notify_function = None):
        self.log.debug('wait_for_session()')
        #self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True)

        # Note: user root is redefined to SYSTEM in the Windows agent code
        p = self.run('QUBESRPC qubes.WaitForSession none',
                     user="root", passio_popen=True, gui=False, wait=True)
        p.communicate(input=self.default_user)

    def start_guid(self, verbose = True, notify_function = None,
                   extra_guid_args=None, before_qrexec=False):
        self.log.debug(
            'start_guid(extra_guid_args={!r}, before_qrexec={!r})'.format(
                extra_guid_args, before_qrexec))
        if before_qrexec:
            # On PV, start GUId only after qrexec-daemon
            return

        if verbose:
            print >> sys.stderr, "--> Starting Qubes GUId..."

        guid_cmd = [system_path["qubes_guid_path"],
                    "-d", str(self.xid), "-N", self.name,
                    "-c", self.label.color,
                    "-i", self.label.icon_path,
                    "-l", str(self.label.index)]
        if extra_guid_args is not None:
            guid_cmd += extra_guid_args
        if self.debug:
            guid_cmd += ['-v', '-v']
        elif not verbose:
            guid_cmd += ['-q']
        retcode = subprocess.call(guid_cmd)
        if retcode != 0:
            raise QubesException("Cannot start qubes-guid!")

        if not self.is_qrexec_running():
            return

        try:
            import qubes.monitorlayoutnotify
            if verbose:
                print >> sys.stderr, "--> Sending monitor layout..."
            monitor_layout = qubes.monitorlayoutnotify.get_monitor_layout()
            # Notify the VM only if we've got a non-empty monitor_layout,
            # or else we would break the resolution already set by gui-agent
            if len(monitor_layout) > 0:
                qubes.monitorlayoutnotify.notify_vm(self, monitor_layout)
        except ImportError as e:
            print >> sys.stderr, "ERROR: %s" % e

        if verbose:
            print >> sys.stderr, "--> Waiting for qubes-session..."

        self.wait_for_session(notify_function)

    def start_qrexec_daemon(self, verbose = False, notify_function = None):
        self.log.debug('start_qrexec_daemon()')
        if verbose:
            print >> sys.stderr, "--> Starting the qrexec daemon..."
        qrexec_args = [str(self.xid), self.name, self.default_user]
        if not verbose:
            qrexec_args.insert(0, "-q")
        qrexec_env = os.environ
        qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)
        retcode = subprocess.call([system_path["qrexec_daemon_path"]] +
                                  qrexec_args, env=qrexec_env)
        if retcode != 0:
            raise OSError("Cannot execute qrexec-daemon!")

    def start_qubesdb(self):
        self.log.debug('start_qubesdb()')
        # Kill a stale qubesdb-daemon left over from the previous instance
        # of this VM, if any - such a daemon removes its pid and socket files
        # when it terminates, which would break the newly started daemon
        # (QubesOS/qubes-issues#1241)
        pidfile = '/var/run/qubes/qubesdb.{}.pid'.format(self.name)
        try:
            if os.path.exists(pidfile):
                old_qubesdb_pid = open(pidfile, 'r').read()
                try:
                    os.kill(int(old_qubesdb_pid), signal.SIGTERM)
                except OSError:
                    raise QubesException(
                        "Failed to kill old QubesDB instance (PID {}). "
                        "Terminate it manually and retry. "
                        "If that isn't QubesDB process, "
                        "remove the pidfile: {}".format(old_qubesdb_pid,
                                                        pidfile))
                timeout = 25
                while os.path.exists(pidfile) and timeout:
                    time.sleep(0.2)
                    timeout -= 1
        except IOError:  # ENOENT (pidfile)
            pass

        # force connection to a new daemon
        self._qdb_connection = None

        retcode = subprocess.call([
            system_path["qubesdb_daemon_path"],
            str(self.xid),
            self.name])
        if retcode != 0:
            raise OSError("ERROR: Cannot execute qubesdb-daemon!")

    def request_memory(self, mem_required = None):
        # Overhead of per-VM/per-vcpu Xen structures, taken from
        # OpenStack nova/virt/xenapi/driver.py
        # see https://wiki.openstack.org/wiki/XenServer/Overhead
        # add an extra MB because Nova rounds up to MBs
        MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
        MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
        if mem_required is None:
            mem_required = int(self.memory) * 1024 * 1024
        if qmemman_present:
            qmemman_client = QMemmanClient()
            try:
                mem_required_with_overhead = mem_required + \
                    MEM_OVERHEAD_BASE + self.vcpus * MEM_OVERHEAD_PER_VCPU
                got_memory = qmemman_client.request_memory(
                    mem_required_with_overhead)
            except IOError as e:
                raise IOError("ERROR: Failed to connect to qmemman: %s" %
                              str(e))
            if not got_memory:
                qmemman_client.close()
                raise MemoryError(
                    "ERROR: insufficient memory to start VM '%s'" % self.name)
            return qmemman_client
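
    # Worked example with made-up numbers: for memory=400 (MB) and vcpus=2,
    # the request sent to qmemman is
    #   400*1024*1024 + (3+1)*1024*1024 + 2 * (3*1024*1024/2)
    #   = 419430400 + 4194304 + 3145728 = 426770432 bytes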

    def start(self, verbose = False, preparing_dvm = False, start_guid = True,
              notify_function = None, mem_required = None):
        self.log.debug('start('
            'preparing_dvm={!r}, start_guid={!r}, mem_required={!r})'.format(
                preparing_dvm, start_guid, mem_required))
        if dry_run:
            return

        # Intentionally not is_running() - this check also rejects the
        # "Paused", "Crashed" and "Halting" states
        if self.get_power_state() != "Halted":
            raise QubesException("VM is already running!")

        self.verify_files()

        if self.netvm is not None:
            if self.netvm.qid != 0:
                if not self.netvm.is_running():
                    if verbose:
                        print >> sys.stderr, \
                            "--> Starting NetVM {0}...".format(self.netvm.name)
                    self.netvm.start(verbose=verbose, start_guid=start_guid,
                                     notify_function=notify_function)

        self.storage.prepare_for_vm_startup(verbose=verbose)
        if verbose:
            print >> sys.stderr, \
                "--> Loading the VM (type = {0})...".format(self.type)

        self._update_libvirt_domain()

        qmemman_client = self.request_memory(mem_required)

        # Bind pci devices to pciback driver
        for pci in self.pcidevs:
            try:
                nd = vmm.libvirt_conn.nodeDeviceLookupByName(
                    'pci_0000_' + pci.replace(':', '_').replace('.', '_'))
            except libvirt.libvirtError as e:
                if e.get_error_code() == libvirt.VIR_ERR_NO_NODE_DEVICE:
                    raise QubesException(
                        "PCI device {} does not exist (domain {})".
                        format(pci, self.name))
                else:
                    raise
            try:
                nd.dettach()
            except libvirt.libvirtError as e:
                if e.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR:
                    # already detached
                    pass
                else:
                    raise

        self.libvirt_domain.createWithFlags(libvirt.VIR_DOMAIN_START_PAUSED)

        try:
            if verbose:
                print >> sys.stderr, "--> Starting Qubes DB..."
            self.start_qubesdb()

            xid = self.xid
            self.log.debug('xid={}'.format(xid))

            if preparing_dvm:
                self.services['qubes-dvm'] = True
            if verbose:
                print >> sys.stderr, "--> Setting Qubes DB info for the VM..."
            self.create_qubesdb_entries()

            if verbose:
                print >> sys.stderr, "--> Updating firewall rules..."
            netvm = self.netvm
            while netvm is not None:
                if netvm.is_proxyvm() and netvm.is_running():
                    netvm.write_iptables_qubesdb_entry()
                netvm = netvm.netvm

            # fire hooks
            for hook in self.hooks_start:
                hook(self, verbose=verbose, preparing_dvm=preparing_dvm,
                     start_guid=start_guid, notify_function=notify_function)
        except:
            self.force_shutdown()
            raise

        if verbose:
            print >> sys.stderr, "--> Starting the VM..."
        self.libvirt_domain.resume()

        # close() is not really needed, because the descriptor is
        # close-on-exec anyway; the reason to postpone close() is that xl is
        # possibly not done constructing the domain after its main process
        # exits, so we close() only when we know the domain is up - the
        # successful unpause is some indicator of it
        if qmemman_present:
            qmemman_client.close()

        extra_guid_args = []
        if preparing_dvm:
            # Run the GUI daemon in "invisible" mode, so applications started
            # by the prerun script will not disturb the user
            extra_guid_args = ['-I']
        elif not os.path.exists('/var/run/shm.id'):
            # Start the GUI daemon only when shmoverride is loaded; unless
            # preparing a DispVM, where it isn't needed because of the
            # "invisible" mode
            start_guid = False

        if start_guid:
            self.start_guid(verbose=verbose, notify_function=notify_function,
                            before_qrexec=True, extra_guid_args=extra_guid_args)

        if not preparing_dvm:
            self.start_qrexec_daemon(verbose=verbose,
                                     notify_function=notify_function)

        if start_guid:
            self.start_guid(verbose=verbose, notify_function=notify_function,
                            extra_guid_args=extra_guid_args)

        return xid
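
    # Hedged usage sketch - the typical caller path (e.g. a qvm-start tool);
    # any failure between domain creation and resume() force-shutdowns the
    # half-built domain:
    #
    #   xid = vm.start(verbose=True)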

    def _cleanup_zombie_domains(self):
        """
        This function is a workaround for broken libxl (which leaves a not
        fully created domain on failure) and for vchan behaviour on domain
        crash
        @return: None
        """
        xc = self.get_xc_dominfo()
        if xc and xc['dying'] == 1:
            # GUId still running?
            guid_pidfile = '/var/run/qubes/guid-running.%d' % xc['domid']
            if os.path.exists(guid_pidfile):
                guid_pid = open(guid_pidfile).read().strip()
                os.kill(int(guid_pid), signal.SIGTERM)
            # qrexec still running?
            if self.is_qrexec_running():
                #TODO: kill qrexec daemon
                pass

    def shutdown(self, force=False, xid = None):
        self.log.debug('shutdown()')
        if dry_run:
            return

        if not self.is_running():
            raise QubesException("VM already stopped!")

        # try to gracefully detach PCI devices before shutdown, to mitigate
        # timeouts on forcible detach at domain destroy; if that fails, too bad
        try:
            for pcidev in self.pcidevs:
                self.libvirt_domain.detachDevice(self._format_pci_dev(pcidev))
        except libvirt.libvirtError as e:
            print >> sys.stderr, "WARNING: {}, continuing VM shutdown " \
                "anyway".format(str(e))

        self.libvirt_domain.shutdown()

    def force_shutdown(self, xid = None):
        self.log.debug('force_shutdown()')
        if dry_run:
            return

        if not self.is_running() and not self.is_paused():
            raise QubesException("VM already stopped!")

        self.libvirt_domain.destroy()
        self.refresh()

    def suspend(self):
        self.log.debug('suspend()')
        if dry_run:
            return

        if not self.is_running() and not self.is_paused() or \
                self.get_power_state() == "Suspended":
            raise QubesException("VM not running!")

        if len(self.pcidevs) > 0:
            self.libvirt_domain.pMSuspendForDuration(
                libvirt.VIR_NODE_SUSPEND_TARGET_MEM, 0, 0)
        else:
            self.pause()

    def resume(self):
        self.log.debug('resume()')
        if dry_run:
            return

        if self.get_power_state() == "Suspended":
            self.libvirt_domain.pMWakeup()
        else:
            self.unpause()

    def pause(self):
        self.log.debug('pause()')
        if dry_run:
            return

        if not self.is_running():
            raise QubesException("VM not running!")

        self.libvirt_domain.suspend()

    def unpause(self):
        self.log.debug('unpause()')
        if dry_run:
            return

        if not self.is_paused():
            raise QubesException("VM not paused!")

        self.libvirt_domain.resume()

    def get_xml_attrs(self):
        attrs = {}
        attrs_config = self.get_attrs_config()
        for attr in attrs_config:
            attr_config = attrs_config[attr]
            if 'save' in attr_config:
                if 'save_skip' in attr_config:
                    if callable(attr_config['save_skip']):
                        if attr_config['save_skip']():
                            continue
                    elif eval(attr_config['save_skip']):
                        continue
                if callable(attr_config['save']):
                    value = attr_config['save']()
                else:
                    value = eval(attr_config['save'])
                if 'save_attr' in attr_config:
                    attrs[attr_config['save_attr']] = value
                else:
                    attrs[attr] = value
        return attrs

    def create_xml_element(self):
        attrs = self.get_xml_attrs()
        element = lxml.etree.Element(
            # Compatibility hack (Qubes*VM in type vs Qubes*Vm in XML)...
            "Qubes" + self.type.replace("VM", "Vm"),
            **attrs)
        return element


register_qubes_vm_class(QubesVm)