#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2010  Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2013  Marek Marczykowski <marmarek@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#

import datetime
import fcntl
import lxml.etree
import os
import os.path
import re
import shutil
import subprocess
import sys
import time
import uuid
import xml.parsers.expat

from qubes import qmemman
from qubes import qmemman_algo

import libvirt
import warnings

from qubes.qubes import dry_run, vmm
from qubes.qubes import register_qubes_vm_class
from qubes.qubes import QubesVmCollection, QubesException, QubesHost, QubesVmLabels
from qubes.qubes import defaults, system_path, vm_files, qubes_max_qid
from qubes.qmemman_client import QMemmanClient

import qubes.qubesutils

xid_to_name_cache = {}

class QubesVm(object):
    """
    A representation of one Qubes VM.

    Only persistent information is stored here; all runtime information,
    e.g. the Xen domid, is retrieved via the Xen API.
    Note that qid is not the same as Xen's domid!
    """

    # In which order to load this VM type from qubes.xml
    load_order = 100

    # hooks for plugins (modules) which want to influence existing classes,
    # without introducing new ones
    hooks_clone_disk_files = []
    hooks_create_on_disk = []
    hooks_create_xenstore_entries = []
    hooks_get_attrs_config = []
    hooks_get_clone_attrs = []
    hooks_get_config_params = []
    hooks_init = []
    hooks_label_setter = []
    hooks_netvm_setter = []
    hooks_post_rename = []
    hooks_pre_rename = []
    hooks_remove_from_disk = []
    hooks_start = []
    hooks_verify_files = []
    hooks_set_attr = []

    def get_attrs_config(self):
        """ Object attributes for serialization/deserialization.

        Inner dict keys:
         - order: initialization order (to keep dependencies intact);
                  attrs without an order are evaluated at the end
         - default: default value used when the attr is not given to the
                  object constructor
         - attr: set the value on this attribute instead of the parameter name
         - eval: (DEPRECATED) assign the result of this expression instead of
                  the value directly; the local variable 'value' contains the
                  attribute value (or the default if it was not given)
         - func: callable used to parse the value retrieved from XML
         - save: use the evaluation result as the value for XML serialization;
                  only attrs with a 'save' key will be saved in XML
         - save_skip: if present and evaluates to true, the attr will be
                  omitted from the XML
         - save_attr: save to this XML attribute instead of the parameter name
        """
        attrs = {
            # __qid cannot be accessed by setattr, so must be set manually in __init__
            "qid": { "attr": "_qid", "order": 0 },
            "name": { "order": 1 },
            "uuid": { "order": 0, "eval": 'uuid.UUID(value) if value else None' },
            "dir_path": { "default": None, "order": 2 },
            "conf_file": {
                "func": lambda value: self.absolute_path(value, self.name +
                                                         ".conf"),
                "order": 3 },
            ### order >= 10: have base attrs set
            "root_img": {
                "func": self._absolute_path_gen(vm_files["root_img"]),
                "order": 10 },
            "private_img": {
                "func": self._absolute_path_gen(vm_files["private_img"]),
                "order": 10 },
            "volatile_img": {
                "func": self._absolute_path_gen(vm_files["volatile_img"]),
                "order": 10 },
            "firewall_conf": {
                "func": self._absolute_path_gen(vm_files["firewall_conf"]),
                "order": 10 },
            "installed_by_rpm": { "default": False, 'order': 10 },
            "template": { "default": None, "attr": '_template', 'order': 10 },
            ### order >= 20: have template set
            "uses_default_netvm": { "default": True, 'order': 20 },
            "netvm": { "default": None, "attr": "_netvm", 'order': 20 },
            "label": { "attr": "_label", "default": defaults["appvm_label"], 'order': 20,
                'xml_deserialize': lambda _x: QubesVmLabels[_x] },
            "memory": { "default": defaults["memory"], 'order': 20 },
            "maxmem": { "default": None, 'order': 25 },
            "pcidevs": {
                "default": '[]',
                "order": 25,
                "func": lambda value: [] if value in ["none", None] else
                    eval(value) if value.find("[") >= 0 else
                    eval("[" + value + "]") },
            # Internal VM (not shown in qubes-manager, doesn't create appmenus entries)
            "internal": { "default": False, 'attr': '_internal' },
            "vcpus": { "default": None },
            "uses_default_kernel": { "default": True, 'order': 30 },
            "uses_default_kernelopts": { "default": True, 'order': 30 },
            "kernel": {
                "attr": "_kernel",
                "default": None,
                "order": 31,
                "func": lambda value: self._collection.get_default_kernel() if
                    self.uses_default_kernel else value },
            "kernelopts": {
                "default": "",
                "order": 31,
                "func": lambda value: value if not self.uses_default_kernelopts
                    else defaults["kernelopts_pcidevs"] if len(self.pcidevs) > 0
                    else defaults["kernelopts"] },
            "mac": { "attr": "_mac", "default": None },
            "include_in_backups": { "default": True },
            "services": {
                "default": {},
                "func": lambda value: eval(str(value)) },
            "debug": { "default": False },
            "default_user": { "default": "user", "attr": "_default_user" },
            "qrexec_timeout": { "default": 60 },
            "autostart": { "default": False, "attr": "_autostart" },
            "backup_content": { 'default': False },
            "backup_size": {
                "default": 0,
                "func": int },
            "backup_path": { 'default': "" },
            "backup_timestamp": {
                "func": lambda value:
                    datetime.datetime.fromtimestamp(int(value)) if value
                    else None },
            ##### Internal attributes - will be overridden in __init__ regardless of args
            "config_file_template": {
                "func": lambda x: system_path["config_template_pv"] },
            "icon_path": {
                "func": lambda x: os.path.join(self.dir_path, "icon.png") if
                    self.dir_path is not None else None },
            # used to suppress side effects of clone_attrs
            "_do_not_reset_firewall": { "func": lambda x: False },
            "kernels_dir": {
                # for backward compatibility (or another rare case): kernel=None -> kernel in VM dir
                "func": lambda x:
                    os.path.join(system_path["qubes_kernels_base_dir"],
                                 self.kernel) if self.kernel is not None
                    else os.path.join(self.dir_path,
                                      vm_files["kernels_subdir"]) },
            "_start_guid_first": { "func": lambda x: False },
            }

        ### Mark attrs for XML inclusion
        # Simple string attrs
        for prop in ['qid', 'uuid', 'name', 'dir_path', 'memory', 'maxmem',
                     'pcidevs', 'vcpus', 'internal',
                     'uses_default_kernel', 'kernel', 'uses_default_kernelopts',
                     'kernelopts', 'services', 'installed_by_rpm',
                     'uses_default_netvm', 'include_in_backups', 'debug',
                     'qrexec_timeout', 'autostart',
                     'backup_content', 'backup_size', 'backup_path']:
            attrs[prop]['save'] = lambda prop=prop: str(getattr(self, prop))
        # Simple paths
        for prop in ['conf_file', 'root_img', 'volatile_img', 'private_img']:
            attrs[prop]['save'] = \
                lambda prop=prop: self.relative_path(getattr(self, prop))
            attrs[prop]['save_skip'] = \
                lambda prop=prop: getattr(self, prop) is None

        # Can happen only if VM created in offline mode
        attrs['maxmem']['save_skip'] = lambda: self.maxmem is None
        attrs['vcpus']['save_skip'] = lambda: self.vcpus is None

        attrs['uuid']['save_skip'] = lambda: self.uuid is None
        attrs['mac']['save'] = lambda: str(self._mac)
        attrs['mac']['save_skip'] = lambda: self._mac is None

        attrs['default_user']['save'] = lambda: str(self._default_user)

        attrs['backup_timestamp']['save'] = \
            lambda: self.backup_timestamp.strftime("%s")
        attrs['backup_timestamp']['save_skip'] = \
            lambda: self.backup_timestamp is None

        attrs['netvm']['save'] = \
            lambda: str(self.netvm.qid) if self.netvm is not None else "none"
        attrs['netvm']['save_attr'] = "netvm_qid"
        attrs['template']['save'] = \
            lambda: str(self.template.qid) if self.template else "none"
        attrs['template']['save_attr'] = "template_qid"
        attrs['label']['save'] = lambda: self.label.name

        # fire hooks
        for hook in self.hooks_get_attrs_config:
            attrs = hook(self, attrs)

        return attrs
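
    # Illustrative sketch (added comment, not part of the original code): for an
    # entry such as  "memory": { "default": defaults["memory"], "order": 20 },
    # the loops above attach a 'save' callable, so qubes.xml serialization ends
    # up writing roughly  memory="400"  (whatever str(self.memory) returns),
    # while entries whose 'save_skip' evaluates to true are omitted entirely.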

    def post_set_attr(self, attr, newvalue, oldvalue):
        for hook in self.hooks_set_attr:
            hook(self, attr, newvalue, oldvalue)

    def __basic_parse_xml_attr(self, value):
        if value is None:
            return None
        if value.lower() == "none":
            return None
        if value.lower() == "true":
            return True
        if value.lower() == "false":
            return False
        if value.isdigit():
            return int(value)
        return value
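
    # Illustrative examples (added comment) of how the parser above coerces raw
    # XML attribute strings:
    #   "none" -> None      "true" -> True       "false" -> False
    #   "1024" -> 1024      "work" -> "work" (left as a plain string)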

    def __init__(self, **kwargs):

        self._collection = None
        if 'collection' in kwargs:
            self._collection = kwargs['collection']
        else:
            raise ValueError("No collection given to QubesVM constructor")

        # Special case for template b/c it is given in "template_qid" property
        if "xml_element" in kwargs and kwargs["xml_element"].get("template_qid"):
            template_qid = kwargs["xml_element"].get("template_qid")
            if template_qid.lower() != "none":
                if int(template_qid) in self._collection:
                    kwargs["template"] = self._collection[int(template_qid)]
                else:
                    raise ValueError("Unknown template with QID %s" % template_qid)
        attrs = self.get_attrs_config()
        for attr_name in sorted(attrs, key=lambda _x: attrs[_x]['order'] if 'order' in attrs[_x] else 1000):
            attr_config = attrs[attr_name]
            attr = attr_name
            if 'attr' in attr_config:
                attr = attr_config['attr']
            value = None
            if attr_name in kwargs:
                value = kwargs[attr_name]
            elif 'xml_element' in kwargs and kwargs['xml_element'].get(attr_name) is not None:
                if 'xml_deserialize' in attr_config and callable(attr_config['xml_deserialize']):
                    value = attr_config['xml_deserialize'](kwargs['xml_element'].get(attr_name))
                else:
                    value = self.__basic_parse_xml_attr(kwargs['xml_element'].get(attr_name))
            else:
                if 'default' in attr_config:
                    value = attr_config['default']
            if 'func' in attr_config:
                setattr(self, attr, attr_config['func'](value))
            elif 'eval' in attr_config:
                setattr(self, attr, eval(attr_config['eval']))
            else:
                # print "setting %s to %s" % (attr, value)
                setattr(self, attr, value)

        # Init private attrs
        self.__qid = self._qid

        self._libvirt_domain = None

        assert self.__qid < qubes_max_qid, "VM id out of bounds!"
        assert self.name is not None

        if not self.verify_name(self.name):
            msg = ("'%s' is an invalid VM name (invalid characters, over 31 chars long, "
                   "or one of 'none', 'true', 'false')") % self.name
            if 'xml_element' in kwargs:
                print >>sys.stderr, "WARNING: %s" % msg
            else:
                raise QubesException(msg)

        if self.netvm is not None:
            self.netvm.connected_vms[self.qid] = self

        # Not done in a generic way, so as not to create QubesHost() too frequently
        if self.maxmem is None and not vmm.offline_mode:
            qubes_host = QubesHost()
            total_mem_mb = qubes_host.memory_total/1024
            self.maxmem = total_mem_mb/2

        # Linux specific cap: max memory can't scale beyond 10.79*init_mem
        if self.maxmem > self.memory * 10:
            self.maxmem = self.memory * 10

        # By default allow use of all VCPUs
        if self.vcpus is None and not vmm.offline_mode:
            qubes_host = QubesHost()
            self.vcpus = qubes_host.no_cpus

        # Always set whether meminfo-writer should be active or not
        if 'meminfo-writer' not in self.services:
            self.services['meminfo-writer'] = not (len(self.pcidevs) > 0)

        # Additionally force meminfo-writer disabled when the VM has PCI devices
        if len(self.pcidevs) > 0:
            self.services['meminfo-writer'] = False

        # Some additional checks for template based VM
        if self.template is not None:
            if not self.template.is_template():
                print >> sys.stderr, "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
                    format(self.template.qid)
                return False
            self.template.appvms[self.qid] = self
        else:
            assert self.root_img is not None, "Missing root_img for standalone VM!"

        # fire hooks
        for hook in self.hooks_init:
            hook(self)

    def __repr__(self):
        return '<{} at {:#0x} qid={!r} name={!r}>'.format(
            self.__class__.__name__,
            id(self),
            self.qid,
            self.name)

    def absolute_path(self, arg, default):
        if arg is not None and os.path.isabs(arg):
            return arg
        else:
            return os.path.join(self.dir_path, (arg if arg is not None else default))

    def _absolute_path_gen(self, default):
        return lambda value: self.absolute_path(value, default)

    def relative_path(self, arg):
        return arg.replace(self.dir_path + '/', '')

    @property
    def qid(self):
        return self.__qid

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, new_label):
        self._label = new_label
        if self.icon_path:
            try:
                os.remove(self.icon_path)
            except:
                pass
            os.symlink(new_label.icon_path, self.icon_path)
            subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate'])

        # fire hooks
        for hook in self.hooks_label_setter:
            hook(self, new_label)

    @property
    def netvm(self):
        return self._netvm

    # Don't know how to properly call the setter from the base class, so work around it...
    @netvm.setter
    def netvm(self, new_netvm):
        self._set_netvm(new_netvm)
        # fire hooks
        for hook in self.hooks_netvm_setter:
            hook(self, new_netvm)

    def _set_netvm(self, new_netvm):
        if self.is_running() and new_netvm is not None and not new_netvm.is_running():
            raise QubesException("Cannot dynamically attach to stopped NetVM")
        if self.netvm is not None:
            self.netvm.connected_vms.pop(self.qid)
            if self.is_running():
                self.detach_network()

                if hasattr(self.netvm, 'post_vm_net_detach'):
                    self.netvm.post_vm_net_detach(self)

        if new_netvm is None:
            if not self._do_not_reset_firewall:
                # Also set the firewall to block all traffic, as discussed in #370
                if os.path.exists(self.firewall_conf):
                    shutil.copy(self.firewall_conf,
                                os.path.join(system_path["qubes_base_dir"],
                                             "backup", "%s-firewall-%s.xml" % (self.name,
                                             time.strftime('%Y-%m-%d-%H:%M:%S'))))
                self.write_firewall_conf({'allow': False, 'allowDns': False,
                                          'allowIcmp': False, 'allowYumProxy': False,
                                          'rules': []})
        else:
            new_netvm.connected_vms[self.qid] = self

        self._netvm = new_netvm

        if new_netvm is None:
            return

        if self.is_running():
            # refresh IP, DNS etc
            self.create_xenstore_entries(self.xid)
            self.attach_network()
            if hasattr(self.netvm, 'post_vm_net_attach'):
                self.netvm.post_vm_net_attach(self)

    @property
    def ip(self):
        if self.netvm is not None:
            return self.netvm.get_ip_for_vm(self.qid)
        else:
            return None

    @property
    def netmask(self):
        if self.netvm is not None:
            return self.netvm.netmask
        else:
            return None

    @property
    def gateway(self):
        # This is the gateway IP for _other_ VMs, so it makes sense only in NetVMs
        return None

    @property
    def secondary_dns(self):
        if self.netvm is not None:
            return self.netvm.secondary_dns
        else:
            return None

    @property
    def vif(self):
        if self.xid < 0:
            return None
        if self.netvm is None:
            return None
        return "vif{0}.+".format(self.xid)

    @property
    def mac(self):
        if self._mac is not None:
            return self._mac
        else:
            return "00:16:3E:5E:6C:{qid:02X}".format(qid=self.qid)

    @mac.setter
    def mac(self, new_mac):
        self._mac = new_mac

    @property
    def kernel(self):
        return self._kernel

    @kernel.setter
    def kernel(self, new_value):
        if new_value is not None:
            if not os.path.exists(os.path.join(system_path[
                    'qubes_kernels_base_dir'], new_value)):
                raise QubesException("Kernel '%s' not installed" % new_value)
            for f in ('vmlinuz', 'modules.img'):
                if not os.path.exists(os.path.join(
                        system_path['qubes_kernels_base_dir'], new_value, f)):
                    raise QubesException(
                        "Kernel '%s' not properly installed: missing %s "
                        "file" % (new_value, f))
        self._kernel = new_value

    @property
    def updateable(self):
        return self.template is None

    # Kept for compatibility
    def is_updateable(self):
        return self.updateable

    @property
    def default_user(self):
        if self.template is not None:
            return self.template.default_user
        else:
            return self._default_user

    @default_user.setter
    def default_user(self, value):
        self._default_user = value

    def is_networked(self):
        if self.is_netvm():
            return True

        if self.netvm is not None:
            return True
        else:
            return False

    def verify_name(self, name):
        if not isinstance(self.__basic_parse_xml_attr(name), str):
            return False
        if len(name) > 31:
            return False
        return re.match(r"^[a-zA-Z][a-zA-Z0-9_-]*$", name) is not None
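
    # Illustrative examples (added comment): verify_name() accepts names such as
    # "work" or "net-vm2" (a letter first, then letters/digits/_/-, at most 31
    # characters) and rejects names like "2fa", "none", "true" or "false",
    # since those either fail the regex or parse to a non-string value above.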

    def pre_rename(self, new_name):
        # fire hooks
        for hook in self.hooks_pre_rename:
            hook(self, new_name)

    def set_name(self, name):
        if self.is_running():
            raise QubesException("Cannot change name of running VM!")

        if not self.verify_name(name):
            raise QubesException("Invalid characters in VM name")

        if self.installed_by_rpm:
            raise QubesException("Cannot rename VM installed by RPM -- first clone VM and then use yum to remove package.")

        self.pre_rename(name)
        self.libvirt_domain.undefine()
        self._libvirt_domain = None

        new_conf = os.path.join(self.dir_path, name + '.conf')
        if os.path.exists(self.conf_file):
            os.rename(self.conf_file, new_conf)
        old_dirpath = self.dir_path
        new_dirpath = os.path.join(os.path.dirname(self.dir_path), name)
        os.rename(old_dirpath, new_dirpath)
        self.dir_path = new_dirpath
        old_name = self.name
        self.name = name
        if self.private_img is not None:
            self.private_img = self.private_img.replace(old_dirpath, new_dirpath)
        if self.root_img is not None:
            self.root_img = self.root_img.replace(old_dirpath, new_dirpath)
        if self.volatile_img is not None:
            self.volatile_img = self.volatile_img.replace(old_dirpath, new_dirpath)
        if self.conf_file is not None:
            self.conf_file = new_conf.replace(old_dirpath, new_dirpath)
        if self.icon_path is not None:
            self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath)
        if hasattr(self, 'kernels_dir') and self.kernels_dir is not None:
            self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath)

        self._update_libvirt_domain()
        self.post_rename(old_name)

    def post_rename(self, old_name):
        # fire hooks
        for hook in self.hooks_post_rename:
            hook(self, old_name)

    @property
    def internal(self):
        return self._internal

    @internal.setter
    def internal(self, value):
        oldvalue = self._internal
        self._internal = value
        self.post_set_attr('internal', value, oldvalue)

    @property
    def autostart(self):
        return self._autostart

    @autostart.setter
    def autostart(self, value):
        if value:
            retcode = subprocess.call(["sudo", "systemctl", "enable", "qubes-vm@%s.service" % self.name])
        else:
            retcode = subprocess.call(["sudo", "systemctl", "disable", "qubes-vm@%s.service" % self.name])
        if retcode != 0:
            raise QubesException("Failed to set autostart for VM via systemctl")
        self._autostart = bool(value)

    @classmethod
    def is_template_compatible(cls, template):
        """Check if the given VM can be a template for this VM"""
        # FIXME: check if the value is an instance of QubesTemplateVM, not the VM
        # type. The problem is that while this file is loaded, QubesTemplateVM is
        # not defined yet.
        if template and (not template.is_template() or template.type != "TemplateVM"):
            return False
        return True

    @property
    def template(self):
        return self._template

    @template.setter
    def template(self, value):
        if self._template is None and value is not None:
            raise QubesException("Cannot set template for standalone VM")
        if value and not self.is_template_compatible(value):
            raise QubesException("Incompatible template type %s with VM of type %s" % (value.type, self.type))
        self._template = value

    def is_template(self):
        return False

    def is_appvm(self):
        return False

    def is_netvm(self):
        return False

    def is_proxyvm(self):
        return False

    def is_disposablevm(self):
        return False

    @property
    def xid(self):
        if self.libvirt_domain is None:
            return -1
        return self.libvirt_domain.ID()

    def get_xid(self):
        # obsoleted
        return self.xid

    def _update_libvirt_domain(self):
        domain_config = self.create_config_file()
        if self._libvirt_domain:
            self._libvirt_domain.undefine()
        self._libvirt_domain = vmm.libvirt_conn.defineXML(domain_config)
        self.uuid = uuid.UUID(bytes=self._libvirt_domain.UUID())

    @property
    def libvirt_domain(self):
        if self._libvirt_domain is not None:
            return self._libvirt_domain

        try:
            if self.uuid is not None:
                self._libvirt_domain = vmm.libvirt_conn.lookupByUUID(self.uuid.bytes)
            else:
                self._libvirt_domain = vmm.libvirt_conn.lookupByName(self.name)
                self.uuid = uuid.UUID(bytes=self._libvirt_domain.UUID())
        except libvirt.libvirtError:
            if libvirt.virGetLastError()[0] == libvirt.VIR_ERR_NO_DOMAIN:
                self._update_libvirt_domain()
            else:
                raise
        return self._libvirt_domain

    def get_uuid(self):
        # obsoleted
        return self.uuid

    def get_mem(self):
        if dry_run:
            return 666

        if not self.libvirt_domain.isActive():
            return 0
        return self.libvirt_domain.info()[1]

    def get_mem_static_max(self):
        if dry_run:
            return 666

        if self.libvirt_domain is None:
            return 0

        return self.libvirt_domain.maxMemory()

    def get_prefmem(self):
        # TODO: qmemman is still xen specific
        untrusted_meminfo_key = xs.read('', '/local/domain/%s/memory/meminfo'
                                            % self.xid)
        if untrusted_meminfo_key is None or untrusted_meminfo_key == '':
            return 0
        domain = qmemman.DomainState(self.xid)
        qmemman_algo.refresh_meminfo_for_domain(domain, untrusted_meminfo_key)
        domain.memory_maximum = self.get_mem_static_max()*1024
        return qmemman_algo.prefmem(domain)/1024

    def get_per_cpu_time(self):
        if dry_run:
            import random
            return random.random() * 100

        libvirt_domain = self.libvirt_domain
        if libvirt_domain and libvirt_domain.isActive():
            return libvirt_domain.getCPUStats(
                libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)[0]['cpu_time']/10**9
        else:
            return 0

    def get_disk_utilization_root_img(self):
        return qubes.qubesutils.get_disk_usage(self.root_img)

    def get_root_img_sz(self):
        if not os.path.exists(self.root_img):
            return 0

        return os.path.getsize(self.root_img)

    def get_power_state(self):
        if dry_run:
            return "NA"

        libvirt_domain = self.libvirt_domain
        if libvirt_domain.isActive():
            if libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
                return "Paused"
            elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_CRASHED:
                return "Crashed"
            elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTDOWN:
                return "Halting"
            elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
                return "Dying"
            elif libvirt_domain.state()[0] == libvirt.VIR_DOMAIN_PMSUSPENDED:
                return "Suspended"
            else:
                if not self.is_fully_usable():
                    return "Transient"
                else:
                    return "Running"
        else:
            return 'Halted'

        return "NA"

    def is_guid_running(self):
        xid = self.xid
        if xid < 0:
            return False
        if not os.path.exists('/var/run/qubes/guid-running.%d' % xid):
            return False
        return True

    def is_qrexec_running(self):
        if self.xid < 0:
            return False
        return os.path.exists('/var/run/qubes/qrexec.%s' % self.name)

    def is_fully_usable(self):
        # A running gui-daemon implies the VM is running too
        if not self.is_guid_running():
            return False
        if not self.is_qrexec_running():
            return False
        return True

    def is_running(self):
        if self.libvirt_domain and self.libvirt_domain.isActive():
            return True
        else:
            return False

    def is_paused(self):
        if self.libvirt_domain and self.libvirt_domain.state() == libvirt.VIR_DOMAIN_PAUSED:
            return True
        else:
            return False

    def get_start_time(self):
        if not self.is_running():
            return None

        # TODO
        uuid = self.uuid

        start_time = vmm.xs.read('', "/vm/%s/start_time" % str(uuid))
        if start_time != '':
            return datetime.datetime.fromtimestamp(float(start_time))
        else:
            return None

    def is_outdated(self):
        # Makes sense only on a VM based on a template
        if self.template is None:
            return False

        if not self.is_running():
            return False

        if not hasattr(self.template, 'rootcow_img'):
            return False

        rootimg_inode = os.stat(self.template.root_img)
        try:
            rootcow_inode = os.stat(self.template.rootcow_img)
        except OSError:
            # The only case when rootcow_img doesn't exist is in the middle of
            # commit_changes, so the VM is outdated right now
            return True

        current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format(
            rootimg_inode[2], rootimg_inode[1],
            rootcow_inode[2], rootcow_inode[1])

        # FIXME
        # 51712 (0xCA00) is xvda
        # backend node name not available through xenapi :(
        used_dmdev = vmm.xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.xid))

        return used_dmdev != current_dmdev

    def get_disk_utilization(self):
        return qubes.qubesutils.get_disk_usage(self.dir_path)

    def get_disk_utilization_private_img(self):
        return qubes.qubesutils.get_disk_usage(self.private_img)

    def get_private_img_sz(self):
        if not os.path.exists(self.private_img):
            return 0

        return os.path.getsize(self.private_img)

    def resize_private_img(self, size):
        assert size >= self.get_private_img_sz(), "Cannot shrink private.img"

        f_private = open(self.private_img, "a+b")
        f_private.truncate(size)
        f_private.close()

        retcode = 0
        if self.is_running():
            # find loop device
            p = subprocess.Popen(["sudo", "losetup", "--associated", self.private_img],
                                 stdout=subprocess.PIPE)
            result = p.communicate()
            m = re.match(r"^(/dev/loop\d+):\s", result[0])
            if m is None:
                raise QubesException("ERROR: Cannot find loop device!")

            loop_dev = m.group(1)

            # resize loop device
            subprocess.check_call(["sudo", "losetup", "--set-capacity", loop_dev])

            retcode = self.run("while [ \"`blockdev --getsize64 /dev/xvdb`\" -lt {0} ]; do ".format(size) +
                "head /dev/xvdb > /dev/null; sleep 0.2; done; resize2fs /dev/xvdb", user="root", wait=True)
        if retcode != 0:
            raise QubesException("resize2fs failed")
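
    # Usage sketch (added comment; the size below is only an example):
    #   vm.resize_private_img(10 * 1024**3)
    # grows private.img to 10 GiB; for a running VM it also refreshes the loop
    # device capacity and runs resize2fs on /dev/xvdb inside the VM. Shrinking
    # is rejected by the assertion above.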

    # FIXME: should be outside of QubesVM?
    def get_timezone(self):
        # fc18
        if os.path.islink('/etc/localtime'):
            return '/'.join(os.readlink('/etc/localtime').split('/')[-2:])
        # <=fc17
        elif os.path.exists('/etc/sysconfig/clock'):
            clock_config = open('/etc/sysconfig/clock', "r")
            clock_config_lines = clock_config.readlines()
            clock_config.close()
            zone_re = re.compile(r'^ZONE="(.*)"')
            for line in clock_config_lines:
                line_match = zone_re.match(line)
                if line_match:
                    return line_match.group(1)
        else:
            # last resort: some applications make /etc/localtime a
            # hardlink instead of a symlink...
            tz_info = os.stat('/etc/localtime')
            if not tz_info:
                return None
            if tz_info.st_nlink > 1:
                p = subprocess.Popen(['find', '/usr/share/zoneinfo',
                                      '-inum', str(tz_info.st_ino)],
                                     stdout=subprocess.PIPE)
                tz_path = p.communicate()[0].strip()
                return tz_path.replace('/usr/share/zoneinfo/', '')
        return None

    def cleanup_vifs(self):
        """
        Xend does not remove the vif when the backend domain is down, so we
        must do it manually.
        """

        # FIXME: remove this?
        if not self.is_running():
            return

        dev_basepath = '/local/domain/%d/device/vif' % self.xid
        for dev in vmm.xs.ls('', dev_basepath):
            # check if the backend domain is alive
            backend_xid = int(vmm.xs.read('', '%s/%s/backend-id' % (dev_basepath, dev)))
            if backend_xid in vmm.libvirt_conn.listDomainsID():
                # check if the device is still active
                if vmm.xs.read('', '%s/%s/state' % (dev_basepath, dev)) == '4':
                    continue
            # remove the dead device
            vmm.xs.rm('', '%s/%s' % (dev_basepath, dev))

    def create_xenstore_entries(self, xid = None):
        if dry_run:
            return

        if xid is None:
            xid = self.xid

        assert xid >= 0, "Invalid XID value"

        domain_path = vmm.xs.get_domain_path(xid)

        # Set xenstore entries with VM networking info:

        vmm.xs.write('', "{0}/qubes-vm-type".format(domain_path),
                     self.type)
        vmm.xs.write('', "{0}/qubes-vm-updateable".format(domain_path),
                     str(self.updateable))

        if self.is_netvm():
            vmm.xs.write('',
                         "{0}/qubes-netvm-gateway".format(domain_path),
                         self.gateway)
            vmm.xs.write('',
                         "{0}/qubes-netvm-secondary-dns".format(domain_path),
                         self.secondary_dns)
            vmm.xs.write('',
                         "{0}/qubes-netvm-netmask".format(domain_path),
                         self.netmask)
            vmm.xs.write('',
                         "{0}/qubes-netvm-network".format(domain_path),
                         self.network)

        if self.netvm is not None:
            vmm.xs.write('', "{0}/qubes-ip".format(domain_path), self.ip)
            vmm.xs.write('', "{0}/qubes-netmask".format(domain_path),
                         self.netvm.netmask)
            vmm.xs.write('', "{0}/qubes-gateway".format(domain_path),
                         self.netvm.gateway)
            vmm.xs.write('',
                         "{0}/qubes-secondary-dns".format(domain_path),
                         self.netvm.secondary_dns)

        tzname = self.get_timezone()
        if tzname:
            vmm.xs.write('',
                         "{0}/qubes-timezone".format(domain_path),
                         tzname)

        for srv in self.services.keys():
            # convert True/False to "1"/"0"
            vmm.xs.write('', "{0}/qubes-service/{1}".format(domain_path, srv),
                         str(int(self.services[srv])))

        vmm.xs.write('',
                     "{0}/qubes-block-devices".format(domain_path),
                     '')

        vmm.xs.write('',
                     "{0}/qubes-usb-devices".format(domain_path),
                     '')

        vmm.xs.write('', "{0}/qubes-debug-mode".format(domain_path),
                     str(int(self.debug)))

        # Fix permissions
        vmm.xs.set_permissions('', '{0}/device'.format(domain_path),
                               [{ 'dom': xid }])
        vmm.xs.set_permissions('', '{0}/memory'.format(domain_path),
                               [{ 'dom': xid }])
        vmm.xs.set_permissions('', '{0}/qubes-block-devices'.format(domain_path),
                               [{ 'dom': xid }])
        vmm.xs.set_permissions('', '{0}/qubes-usb-devices'.format(domain_path),
                               [{ 'dom': xid }])

        # fire hooks
        for hook in self.hooks_create_xenstore_entries:
            hook(self, xid=xid)
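
    # Illustrative summary (added comment): the method above populates keys such
    # as {domain_path}/qubes-vm-type, qubes-ip, qubes-timezone and
    # qubes-service/<name> under the domain's xenstore tree, then relaxes
    # permissions so the VM itself can fill in qubes-block-devices and
    # qubes-usb-devices.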

    def _format_disk_dev(self, path, script, vdev, rw=True, type="disk", domain=None):
        template = " <disk type='block' device='{type}'>\n" \
                   " <driver name='phy'/>\n" \
                   " <source dev='{path}'/>\n" \
                   " <target dev='{vdev}' bus='xen'/>\n" \
                   "{params}" \
                   " </disk>\n"
        params = ""
        if not rw:
            params += " <readonly/>\n"
        if domain:
            params += " <domain name='%s'/>\n" % domain
        if script:
            params += " <script path='%s'/>\n" % script
        return template.format(path=path, vdev=vdev, type=type,
                               params=params)

    def _format_net_dev(self, ip, mac, backend):
        template = " <interface type='ethernet'>\n" \
                   " <mac address='{mac}'/>\n" \
                   " <ip address='{ip}'/>\n" \
                   " <script path='vif-route-qubes'/>\n" \
                   " <domain name='{backend}'/>\n" \
                   " </interface>\n"
        return template.format(ip=ip, mac=mac, backend=backend)

    def _format_pci_dev(self, address):
        template = " <hostdev type='pci' managed='yes'>\n" \
                   " <source>\n" \
                   " <address bus='0x{bus}' slot='0x{slot}' function='0x{fun}'/>\n" \
                   " </source>\n" \
                   " </hostdev>\n"
        dev_match = re.match('([0-9a-f]+):([0-9a-f]+)\.([0-9a-f]+)', address)
        if not dev_match:
            raise QubesException("Invalid PCI device address: %s" % address)
        return template.format(
            bus=dev_match.group(1),
            slot=dev_match.group(2),
            fun=dev_match.group(3))
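
    # Illustrative example (added comment): _format_pci_dev("00:1a.0") returns a
    # <hostdev> snippet with bus='0x00' slot='0x1a' function='0x0';
    # get_config_params() below concatenates such snippets into the 'pcidevs'
    # placeholder of the libvirt config template.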

    def get_rootdev(self):
        if self.template:
            return self._format_disk_dev(
                "{dir}/root.img:{dir}/root-cow.img".format(
                    dir=self.template.dir_path),
                "block-snapshot", "xvda", False)
        else:
            return self._format_disk_dev(
                "{dir}/root.img".format(dir=self.dir_path),
                None, "xvda", True)

    def get_config_params(self):
        args = {}
        args['name'] = self.name
        if hasattr(self, 'kernels_dir'):
            args['kerneldir'] = self.kernels_dir
        args['uuidnode'] = "<uuid>%s</uuid>" % str(self.uuid) if self.uuid else ""
        args['vmdir'] = self.dir_path
        args['pcidevs'] = ''.join(map(self._format_pci_dev, self.pcidevs))
        args['mem'] = str(self.memory)
        if self.maxmem < self.memory:
            args['mem'] = str(self.maxmem)
        args['maxmem'] = str(self.maxmem)
        if 'meminfo-writer' in self.services and not self.services['meminfo-writer']:
            # If dynamic memory management is disabled, set maxmem=mem
            args['maxmem'] = args['mem']
        args['vcpus'] = str(self.vcpus)
        if self.netvm is not None:
            args['ip'] = self.ip
            args['mac'] = self.mac
            args['gateway'] = self.netvm.gateway
            args['dns1'] = self.netvm.gateway
            args['dns2'] = self.secondary_dns
            args['netmask'] = self.netmask
            args['netdev'] = self._format_net_dev(self.ip, self.mac, self.netvm.name)
            args['disable_network1'] = ''
            args['disable_network2'] = ''
        else:
            args['ip'] = ''
            args['mac'] = ''
            args['gateway'] = ''
            args['dns1'] = ''
            args['dns2'] = ''
            args['netmask'] = ''
            args['netdev'] = ''
            args['disable_network1'] = '<!--'
            args['disable_network2'] = '-->'
        args['rootdev'] = self.get_rootdev()
        args['privatedev'] = \
            self._format_disk_dev("{dir}/private.img".format(dir=self.dir_path),
                                  None, "xvdb", True)
        args['volatiledev'] = \
            self._format_disk_dev("{dir}/volatile.img".format(dir=self.dir_path),
                                  None, "xvdc", True)
        if hasattr(self, 'kernel'):
            args['otherdevs'] = \
                self._format_disk_dev("{dir}/modules.img".format(dir=self.kernels_dir),
                                      None, "xvdd", self.kernel is None)
        if hasattr(self, 'kernelopts'):
            args['kernelopts'] = self.kernelopts
            if self.debug:
                print >> sys.stderr, "--> Debug mode: adding 'earlyprintk=xen' to kernel opts"
                args['kernelopts'] += ' earlyprintk=xen'

        # fire hooks
        for hook in self.hooks_get_config_params:
            args = hook(self, args)

        return args

    @property
    def uses_custom_config(self):
        return self.conf_file != self.absolute_path(self.name + ".conf", None)

    def create_config_file(self, file_path = None, prepare_dvm = False):
        if file_path is None:
            file_path = self.conf_file
        if self.uses_custom_config:
            conf_appvm = open(file_path, "r")
            domain_config = conf_appvm.read()
            conf_appvm.close()
            return domain_config

        f_conf_template = open(self.config_file_template, 'r')
        conf_template = f_conf_template.read()
        f_conf_template.close()

        template_params = self.get_config_params()
        if prepare_dvm:
            template_params['name'] = '%NAME%'
            template_params['privatedev'] = ''
            template_params['netdev'] = re.sub(r"address='[0-9.]*'", "address='%IP%'", template_params['netdev'])
        domain_config = conf_template.format(**template_params)

        # FIXME: This is only for debugging purposes
        old_umask = os.umask(002)
        conf_appvm = open(file_path, "w")
        conf_appvm.write(domain_config)
        conf_appvm.close()
        os.umask(old_umask)

        return domain_config

    def create_on_disk(self, verbose=False, source_template = None):
        if source_template is None:
            source_template = self.template
        assert source_template is not None

        if dry_run:
            return

        old_umask = os.umask(002)
        if verbose:
            print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path)
        os.mkdir(self.dir_path)

        if verbose:
            print >> sys.stderr, "--> Creating the VM config file: {0}".format(self.conf_file)

        template_priv = source_template.private_img
        if verbose:
            print >> sys.stderr, "--> Copying the template's private image: {0}".\
                format(template_priv)

        # We prefer to use Linux's cp, because it nicely handles sparse files
        retcode = subprocess.call(["cp", template_priv, self.private_img])
        if retcode != 0:
            raise IOError("Error while copying {0} to {1}".\
                format(template_priv, self.private_img))

        if self.updateable:
            template_root = source_template.root_img
            if verbose:
                print >> sys.stderr, "--> Copying the template's root image: {0}".\
                    format(template_root)

            # We prefer to use Linux's cp, because it nicely handles sparse files
            retcode = subprocess.call(["cp", template_root, self.root_img])
            if retcode != 0:
                raise IOError("Error while copying {0} to {1}".\
                    format(template_root, self.root_img))

            kernels_dir = source_template.kernels_dir
            if verbose:
                print >> sys.stderr, "--> Copying the kernel (set kernel \"none\" to use it): {0}".\
                    format(kernels_dir)

            os.mkdir(self.dir_path + '/kernels')
            for f in ("vmlinuz", "initramfs", "modules.img"):
                shutil.copy(os.path.join(kernels_dir, f),
                            os.path.join(self.dir_path, vm_files["kernels_subdir"], f))

        # Create volatile.img
        self.reset_volatile_storage(source_template = source_template, verbose=verbose)

        if verbose:
            print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path)
        os.symlink(self.label.icon_path, self.icon_path)

        os.umask(old_umask)

        # fire hooks
        for hook in self.hooks_create_on_disk:
            hook(self, verbose, source_template=source_template)

    def get_clone_attrs(self):
        attrs = ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm',
                 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts',
                 'services', 'vcpus', '_mac', 'pcidevs', 'include_in_backups',
                 '_label', 'default_user']

        # fire hooks
        for hook in self.hooks_get_clone_attrs:
            attrs = hook(self, attrs)

        return attrs

    def clone_attrs(self, src_vm, fail_on_error=True):
        self._do_not_reset_firewall = True
        for prop in self.get_clone_attrs():
            try:
                setattr(self, prop, getattr(src_vm, prop))
            except Exception as e:
                if fail_on_error:
                    self._do_not_reset_firewall = False
                    raise
                else:
                    print >>sys.stderr, "WARNING: %s" % str(e)
        self._do_not_reset_firewall = False

    def clone_disk_files(self, src_vm, verbose):
        if dry_run:
            return

        if src_vm.is_running():
            raise QubesException("Attempt to clone a running VM!")

        if verbose:
            print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path)
        os.mkdir(self.dir_path)

        if src_vm.private_img is not None and self.private_img is not None:
            if verbose:
                print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\
                    format(src_vm.private_img, self.private_img)
            # We prefer to use Linux's cp, because it nicely handles sparse files
            retcode = subprocess.call(["cp", src_vm.private_img, self.private_img])
            if retcode != 0:
                raise IOError("Error while copying {0} to {1}".\
                    format(src_vm.private_img, self.private_img))

        if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None:
            if verbose:
                print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\
                    format(src_vm.root_img, self.root_img)
            # We prefer to use Linux's cp, because it nicely handles sparse files
            retcode = subprocess.call(["cp", src_vm.root_img, self.root_img])
            if retcode != 0:
                raise IOError("Error while copying {0} to {1}".\
                    format(src_vm.root_img, self.root_img))

        if src_vm.icon_path is not None and self.icon_path is not None:
            if os.path.exists(src_vm.dir_path):
                if os.path.islink(src_vm.icon_path):
                    icon_path = os.readlink(src_vm.icon_path)
                    if verbose:
                        print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path)
                    os.symlink(icon_path, self.icon_path)
                else:
                    if verbose:
                        print >> sys.stderr, "--> Copying icon: {0} -> {1}".format(src_vm.icon_path, self.icon_path)
                    shutil.copy(src_vm.icon_path, self.icon_path)

        # fire hooks
        for hook in self.hooks_clone_disk_files:
            hook(self, src_vm, verbose)
    def verify_files(self):
        if dry_run:
            return

        if not os.path.exists (self.dir_path):
            raise QubesException (
                "VM directory doesn't exist: {0}".\
                format(self.dir_path))

        if self.updateable and not os.path.exists (self.root_img):
            raise QubesException (
                "VM root image file doesn't exist: {0}".\
                format(self.root_img))

        if not os.path.exists (self.private_img):
            raise QubesException (
                "VM private image file doesn't exist: {0}".\
                format(self.private_img))

        if not os.path.exists (os.path.join(self.kernels_dir, 'vmlinuz')):
            raise QubesException (
                "VM kernel does not exist: {0}".\
                format(os.path.join(self.kernels_dir, 'vmlinuz')))

        if not os.path.exists (os.path.join(self.kernels_dir, 'initramfs')):
            raise QubesException (
                "VM initramfs does not exist: {0}".\
                format(os.path.join(self.kernels_dir, 'initramfs')))

        if not os.path.exists (os.path.join(self.kernels_dir, 'modules.img')):
            raise QubesException (
                "VM kernel modules image does not exist: {0}".\
                format(os.path.join(self.kernels_dir, 'modules.img')))

        # fire hooks
        for hook in self.hooks_verify_files:
            hook(self)

        return True

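    # verify_files() is a pre-start sanity check: it raises QubesException on
    # the first missing file (VM directory, root/private images, kernel,
    # initramfs, modules image) and returns True when everything this VM
    # needs is in place.
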
    def reset_volatile_storage(self, source_template = None, verbose = False):
        assert not self.is_running(), "Attempt to clean volatile image of running VM!"

        if source_template is None:
            source_template = self.template

        # Only makes sense on a template-based VM
        if source_template is None:
            # For StandaloneVM create it only if it does not already exist
            # (e.g. after backup-restore)
            if not os.path.exists(self.volatile_img):
                if verbose:
                    print >> sys.stderr, "--> Creating volatile image: {0}...".format (self.volatile_img)
                f_root = open (self.root_img, "r")
                f_root.seek(0, os.SEEK_END)
                root_size = f_root.tell()
                f_root.close()
                subprocess.check_call([system_path["prepare_volatile_img_cmd"], self.volatile_img, str(root_size / 1024 / 1024)])
            return

        if verbose:
            print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img)
        if dry_run:
            return
        if os.path.exists (self.volatile_img):
            os.remove (self.volatile_img)

        if hasattr(source_template, 'clean_volatile_img'):
            retcode = subprocess.call (["tar", "xf", source_template.clean_volatile_img, "-C", self.dir_path])
            if retcode != 0:
                raise IOError ("Error while unpacking {0} to {1}".\
                        format(source_template.clean_volatile_img, self.volatile_img))

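    # In short: for a standalone VM a fresh volatile image is created (sized
    # in MiB to match root.img) only if missing, while for a template-based VM
    # the old image is removed and a clean copy is unpacked from the
    # template's clean_volatile_img tarball.
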
    def remove_from_disk(self):
        if dry_run:
            return

        # fire hooks
        for hook in self.hooks_remove_from_disk:
            hook(self)

        shutil.rmtree (self.dir_path)

    def write_firewall_conf(self, conf):
        defaults = self.get_firewall_conf()
        expiring_rules_present = False
        for item in defaults.keys():
            if item not in conf:
                conf[item] = defaults[item]

        root = lxml.etree.Element(
                "QubesFirewallRules",
                policy = "allow" if conf["allow"] else "deny",
                dns = "allow" if conf["allowDns"] else "deny",
                icmp = "allow" if conf["allowIcmp"] else "deny",
                yumProxy = "allow" if conf["allowYumProxy"] else "deny"
        )

        for rule in conf["rules"]:
            # For backward compatibility
            if "proto" not in rule:
                if rule["portBegin"] is not None and rule["portBegin"] > 0:
                    rule["proto"] = "tcp"
                else:
                    rule["proto"] = "any"
            element = lxml.etree.Element(
                    "rule",
                    address=rule["address"],
                    proto=str(rule["proto"]),
            )
            if rule["netmask"] is not None and rule["netmask"] != 32:
                element.set("netmask", str(rule["netmask"]))
            if rule.get("portBegin", None) is not None and \
                    rule["portBegin"] > 0:
                element.set("port", str(rule["portBegin"]))
            if rule.get("portEnd", None) is not None and rule["portEnd"] > 0:
                element.set("toport", str(rule["portEnd"]))
            if "expire" in rule:
                element.set("expire", str(rule["expire"]))
                expiring_rules_present = True

            root.append(element)

        tree = lxml.etree.ElementTree(root)

        try:
            old_umask = os.umask(002)
            with open(self.firewall_conf, 'w') as f:
                fcntl.lockf(f, fcntl.LOCK_EX)
                tree.write(f, encoding="UTF-8", pretty_print=True)
                fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
            os.umask(old_umask)
        except EnvironmentError as err:
            print >> sys.stderr, "{0}: save error: {1}".format(
                    os.path.basename(sys.argv[0]), err)
            return False

        # Automatically enable/disable 'yum-proxy-setup' service based on allowYumProxy
        if conf['allowYumProxy']:
            self.services['yum-proxy-setup'] = True
        else:
            if 'yum-proxy-setup' in self.services:
                self.services.pop('yum-proxy-setup')

        if expiring_rules_present:
            subprocess.call(["sudo", "systemctl", "start",
                    "qubes-reload-firewall@%s.timer" % self.name])

        return True

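    # For illustration only (values invented, attribute names taken from the
    # code above), a firewall.xml written by write_firewall_conf() has roughly
    # this shape:
    #   <QubesFirewallRules policy="deny" dns="allow" icmp="allow" yumProxy="deny">
    #     <rule address="192.168.0.0" netmask="16" proto="tcp" port="443"/>
    #     <rule address="example.com" proto="tcp" port="80" toport="88" expire="1400000000"/>
    #   </QubesFirewallRules>
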
    def has_firewall(self):
        return os.path.exists (self.firewall_conf)

    def get_firewall_defaults(self):
        return { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True, "allowYumProxy": False }

    def get_firewall_conf(self):
        conf = self.get_firewall_defaults()

        try:
            tree = lxml.etree.parse(self.firewall_conf)
            root = tree.getroot()

            conf["allow"] = (root.get("policy") == "allow")
            conf["allowDns"] = (root.get("dns") == "allow")
            conf["allowIcmp"] = (root.get("icmp") == "allow")
            conf["allowYumProxy"] = (root.get("yumProxy") == "allow")

            for element in root:
                rule = {}
                attr_list = ("address", "netmask", "proto", "port", "toport",
                             "expire")

                for attribute in attr_list:
                    rule[attribute] = element.get(attribute)

                if rule["netmask"] is not None:
                    rule["netmask"] = int(rule["netmask"])
                else:
                    rule["netmask"] = 32

                if rule["port"] is not None:
                    rule["portBegin"] = int(rule["port"])
                else:
                    # backward compatibility
                    rule["portBegin"] = 0

                # For backward compatibility
                if rule["proto"] is None:
                    if rule["portBegin"] > 0:
                        rule["proto"] = "tcp"
                    else:
                        rule["proto"] = "any"

                if rule["toport"] is not None:
                    rule["portEnd"] = int(rule["toport"])
                else:
                    rule["portEnd"] = None

                if rule["expire"] is not None:
                    rule["expire"] = int(rule["expire"])
                    if rule["expire"] <= int(datetime.datetime.now().strftime(
                            "%s")):
                        continue
                else:
                    del(rule["expire"])

                del(rule["port"])
                del(rule["toport"])

                conf["rules"].append(rule)

        except EnvironmentError as err:
            return conf
        except (xml.parsers.expat.ExpatError,
                ValueError, LookupError) as err:
            print("{0}: load error: {1}".format(
                os.path.basename(sys.argv[0]), err))
            return None

        return conf

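    # Each entry appended to conf["rules"] above is a plain dict of the form
    # (illustrative values):
    #   {"address": "192.168.0.0", "netmask": 16, "proto": "tcp",
    #    "portBegin": 443, "portEnd": None}
    # with an optional "expire" key (Unix timestamp) kept only for rules that
    # have not yet expired.
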
    def pci_add(self, pci):
        if not os.path.exists('/sys/bus/pci/devices/0000:%s' % pci):
            raise QubesException("Invalid PCI device: %s" % pci)
        if self.pcidevs.count(pci):
            # already added
            return
        self.pcidevs.append(pci)
        if self.is_running():
            try:
                subprocess.check_call(['sudo', system_path["qubes_pciback_cmd"], pci])
                subprocess.check_call(['sudo', 'xl', 'pci-attach', str(self.xid), pci])
            except Exception as e:
                print >>sys.stderr, "Failed to attach PCI device on the fly " \
                    "(%s), changes will be seen after VM restart" % str(e)

    def pci_remove(self, pci):
        if not self.pcidevs.count(pci):
            # not attached
            return
        self.pcidevs.remove(pci)
        if self.is_running():
            p = subprocess.Popen(['xl', 'pci-list', str(self.xid)],
                    stdout=subprocess.PIPE)
            result = p.communicate()
            m = re.search(r"^(\d+\.\d+)\s+0000:%s$" % pci, result[0], flags=re.MULTILINE)
            if not m:
                print >>sys.stderr, "Device %s already detached" % pci
                return
            vmdev = m.group(1)
            try:
                self.run_service("qubes.DetachPciDevice",
                        user="root", input="00:%s" % vmdev)
                subprocess.check_call(['sudo', 'xl', 'pci-detach', str(self.xid), pci])
            except Exception as e:
                print >>sys.stderr, "Failed to detach PCI device on the fly " \
                    "(%s), changes will be seen after VM restart" % str(e)

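    # Both pci_add() and pci_remove() expect the short BDF form that appears
    # under /sys/bus/pci/devices/0000:<BDF>. An illustrative call (device
    # address invented):
    #   vm.pci_add("00:1a.0")
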
    def run(self, command, user = None, verbose = True, autostart = False,
            notify_function = None,
            passio = False, passio_popen = False, passio_stderr=False,
            ignore_stderr=False, localcmd = None, wait = False, gui = True,
            filter_esc = False):
        """command should be in form 'cmdline'
            When passio_popen=True, returns a Popen object with stdout connected to a pipe.
            When additionally passio_stderr=True, stderr is also connected to a pipe.
            When ignore_stderr=True, stderr is connected to /dev/null.
        """

        if user is None:
            user = self.default_user
        null = None
        if not self.is_running() and not self.is_paused():
            if not autostart:
                raise QubesException("VM not running")

            try:
                if notify_function is not None:
                    notify_function ("info", "Starting the '{0}' VM...".format(self.name))
                elif verbose:
                    print >> sys.stderr, "Starting the VM '{0}'...".format(self.name)
                self.start(verbose=verbose, start_guid = gui, notify_function=notify_function)

            except (IOError, OSError, QubesException) as err:
                raise QubesException("Error while starting the '{0}' VM: {1}".format(self.name, err))
            except (MemoryError) as err:
                raise QubesException("Not enough memory to start '{0}' VM! "
                                     "Close one or more running VMs and try "
                                     "again.".format(self.name))

        if self.is_paused():
            raise QubesException("VM is paused")
        if not self.is_qrexec_running():
            raise QubesException(
                "Domain '{}': qrexec not connected.".format(self.name))

        if gui and os.getenv("DISPLAY") is not None and not self.is_guid_running():
            self.start_guid(verbose = verbose, notify_function = notify_function)

        args = [system_path["qrexec_client_path"], "-d", str(self.xid), "%s:%s" % (user, command)]
        if localcmd is not None:
            args += [ "-l", localcmd]
        if filter_esc:
            args += ["-t"]
        if os.isatty(sys.stderr.fileno()):
            args += ["-T"]
        if passio:
            os.execv(system_path["qrexec_client_path"], args)
            exit(1)

        call_kwargs = {}
        if ignore_stderr:
            null = open("/dev/null", "w")
            call_kwargs['stderr'] = null

        if passio_popen:
            popen_kwargs = {'stdout': subprocess.PIPE}
            popen_kwargs['stdin'] = subprocess.PIPE
            if passio_stderr:
                popen_kwargs['stderr'] = subprocess.PIPE
            else:
                popen_kwargs['stderr'] = call_kwargs.get('stderr', None)
            p = subprocess.Popen (args, **popen_kwargs)
            if null:
                null.close()
            return p
        if not wait:
            args += ["-e"]
        retcode = subprocess.call(args, **call_kwargs)
        if null:
            null.close()
        return retcode

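    # Illustrative usage sketch (variable names assumed, not from the original
    # file): run a command in the VM via qrexec-client as its default user and
    # read the output through the returned Popen object:
    #   p = vm.run("ls -l /home/user", passio_popen=True)
    #   (stdout, _) = p.communicate()
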
    def run_service(self, service, source="dom0", user=None,
                    passio_popen = False, input=None):
        if input and passio_popen:
            raise ValueError("'input' and 'passio_popen' cannot be used "
                             "together")
        if input:
            return self.run("QUBESRPC %s %s" % (service, source),
                            localcmd="echo %s" % input, user=user, wait=True)
        else:
            return self.run("QUBESRPC %s %s" % (service, source),
                            passio_popen=passio_popen, user=user, wait=True)

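    # Example of the 'input' form, as used by pci_remove() above: invoke a
    # qrexec service and feed a single line to the VM-side handler:
    #   self.run_service("qubes.DetachPciDevice", user="root", input="00:6.0")
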
    def attach_network(self, verbose = False, wait = True, netvm = None):
        if dry_run:
            return

        if not self.is_running():
            raise QubesException ("VM not running!")

        if netvm is None:
            netvm = self.netvm

        if netvm is None:
            raise QubesException ("NetVM not set!")

        if netvm.qid != 0:
            if not netvm.is_running():
                if verbose:
                    print >> sys.stderr, "--> Starting NetVM {0}...".format(netvm.name)
                netvm.start()

        self.libvirt_domain.attachDevice(
            self._format_net_dev(self.ip, self.mac, self.netvm.name))

    def detach_network(self, verbose = False, netvm = None):
        if dry_run:
            return

        if not self.is_running():
            raise QubesException ("VM not running!")

        if netvm is None:
            netvm = self.netvm

        if netvm is None:
            raise QubesException ("NetVM not set!")

        self.libvirt_domain.detachDevice( self._format_net_dev(self.ip,
            self.mac, self.netvm.name))

    def wait_for_session(self, notify_function = None):
        #self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True)

        # Note: user root is redefined to SYSTEM in the Windows agent code
        p = self.run('QUBESRPC qubes.WaitForSession none', user="root", passio_popen=True, gui=False, wait=True)
        p.communicate(input=self.default_user)

    def start_guid(self, verbose = True, notify_function = None,
            extra_guid_args=[], before_qrexec=False):
        if verbose:
            print >> sys.stderr, "--> Starting Qubes GUId..."

        guid_cmd = [system_path["qubes_guid_path"],
            "-d", str(self.xid), "-N", self.name,
            "-c", self.label.color,
            "-i", self.label.icon_path,
            "-l", str(self.label.index)]
        guid_cmd += extra_guid_args
        if self.debug:
            guid_cmd += ['-v', '-v']
        elif not verbose:
            guid_cmd += ['-q']
        retcode = subprocess.call (guid_cmd)
        if (retcode != 0) :
            raise QubesException("Cannot start qubes-guid!")

        if verbose:
            print >> sys.stderr, "--> Sending monitor layout..."

        try:
            subprocess.call([system_path["monitor_layout_notify_cmd"], self.name])
        except Exception as e:
            print >>sys.stderr, "ERROR: %s" % e

        if verbose:
            print >> sys.stderr, "--> Waiting for qubes-session..."

        self.wait_for_session(notify_function)

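    # As passed above, qubes-guid gets the domain xid (-d), the VM name (-N),
    # the label colour, icon path and index (-c/-i/-l), plus '-v -v' when the
    # VM is in debug mode and '-q' when not verbose.
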
    def start_qrexec_daemon(self, verbose = False, notify_function = None):
        if verbose:
            print >> sys.stderr, "--> Starting the qrexec daemon..."
        qrexec_args = [str(self.xid), self.name, self.default_user]
        if not verbose:
            qrexec_args.insert(0, "-q")
        qrexec_env = os.environ
        qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout)
        retcode = subprocess.call ([system_path["qrexec_daemon_path"]] +
                                   qrexec_args, env=qrexec_env)
        if (retcode != 0) :
            raise OSError ("Cannot execute qrexec-daemon!")

    def start(self, verbose = False, preparing_dvm = False, start_guid = True,
            notify_function = None, mem_required = None):
        if dry_run:
            return

        # Intentionally not using is_running(): also rule out the "Paused",
        # "Crashed" and "Halting" states
        if self.get_power_state() != "Halted":
            raise QubesException ("VM is already running!")

        self.verify_files()

        if self.netvm is not None:
            if self.netvm.qid != 0:
                if not self.netvm.is_running():
                    if verbose:
                        print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm.name)
                    self.netvm.start(verbose = verbose, start_guid = start_guid, notify_function = notify_function)

        self.reset_volatile_storage(verbose=verbose)
        if verbose:
            print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type)

        self._update_libvirt_domain()

        if mem_required is None:
            mem_required = int(self.memory) * 1024 * 1024
        qmemman_client = QMemmanClient()
        try:
            got_memory = qmemman_client.request_memory(mem_required)
        except IOError as e:
            raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
        if not got_memory:
            qmemman_client.close()
            raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)

        # Bind pci devices to pciback driver
        for pci in self.pcidevs:
            nd = vmm.libvirt_conn.nodeDeviceLookupByName('pci_0000_' + pci.replace(':','_').replace('.','_'))
            nd.dettach()

        self.libvirt_domain.createWithFlags(libvirt.VIR_DOMAIN_START_PAUSED)

        xid = self.xid

        if preparing_dvm:
            self.services['qubes-dvm'] = True
        if verbose:
            print >> sys.stderr, "--> Setting Xen Store info for the VM..."
        self.create_xenstore_entries(xid)

        qvm_collection = QubesVmCollection()
        qvm_collection.lock_db_for_reading()
        qvm_collection.load()
        qvm_collection.unlock_db()

        if verbose:
            print >> sys.stderr, "--> Updating firewall rules..."
        for vm in qvm_collection.values():
            if vm.is_proxyvm() and vm.is_running():
                vm.write_iptables_xenstore_entry()

        # fire hooks
        for hook in self.hooks_start:
            hook(self, verbose = verbose, preparing_dvm = preparing_dvm,
                 start_guid = start_guid, notify_function = notify_function)

        if verbose:
            print >> sys.stderr, "--> Starting the VM..."
        self.libvirt_domain.resume()

        # close() is not really needed, because the descriptor is close-on-exec
        # anyway; the reason to postpone close() is that possibly xl is not done
        # constructing the domain after its main process exits, so we close()
        # when we know the domain is up; the successful unpause is some
        # indicator of it
        qmemman_client.close()

        if self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'):
            self.start_guid(verbose=verbose, notify_function=notify_function, before_qrexec=True)

        if not preparing_dvm:
            self.start_qrexec_daemon(verbose=verbose,notify_function=notify_function)

        if start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'):
            self.start_guid(verbose=verbose, notify_function=notify_function)

        return xid

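    # Start sequence in brief: verify files, make sure the NetVM is up, reset
    # volatile storage, request memory from qmemman, create the libvirt domain
    # paused, fill the Xen Store and firewall entries, then resume it and
    # start qubes-guid / qrexec-daemon as needed.
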
    def _cleanup_zombie_domains(self):
        """
        This function is a workaround for broken libxl (which leaves a not
        fully created domain on failure) and for vchan behaviour on domain
        crash.
        @return: None
        """
        xc = self.get_xc_dominfo()
        if xc and xc['dying'] == 1:
            # GUID still running?
            guid_pidfile = '/var/run/qubes/guid-running.%d' % xc['domid']
            if os.path.exists(guid_pidfile):
                guid_pid = open(guid_pidfile).read().strip()
                os.kill(int(guid_pid), 15)
            # qrexec still running?
            if self.is_qrexec_running():
                #TODO: kill qrexec daemon
                pass

    def shutdown(self, force=False, xid = None):
        if dry_run:
            return

        if not self.is_running():
            raise QubesException ("VM already stopped!")

        self.libvirt_domain.shutdown()

    def force_shutdown(self, xid = None):
        if dry_run:
            return

        if not self.is_running() and not self.is_paused():
            raise QubesException ("VM already stopped!")

        self.libvirt_domain.destroy()

    def suspend(self):
        # TODO!!!
        if dry_run:
            return

        if not self.is_running() and not self.is_paused():
            raise QubesException ("VM already stopped!")

        if len (self.pcidevs) > 0:
            raise NotImplementedError
        else:
            self.pause()

    def resume(self):
        # TODO!!!
        if dry_run:
            return

        if self.get_power_state() == "Suspended":
            raise NotImplementedError
        else:
            self.unpause()

    def pause(self):
        if dry_run:
            return

        self.libvirt_domain.suspend()

    def unpause(self):
        if dry_run:
            return

        self.libvirt_domain.resume()

    def get_xml_attrs(self):
        attrs = {}
        attrs_config = self.get_attrs_config()
        for attr in attrs_config:
            attr_config = attrs_config[attr]
            if 'save' in attr_config:
                if 'save_skip' in attr_config:
                    if callable(attr_config['save_skip']):
                        if attr_config['save_skip']():
                            continue
                    elif eval(attr_config['save_skip']):
                        continue
                if callable(attr_config['save']):
                    value = attr_config['save']()
                else:
                    value = eval(attr_config['save'])
                if 'save_attr' in attr_config:
                    attrs[attr_config['save_attr']] = value
                else:
                    attrs[attr] = value
        return attrs

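    # As the code above shows, an attribute is serialized to qubes.xml only
    # when its config carries a 'save' entry; both 'save' and 'save_skip' may
    # be either a callable or a string passed to eval(), and 'save_attr'
    # optionally renames the attribute in the resulting XML element.
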
    def create_xml_element(self):

        attrs = self.get_xml_attrs()
        element = lxml.etree.Element(
            # Compatibility hack (Qubes*VM in type vs Qubes*Vm in XML)...
            "Qubes" + self.type.replace("VM", "Vm"),
            **attrs)
        return element

register_qubes_vm_class(QubesVm)