From ecd8837113e2003705904bc0b48d171a123bafe4 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sat, 16 Mar 2013 02:39:30 +0100
Subject: [PATCH] Split core qubes.py into modules

---
 dom0/core-modules/000QubesVm.py          | 1516 ++++++++++++++
 dom0/core-modules/003QubesTemplateVm.py  |  212 ++
 dom0/core-modules/005QubesNetVm.py       |  201 ++
 dom0/core-modules/006QubesDom0NetVm.py   |   89 +
 dom0/core-modules/006QubesProxyVm.py     |  201 ++
 dom0/core-modules/01QubesAppVm.py        |   59 +
 dom0/core-modules/01QubesDisposableVm.py |   97 +
 dom0/core-modules/01QubesHVm.py          |  302 +++
 dom0/core-modules/README.txt             |    5 +
 dom0/core-modules/__init__.py            |    0
 dom0/core/qubes.py                       | 2439 +---------------------
 rpm_spec/core-dom0.spec                  |   12 +-
 12 files changed, 2703 insertions(+), 2430 deletions(-)
 create mode 100644 dom0/core-modules/000QubesVm.py
 create mode 100644 dom0/core-modules/003QubesTemplateVm.py
 create mode 100644 dom0/core-modules/005QubesNetVm.py
 create mode 100644 dom0/core-modules/006QubesDom0NetVm.py
 create mode 100644 dom0/core-modules/006QubesProxyVm.py
 create mode 100644 dom0/core-modules/01QubesAppVm.py
 create mode 100644 dom0/core-modules/01QubesDisposableVm.py
 create mode 100644 dom0/core-modules/01QubesHVm.py
 create mode 100644 dom0/core-modules/README.txt
 create mode 100644 dom0/core-modules/__init__.py

diff --git a/dom0/core-modules/000QubesVm.py b/dom0/core-modules/000QubesVm.py
new file mode 100644
index 00000000..190d3df3
--- /dev/null
+++ b/dom0/core-modules/000QubesVm.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python2
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2010 Joanna Rutkowska
+# Copyright (C) 2013 Marek Marczykowski
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+import datetime
+import fcntl
+import lxml.etree
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import sys
+import time
+import uuid
+import xml.parsers.expat
+
+from qubes.qubes import xs,dry_run,xc,xl_ctx
+from qubes.qubes import register_qubes_vm_class
+from qubes.qubes import QubesVmCollection,QubesException,QubesHost,QubesVmLabels
+from qubes.qubes import defaults,system_path,vm_files
+from qubes.qubes import qubes_max_qid
+from qubes.qmemman_client import QMemmanClient
+
+class QubesVm(object):
+    """
+    A representation of one Qubes VM
+    Only persistent information are stored here, while all the runtime
+    information, e.g. Xen dom id, etc, are to be retrieved via Xen API
+    Note that qid is not the same as Xen's domid!
+ """ + + # In which order load this VM type from qubes.xml + load_order = 100 + + def _get_attrs_config(self): + """ Object attributes for serialization/deserialization + inner dict keys: + - order: initialization order (to keep dependency intact) + attrs without order will be evaluated at the end + - default: default value used when attr not given to object constructor + - attr: set value to this attribute instead of parameter name + - eval: assign result of this expression instead of value directly; + local variable 'value' contains attribute value (or default if it was not given) + - save: use evaluation result as value for XML serialization; only attrs with 'save' key will be saved in XML + - save_skip: if present and evaluates to true, attr will be omitted in XML + - save_attr: save to this XML attribute instead of parameter name + """ + + attrs = { + # __qid cannot be accessed by setattr, so must be set manually in __init__ + "qid": { "attr": "_qid", "order": 0 }, + "name": { "order": 1 }, + "dir_path": { "default": None, "order": 2 }, + "conf_file": { "eval": 'self.absolute_path(value, self.name + ".conf")', 'order': 3 }, + ### order >= 10: have base attrs set + "root_img": { "eval": 'self.absolute_path(value, vm_files["root_img"])', 'order': 10 }, + "private_img": { "eval": 'self.absolute_path(value, vm_files["private_img"])', 'order': 10 }, + "volatile_img": { "eval": 'self.absolute_path(value, vm_files["volatile_img"])', 'order': 10 }, + "firewall_conf": { "eval": 'self.absolute_path(value, vm_files["firewall_conf"])', 'order': 10 }, + "installed_by_rpm": { "default": False, 'order': 10 }, + "template": { "default": None, 'order': 10 }, + ### order >= 20: have template set + "uses_default_netvm": { "default": True, 'order': 20 }, + "netvm": { "default": None, "attr": "_netvm", 'order': 20 }, + "label": { "attr": "_label", "default": defaults["appvm_label"], 'order': 20, + 'xml_deserialize': lambda _x: QubesVmLabels[_x] }, + "memory": { "default": defaults["memory"], 'order': 20, "eval": "int(value)" }, + "maxmem": { "default": None, 'order': 25, "eval": "int(value) if value else None" }, + "pcidevs": { "default": '[]', 'order': 25, "eval": \ + '[] if value in ["none", None] else eval(value) if value.find("[") >= 0 else eval("[" + value + "]")' }, + # Internal VM (not shown in qubes-manager, doesn't create appmenus entries + "internal": { "default": False }, + "vcpus": { "default": None }, + "uses_default_kernel": { "default": True, 'order': 30 }, + "uses_default_kernelopts": { "default": True, 'order': 30 }, + "kernel": { "default": None, 'order': 31, + 'eval': 'collection.get_default_kernel() if self.uses_default_kernel else value' }, + "kernelopts": { "default": "", 'order': 31, "eval": \ + 'value if not self.uses_default_kernelopts else defaults["kernelopts_pcidevs"] if len(self.pcidevs) > 0 else defaults["kernelopts"]' }, + "mac": { "attr": "_mac", "default": None }, + "include_in_backups": { "default": True }, + "services": { "default": {}, "eval": "eval(str(value))" }, + "debug": { "default": False }, + "default_user": { "default": "user" }, + "qrexec_timeout": { "default": 60, "eval": "int(value)" }, + ##### Internal attributes - will be overriden in __init__ regardless of args + "appmenus_templates_dir": { "eval": \ + 'os.path.join(self.dir_path, vm_files["appmenus_templates_subdir"]) if self.updateable else ' + \ + 'self.template.appmenus_templates_dir if self.template is not None else None' }, + "config_file_template": { "eval": 'system_path["config_template_pv"]' }, + 
"icon_path": { "eval": 'os.path.join(self.dir_path, "/icon.png") if self.dir_path is not None else None' }, + # used to suppress side effects of clone_attrs + "_do_not_reset_firewall": { "eval": 'False' }, + "kernels_dir": { 'eval': 'os.path.join(system_path["qubes_kernels_base_dir"], self.kernel) if self.kernel is not None else ' + \ + # for backward compatibility (or another rare case): kernel=None -> kernel in VM dir + 'os.path.join(self.dir_path, vm_files["kernels_subdir"])' }, + "_start_guid_first": { 'eval': 'False' }, + } + + ### Mark attrs for XML inclusion + # Simple string attrs + for prop in ['qid', 'name', 'dir_path', 'memory', 'maxmem', 'pcidevs', 'vcpus', 'internal',\ + 'uses_default_kernel', 'kernel', 'uses_default_kernelopts',\ + 'kernelopts', 'services', 'installed_by_rpm',\ + 'uses_default_netvm', 'include_in_backups', 'debug',\ + 'default_user', 'qrexec_timeout' ]: + attrs[prop]['save'] = 'str(self.%s)' % prop + # Simple paths + for prop in ['conf_file', 'root_img', 'volatile_img', 'private_img']: + attrs[prop]['save'] = 'self.relative_path(self.%s)' % prop + attrs[prop]['save_skip'] = 'self.%s is None' % prop + + attrs['mac']['save'] = 'str(self._mac)' + attrs['mac']['save_skip'] = 'self._mac is None' + + attrs['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"' + attrs['netvm']['save_attr'] = "netvm_qid" + attrs['template']['save'] = 'str(self.template.qid) if self.template else "none"' + attrs['template']['save_attr'] = "template_qid" + attrs['label']['save'] = 'self.label.name' + + return attrs + + def __basic_parse_xml_attr(self, value): + if value is None: + return None + if value.lower() == "none": + return None + if value.lower() == "true": + return True + if value.lower() == "false": + return False + if value.isdigit(): + return int(value) + return value + + def __init__(self, **kwargs): + + collection = None + if 'collection' in kwargs: + collection = kwargs['collection'] + else: + raise ValueError("No collection given to QubesVM constructor") + + # Special case for template b/c it is given in "template_qid" property + if "xml_element" in kwargs and kwargs["xml_element"].get("template_qid"): + template_qid = kwargs["xml_element"].get("template_qid") + if template_qid.lower() != "none": + if int(template_qid) in collection: + kwargs["template"] = collection[int(template_qid)] + else: + raise ValueError("Unknown template with QID %s" % template_qid) + attrs = self._get_attrs_config() + for attr_name in sorted(attrs, key=lambda _x: attrs[_x]['order'] if 'order' in attrs[_x] else 1000): + attr_config = attrs[attr_name] + attr = attr_name + if 'attr' in attr_config: + attr = attr_config['attr'] + value = None + if attr_name in kwargs: + value = kwargs[attr_name] + elif 'xml_element' in kwargs and kwargs['xml_element'].get(attr_name) is not None: + if 'xml_deserialize' in attr_config and callable(attr_config['xml_deserialize']): + value = attr_config['xml_deserialize'](kwargs['xml_element'].get(attr_name)) + else: + value = self.__basic_parse_xml_attr(kwargs['xml_element'].get(attr_name)) + else: + if 'default' in attr_config: + value = attr_config['default'] + if 'eval' in attr_config: + setattr(self, attr, eval(attr_config['eval'])) + else: + #print "setting %s to %s" % (attr, value) + setattr(self, attr, value) + + #Init private attrs + self.__qid = self._qid + + assert self.__qid < qubes_max_qid, "VM id out of bounds!" 
+ assert self.name is not None + + if not self.verify_name(self.name): + raise QubesException("Invalid characters in VM name") + + if self.netvm is not None: + self.netvm.connected_vms[self.qid] = self + + # Not in generic way to not create QubesHost() to frequently + if self.maxmem is None: + qubes_host = QubesHost() + total_mem_mb = qubes_host.memory_total/1024 + self.maxmem = total_mem_mb/2 + + # By default allow use all VCPUs + if self.vcpus is None: + qubes_host = QubesHost() + self.vcpus = qubes_host.no_cpus + + # Always set if meminfo-writer should be active or not + if 'meminfo-writer' not in self.services: + self.services['meminfo-writer'] = not (len(self.pcidevs) > 0) + + # Additionally force meminfo-writer disabled when VM have PCI devices + if len(self.pcidevs) > 0: + self.services['meminfo-writer'] = False + + # Some additional checks for template based VM + if self.template is not None: + if not self.template.is_template(): + print >> sys.stderr, "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\ + format(self.template.qid) + return False + self.template.appvms[self.qid] = self + else: + assert self.root_img is not None, "Missing root_img for standalone VM!" + + self.xid = -1 + self.xid = self.get_xid() + + def absolute_path(self, arg, default): + if arg is not None and os.path.isabs(arg): + return arg + else: + return os.path.join(self.dir_path, (arg if arg is not None else default)) + + def relative_path(self, arg): + return arg.replace(self.dir_path + '/', '') + + @property + def qid(self): + return self.__qid + + @property + def label(self): + return self._label + + @label.setter + def label(self, new_label): + self._label = new_label + if self.icon_path: + try: + os.remove(self.icon_path) + except: + pass + os.symlink (new_label.icon_path, self.icon_path) + subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate']) + + @property + def netvm(self): + return self._netvm + + # Don't know how properly call setter from base class, so workaround it... 
+ @netvm.setter + def netvm(self, new_netvm): + self._set_netvm(new_netvm) + + def _set_netvm(self, new_netvm): + if self.is_running() and new_netvm is not None and not new_netvm.is_running(): + raise QubesException("Cannot dynamically attach to stopped NetVM") + if self.netvm is not None: + self.netvm.connected_vms.pop(self.qid) + if self.is_running(): + subprocess.call(["xl", "network-detach", self.name, "0"], stderr=subprocess.PIPE) + if hasattr(self.netvm, 'post_vm_net_detach'): + self.netvm.post_vm_net_detach(self) + + if new_netvm is None: + if not self._do_not_reset_firewall: + # Set also firewall to block all traffic as discussed in #370 + if os.path.exists(self.firewall_conf): + shutil.copy(self.firewall_conf, os.path.join(system_path["qubes_base_dir"], + "backup", "%s-firewall-%s.xml" % (self.name, + time.strftime('%Y-%m-%d-%H:%M:%S')))) + self.write_firewall_conf({'allow': False, 'allowDns': False, + 'allowIcmp': False, 'allowYumProxy': False, 'rules': []}) + else: + new_netvm.connected_vms[self.qid]=self + + self._netvm = new_netvm + + if new_netvm is None: + return + + if self.is_running(): + # refresh IP, DNS etc + self.create_xenstore_entries() + self.attach_network() + if hasattr(self.netvm, 'post_vm_net_attach'): + self.netvm.post_vm_net_attach(self) + + @property + def ip(self): + if self.netvm is not None: + return self.netvm.get_ip_for_vm(self.qid) + else: + return None + + @property + def netmask(self): + if self.netvm is not None: + return self.netvm.netmask + else: + return None + + @property + def gateway(self): + # This is gateway IP for _other_ VMs, so make sense only in NetVMs + return None + + @property + def secondary_dns(self): + if self.netvm is not None: + return self.netvm.secondary_dns + else: + return None + + @property + def vif(self): + if self.xid < 0: + return None + if self.netvm is None: + return None + return "vif{0}.+".format(self.xid) + + @property + def mac(self): + if self._mac is not None: + return self._mac + else: + return "00:16:3E:5E:6C:{qid:02X}".format(qid=self.qid) + + @mac.setter + def mac(self, new_mac): + self._mac = new_mac + + @property + def updateable(self): + return self.template is None + + # Leaved for compatibility + def is_updateable(self): + return self.updateable + + def is_networked(self): + if self.is_netvm(): + return True + + if self.netvm is not None: + return True + else: + return False + + def verify_name(self, name): + return re.match(r"^[a-zA-Z0-9_-]*$", name) is not None + + def pre_rename(self, new_name): + self.remove_appmenus() + + def set_name(self, name): + if self.is_running(): + raise QubesException("Cannot change name of running VM!") + + if not self.verify_name(name): + raise QubesException("Invalid characters in VM name") + + self.pre_rename(name) + + new_conf = os.path.join(self.dir_path, name) + if os.path.exists(self.conf_file): + os.rename(self.conf_file, new_conf) + old_dirpath = self.dir_path + new_dirpath = os.path.join(os.path.dirname(self.dir_path), name) + os.rename(old_dirpath, new_dirpath) + self.dir_path = new_dirpath + old_name = self.name + self.name = name + if self.private_img is not None: + self.private_img = self.private_img.replace(old_dirpath, new_dirpath) + if self.root_img is not None: + self.root_img = self.root_img.replace(old_dirpath, new_dirpath) + if self.volatile_img is not None: + self.volatile_img = self.volatile_img.replace(old_dirpath, new_dirpath) + if self.conf_file is not None: + self.conf_file = new_conf.replace(old_dirpath, new_dirpath) + if 
self.appmenus_templates_dir is not None: + self.appmenus_templates_dir = self.appmenus_templates_dir.replace(old_dirpath, new_dirpath) + if self.icon_path is not None: + self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath) + if hasattr(self, 'kernels_dir') and self.kernels_dir is not None: + self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath) + + self.post_rename(old_name) + + def post_rename(self, old_name): + self.create_appmenus(verbose=False) + + def is_template(self): + return False + + def is_appvm(self): + return False + + def is_netvm(self): + return False + + def is_proxyvm(self): + return False + + def is_disposablevm(self): + return False + + def get_xl_dominfo(self): + if dry_run: + return + + domains = xl_ctx.list_domains() + for dominfo in domains: + domname = xl_ctx.domid_to_name(dominfo.domid) + if domname == self.name: + return dominfo + return None + + def get_xc_dominfo(self): + if dry_run: + return + + start_xid = self.xid + if start_xid < 0: + start_xid = 0 + try: + domains = xc.domain_getinfo(start_xid, qubes_max_qid) + except xen.lowlevel.xc.Error: + return None + + for dominfo in domains: + domname = xl_ctx.domid_to_name(dominfo['domid']) + if domname == self.name: + return dominfo + return None + + def get_xid(self): + if dry_run: + return 666 + + dominfo = self.get_xc_dominfo() + if dominfo: + self.xid = dominfo['domid'] + return self.xid + else: + return -1 + + def get_uuid(self): + + dominfo = self.get_xl_dominfo() + if dominfo: + vmuuid = uuid.UUID(''.join('%02x' % b for b in dominfo.uuid)) + return vmuuid + else: + return None + + def get_mem(self): + if dry_run: + return 666 + + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['mem_kb'] + else: + return 0 + + def get_mem_static_max(self): + if dry_run: + return 666 + + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['maxmem_kb'] + else: + return 0 + + def get_per_cpu_time(self): + if dry_run: + import random + return random.random() * 100 + + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['cpu_time']/dominfo['online_vcpus'] + else: + return 0 + + def get_disk_utilization_root_img(self): + if not os.path.exists(self.root_img): + return 0 + + return self.get_disk_usage(self.root_img) + + def get_root_img_sz(self): + if not os.path.exists(self.root_img): + return 0 + + return os.path.getsize(self.root_img) + + def get_power_state(self): + if dry_run: + return "NA" + + dominfo = self.get_xc_dominfo() + if dominfo: + if dominfo['paused']: + return "Paused" + elif dominfo['crashed']: + return "Crashed" + elif dominfo['shutdown']: + return "Halting" + elif dominfo['dying']: + return "Dying" + else: + if not self.is_fully_usable(): + return "Transient" + else: + return "Running" + else: + return 'Halted' + + return "NA" + + def is_guid_running(self): + xid = self.get_xid() + if xid < 0: + return False + if not os.path.exists('/var/run/qubes/guid-running.%d' % xid): + return False + return True + + def is_fully_usable(self): + # Running gui-daemon implies also VM running + if not self.is_guid_running(): + return False + # currently qrexec daemon doesn't cleanup socket in /var/run/qubes, so + # it can be left from some other VM + return True + + def is_running(self): + # in terms of Xen and internal logic - starting VM is running + if self.get_power_state() in ["Running", "Transient", "Halting"]: + return True + else: + return False + + def is_paused(self): + if self.get_power_state() == "Paused": + return True + else: + return False + + def 
get_start_time(self): + if not self.is_running(): + return None + + dominfo = self.get_xl_dominfo() + + uuid = self.get_uuid() + + start_time = xs.read('', "/vm/%s/start_time" % str(uuid)) + if start_time != '': + return datetime.fromtimestamp(float(start_time)) + else: + return None + + def is_outdated(self): + # Makes sense only on VM based on template + if self.template is None: + return False + + if not self.is_running(): + return False + + rootimg_inode = os.stat(self.template.root_img) + try: + rootcow_inode = os.stat(self.template.rootcow_img) + except OSError: + # The only case when rootcow_img doesn't exists is in the middle of + # commit_changes, so VM is outdated right now + return True + + current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format( + rootimg_inode[2], rootimg_inode[1], + rootcow_inode[2], rootcow_inode[1]) + + # 51712 (0xCA00) is xvda + # backend node name not available through xenapi :( + used_dmdev = xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())) + + return used_dmdev != current_dmdev + + def get_disk_usage(self, file_or_dir): + if not os.path.exists(file_or_dir): + return 0 + p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir], + stdout=subprocess.PIPE) + result = p.communicate() + m = re.match(r"^(\d+)\s.*", result[0]) + sz = int(m.group(1)) if m is not None else 0 + return sz + + def get_disk_utilization(self): + return self.get_disk_usage(self.dir_path) + + def get_disk_utilization_private_img(self): + return self.get_disk_usage(self.private_img) + + def get_private_img_sz(self): + if not os.path.exists(self.private_img): + return 0 + + return os.path.getsize(self.private_img) + + def resize_private_img(self, size): + assert size >= self.get_private_img_sz(), "Cannot shrink private.img" + + f_private = open (self.private_img, "a+b") + f_private.truncate (size) + f_private.close () + + retcode = 0 + if self.is_running(): + # find loop device + p = subprocess.Popen (["sudo", "losetup", "--associated", self.private_img], + stdout=subprocess.PIPE) + result = p.communicate() + m = re.match(r"^(/dev/loop\d+):\s", result[0]) + if m is None: + raise QubesException("ERROR: Cannot find loop device!") + + loop_dev = m.group(1) + + # resize loop device + subprocess.check_call(["sudo", "losetup", "--set-capacity", loop_dev]) + + retcode = self.run("while [ \"`blockdev --getsize64 /dev/xvdb`\" -lt {0} ]; do ".format(size) + + "head /dev/xvdb > /dev/null; sleep 0.2; done; resize2fs /dev/xvdb", user="root", wait=True) + else: + retcode = subprocess.check_call(["sudo", "resize2fs", "-f", self.private_img]) + if retcode != 0: + raise QubesException("resize2fs failed") + + + # FIXME: should be outside of QubesVM? 
+ def get_timezone(self): + # fc18 + if os.path.islink('/etc/localtime'): + return '/'.join(os.readlink('/etc/localtime').split('/')[-2:]) + # <=fc17 + elif os.path.exists('/etc/sysconfig/clock'): + clock_config = open('/etc/sysconfig/clock', "r") + clock_config_lines = clock_config.readlines() + clock_config.close() + zone_re = re.compile(r'^ZONE="(.*)"') + for line in clock_config_lines: + line_match = zone_re.match(line) + if line_match: + return line_match.group(1) + + return None + + def cleanup_vifs(self): + """ + Xend does not remove vif when backend domain is down, so we must do it + manually + """ + + if not self.is_running(): + return + + dev_basepath = '/local/domain/%d/device/vif' % self.xid + for dev in xs.ls('', dev_basepath): + # check if backend domain is alive + backend_xid = int(xs.read('', '%s/%s/backend-id' % (dev_basepath, dev))) + if xl_ctx.domid_to_name(backend_xid) is not None: + # check if device is still active + if xs.read('', '%s/%s/state' % (dev_basepath, dev)) == '4': + continue + # remove dead device + xs.rm('', '%s/%s' % (dev_basepath, dev)) + + def create_xenstore_entries(self, xid = None): + if dry_run: + return + + if xid is None: + xid = self.xid + + domain_path = xs.get_domain_path(xid) + + # Set Xen Store entires with VM networking info: + + xs.write('', "{0}/qubes-vm-type".format(domain_path), + self.type) + xs.write('', "{0}/qubes-vm-updateable".format(domain_path), + str(self.updateable)) + + if self.is_netvm(): + xs.write('', + "{0}/qubes-netvm-gateway".format(domain_path), + self.gateway) + xs.write('', + "{0}/qubes-netvm-secondary-dns".format(domain_path), + self.secondary_dns) + xs.write('', + "{0}/qubes-netvm-netmask".format(domain_path), + self.netmask) + xs.write('', + "{0}/qubes-netvm-network".format(domain_path), + self.network) + + if self.netvm is not None: + xs.write('', "{0}/qubes-ip".format(domain_path), self.ip) + xs.write('', "{0}/qubes-netmask".format(domain_path), + self.netvm.netmask) + xs.write('', "{0}/qubes-gateway".format(domain_path), + self.netvm.gateway) + xs.write('', + "{0}/qubes-secondary-dns".format(domain_path), + self.netvm.secondary_dns) + + tzname = self.get_timezone() + if tzname: + xs.write('', + "{0}/qubes-timezone".format(domain_path), + tzname) + + for srv in self.services.keys(): + # convert True/False to "1"/"0" + xs.write('', "{0}/qubes-service/{1}".format(domain_path, srv), + str(int(self.services[srv]))) + + xs.write('', + "{0}/qubes-block-devices".format(domain_path), + '') + + xs.write('', + "{0}/qubes-usb-devices".format(domain_path), + '') + + xs.write('', "{0}/qubes-debug-mode".format(domain_path), + str(int(self.debug))) + + # Fix permissions + xs.set_permissions('', '{0}/device'.format(domain_path), + [{ 'dom': xid }]) + xs.set_permissions('', '{0}/memory'.format(domain_path), + [{ 'dom': xid }]) + xs.set_permissions('', '{0}/qubes-block-devices'.format(domain_path), + [{ 'dom': xid }]) + xs.set_permissions('', '{0}/qubes-usb-devices'.format(domain_path), + [{ 'dom': xid }]) + + def get_rootdev(self, source_template=None): + if self.template: + return "'script:snapshot:{dir}/root.img:{dir}/root-cow.img,xvda,r',".format(dir=self.template.dir_path) + else: + return "'script:file:{dir}/root.img,xvda,w',".format(dir=self.dir_path) + + def get_config_params(self, source_template=None): + args = {} + args['name'] = self.name + if hasattr(self, 'kernels_dir'): + args['kerneldir'] = self.kernels_dir + args['vmdir'] = self.dir_path + args['pcidev'] = str(self.pcidevs).strip('[]') + args['mem'] = 
str(self.memory) + if self.maxmem < self.memory: + args['mem'] = str(self.maxmem) + args['maxmem'] = str(self.maxmem) + if 'meminfo-writer' in self.services and not self.services['meminfo-writer']: + # If dynamic memory management disabled, set maxmem=mem + args['maxmem'] = args['mem'] + args['vcpus'] = str(self.vcpus) + if self.netvm is not None: + args['ip'] = self.ip + args['mac'] = self.mac + args['gateway'] = self.netvm.gateway + args['dns1'] = self.netvm.gateway + args['dns2'] = self.secondary_dns + args['netmask'] = self.netmask + args['netdev'] = "'mac={mac},script=/etc/xen/scripts/vif-route-qubes,ip={ip}".format(ip=self.ip, mac=self.mac) + if self.netvm.qid != 0: + args['netdev'] += ",backend={0}".format(self.netvm.name) + args['netdev'] += "'" + args['disable_network'] = ''; + else: + args['ip'] = '' + args['mac'] = '' + args['gateway'] = '' + args['dns1'] = '' + args['dns2'] = '' + args['netmask'] = '' + args['netdev'] = '' + args['disable_network'] = '#'; + args['rootdev'] = self.get_rootdev(source_template=source_template) + args['privatedev'] = "'script:file:{dir}/private.img,xvdb,w',".format(dir=self.dir_path) + args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path) + if hasattr(self, 'kernel'): + modulesmode='r' + if self.kernel is None: + modulesmode='w' + args['otherdevs'] = "'script:file:{dir}/modules.img,xvdd,{mode}',".format(dir=self.kernels_dir, mode=modulesmode) + if hasattr(self, 'kernelopts'): + args['kernelopts'] = self.kernelopts + if self.debug: + print >> sys.stderr, "--> Debug mode: adding 'earlyprintk=xen' to kernel opts" + args['kernelopts'] += ' earlyprintk=xen' + + return args + + @property + def uses_custom_config(self): + return self.conf_file != self.absolute_path(self.name + ".conf", None) + + def create_config_file(self, file_path = None, source_template = None, prepare_dvm = False): + if file_path is None: + file_path = self.conf_file + if self.uses_custom_config: + return + if source_template is None: + source_template = self.template + + f_conf_template = open(self.config_file_template, 'r') + conf_template = f_conf_template.read() + f_conf_template.close() + + template_params = self.get_config_params(source_template) + if prepare_dvm: + template_params['name'] = '%NAME%' + template_params['privatedev'] = '' + template_params['netdev'] = re.sub(r"ip=[0-9.]*", "ip=%IP%", template_params['netdev']) + conf_appvm = open(file_path, "w") + + conf_appvm.write(conf_template.format(**template_params)) + conf_appvm.close() + + def create_on_disk(self, verbose, source_template = None): + if source_template is None: + source_template = self.template + assert source_template is not None + + if dry_run: + return + + if verbose: + print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) + os.mkdir (self.dir_path) + + if verbose: + print >> sys.stderr, "--> Creating the VM config file: {0}".format(self.conf_file) + + self.create_config_file(source_template = source_template) + + template_priv = source_template.private_img + if verbose: + print >> sys.stderr, "--> Copying the template's private image: {0}".\ + format(template_priv) + + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", template_priv, self.private_img]) + if retcode != 0: + raise IOError ("Error while copying {0} to {1}".\ + format(template_priv, self.private_img)) + + if os.path.exists(os.path.join(source_template.dir_path, '/vm-' + vm_files["whitelisted_appmenus"])): + if verbose: + print >> 
sys.stderr, "--> Creating default whitelisted apps list: {0}".\ + format(self.dir_path + '/' + vm_files["whitelisted_appmenus"]) + shutil.copy(os.path.join(source_template.dir_path, '/vm-' + vm_files["whitelisted_appmenus"]), + os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) + + if self.updateable: + template_root = source_template.root_img + if verbose: + print >> sys.stderr, "--> Copying the template's root image: {0}".\ + format(template_root) + + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", template_root, self.root_img]) + if retcode != 0: + raise IOError ("Error while copying {0} to {1}".\ + format(template_root, self.root_img)) + + kernels_dir = source_template.kernels_dir + if verbose: + print >> sys.stderr, "--> Copying the kernel (set kernel \"none\" to use it): {0}".\ + format(kernels_dir) + + os.mkdir (self.dir_path + '/kernels') + for f in ("vmlinuz", "initramfs", "modules.img"): + shutil.copy(os.path.join(kernels_dir, f), + os.path.join(self.dir_path, vm_files["kernels_subdir"], f)) + + if verbose: + print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\ + format(source_template.appmenus_templates_dir, self.appmenus_templates_dir) + shutil.copytree (source_template.appmenus_templates_dir, self.appmenus_templates_dir) + + # Create volatile.img + self.reset_volatile_storage(source_template = source_template, verbose=verbose) + + if verbose: + print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path) + os.symlink (self.label.icon_path, self.icon_path) + + def create_appmenus(self, verbose=False, source_template = None): + if source_template is None: + source_template = self.template + + vmtype = None + if self.is_netvm(): + vmtype = 'servicevms' + else: + vmtype = 'appvms' + + try: + if source_template is not None: + subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], source_template.appmenus_templates_dir, self.name, vmtype]) + elif self.appmenus_templates_dir is not None: + subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], self.appmenus_templates_dir, self.name, vmtype]) + else: + # Only add apps to menu + subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], "none", self.name, vmtype]) + except subprocess.CalledProcessError: + print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) + + def get_clone_attrs(self): + return ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \ + 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \ + '_mac', 'pcidevs', 'include_in_backups', '_label'] + + def clone_attrs(self, src_vm): + self._do_not_reset_firewall = True + for prop in self.get_clone_attrs(): + setattr(self, prop, getattr(src_vm, prop)) + self._do_not_reset_firewall = False + + def clone_disk_files(self, src_vm, verbose): + if dry_run: + return + + if src_vm.is_running(): + raise QubesException("Attempt to clone a running VM!") + + if verbose: + print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) + os.mkdir (self.dir_path) + + if src_vm.private_img is not None and self.private_img is not None: + if verbose: + print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\ + format(src_vm.private_img, self.private_img) + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", src_vm.private_img, self.private_img]) + if retcode 
!= 0: + raise IOError ("Error while copying {0} to {1}".\ + format(src_vm.private_img, self.private_img)) + + if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None: + if verbose: + print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\ + format(src_vm.root_img, self.root_img) + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", src_vm.root_img, self.root_img]) + if retcode != 0: + raise IOError ("Error while copying {0} to {1}".\ + format(src_vm.root_img, self.root_img)) + + if src_vm.updateable and src_vm.appmenus_templates_dir is not None and self.appmenus_templates_dir is not None: + if verbose: + print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\ + format(src_vm.appmenus_templates_dir, self.appmenus_templates_dir) + shutil.copytree (src_vm.appmenus_templates_dir, self.appmenus_templates_dir) + + if os.path.exists(os.path.join(src_vm.dir_path, vm_files["whitelisted_appmenus"])): + if verbose: + print >> sys.stderr, "--> Copying whitelisted apps list: {0}".\ + format(os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) + shutil.copy(os.path.join(src_vm.dir_path, vm_files["whitelisted_appmenus"]), + os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) + + if src_vm.icon_path is not None and self.icon_path is not None: + if os.path.exists (src_vm.dir_path): + if os.path.islink(src_vm.icon_path): + icon_path = os.readlink(src_vm.icon_path) + if verbose: + print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path) + os.symlink (icon_path, self.icon_path) + else: + if verbose: + print >> sys.stderr, "--> Copying icon: {0} -> {1}".format(src_vm.icon_path, self.icon_path) + shutil.copy(src_vm.icon_path, self.icon_path) + + # Create appmenus + self.create_appmenus(verbose=verbose) + + def remove_appmenus(self): + vmtype = None + if self.is_netvm(): + vmtype = 'servicevms' + else: + vmtype = 'appvms' + subprocess.check_call ([system_path["qubes_appmenu_remove_cmd"], self.name, vmtype]) + + def verify_files(self): + if dry_run: + return + + if not os.path.exists (self.dir_path): + raise QubesException ( + "VM directory doesn't exist: {0}".\ + format(self.dir_path)) + + if self.updateable and not os.path.exists (self.root_img): + raise QubesException ( + "VM root image file doesn't exist: {0}".\ + format(self.root_img)) + + if not os.path.exists (self.private_img): + raise QubesException ( + "VM private image file doesn't exist: {0}".\ + format(self.private_img)) + + if not os.path.exists (os.path.join(self.kernels_dir, 'vmlinuz')): + raise QubesException ( + "VM kernel does not exists: {0}".\ + format(os.path.join(self.kernels_dir, 'vmlinuz'))) + + if not os.path.exists (os.path.join(self.kernels_dir, 'initramfs')): + raise QubesException ( + "VM initramfs does not exists: {0}".\ + format(os.path.join(self.kernels_dir, 'initramfs'))) + + if not os.path.exists (os.path.join(self.kernels_dir, 'modules.img')): + raise QubesException ( + "VM kernel modules image does not exists: {0}".\ + format(os.path.join(self.kernels_dir, 'modules.img'))) + return True + + def reset_volatile_storage(self, source_template = None, verbose = False): + assert not self.is_running(), "Attempt to clean volatile image of running VM!" 
+ + if source_template is None: + source_template = self.template + + # Only makes sense on template based VM + if source_template is None: + # For StandaloneVM create it only if not already exists (eg after backup-restore) + if not os.path.exists(self.volatile_img): + if verbose: + print >> sys.stderr, "--> Creating volatile image: {0}...".format (self.volatile_img) + f_root = open (self.root_img, "r") + f_root.seek(0, os.SEEK_END) + root_size = f_root.tell() + f_root.close() + subprocess.check_call([system_path["prepare_volatile_img_cmd"], self.volatile_img, str(root_size / 1024 / 1024)]) + return + + if verbose: + print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img) + if dry_run: + return + if os.path.exists (self.volatile_img): + os.remove (self.volatile_img) + + retcode = subprocess.call (["tar", "xf", source_template.clean_volatile_img, "-C", self.dir_path]) + if retcode != 0: + raise IOError ("Error while unpacking {0} to {1}".\ + format(source_template.clean_volatile_img, self.volatile_img)) + + def remove_from_disk(self): + if dry_run: + return + + shutil.rmtree (self.dir_path) + + def write_firewall_conf(self, conf): + defaults = self.get_firewall_conf() + for item in defaults.keys(): + if item not in conf: + conf[item] = defaults[item] + + root = lxml.etree.Element( + "QubesFirwallRules", + policy = "allow" if conf["allow"] else "deny", + dns = "allow" if conf["allowDns"] else "deny", + icmp = "allow" if conf["allowIcmp"] else "deny", + yumProxy = "allow" if conf["allowYumProxy"] else "deny" + ) + + for rule in conf["rules"]: + # For backward compatibility + if "proto" not in rule: + if rule["portBegin"] is not None and rule["portBegin"] > 0: + rule["proto"] = "tcp" + else: + rule["proto"] = "any" + element = lxml.etree.Element( + "rule", + address=rule["address"], + proto=str(rule["proto"]), + ) + if rule["netmask"] is not None and rule["netmask"] != 32: + element.set("netmask", str(rule["netmask"])) + if rule["portBegin"] is not None and rule["portBegin"] > 0: + element.set("port", str(rule["portBegin"])) + if rule["portEnd"] is not None and rule["portEnd"] > 0: + element.set("toport", str(rule["portEnd"])) + + root.append(element) + + tree = lxml.etree.ElementTree(root) + + try: + f = open(self.firewall_conf, 'a') # create the file if not exist + f.close() + + with open(self.firewall_conf, 'w') as f: + fcntl.lockf(f, fcntl.LOCK_EX) + tree.write(f, encoding="UTF-8", pretty_print=True) + fcntl.lockf(f, fcntl.LOCK_UN) + f.close() + except EnvironmentError as err: + print >> sys.stderr, "{0}: save error: {1}".format( + os.path.basename(sys.argv[0]), err) + return False + + # Automatically enable/disable 'yum-proxy-setup' service based on allowYumProxy + if conf['allowYumProxy']: + self.services['yum-proxy-setup'] = True + else: + if self.services.has_key('yum-proxy-setup'): + self.services.pop('yum-proxy-setup') + + return True + + def has_firewall(self): + return os.path.exists (self.firewall_conf) + + def get_firewall_defaults(self): + return { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True, "allowYumProxy": False } + + def get_firewall_conf(self): + conf = self.get_firewall_defaults() + + try: + tree = lxml.etree.parse(self.firewall_conf) + root = tree.getroot() + + conf["allow"] = (root.get("policy") == "allow") + conf["allowDns"] = (root.get("dns") == "allow") + conf["allowIcmp"] = (root.get("icmp") == "allow") + conf["allowYumProxy"] = (root.get("yumProxy") == "allow") + + for element in root: + rule = {} + attr_list = 
("address", "netmask", "proto", "port", "toport") + + for attribute in attr_list: + rule[attribute] = element.get(attribute) + + if rule["netmask"] is not None: + rule["netmask"] = int(rule["netmask"]) + else: + rule["netmask"] = 32 + + if rule["port"] is not None: + rule["portBegin"] = int(rule["port"]) + else: + # backward compatibility + rule["portBegin"] = 0 + + # For backward compatibility + if rule["proto"] is None: + if rule["portBegin"] > 0: + rule["proto"] = "tcp" + else: + rule["proto"] = "any" + + if rule["toport"] is not None: + rule["portEnd"] = int(rule["toport"]) + else: + rule["portEnd"] = None + + del(rule["port"]) + del(rule["toport"]) + + conf["rules"].append(rule) + + except EnvironmentError as err: + return conf + except (xml.parsers.expat.ExpatError, + ValueError, LookupError) as err: + print("{0}: load error: {1}".format( + os.path.basename(sys.argv[0]), err)) + return None + + return conf + + def run(self, command, user = None, verbose = True, autostart = False, notify_function = None, passio = False, passio_popen = False, passio_stderr=False, ignore_stderr=False, localcmd = None, wait = False, gui = True): + """command should be in form 'cmdline' + When passio_popen=True, popen object with stdout connected to pipe. + When additionally passio_stderr=True, stderr also is connected to pipe. + When ignore_stderr=True, stderr is connected to /dev/null. + """ + + if user is None: + user = self.default_user + null = None + if not self.is_running() and not self.is_paused(): + if not autostart: + raise QubesException("VM not running") + + try: + if notify_function is not None: + notify_function ("info", "Starting the '{0}' VM...".format(self.name)) + elif verbose: + print >> sys.stderr, "Starting the VM '{0}'...".format(self.name) + xid = self.start(verbose=verbose, start_guid = gui, notify_function=notify_function) + + except (IOError, OSError, QubesException) as err: + raise QubesException("Error while starting the '{0}' VM: {1}".format(self.name, err)) + except (MemoryError) as err: + raise QubesException("Not enough memory to start '{0}' VM! 
Close one or more running VMs and try again.".format(self.name)) + + xid = self.get_xid() + if gui and os.getenv("DISPLAY") is not None and not self.is_guid_running(): + self.start_guid(verbose = verbose, notify_function = notify_function) + + args = [system_path["qrexec_client_path"], "-d", str(xid), "%s:%s" % (user, command)] + if localcmd is not None: + args += [ "-l", localcmd] + if passio: + os.execv(system_path["qrexec_client_path"], args) + exit(1) + + call_kwargs = {} + if ignore_stderr: + null = open("/dev/null", "w") + call_kwargs['stderr'] = null + + if passio_popen: + popen_kwargs={'stdout': subprocess.PIPE} + popen_kwargs['stdin'] = subprocess.PIPE + if passio_stderr: + popen_kwargs['stderr'] = subprocess.PIPE + else: + popen_kwargs['stderr'] = call_kwargs.get('stderr', None) + p = subprocess.Popen (args, **popen_kwargs) + if null: + null.close() + return p + if not wait: + args += ["-e"] + retcode = subprocess.call(args, **call_kwargs) + if null: + null.close() + return retcode + + def attach_network(self, verbose = False, wait = True, netvm = None): + if dry_run: + return + + if not self.is_running(): + raise QubesException ("VM not running!") + + if netvm is None: + netvm = self.netvm + + if netvm is None: + raise QubesException ("NetVM not set!") + + if netvm.qid != 0: + if not netvm.is_running(): + if verbose: + print >> sys.stderr, "--> Starting NetVM {0}...".format(netvm.name) + netvm.start() + + xs_path = '/local/domain/%d/device/vif/0/state' % (self.xid) + if xs.read('', xs_path) is not None: + # TODO: check its state and backend state (this can be stale vif after NetVM restart) + if verbose: + print >> sys.stderr, "NOTICE: Network already attached" + return + + xm_cmdline = ["/usr/sbin/xl", "network-attach", str(self.xid), "script=/etc/xen/scripts/vif-route-qubes", "ip="+self.ip, "backend="+netvm.name ] + retcode = subprocess.call (xm_cmdline) + if retcode != 0: + print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}'!".format(self.name)) + if wait: + tries = 0 + while xs.read('', xs_path) != '4': + tries += 1 + if tries > 50: + raise QubesException ("Network attach timed out!") + time.sleep(0.2) + + def wait_for_session(self, notify_function = None): + #self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True) + + # Note : User root is redefined to SYSTEM in the Windows agent code + p = self.run('QUBESRPC qubes.WaitForSession none', user="root", passio_popen=True, gui=False, wait=True) + p.communicate(input=self.default_user) + + def start_guid(self, verbose = True, notify_function = None): + if verbose: + print >> sys.stderr, "--> Starting Qubes GUId..." + xid = self.get_xid() + + guid_cmd = [system_path["qubes_guid_path"], "-d", str(xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)] + if self.debug: + guid_cmd += ['-v', '-v'] + retcode = subprocess.call (guid_cmd) + if (retcode != 0) : + raise QubesException("Cannot start qubes-guid!") + + if verbose: + print >> sys.stderr, "--> Waiting for qubes-session..." + + self.wait_for_session(notify_function) + + def start_qrexec_daemon(self, verbose = False, notify_function = None): + if verbose: + print >> sys.stderr, "--> Starting the qrexec daemon..." 
+ xid = self.get_xid() + qrexec_env = os.environ + qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout) + retcode = subprocess.call ([system_path["qrexec_daemon_path"], str(xid), self.default_user], env=qrexec_env) + if (retcode != 0) : + self.force_shutdown(xid=xid) + raise OSError ("ERROR: Cannot execute qrexec-daemon!") + + def start(self, debug_console = False, verbose = False, preparing_dvm = False, start_guid = True, notify_function = None): + if dry_run: + return + + # Intentionally not used is_running(): eliminate also "Paused", "Crashed", "Halting" + if self.get_power_state() != "Halted": + raise QubesException ("VM is already running!") + + self.verify_files() + + if self.netvm is not None: + if self.netvm.qid != 0: + if not self.netvm.is_running(): + if verbose: + print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm.name) + self.netvm.start(verbose = verbose, start_guid = start_guid, notify_function = notify_function) + + self.reset_volatile_storage(verbose=verbose) + if verbose: + print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type) + + # refresh config file + self.create_config_file() + + mem_required = int(self.memory) * 1024 * 1024 + qmemman_client = QMemmanClient() + try: + got_memory = qmemman_client.request_memory(mem_required) + except IOError as e: + raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e)) + if not got_memory: + qmemman_client.close() + raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name) + + # Bind pci devices to pciback driver + for pci in self.pcidevs: + try: + subprocess.check_call(['sudo', system_path["qubes_pciback_cmd"], pci]) + except subprocess.CalledProcessError: + raise QubesException("Failed to prepare PCI device %s" % pci) + + xl_cmdline = ['sudo', '/usr/sbin/xl', 'create', self.conf_file, '-q', '-p'] + + try: + subprocess.check_call(xl_cmdline) + except: + raise QubesException("Failed to load VM config") + + xid = self.get_xid() + self.xid = xid + + if preparing_dvm: + self.services['qubes-dvm'] = True + if verbose: + print >> sys.stderr, "--> Setting Xen Store info for the VM..." + self.create_xenstore_entries(xid) + + qvm_collection = QubesVmCollection() + qvm_collection.lock_db_for_reading() + qvm_collection.load() + qvm_collection.unlock_db() + + if verbose: + print >> sys.stderr, "--> Updating firewall rules..." + for vm in qvm_collection.values(): + if vm.is_proxyvm() and vm.is_running(): + vm.write_iptables_xenstore_entry() + + if verbose: + print >> sys.stderr, "--> Starting the VM..." 
+ xc.domain_unpause(xid) + +# close() is not really needed, because the descriptor is close-on-exec +# anyway, the reason to postpone close() is that possibly xl is not done +# constructing the domain after its main process exits +# so we close() when we know the domain is up +# the successful unpause is some indicator of it + qmemman_client.close() + + if self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'): + self.start_guid(verbose=verbose,notify_function=notify_function) + + if not preparing_dvm: + self.start_qrexec_daemon(verbose=verbose,notify_function=notify_function) + + if not self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'): + self.start_guid(verbose=verbose,notify_function=notify_function) + + if preparing_dvm: + if verbose: + print >> sys.stderr, "--> Preparing config template for DispVM" + self.create_config_file(file_path = self.dir_path + '/dvm.conf', prepare_dvm = True) + + # perhaps we should move it before unpause and fork? + # FIXME: this uses obsolete xm api + if debug_console: + from xen.xm import console + if verbose: + print >> sys.stderr, "--> Starting debug console..." + console.execConsole (xid) + + return xid + + def shutdown(self, force=False, xid = None): + if dry_run: + return + + if not self.is_running(): + raise QubesException ("VM already stopped!") + + subprocess.call (['/usr/sbin/xl', 'shutdown', str(xid) if xid is not None else self.name]) + #xc.domain_destroy(self.get_xid()) + + def force_shutdown(self, xid = None): + if dry_run: + return + + if not self.is_running() and not self.is_paused(): + raise QubesException ("VM already stopped!") + + subprocess.call (['/usr/sbin/xl', 'destroy', str(xid) if xid is not None else self.name]) + + def pause(self): + if dry_run: + return + + xc.domain_pause(self.get_xid()) + + def unpause(self): + if dry_run: + return + + xc.domain_unpause(self.get_xid()) + + def get_xml_attrs(self): + attrs = {} + attrs_config = self._get_attrs_config() + for attr in attrs_config: + attr_config = attrs_config[attr] + if 'save' in attr_config: + if 'save_skip' in attr_config and eval(attr_config['save_skip']): + continue + if 'save_attr' in attr_config: + attrs[attr_config['save_attr']] = eval(attr_config['save']) + else: + attrs[attr] = eval(attr_config['save']) + return attrs + + def create_xml_element(self): + # Compatibility hack (Qubes*VM in type vs Qubes*Vm in XML)... + rx_type = re.compile (r"VM") + + attrs = self.get_xml_attrs() + element = lxml.etree.Element( + "Qubes" + rx_type.sub("Vm", self.type), + **attrs) + return element + +register_qubes_vm_class(QubesVm) diff --git a/dom0/core-modules/003QubesTemplateVm.py b/dom0/core-modules/003QubesTemplateVm.py new file mode 100644 index 00000000..5d283ef4 --- /dev/null +++ b/dom0/core-modules/003QubesTemplateVm.py @@ -0,0 +1,212 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +import os +import subprocess +import sys + +from qubes.qubes import QubesVm,register_qubes_vm_class,dry_run +from qubes.qubes import QubesVmCollection,QubesException,QubesVmLabels +from qubes.qubes import defaults,system_path,vm_files + +class QubesTemplateVm(QubesVm): + """ + A class that represents an TemplateVM. A child of QubesVm. + """ + + # In which order load this VM type from qubes.xml + load_order = 50 + + def _get_attrs_config(self): + attrs_config = super(QubesTemplateVm, self)._get_attrs_config() + attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_templates_dir"], self.name)' + attrs_config['label']['default'] = defaults["template_label"] + + # New attributes + + # Image for template changes + attrs_config['rootcow_img'] = { 'eval': 'os.path.join(self.dir_path, vm_files["rootcow_img"])' } + # Clean image for root-cow and swap (AppVM side) + attrs_config['clean_volatile_img'] = { 'eval': 'os.path.join(self.dir_path, vm_files["clean_volatile_img"])' } + + attrs_config['appmenus_templates_dir'] = { 'eval': 'os.path.join(self.dir_path, vm_files["appmenus_templates_subdir"])' } + return attrs_config + + def __init__(self, **kwargs): + + super(QubesTemplateVm, self).__init__(**kwargs) + + self.appvms = QubesVmCollection() + + @property + def type(self): + return "TemplateVM" + + @property + def updateable(self): + return True + + def is_template(self): + return True + + def get_firewall_defaults(self): + return { "rules": list(), "allow": False, "allowDns": False, "allowIcmp": False, "allowYumProxy": True } + + def get_rootdev(self, source_template=None): + return "'script:origin:{dir}/root.img:{dir}/root-cow.img,xvda,w',".format(dir=self.dir_path) + + def clone_disk_files(self, src_vm, verbose): + if dry_run: + return + + super(QubesTemplateVm, self).clone_disk_files(src_vm=src_vm, verbose=verbose) + + for whitelist in ['vm-' + vm_files["whitelisted_appmenus"], 'netvm-' + vm_files["whitelisted_appmenus"]]: + if os.path.exists(os.path.join(src_vm.dir_path, whitelist)): + if verbose: + print >> sys.stderr, "--> Copying default whitelisted apps list: {0}".\ + format(os.path.join(self.dir_path, whitelist)) + shutil.copy(os.path.join(src_vm.dir_path, whitelist), + os.path.join(self.dir_path, whitelist)) + + if verbose: + print >> sys.stderr, "--> Copying the template's clean volatile image:\n{0} ==>\n{1}".\ + format(src_vm.clean_volatile_img, self.clean_volatile_img) + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", src_vm.clean_volatile_img, self.clean_volatile_img]) + if retcode != 0: + raise IOError ("Error while copying {0} to {1}".\ + format(src_vm.clean_volatile_img, self.clean_volatile_img)) + if verbose: + print >> sys.stderr, "--> Copying the template's volatile image:\n{0} ==>\n{1}".\ + format(self.clean_volatile_img, self.volatile_img) + # We prefer to use Linux's cp, because it nicely handles sparse files + retcode = subprocess.call (["cp", self.clean_volatile_img, self.volatile_img]) + if retcode != 0: + raise IOError ("Error while copying {0} to {1}".\ + format(self.clean_img, self.volatile_img)) + + # Create root-cow.img + self.commit_changes(verbose=verbose) + + def create_appmenus(self, verbose=False, source_template = None): + if 
source_template is None: + source_template = self.template + + try: + subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], self.appmenus_templates_dir, self.name, "vm-templates"]) + except subprocess.CalledProcessError: + print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) + + def remove_appmenus(self): + subprocess.check_call ([system_path["qubes_appmenu_remove_cmd"], self.name, "vm-templates"]) + + def pre_rename(self, new_name): + self.remove_appmenus() + + def post_rename(self, old_name): + self.create_appmenus(verbose=False) + + old_dirpath = os.path.join(os.path.dirname(self.dir_path), old_name) + self.clean_volatile_img = self.clean_volatile_img.replace(old_dirpath, self.dir_path) + self.rootcow_img = self.rootcow_img.replace(old_dirpath, self.dir_path) + + def remove_from_disk(self): + if dry_run: + return + + self.remove_appmenus() + super(QubesTemplateVm, self).remove_from_disk() + + def verify_files(self): + if dry_run: + return + + + if not os.path.exists (self.dir_path): + raise QubesException ( + "VM directory doesn't exist: {0}".\ + format(self.dir_path)) + + if not os.path.exists (self.root_img): + raise QubesException ( + "VM root image file doesn't exist: {0}".\ + format(self.root_img)) + + if not os.path.exists (self.private_img): + raise QubesException ( + "VM private image file doesn't exist: {0}".\ + format(self.private_img)) + + if not os.path.exists (self.volatile_img): + raise QubesException ( + "VM volatile image file doesn't exist: {0}".\ + format(self.volatile_img)) + + if not os.path.exists (self.clean_volatile_img): + raise QubesException ( + "Clean VM volatile image file doesn't exist: {0}".\ + format(self.clean_volatile_img)) + + if not os.path.exists (self.kernels_dir): + raise QubesException ( + "VM's kernels directory does not exist: {0}".\ + format(self.kernels_dir)) + + return True + + def reset_volatile_storage(self, verbose = False): + assert not self.is_running(), "Attempt to clean volatile image of running Template VM!" + + if verbose: + print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img) + if dry_run: + return + if os.path.exists (self.volatile_img): + os.remove (self.volatile_img) + + retcode = subprocess.call (["tar", "xf", self.clean_volatile_img, "-C", self.dir_path]) + if retcode != 0: + raise IOError ("Error while unpacking {0} to {1}".\ + format(self.template.clean_volatile_img, self.volatile_img)) + + def commit_changes (self, verbose = False): + + assert not self.is_running(), "Attempt to commit changes on running Template VM!" + + if verbose: + print >> sys.stderr, "--> Commiting template updates... 
COW: {0}...".format (self.rootcow_img) + + if dry_run: + return + if os.path.exists (self.rootcow_img): + os.rename (self.rootcow_img, self.rootcow_img + '.old') + + f_cow = open (self.rootcow_img, "w") + f_root = open (self.root_img, "r") + f_root.seek(0, os.SEEK_END) + f_cow.truncate (f_root.tell()) # make empty sparse file of the same size as root.img + f_cow.close () + f_root.close() + +register_qubes_vm_class(QubesTemplateVm) diff --git a/dom0/core-modules/005QubesNetVm.py b/dom0/core-modules/005QubesNetVm.py new file mode 100644 index 00000000..246228bf --- /dev/null +++ b/dom0/core-modules/005QubesNetVm.py @@ -0,0 +1,201 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +from qubes.qubes import QubesVm,register_qubes_vm_class,xs,dry_run +from qubes.qubes import defaults,system_path,vm_files +from qubes.qubes import QubesVmCollection,QubesException + +class QubesNetVm(QubesVm): + """ + A class that represents a NetVM. A child of QubesCowVM. 
+ """ + + # In which order load this VM type from qubes.xml + load_order = 70 + + def _get_attrs_config(self): + attrs_config = super(QubesNetVm, self)._get_attrs_config() + attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_servicevms_dir"], self.name)' + attrs_config['label']['default'] = defaults["servicevm_label"] + attrs_config['memory']['default'] = 200 + + # New attributes + attrs_config['netid'] = { 'save': 'str(self.netid)', 'order': 30, + 'eval': 'value if value is not None else collection.get_new_unused_netid()' } + attrs_config['netprefix'] = { 'eval': '"10.137.{0}.".format(self.netid)' } + attrs_config['dispnetprefix'] = { 'eval': '"10.138.{0}.".format(self.netid)' } + + # Dont save netvm prop + attrs_config['netvm'].pop('save') + attrs_config['uses_default_netvm'].pop('save') + + return attrs_config + + def __init__(self, **kwargs): + super(QubesNetVm, self).__init__(**kwargs) + self.connected_vms = QubesVmCollection() + + self.__network = "10.137.{0}.0".format(self.netid) + self.__netmask = defaults["vm_default_netmask"] + self.__gateway = self.netprefix + "1" + self.__secondary_dns = self.netprefix + "254" + + self.__external_ip_allowed_xids = set() + + @property + def type(self): + return "NetVM" + + def is_netvm(self): + return True + + @property + def gateway(self): + return self.__gateway + + @property + def secondary_dns(self): + return self.__secondary_dns + + @property + def netmask(self): + return self.__netmask + + @property + def network(self): + return self.__network + + def get_ip_for_vm(self, qid): + lo = qid % 253 + 2 + assert lo >= 2 and lo <= 254, "Wrong IP address for VM" + return self.netprefix + "{0}".format(lo) + + def get_ip_for_dispvm(self, dispid): + lo = dispid % 254 + 1 + assert lo >= 1 and lo <= 254, "Wrong IP address for VM" + return self.dispnetprefix + "{0}".format(lo) + + def create_xenstore_entries(self, xid = None): + if dry_run: + return + + if xid is None: + xid = self.xid + + + super(QubesNetVm, self).create_xenstore_entries(xid) + xs.write('', "/local/domain/{0}/qubes-netvm-external-ip".format(xid), '') + self.update_external_ip_permissions(xid) + + def update_external_ip_permissions(self, xid = -1): + if xid < 0: + xid = self.get_xid() + if xid < 0: + return + + command = [ + "/usr/bin/xenstore-chmod", + "/local/domain/{0}/qubes-netvm-external-ip".format(xid) + ] + + command.append("n{0}".format(xid)) + + for id in self.__external_ip_allowed_xids: + command.append("r{0}".format(id)) + + return subprocess.check_call(command) + + def start(self, **kwargs): + if dry_run: + return + + xid=super(QubesNetVm, self).start(**kwargs) + + # Connect vif's of already running VMs + for vm in self.connected_vms.values(): + if not vm.is_running(): + continue + + if 'verbose' in kwargs and kwargs['verbose']: + print >> sys.stderr, "--> Attaching network to '{0}'...".format(vm.name) + + # Cleanup stale VIFs + vm.cleanup_vifs() + + # force frontend to forget about this device + # module actually will be loaded back by udev, as soon as network is attached + vm.run("modprobe -r xen-netfront xennet", user="root") + + try: + vm.attach_network(wait=False) + except QubesException as ex: + print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}': {1}".format(vm.name, ex)) + + return xid + + def shutdown(self, force=False): + if dry_run: + return + + connected_vms = [vm for vm in self.connected_vms.values() if vm.is_running()] + if connected_vms and not force: + raise QubesException("There are other VMs 
connected to this VM: " + str([vm.name for vm in connected_vms])) + + super(QubesNetVm, self).shutdown(force=force) + + def add_external_ip_permission(self, xid): + if int(xid) < 0: + return + self.__external_ip_allowed_xids.add(int(xid)) + self.update_external_ip_permissions() + + def remove_external_ip_permission(self, xid): + self.__external_ip_allowed_xids.discard(int(xid)) + self.update_external_ip_permissions() + + def create_on_disk(self, verbose, source_template = None): + if dry_run: + return + + super(QubesNetVm, self).create_on_disk(verbose, source_template=source_template) + + if os.path.exists(os.path.join(source_template.dir_path, 'netvm-' + vm_files["whitelisted_appmenus"])): + if verbose: + print >> sys.stderr, "--> Creating default whitelisted apps list: {0}".\ + format(self.dir_path + '/' + vm_files["whitelisted_appmenus"]) + shutil.copy(os.path.join(source_template.dir_path, 'netvm-' + vm_files["whitelisted_appmenus"]), + os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) + + if not self.internal: + self.create_appmenus (verbose=verbose, source_template=source_template) + + def remove_from_disk(self): + if dry_run: + return + + if not self.internal: + self.remove_appmenus() + super(QubesNetVm, self).remove_from_disk() + + +register_qubes_vm_class(QubesNetVm) diff --git a/dom0/core-modules/006QubesDom0NetVm.py b/dom0/core-modules/006QubesDom0NetVm.py new file mode 100644 index 00000000..6cd1a9bb --- /dev/null +++ b/dom0/core-modules/006QubesDom0NetVm.py @@ -0,0 +1,89 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
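For reference, the per-NetVM addressing scheme implemented by QubesNetVm above (netprefix, dispnetprefix, get_ip_for_vm, get_ip_for_dispvm) reduces to the small standalone sketch below; vm_ip and dispvm_ip are illustrative helper names and are not part of the patched files.

def vm_ip(netid, qid):
    # Each NetVM owns 10.137.<netid>.0/24; a connected VM with the given qid
    # gets host part (qid % 253) + 2, i.e. an address between .2 and .254.
    lo = qid % 253 + 2
    assert 2 <= lo <= 254, "Wrong IP address for VM"
    return "10.137.{0}.{1}".format(netid, lo)

def dispvm_ip(netid, dispid):
    # DisposableVMs use the parallel 10.138.<netid>.0/24 range, hosts .1 - .254.
    lo = dispid % 254 + 1
    assert 1 <= lo <= 254, "Wrong IP address for VM"
    return "10.138.{0}.{1}".format(netid, lo)

# Example: the VM with qid=1 behind the NetVM with netid=1 gets 10.137.1.3,
# while that NetVM serves 10.137.1.1 as the gateway and 10.137.1.254 as the
# secondary DNS address.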
+#
+#
+
+from qubes.qubes import QubesNetVm,register_qubes_vm_class,xl_ctx,xc
+from qubes.qubes import defaults
+from qubes.qubes import QubesException,dry_run
+
+class QubesDom0NetVm(QubesNetVm):
+    def __init__(self, **kwargs):
+        super(QubesDom0NetVm, self).__init__(qid=0, name="dom0", netid=0,
+                                             dir_path=None,
+                                             private_img = None,
+                                             template = None,
+                                             label = defaults["template_label"],
+                                             **kwargs)
+        self.xid = 0
+
+    def is_running(self):
+        return True
+
+    def get_xid(self):
+        return 0
+
+    def get_power_state(self):
+        return "Running"
+
+    def get_disk_usage(self, file_or_dir):
+        return 0
+
+    def get_disk_utilization(self):
+        return 0
+
+    def get_disk_utilization_private_img(self):
+        return 0
+
+    def get_private_img_sz(self):
+        return 0
+
+    @property
+    def ip(self):
+        return "10.137.0.2"
+
+    def start(self, **kwargs):
+        raise QubesException ("Cannot start Dom0 fake domain!")
+
+    def get_xl_dominfo(self):
+        if dry_run:
+            return
+
+        domains = xl_ctx.list_domains()
+        for dominfo in domains:
+            if dominfo.domid == 0:
+                return dominfo
+        return None
+
+    def get_xc_dominfo(self):
+        if dry_run:
+            return
+
+        domains = xc.domain_getinfo(0, 1)
+        return domains[0]
+
+    def create_xml_element(self):
+        return None
+
+    def verify_files(self):
+        return True
+
+register_qubes_vm_class(QubesDom0NetVm)
diff --git a/dom0/core-modules/006QubesProxyVm.py b/dom0/core-modules/006QubesProxyVm.py
new file mode 100644
index 00000000..e3d3b97e
--- /dev/null
+++ b/dom0/core-modules/006QubesProxyVm.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python2
+#
+# The Qubes OS Project, http://www.qubes-os.org
+#
+# Copyright (C) 2010 Joanna Rutkowska
+# Copyright (C) 2013 Marek Marczykowski
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#
+
+from datetime import datetime
+
+from qubes.qubes import QubesNetVm,register_qubes_vm_class,xs,dry_run
+from qubes.qubes import QubesVmCollection,QubesException
+
+yum_proxy_ip = '10.137.255.254'
+yum_proxy_port = '8082'
+
+class QubesProxyVm(QubesNetVm):
+    """
+    A class that represents a ProxyVM, formerly FirewallVM. A child of QubesNetVM.
+ """ + + def _get_attrs_config(self): + attrs_config = super(QubesProxyVm, self)._get_attrs_config() + attrs_config['uses_default_netvm']['eval'] = 'False' + # Save netvm prop again + attrs_config['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"' + + return attrs_config + + def __init__(self, **kwargs): + super(QubesProxyVm, self).__init__(**kwargs) + self.rules_applied = None + + @property + def type(self): + return "ProxyVM" + + def is_proxyvm(self): + return True + + def _set_netvm(self, new_netvm): + old_netvm = self.netvm + super(QubesProxyVm, self)._set_netvm(new_netvm) + if self.netvm is not None: + self.netvm.add_external_ip_permission(self.get_xid()) + self.write_netvm_domid_entry() + if old_netvm is not None: + old_netvm.remove_external_ip_permission(self.get_xid()) + + def post_vm_net_attach(self, vm): + """ Called after some VM net-attached to this ProxyVm """ + + self.write_iptables_xenstore_entry() + + def post_vm_net_detach(self, vm): + """ Called after some VM net-detached from this ProxyVm """ + + self.write_iptables_xenstore_entry() + + def start(self, **kwargs): + if dry_run: + return + retcode = super(QubesProxyVm, self).start(**kwargs) + if self.netvm is not None: + self.netvm.add_external_ip_permission(self.get_xid()) + self.write_netvm_domid_entry() + return retcode + + def force_shutdown(self, **kwargs): + if dry_run: + return + if self.netvm is not None: + self.netvm.remove_external_ip_permission(kwargs['xid'] if 'xid' in kwargs else self.get_xid()) + super(QubesProxyVm, self).force_shutdown(**kwargs) + + def create_xenstore_entries(self, xid = None): + if dry_run: + return + + if xid is None: + xid = self.xid + + + super(QubesProxyVm, self).create_xenstore_entries(xid) + xs.write('', "/local/domain/{0}/qubes-iptables-error".format(xid), '') + xs.set_permissions('', "/local/domain/{0}/qubes-iptables-error".format(xid), + [{ 'dom': xid, 'write': True }]) + self.write_iptables_xenstore_entry() + + def write_netvm_domid_entry(self, xid = -1): + if not self.is_running(): + return + + if xid < 0: + xid = self.get_xid() + + if self.netvm is None: + xs.write('', "/local/domain/{0}/qubes-netvm-domid".format(xid), '') + else: + xs.write('', "/local/domain/{0}/qubes-netvm-domid".format(xid), + "{0}".format(self.netvm.get_xid())) + + def write_iptables_xenstore_entry(self): + xs.rm('', "/local/domain/{0}/qubes-iptables-domainrules".format(self.get_xid())) + iptables = "# Generated by Qubes Core on {0}\n".format(datetime.now().ctime()) + iptables += "*filter\n" + iptables += ":INPUT DROP [0:0]\n" + iptables += ":FORWARD DROP [0:0]\n" + iptables += ":OUTPUT ACCEPT [0:0]\n" + + # Strict INPUT rules + iptables += "-A INPUT -i vif+ -p udp -m udp --dport 68 -j DROP\n" + iptables += "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\n" + iptables += "-A INPUT -p icmp -j ACCEPT\n" + iptables += "-A INPUT -i lo -j ACCEPT\n" + iptables += "-A INPUT -j REJECT --reject-with icmp-host-prohibited\n" + + iptables += "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\n" + # Allow dom0 networking + iptables += "-A FORWARD -i vif0.0 -j ACCEPT\n" + # Deny inter-VMs networking + iptables += "-A FORWARD -i vif+ -o vif+ -j DROP\n" + iptables += "COMMIT\n" + xs.write('', "/local/domain/{0}/qubes-iptables-header".format(self.get_xid()), iptables) + + vms = [vm for vm in self.connected_vms.values()] + for vm in vms: + iptables="*filter\n" + conf = vm.get_firewall_conf() + + xid = vm.get_xid() + if xid < 0: # VM not active ATM + continue + + ip = vm.ip + if 
ip is None: + continue + + # Anti-spoof rules are added by vif-script (vif-route-qubes), here we trust IP address + + accept_action = "ACCEPT" + reject_action = "REJECT --reject-with icmp-host-prohibited" + + if conf["allow"]: + default_action = accept_action + rules_action = reject_action + else: + default_action = reject_action + rules_action = accept_action + + for rule in conf["rules"]: + iptables += "-A FORWARD -s {0} -d {1}".format(ip, rule["address"]) + if rule["netmask"] != 32: + iptables += "/{0}".format(rule["netmask"]) + + if rule["proto"] is not None and rule["proto"] != "any": + iptables += " -p {0}".format(rule["proto"]) + if rule["portBegin"] is not None and rule["portBegin"] > 0: + iptables += " --dport {0}".format(rule["portBegin"]) + if rule["portEnd"] is not None and rule["portEnd"] > rule["portBegin"]: + iptables += ":{0}".format(rule["portEnd"]) + + iptables += " -j {0}\n".format(rules_action) + + if conf["allowDns"] and self.netvm is not None: + # PREROUTING does DNAT to NetVM DNSes, so we need self.netvm. properties + iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.gateway) + iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.secondary_dns) + if conf["allowIcmp"]: + iptables += "-A FORWARD -s {0} -p icmp -j ACCEPT\n".format(ip) + if conf["allowYumProxy"]: + iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j ACCEPT\n".format(ip, yum_proxy_ip, yum_proxy_port) + else: + iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j DROP\n".format(ip, yum_proxy_ip, yum_proxy_port) + + iptables += "-A FORWARD -s {0} -j {1}\n".format(ip, default_action) + iptables += "COMMIT\n" + xs.write('', "/local/domain/"+str(self.get_xid())+"/qubes-iptables-domainrules/"+str(xid), iptables) + # no need for ending -A FORWARD -j DROP, cause default action is DROP + + self.write_netvm_domid_entry() + + self.rules_applied = None + xs.write('', "/local/domain/{0}/qubes-iptables".format(self.get_xid()), 'reload') + +register_qubes_vm_class(QubesProxyVm) diff --git a/dom0/core-modules/01QubesAppVm.py b/dom0/core-modules/01QubesAppVm.py new file mode 100644 index 00000000..0050f0b4 --- /dev/null +++ b/dom0/core-modules/01QubesAppVm.py @@ -0,0 +1,59 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +from qubes.qubes import QubesVm,QubesVmLabel,register_qubes_vm_class + +class QubesAppVm(QubesVm): + """ + A class that represents an AppVM. A child of QubesVm. 
+ """ + def _get_attrs_config(self): + attrs_config = super(QubesAppVm, self)._get_attrs_config() + attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_appvms_dir"], self.name)' + + return attrs_config + + @property + def type(self): + return "AppVM" + + def is_appvm(self): + return True + + def create_on_disk(self, verbose, source_template = None): + if dry_run: + return + + super(QubesAppVm, self).create_on_disk(verbose, source_template=source_template) + + if not self.internal: + self.create_appmenus (verbose=verbose, source_template=source_template) + + def remove_from_disk(self): + if dry_run: + return + + self.remove_appmenus() + super(QubesAppVm, self).remove_from_disk() + +register_qubes_vm_class(QubesAppVm) diff --git a/dom0/core-modules/01QubesDisposableVm.py b/dom0/core-modules/01QubesDisposableVm.py new file mode 100644 index 00000000..20b29495 --- /dev/null +++ b/dom0/core-modules/01QubesDisposableVm.py @@ -0,0 +1,97 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +import sys +from qubes.qubes import QubesVm,QubesVmLabel,register_qubes_vm_class + +QubesDispVmLabels = { + "red" : QubesVmLabel ("red", 1, icon="dispvm-red"), + "orange" : QubesVmLabel ("orange", 2, icon="dispvm-orange"), + "yellow" : QubesVmLabel ("yellow", 3, icon="dispvm-yellow"), + "green" : QubesVmLabel ("green", 4, color="0x5fa05e", icon="dispvm-green"), + "gray" : QubesVmLabel ("gray", 5, icon="dispvm-gray"), + "blue" : QubesVmLabel ("blue", 6, icon="dispvm-blue"), + "purple" : QubesVmLabel ("purple", 7, color="0xb83374", icon="dispvm-purple"), + "black" : QubesVmLabel ("black", 8, icon="dispvm-black"), +} + +class QubesDisposableVm(QubesVm): + """ + A class that represents an DisposableVM. A child of QubesVm. + """ + + # In which order load this VM type from qubes.xml + load_order = 120 + + def _get_attrs_config(self): + attrs_config = super(QubesDisposableVm, self)._get_attrs_config() + + # New attributes + attrs_config['dispid'] = { 'save': 'str(self.dispid)' } + + return attrs_config + + def __init__(self, **kwargs): + + super(QubesDisposableVm, self).__init__(dir_path="/nonexistent", **kwargs) + + assert self.template is not None, "Missing template for DisposableVM!" 
+ + # Use DispVM icon with the same color + if self._label: + self._label = QubesDispVmLabels[self._label.name] + self.icon_path = self._label.icon_path + + @property + def type(self): + return "DisposableVM" + + def is_disposablevm(self): + return True + + @property + def ip(self): + if self.netvm is not None: + return self.netvm.get_ip_for_dispvm(self.dispid) + else: + return None + + + def get_xml_attrs(self): + # Minimal set - do not inherit rest of attributes + attrs = {} + attrs["qid"] = str(self.qid) + attrs["name"] = self.name + attrs["dispid"] = str(self.dispid) + attrs["template_qid"] = str(self.template.qid) + attrs["label"] = self.label.name + attrs["firewall_conf"] = self.relative_path(self.firewall_conf) + attrs["netvm_qid"] = str(self.netvm.qid) if self.netvm is not None else "none" + return attrs + + def verify_files(self): + return True + +# register classes +sys.modules['qubes.qubes'].QubesDispVmLabels = QubesDispVmLabels +register_qubes_vm_class(QubesDisposableVm) diff --git a/dom0/core-modules/01QubesHVm.py b/dom0/core-modules/01QubesHVm.py new file mode 100644 index 00000000..d94ce2a0 --- /dev/null +++ b/dom0/core-modules/01QubesHVm.py @@ -0,0 +1,302 @@ +#!/usr/bin/python2 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# Copyright (C) 2013 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +import os + +from qubes.qubes import QubesVm,register_qubes_vm_class,xs,dry_run +from qubes.qubes import system_path,defaults + +system_path["config_template_hvm"] = '/usr/share/qubes/vm-template-hvm.conf' + +defaults["hvm_disk_size"] = 20*1024*1024*1024 +defaults["hvm_private_img_size"] = 2*1024*1024*1024 +defaults["hvm_memory"] = 512 + + +class QubesHVm(QubesVm): + """ + A class that represents an HVM. A child of QubesVm. 
+ """ + + # FIXME: logically should inherit after QubesAppVm, but none of its methods + # are useful for HVM + + def _get_attrs_config(self): + attrs = super(QubesHVm, self)._get_attrs_config() + attrs.pop('kernel') + attrs.pop('kernels_dir') + attrs.pop('kernelopts') + attrs.pop('uses_default_kernel') + attrs.pop('uses_default_kernelopts') + attrs['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_appvms_dir"], self.name)' + attrs['volatile_img']['eval'] = 'None' + attrs['config_file_template']['eval'] = 'system_path["config_template_hvm"]' + attrs['drive'] = { 'save': 'str(self.drive)' } + attrs['maxmem'].pop('save') + attrs['timezone'] = { 'default': 'localtime', 'save': 'str(self.timezone)' } + attrs['qrexec_installed'] = { 'default': False, 'save': 'str(self.qrexec_installed)' } + attrs['guiagent_installed'] = { 'default' : False, 'save': 'str(self.guiagent_installed)' } + attrs['_start_guid_first']['eval'] = 'True' + attrs['services']['default'] = "{'meminfo-writer': False}" + + # only standalone HVM supported for now + attrs['template']['eval'] = 'None' + attrs['memory']['default'] = defaults["hvm_memory"] + + return attrs + + def __init__(self, **kwargs): + + super(QubesHVm, self).__init__(**kwargs) + + # Default for meminfo-writer have changed to (correct) False in the + # same version as introduction of guiagent_installed, so for older VMs + # with wrong setting, change is based on 'guiagent_installed' presence + if "guiagent_installed" not in kwargs and \ + (not 'xml_element' in kwargs or kwargs['xml_element'].get('guiagent_installed') is None): + self.services['meminfo-writer'] = False + + # HVM normally doesn't support dynamic memory management + if not ('meminfo-writer' in self.services and self.services['meminfo-writer']): + self.maxmem = self.memory + + # Disable qemu GUID if the user installed qubes gui agent + if self.guiagent_installed: + self._start_guid_first = False + + @property + def type(self): + return "HVM" + + def is_appvm(self): + return True + + def get_clone_attrs(self): + attrs = super(QubesHVm, self).get_clone_attrs() + attrs.remove('kernel') + attrs.remove('uses_default_kernel') + attrs.remove('kernelopts') + attrs.remove('uses_default_kernelopts') + attrs += [ 'timezone' ] + attrs += [ 'qrexec_installed' ] + attrs += [ 'guiagent_installed' ] + return attrs + + def create_on_disk(self, verbose, source_template = None): + if dry_run: + return + + if verbose: + print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) + os.mkdir (self.dir_path) + + if verbose: + print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path) + os.symlink (self.label.icon_path, self.icon_path) + + if verbose: + print >> sys.stderr, "--> Creating appmenus directory: {0}".format(self.appmenus_templates_dir) + os.mkdir (self.appmenus_templates_dir) + shutil.copy (system_path["start_appmenu_template"], self.appmenus_templates_dir) + + if not self.internal: + self.create_appmenus (verbose, source_template=source_template) + + self.create_config_file() + + # create empty disk + f_root = open(self.root_img, "w") + f_root.truncate(defaults["hvm_disk_size"]) + f_root.close() + + # create empty private.img + f_private = open(self.private_img, "w") + f_private.truncate(defaults["hvm_private_img_size"]) + f_root.close() + + def remove_from_disk(self): + if dry_run: + return + + self.remove_appmenus() + super(QubesHVm, self).remove_from_disk() + + def get_disk_utilization_private_img(self): + 
return 0 + + def get_private_img_sz(self): + return 0 + + def resize_private_img(self, size): + raise NotImplementedError("HVM has no private.img") + + def get_config_params(self, source_template=None): + + params = super(QubesHVm, self).get_config_params(source_template=source_template) + + params['volatiledev'] = '' + if self.drive: + type_mode = ":cdrom,r" + drive_path = self.drive + # leave empty to use standard syntax in case of dom0 + backend_domain = "" + if drive_path.startswith("hd:"): + type_mode = ",w" + drive_path = drive_path[3:] + elif drive_path.startswith("cdrom:"): + type_mode = ":cdrom,r" + drive_path = drive_path[6:] + backend_split = re.match(r"^([a-zA-Z0-9-]*):(.*)", drive_path) + if backend_split: + backend_domain = "," + backend_split.group(1) + drive_path = backend_split.group(2) + + # FIXME: os.stat will work only when backend in dom0... + stat_res = None + if backend_domain == "": + stat_res = os.stat(drive_path) + if stat_res and stat.S_ISBLK(stat_res.st_mode): + params['otherdevs'] = "'phy:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain) + else: + params['otherdevs'] = "'script:file:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain) + else: + params['otherdevs'] = '' + + # Disable currently unused private.img - to be enabled when TemplateHVm done + params['privatedev'] = '' + + if self.timezone.lower() == 'localtime': + params['localtime'] = '1' + params['timeoffset'] = '0' + elif self.timezone.isdigit(): + params['localtime'] = '0' + params['timeoffset'] = self.timezone + else: + print >>sys.stderr, "WARNING: invalid 'timezone' value: %s" % self.timezone + params['localtime'] = '0' + params['timeoffset'] = '0' + return params + + def verify_files(self): + if dry_run: + return + + if not os.path.exists (self.dir_path): + raise QubesException ( + "VM directory doesn't exist: {0}".\ + format(self.dir_path)) + + if self.is_updateable() and not os.path.exists (self.root_img): + raise QubesException ( + "VM root image file doesn't exist: {0}".\ + format(self.root_img)) + + if not os.path.exists (self.private_img): + print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\ + format(self.private_img) + f_private = open(self.private_img, "w") + f_private.truncate(defaults["hvm_private_img_size"]) + f_private.close() + + return True + + def reset_volatile_storage(self, **kwargs): + pass + + @property + def vif(self): + if self.xid < 0: + return None + if self.netvm is None: + return None + return "vif{0}.+".format(self.stubdom_xid) + + def run(self, command, **kwargs): + if self.qrexec_installed: + if 'gui' in kwargs and kwargs['gui']==False: + command = "nogui:" + command + return super(QubesHVm, self).run(command, **kwargs) + else: + raise QubesException("Needs qrexec agent installed in VM to use this function. See also qvm-prefs.") + + @property + def stubdom_xid(self): + if self.xid < 0: + return -1 + + stubdom_xid_str = xs.read('', '/local/domain/%d/image/device-model-domid' % self.xid) + if stubdom_xid_str is not None: + return int(stubdom_xid_str) + else: + return -1 + + def start_guid(self, verbose = True, notify_function = None): + # If user force the guiagent, start_guid will mimic a standard QubesVM + if self.guiagent_installed: + super(QubesHVm, self).start_guid(verbose, notify_function) + else: + if verbose: + print >> sys.stderr, "--> Starting Qubes GUId..." 
+ + retcode = subprocess.call ([system_path["qubes_guid_path"], "-d", str(self.stubdom_xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)]) + if (retcode != 0) : + raise QubesException("Cannot start qubes-guid!") + + def start_qrexec_daemon(self, **kwargs): + if self.qrexec_installed: + super(QubesHVm, self).start_qrexec_daemon(**kwargs) + + if self._start_guid_first: + if kwargs.get('verbose'): + print >> sys.stderr, "--> Waiting for user '%s' login..." % self.default_user + + self.wait_for_session(notify_function=kwargs.get('notify_function', None)) + + def pause(self): + if dry_run: + return + + xc.domain_pause(self.stubdom_xid) + super(QubesHVm, self).pause() + + def unpause(self): + if dry_run: + return + + xc.domain_unpause(self.stubdom_xid) + super(QubesHVm, self).unpause() + + def is_guid_running(self): + # If user force the guiagent, is_guid_running will mimic a standard QubesVM + if self.guiagent_installed: + return super(QubesHVm, self).is_guid_running() + else: + xid = self.stubdom_xid + if xid < 0: + return False + if not os.path.exists('/var/run/qubes/guid-running.%d' % xid): + return False + return True + + +register_qubes_vm_class(QubesHVm) diff --git a/dom0/core-modules/README.txt b/dom0/core-modules/README.txt new file mode 100644 index 00000000..17f6445a --- /dev/null +++ b/dom0/core-modules/README.txt @@ -0,0 +1,5 @@ +This directory contains Qubes core modules. It will be loaded in +lexicographical order, use numeric prefix to force load ordering. + +0* - Qubes base modules +00* - Qubes core VM classes diff --git a/dom0/core-modules/__init__.py b/dom0/core-modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dom0/core/qubes.py b/dom0/core/qubes.py index ed02290c..15c49f1e 100755 --- a/dom0/core/qubes.py +++ b/dom0/core/qubes.py @@ -65,7 +65,6 @@ system_path = { 'qubes_icon_dir': '/usr/share/qubes/icons', 'config_template_pv': '/usr/share/qubes/vm-template.conf', - 'config_template_hvm': '/usr/share/qubes/vm-template-hvm.conf', 'start_appmenu_template': '/usr/share/qubes/qubes-start.desktop', 'qubes_appmenu_create_cmd': '/usr/lib/qubes/create-apps-for-appvm.sh', @@ -94,10 +93,6 @@ defaults = { 'kernelopts': "", 'kernelopts_pcidevs': "iommu=soft swiotlb=4096", - 'hvm_disk_size': 20*1024*1024*1024, - 'hvm_private_img_size': 2*1024*1024*1024, - 'hvm_memory': 512, - 'dom0_update_check_interval': 6*3600, # how long (in sec) to wait for VMs to shutdown, @@ -115,9 +110,6 @@ defaults = { qubes_max_qid = 254 qubes_max_netid = 254 -yum_proxy_ip = '10.137.255.254' -yum_proxy_port = '8082' - class QubesException (Exception) : pass if not dry_run: @@ -214,2427 +206,12 @@ defaults["template_label"] = QubesVmLabels["black"] defaults["servicevm_label"] = QubesVmLabels["red"] QubesVmClasses = {} -def register_qubes_vm_class(class_name, vm_class): +def register_qubes_vm_class(vm_class): global QubesVmClasses - QubesVmClasses[class_name] = vm_class - -class QubesVm(object): - """ - A representation of one Qubes VM - Only persistent information are stored here, while all the runtime - information, e.g. Xen dom id, etc, are to be retrieved via Xen API - Note that qid is not the same as Xen's domid! 
- """ - - # In which order load this VM type from qubes.xml - load_order = 100 - - def _get_attrs_config(self): - """ Object attributes for serialization/deserialization - inner dict keys: - - order: initialization order (to keep dependency intact) - attrs without order will be evaluated at the end - - default: default value used when attr not given to object constructor - - attr: set value to this attribute instead of parameter name - - eval: assign result of this expression instead of value directly; - local variable 'value' contains attribute value (or default if it was not given) - - save: use evaluation result as value for XML serialization; only attrs with 'save' key will be saved in XML - - save_skip: if present and evaluates to true, attr will be omitted in XML - - save_attr: save to this XML attribute instead of parameter name - """ - - attrs = { - # __qid cannot be accessed by setattr, so must be set manually in __init__ - "qid": { "attr": "_qid", "order": 0 }, - "name": { "order": 1 }, - "dir_path": { "default": None, "order": 2 }, - "conf_file": { "eval": 'self.absolute_path(value, self.name + ".conf")', 'order': 3 }, - ### order >= 10: have base attrs set - "root_img": { "eval": 'self.absolute_path(value, vm_files["root_img"])', 'order': 10 }, - "private_img": { "eval": 'self.absolute_path(value, vm_files["private_img"])', 'order': 10 }, - "volatile_img": { "eval": 'self.absolute_path(value, vm_files["volatile_img"])', 'order': 10 }, - "firewall_conf": { "eval": 'self.absolute_path(value, vm_files["firewall_conf"])', 'order': 10 }, - "installed_by_rpm": { "default": False, 'order': 10 }, - "template": { "default": None, 'order': 10 }, - ### order >= 20: have template set - "uses_default_netvm": { "default": True, 'order': 20 }, - "netvm": { "default": None, "attr": "_netvm", 'order': 20 }, - "label": { "attr": "_label", "default": QubesVmLabels["red"], 'order': 20, - 'xml_deserialize': lambda _x: QubesVmLabels[_x] }, - "memory": { "default": defaults["memory"], 'order': 20, "eval": "int(value)" }, - "maxmem": { "default": None, 'order': 25, "eval": "int(value) if value else None" }, - "pcidevs": { "default": '[]', 'order': 25, "eval": \ - '[] if value in ["none", None] else eval(value) if value.find("[") >= 0 else eval("[" + value + "]")' }, - # Internal VM (not shown in qubes-manager, doesn't create appmenus entries - "internal": { "default": False }, - "vcpus": { "default": None }, - "uses_default_kernel": { "default": True, 'order': 30 }, - "uses_default_kernelopts": { "default": True, 'order': 30 }, - "kernel": { "default": None, 'order': 31, - 'eval': 'collection.get_default_kernel() if self.uses_default_kernel else value' }, - "kernelopts": { "default": "", 'order': 31, "eval": \ - 'value if not self.uses_default_kernelopts else defaults["kernelopts_pcidevs"] if len(self.pcidevs) > 0 else defaults["kernelopts"]' }, - "mac": { "attr": "_mac", "default": None }, - "include_in_backups": { "default": True }, - "services": { "default": {}, "eval": "eval(str(value))" }, - "debug": { "default": False }, - "default_user": { "default": "user" }, - "qrexec_timeout": { "default": 60, "eval": "int(value)" }, - ##### Internal attributes - will be overriden in __init__ regardless of args - "appmenus_templates_dir": { "eval": \ - 'os.path.join(self.dir_path, vm_files["appmenus_templates_subdir"]) if self.updateable else ' + \ - 'self.template.appmenus_templates_dir if self.template is not None else None' }, - "config_file_template": { "eval": 'system_path["config_template_pv"]' }, - 
"icon_path": { "eval": 'os.path.join(self.dir_path, "/icon.png") if self.dir_path is not None else None' }, - # used to suppress side effects of clone_attrs - "_do_not_reset_firewall": { "eval": 'False' }, - "kernels_dir": { 'eval': 'os.path.join(system_path["qubes_kernels_base_dir"], self.kernel) if self.kernel is not None else ' + \ - # for backward compatibility (or another rare case): kernel=None -> kernel in VM dir - 'os.path.join(self.dir_path, vm_files["kernels_subdir"])' }, - "_start_guid_first": { 'eval': 'False' }, - } - - ### Mark attrs for XML inclusion - # Simple string attrs - for prop in ['qid', 'name', 'dir_path', 'memory', 'maxmem', 'pcidevs', 'vcpus', 'internal',\ - 'uses_default_kernel', 'kernel', 'uses_default_kernelopts',\ - 'kernelopts', 'services', 'installed_by_rpm',\ - 'uses_default_netvm', 'include_in_backups', 'debug',\ - 'default_user', 'qrexec_timeout' ]: - attrs[prop]['save'] = 'str(self.%s)' % prop - # Simple paths - for prop in ['conf_file', 'root_img', 'volatile_img', 'private_img']: - attrs[prop]['save'] = 'self.relative_path(self.%s)' % prop - attrs[prop]['save_skip'] = 'self.%s is None' % prop - - attrs['mac']['save'] = 'str(self._mac)' - attrs['mac']['save_skip'] = 'self._mac is None' - - attrs['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"' - attrs['netvm']['save_attr'] = "netvm_qid" - attrs['template']['save'] = 'str(self.template.qid) if self.template else "none"' - attrs['template']['save_attr'] = "template_qid" - attrs['label']['save'] = 'self.label.name' - - return attrs - - def __basic_parse_xml_attr(self, value): - if value is None: - return None - if value.lower() == "none": - return None - if value.lower() == "true": - return True - if value.lower() == "false": - return False - if value.isdigit(): - return int(value) - return value - - def __init__(self, **kwargs): - - collection = None - if 'collection' in kwargs: - collection = kwargs['collection'] - else: - raise ValueError("No collection given to QubesVM constructor") - - # Special case for template b/c it is given in "template_qid" property - if "xml_element" in kwargs and kwargs["xml_element"].get("template_qid"): - template_qid = kwargs["xml_element"].get("template_qid") - if template_qid.lower() != "none": - if int(template_qid) in collection: - kwargs["template"] = collection[int(template_qid)] - else: - raise ValueError("Unknown template with QID %s" % template_qid) - attrs = self._get_attrs_config() - for attr_name in sorted(attrs, key=lambda _x: attrs[_x]['order'] if 'order' in attrs[_x] else 1000): - attr_config = attrs[attr_name] - attr = attr_name - if 'attr' in attr_config: - attr = attr_config['attr'] - value = None - if attr_name in kwargs: - value = kwargs[attr_name] - elif 'xml_element' in kwargs and kwargs['xml_element'].get(attr_name) is not None: - if 'xml_deserialize' in attr_config and callable(attr_config['xml_deserialize']): - value = attr_config['xml_deserialize'](kwargs['xml_element'].get(attr_name)) - else: - value = self.__basic_parse_xml_attr(kwargs['xml_element'].get(attr_name)) - else: - if 'default' in attr_config: - value = attr_config['default'] - if 'eval' in attr_config: - setattr(self, attr, eval(attr_config['eval'])) - else: - #print "setting %s to %s" % (attr, value) - setattr(self, attr, value) - - #Init private attrs - self.__qid = self._qid - - assert self.__qid < qubes_max_qid, "VM id out of bounds!" 
- assert self.name is not None - - if not self.verify_name(self.name): - raise QubesException("Invalid characters in VM name") - - if self.netvm is not None: - self.netvm.connected_vms[self.qid] = self - - # Not in generic way to not create QubesHost() to frequently - if self.maxmem is None: - qubes_host = QubesHost() - total_mem_mb = qubes_host.memory_total/1024 - self.maxmem = total_mem_mb/2 - - # By default allow use all VCPUs - if self.vcpus is None: - qubes_host = QubesHost() - self.vcpus = qubes_host.no_cpus - - # Always set if meminfo-writer should be active or not - if 'meminfo-writer' not in self.services: - self.services['meminfo-writer'] = not (len(self.pcidevs) > 0) - - # Additionally force meminfo-writer disabled when VM have PCI devices - if len(self.pcidevs) > 0: - self.services['meminfo-writer'] = False - - # Some additional checks for template based VM - if self.template is not None: - if not self.template.is_template(): - print >> sys.stderr, "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\ - format(self.template.qid) - return False - self.template.appvms[self.qid] = self - else: - assert self.root_img is not None, "Missing root_img for standalone VM!" - - self.xid = -1 - self.xid = self.get_xid() - - def absolute_path(self, arg, default): - if arg is not None and os.path.isabs(arg): - return arg - else: - return os.path.join(self.dir_path, (arg if arg is not None else default)) - - def relative_path(self, arg): - return arg.replace(self.dir_path + '/', '') - - @property - def qid(self): - return self.__qid - - @property - def label(self): - return self._label - - @label.setter - def label(self, new_label): - self._label = new_label - if self.icon_path: - try: - os.remove(self.icon_path) - except: - pass - os.symlink (new_label.icon_path, self.icon_path) - subprocess.call(['sudo', 'xdg-icon-resource', 'forceupdate']) - - @property - def netvm(self): - return self._netvm - - # Don't know how properly call setter from base class, so workaround it... 
- @netvm.setter - def netvm(self, new_netvm): - self._set_netvm(new_netvm) - - def _set_netvm(self, new_netvm): - if self.is_running() and new_netvm is not None and not new_netvm.is_running(): - raise QubesException("Cannot dynamically attach to stopped NetVM") - if self.netvm is not None: - self.netvm.connected_vms.pop(self.qid) - if self.is_running(): - subprocess.call(["xl", "network-detach", self.name, "0"], stderr=subprocess.PIPE) - if hasattr(self.netvm, 'post_vm_net_detach'): - self.netvm.post_vm_net_detach(self) - - if new_netvm is None: - if not self._do_not_reset_firewall: - # Set also firewall to block all traffic as discussed in #370 - if os.path.exists(self.firewall_conf): - shutil.copy(self.firewall_conf, os.path.join(system_path["qubes_base_dir"], - "backup", "%s-firewall-%s.xml" % (self.name, - time.strftime('%Y-%m-%d-%H:%M:%S')))) - self.write_firewall_conf({'allow': False, 'allowDns': False, - 'allowIcmp': False, 'allowYumProxy': False, 'rules': []}) - else: - new_netvm.connected_vms[self.qid]=self - - self._netvm = new_netvm - - if new_netvm is None: - return - - if self.is_running(): - # refresh IP, DNS etc - self.create_xenstore_entries() - self.attach_network() - if hasattr(self.netvm, 'post_vm_net_attach'): - self.netvm.post_vm_net_attach(self) - - @property - def ip(self): - if self.netvm is not None: - return self.netvm.get_ip_for_vm(self.qid) - else: - return None - - @property - def netmask(self): - if self.netvm is not None: - return self.netvm.netmask - else: - return None - - @property - def gateway(self): - # This is gateway IP for _other_ VMs, so make sense only in NetVMs - return None - - @property - def secondary_dns(self): - if self.netvm is not None: - return self.netvm.secondary_dns - else: - return None - - @property - def vif(self): - if self.xid < 0: - return None - if self.netvm is None: - return None - return "vif{0}.+".format(self.xid) - - @property - def mac(self): - if self._mac is not None: - return self._mac - else: - return "00:16:3E:5E:6C:{qid:02X}".format(qid=self.qid) - - @mac.setter - def mac(self, new_mac): - self._mac = new_mac - - @property - def updateable(self): - return self.template is None - - # Leaved for compatibility - def is_updateable(self): - return self.updateable - - def is_networked(self): - if self.is_netvm(): - return True - - if self.netvm is not None: - return True - else: - return False - - def verify_name(self, name): - return re.match(r"^[a-zA-Z0-9_-]*$", name) is not None - - def pre_rename(self, new_name): - self.remove_appmenus() - - def set_name(self, name): - if self.is_running(): - raise QubesException("Cannot change name of running VM!") - - if not self.verify_name(name): - raise QubesException("Invalid characters in VM name") - - self.pre_rename(name) - - new_conf = os.path.join(self.dir_path, name) - if os.path.exists(self.conf_file): - os.rename(self.conf_file, new_conf) - old_dirpath = self.dir_path - new_dirpath = os.path.join(os.path.dirname(self.dir_path), name) - os.rename(old_dirpath, new_dirpath) - self.dir_path = new_dirpath - old_name = self.name - self.name = name - if self.private_img is not None: - self.private_img = self.private_img.replace(old_dirpath, new_dirpath) - if self.root_img is not None: - self.root_img = self.root_img.replace(old_dirpath, new_dirpath) - if self.volatile_img is not None: - self.volatile_img = self.volatile_img.replace(old_dirpath, new_dirpath) - if self.conf_file is not None: - self.conf_file = new_conf.replace(old_dirpath, new_dirpath) - if 
self.appmenus_templates_dir is not None: - self.appmenus_templates_dir = self.appmenus_templates_dir.replace(old_dirpath, new_dirpath) - if self.icon_path is not None: - self.icon_path = self.icon_path.replace(old_dirpath, new_dirpath) - if hasattr(self, 'kernels_dir') and self.kernels_dir is not None: - self.kernels_dir = self.kernels_dir.replace(old_dirpath, new_dirpath) - - self.post_rename(old_name) - - def post_rename(self, old_name): - self.create_appmenus(verbose=False) - - def is_template(self): - return False - - def is_appvm(self): - return False - - def is_netvm(self): - return False - - def is_proxyvm(self): - return False - - def is_disposablevm(self): - return False - - def get_xl_dominfo(self): - if dry_run: - return - - domains = xl_ctx.list_domains() - for dominfo in domains: - domname = xl_ctx.domid_to_name(dominfo.domid) - if domname == self.name: - return dominfo - return None - - def get_xc_dominfo(self): - if dry_run: - return - - start_xid = self.xid - if start_xid < 0: - start_xid = 0 - try: - domains = xc.domain_getinfo(start_xid, qubes_max_qid) - except xen.lowlevel.xc.Error: - return None - - for dominfo in domains: - domname = xl_ctx.domid_to_name(dominfo['domid']) - if domname == self.name: - return dominfo - return None - - def get_xid(self): - if dry_run: - return 666 - - dominfo = self.get_xc_dominfo() - if dominfo: - self.xid = dominfo['domid'] - return self.xid - else: - return -1 - - def get_uuid(self): - - dominfo = self.get_xl_dominfo() - if dominfo: - vmuuid = uuid.UUID(''.join('%02x' % b for b in dominfo.uuid)) - return vmuuid - else: - return None - - def get_mem(self): - if dry_run: - return 666 - - dominfo = self.get_xc_dominfo() - if dominfo: - return dominfo['mem_kb'] - else: - return 0 - - def get_mem_static_max(self): - if dry_run: - return 666 - - dominfo = self.get_xc_dominfo() - if dominfo: - return dominfo['maxmem_kb'] - else: - return 0 - - def get_per_cpu_time(self): - if dry_run: - import random - return random.random() * 100 - - dominfo = self.get_xc_dominfo() - if dominfo: - return dominfo['cpu_time']/dominfo['online_vcpus'] - else: - return 0 - - def get_disk_utilization_root_img(self): - if not os.path.exists(self.root_img): - return 0 - - return self.get_disk_usage(self.root_img) - - def get_root_img_sz(self): - if not os.path.exists(self.root_img): - return 0 - - return os.path.getsize(self.root_img) - - def get_power_state(self): - if dry_run: - return "NA" - - dominfo = self.get_xc_dominfo() - if dominfo: - if dominfo['paused']: - return "Paused" - elif dominfo['crashed']: - return "Crashed" - elif dominfo['shutdown']: - return "Halting" - elif dominfo['dying']: - return "Dying" - else: - if not self.is_fully_usable(): - return "Transient" - else: - return "Running" - else: - return 'Halted' - - return "NA" - - def is_guid_running(self): - xid = self.get_xid() - if xid < 0: - return False - if not os.path.exists('/var/run/qubes/guid-running.%d' % xid): - return False - return True - - def is_fully_usable(self): - # Running gui-daemon implies also VM running - if not self.is_guid_running(): - return False - # currently qrexec daemon doesn't cleanup socket in /var/run/qubes, so - # it can be left from some other VM - return True - - def is_running(self): - # in terms of Xen and internal logic - starting VM is running - if self.get_power_state() in ["Running", "Transient", "Halting"]: - return True - else: - return False - - def is_paused(self): - if self.get_power_state() == "Paused": - return True - else: - return False - - def 
get_start_time(self): - if not self.is_running(): - return None - - dominfo = self.get_xl_dominfo() - - uuid = self.get_uuid() - - start_time = xs.read('', "/vm/%s/start_time" % str(uuid)) - if start_time != '': - return datetime.fromtimestamp(float(start_time)) - else: - return None - - def is_outdated(self): - # Makes sense only on VM based on template - if self.template is None: - return False - - if not self.is_running(): - return False - - rootimg_inode = os.stat(self.template.root_img) - try: - rootcow_inode = os.stat(self.template.rootcow_img) - except OSError: - # The only case when rootcow_img doesn't exists is in the middle of - # commit_changes, so VM is outdated right now - return True - - current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format( - rootimg_inode[2], rootimg_inode[1], - rootcow_inode[2], rootcow_inode[1]) - - # 51712 (0xCA00) is xvda - # backend node name not available through xenapi :( - used_dmdev = xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())) - - return used_dmdev != current_dmdev - - def get_disk_usage(self, file_or_dir): - if not os.path.exists(file_or_dir): - return 0 - p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir], - stdout=subprocess.PIPE) - result = p.communicate() - m = re.match(r"^(\d+)\s.*", result[0]) - sz = int(m.group(1)) if m is not None else 0 - return sz - - def get_disk_utilization(self): - return self.get_disk_usage(self.dir_path) - - def get_disk_utilization_private_img(self): - return self.get_disk_usage(self.private_img) - - def get_private_img_sz(self): - if not os.path.exists(self.private_img): - return 0 - - return os.path.getsize(self.private_img) - - def resize_private_img(self, size): - assert size >= self.get_private_img_sz(), "Cannot shrink private.img" - - f_private = open (self.private_img, "a+b") - f_private.truncate (size) - f_private.close () - - retcode = 0 - if self.is_running(): - # find loop device - p = subprocess.Popen (["sudo", "losetup", "--associated", self.private_img], - stdout=subprocess.PIPE) - result = p.communicate() - m = re.match(r"^(/dev/loop\d+):\s", result[0]) - if m is None: - raise QubesException("ERROR: Cannot find loop device!") - - loop_dev = m.group(1) - - # resize loop device - subprocess.check_call(["sudo", "losetup", "--set-capacity", loop_dev]) - - retcode = self.run("while [ \"`blockdev --getsize64 /dev/xvdb`\" -lt {0} ]; do ".format(size) + - "head /dev/xvdb > /dev/null; sleep 0.2; done; resize2fs /dev/xvdb", user="root", wait=True) - else: - retcode = subprocess.check_call(["sudo", "resize2fs", "-f", self.private_img]) - if retcode != 0: - raise QubesException("resize2fs failed") - - - # FIXME: should be outside of QubesVM? 
- def get_timezone(self): - # fc18 - if os.path.islink('/etc/localtime'): - return '/'.join(os.readlink('/etc/localtime').split('/')[-2:]) - # <=fc17 - elif os.path.exists('/etc/sysconfig/clock'): - clock_config = open('/etc/sysconfig/clock', "r") - clock_config_lines = clock_config.readlines() - clock_config.close() - zone_re = re.compile(r'^ZONE="(.*)"') - for line in clock_config_lines: - line_match = zone_re.match(line) - if line_match: - return line_match.group(1) - - return None - - def cleanup_vifs(self): - """ - Xend does not remove vif when backend domain is down, so we must do it - manually - """ - - if not self.is_running(): - return - - dev_basepath = '/local/domain/%d/device/vif' % self.xid - for dev in xs.ls('', dev_basepath): - # check if backend domain is alive - backend_xid = int(xs.read('', '%s/%s/backend-id' % (dev_basepath, dev))) - if xl_ctx.domid_to_name(backend_xid) is not None: - # check if device is still active - if xs.read('', '%s/%s/state' % (dev_basepath, dev)) == '4': - continue - # remove dead device - xs.rm('', '%s/%s' % (dev_basepath, dev)) - - def create_xenstore_entries(self, xid = None): - if dry_run: - return - - if xid is None: - xid = self.xid - - domain_path = xs.get_domain_path(xid) - - # Set Xen Store entires with VM networking info: - - xs.write('', "{0}/qubes-vm-type".format(domain_path), - self.type) - xs.write('', "{0}/qubes-vm-updateable".format(domain_path), - str(self.updateable)) - - if self.is_netvm(): - xs.write('', - "{0}/qubes-netvm-gateway".format(domain_path), - self.gateway) - xs.write('', - "{0}/qubes-netvm-secondary-dns".format(domain_path), - self.secondary_dns) - xs.write('', - "{0}/qubes-netvm-netmask".format(domain_path), - self.netmask) - xs.write('', - "{0}/qubes-netvm-network".format(domain_path), - self.network) - - if self.netvm is not None: - xs.write('', "{0}/qubes-ip".format(domain_path), self.ip) - xs.write('', "{0}/qubes-netmask".format(domain_path), - self.netvm.netmask) - xs.write('', "{0}/qubes-gateway".format(domain_path), - self.netvm.gateway) - xs.write('', - "{0}/qubes-secondary-dns".format(domain_path), - self.netvm.secondary_dns) - - tzname = self.get_timezone() - if tzname: - xs.write('', - "{0}/qubes-timezone".format(domain_path), - tzname) - - for srv in self.services.keys(): - # convert True/False to "1"/"0" - xs.write('', "{0}/qubes-service/{1}".format(domain_path, srv), - str(int(self.services[srv]))) - - xs.write('', - "{0}/qubes-block-devices".format(domain_path), - '') - - xs.write('', - "{0}/qubes-usb-devices".format(domain_path), - '') - - xs.write('', "{0}/qubes-debug-mode".format(domain_path), - str(int(self.debug))) - - # Fix permissions - xs.set_permissions('', '{0}/device'.format(domain_path), - [{ 'dom': xid }]) - xs.set_permissions('', '{0}/memory'.format(domain_path), - [{ 'dom': xid }]) - xs.set_permissions('', '{0}/qubes-block-devices'.format(domain_path), - [{ 'dom': xid }]) - xs.set_permissions('', '{0}/qubes-usb-devices'.format(domain_path), - [{ 'dom': xid }]) - - def get_rootdev(self, source_template=None): - if self.template: - return "'script:snapshot:{dir}/root.img:{dir}/root-cow.img,xvda,r',".format(dir=self.template.dir_path) - else: - return "'script:file:{dir}/root.img,xvda,w',".format(dir=self.dir_path) - - def get_config_params(self, source_template=None): - args = {} - args['name'] = self.name - if hasattr(self, 'kernels_dir'): - args['kerneldir'] = self.kernels_dir - args['vmdir'] = self.dir_path - args['pcidev'] = str(self.pcidevs).strip('[]') - args['mem'] = 
str(self.memory) - if self.maxmem < self.memory: - args['mem'] = str(self.maxmem) - args['maxmem'] = str(self.maxmem) - if 'meminfo-writer' in self.services and not self.services['meminfo-writer']: - # If dynamic memory management disabled, set maxmem=mem - args['maxmem'] = args['mem'] - args['vcpus'] = str(self.vcpus) - if self.netvm is not None: - args['ip'] = self.ip - args['mac'] = self.mac - args['gateway'] = self.netvm.gateway - args['dns1'] = self.netvm.gateway - args['dns2'] = self.secondary_dns - args['netmask'] = self.netmask - args['netdev'] = "'mac={mac},script=/etc/xen/scripts/vif-route-qubes,ip={ip}".format(ip=self.ip, mac=self.mac) - if self.netvm.qid != 0: - args['netdev'] += ",backend={0}".format(self.netvm.name) - args['netdev'] += "'" - args['disable_network'] = ''; - else: - args['ip'] = '' - args['mac'] = '' - args['gateway'] = '' - args['dns1'] = '' - args['dns2'] = '' - args['netmask'] = '' - args['netdev'] = '' - args['disable_network'] = '#'; - args['rootdev'] = self.get_rootdev(source_template=source_template) - args['privatedev'] = "'script:file:{dir}/private.img,xvdb,w',".format(dir=self.dir_path) - args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path) - if hasattr(self, 'kernel'): - modulesmode='r' - if self.kernel is None: - modulesmode='w' - args['otherdevs'] = "'script:file:{dir}/modules.img,xvdd,{mode}',".format(dir=self.kernels_dir, mode=modulesmode) - if hasattr(self, 'kernelopts'): - args['kernelopts'] = self.kernelopts - if self.debug: - print >> sys.stderr, "--> Debug mode: adding 'earlyprintk=xen' to kernel opts" - args['kernelopts'] += ' earlyprintk=xen' - - return args - - @property - def uses_custom_config(self): - return self.conf_file != self.absolute_path(self.name + ".conf", None) - - def create_config_file(self, file_path = None, source_template = None, prepare_dvm = False): - if file_path is None: - file_path = self.conf_file - if self.uses_custom_config: - return - if source_template is None: - source_template = self.template - - f_conf_template = open(self.config_file_template, 'r') - conf_template = f_conf_template.read() - f_conf_template.close() - - template_params = self.get_config_params(source_template) - if prepare_dvm: - template_params['name'] = '%NAME%' - template_params['privatedev'] = '' - template_params['netdev'] = re.sub(r"ip=[0-9.]*", "ip=%IP%", template_params['netdev']) - conf_appvm = open(file_path, "w") - - conf_appvm.write(conf_template.format(**template_params)) - conf_appvm.close() - - def create_on_disk(self, verbose, source_template = None): - if source_template is None: - source_template = self.template - assert source_template is not None - - if dry_run: - return - - if verbose: - print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) - os.mkdir (self.dir_path) - - if verbose: - print >> sys.stderr, "--> Creating the VM config file: {0}".format(self.conf_file) - - self.create_config_file(source_template = source_template) - - template_priv = source_template.private_img - if verbose: - print >> sys.stderr, "--> Copying the template's private image: {0}".\ - format(template_priv) - - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", template_priv, self.private_img]) - if retcode != 0: - raise IOError ("Error while copying {0} to {1}".\ - format(template_priv, self.private_img)) - - if os.path.exists(os.path.join(source_template.dir_path, '/vm-' + vm_files["whitelisted_appmenus"])): - if verbose: - print >> 
sys.stderr, "--> Creating default whitelisted apps list: {0}".\ - format(self.dir_path + '/' + vm_files["whitelisted_appmenus"]) - shutil.copy(os.path.join(source_template.dir_path, '/vm-' + vm_files["whitelisted_appmenus"]), - os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) - - if self.updateable: - template_root = source_template.root_img - if verbose: - print >> sys.stderr, "--> Copying the template's root image: {0}".\ - format(template_root) - - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", template_root, self.root_img]) - if retcode != 0: - raise IOError ("Error while copying {0} to {1}".\ - format(template_root, self.root_img)) - - kernels_dir = source_template.kernels_dir - if verbose: - print >> sys.stderr, "--> Copying the kernel (set kernel \"none\" to use it): {0}".\ - format(kernels_dir) - - os.mkdir (self.dir_path + '/kernels') - for f in ("vmlinuz", "initramfs", "modules.img"): - shutil.copy(os.path.join(kernels_dir, f), - os.path.join(self.dir_path, vm_files["kernels_subdir"], f)) - - if verbose: - print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\ - format(source_template.appmenus_templates_dir, self.appmenus_templates_dir) - shutil.copytree (source_template.appmenus_templates_dir, self.appmenus_templates_dir) - - # Create volatile.img - self.reset_volatile_storage(source_template = source_template, verbose=verbose) - - if verbose: - print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path) - os.symlink (self.label.icon_path, self.icon_path) - - def create_appmenus(self, verbose=False, source_template = None): - if source_template is None: - source_template = self.template - - vmtype = None - if self.is_netvm(): - vmtype = 'servicevms' - else: - vmtype = 'appvms' - - try: - if source_template is not None: - subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], source_template.appmenus_templates_dir, self.name, vmtype]) - elif self.appmenus_templates_dir is not None: - subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], self.appmenus_templates_dir, self.name, vmtype]) - else: - # Only add apps to menu - subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], "none", self.name, vmtype]) - except subprocess.CalledProcessError: - print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) - - def get_clone_attrs(self): - return ['kernel', 'uses_default_kernel', 'netvm', 'uses_default_netvm', \ - 'memory', 'maxmem', 'kernelopts', 'uses_default_kernelopts', 'services', 'vcpus', \ - '_mac', 'pcidevs', 'include_in_backups', '_label'] - - def clone_attrs(self, src_vm): - self._do_not_reset_firewall = True - for prop in self.get_clone_attrs(): - setattr(self, prop, getattr(src_vm, prop)) - self._do_not_reset_firewall = False - - def clone_disk_files(self, src_vm, verbose): - if dry_run: - return - - if src_vm.is_running(): - raise QubesException("Attempt to clone a running VM!") - - if verbose: - print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) - os.mkdir (self.dir_path) - - if src_vm.private_img is not None and self.private_img is not None: - if verbose: - print >> sys.stderr, "--> Copying the private image:\n{0} ==>\n{1}".\ - format(src_vm.private_img, self.private_img) - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", src_vm.private_img, self.private_img]) - if retcode 
!= 0: - raise IOError ("Error while copying {0} to {1}".\ - format(src_vm.private_img, self.private_img)) - - if src_vm.updateable and src_vm.root_img is not None and self.root_img is not None: - if verbose: - print >> sys.stderr, "--> Copying the root image:\n{0} ==>\n{1}".\ - format(src_vm.root_img, self.root_img) - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", src_vm.root_img, self.root_img]) - if retcode != 0: - raise IOError ("Error while copying {0} to {1}".\ - format(src_vm.root_img, self.root_img)) - - if src_vm.updateable and src_vm.appmenus_templates_dir is not None and self.appmenus_templates_dir is not None: - if verbose: - print >> sys.stderr, "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\ - format(src_vm.appmenus_templates_dir, self.appmenus_templates_dir) - shutil.copytree (src_vm.appmenus_templates_dir, self.appmenus_templates_dir) - - if os.path.exists(os.path.join(src_vm.dir_path, vm_files["whitelisted_appmenus"])): - if verbose: - print >> sys.stderr, "--> Copying whitelisted apps list: {0}".\ - format(os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) - shutil.copy(os.path.join(src_vm.dir_path, vm_files["whitelisted_appmenus"]), - os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) - - if src_vm.icon_path is not None and self.icon_path is not None: - if os.path.exists (src_vm.dir_path): - if os.path.islink(src_vm.icon_path): - icon_path = os.readlink(src_vm.icon_path) - if verbose: - print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path) - os.symlink (icon_path, self.icon_path) - else: - if verbose: - print >> sys.stderr, "--> Copying icon: {0} -> {1}".format(src_vm.icon_path, self.icon_path) - shutil.copy(src_vm.icon_path, self.icon_path) - - # Create appmenus - self.create_appmenus(verbose=verbose) - - def remove_appmenus(self): - vmtype = None - if self.is_netvm(): - vmtype = 'servicevms' - else: - vmtype = 'appvms' - subprocess.check_call ([system_path["qubes_appmenu_remove_cmd"], self.name, vmtype]) - - def verify_files(self): - if dry_run: - return - - if not os.path.exists (self.dir_path): - raise QubesException ( - "VM directory doesn't exist: {0}".\ - format(self.dir_path)) - - if self.updateable and not os.path.exists (self.root_img): - raise QubesException ( - "VM root image file doesn't exist: {0}".\ - format(self.root_img)) - - if not os.path.exists (self.private_img): - raise QubesException ( - "VM private image file doesn't exist: {0}".\ - format(self.private_img)) - - if not os.path.exists (os.path.join(self.kernels_dir, 'vmlinuz')): - raise QubesException ( - "VM kernel does not exists: {0}".\ - format(os.path.join(self.kernels_dir, 'vmlinuz'))) - - if not os.path.exists (os.path.join(self.kernels_dir, 'initramfs')): - raise QubesException ( - "VM initramfs does not exists: {0}".\ - format(os.path.join(self.kernels_dir, 'initramfs'))) - - if not os.path.exists (os.path.join(self.kernels_dir, 'modules.img')): - raise QubesException ( - "VM kernel modules image does not exists: {0}".\ - format(os.path.join(self.kernels_dir, 'modules.img'))) - return True - - def reset_volatile_storage(self, source_template = None, verbose = False): - assert not self.is_running(), "Attempt to clean volatile image of running VM!" 
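# Illustrative sketch, not part of the patch: the copy helpers above shell out
# to /bin/cp instead of copying in Python because cp keeps the VM image files
# sparse.  The helper name and paths here are hypothetical; only the
# call-and-check pattern mirrors the code above.
import subprocess

def copy_sparse_image(src, dst):
    # GNU cp detects holes and recreates them, so a mostly-empty root.img
    # does not balloon to its full nominal size on the destination
    retcode = subprocess.call(["cp", "--sparse=auto", src, dst])
    if retcode != 0:
        raise IOError("Error while copying {0} to {1}".format(src, dst))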
- - if source_template is None: - source_template = self.template - - # Only makes sense on template based VM - if source_template is None: - # For StandaloneVM create it only if not already exists (eg after backup-restore) - if not os.path.exists(self.volatile_img): - if verbose: - print >> sys.stderr, "--> Creating volatile image: {0}...".format (self.volatile_img) - f_root = open (self.root_img, "r") - f_root.seek(0, os.SEEK_END) - root_size = f_root.tell() - f_root.close() - subprocess.check_call([system_path["prepare_volatile_img_cmd"], self.volatile_img, str(root_size / 1024 / 1024)]) - return - - if verbose: - print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img) - if dry_run: - return - if os.path.exists (self.volatile_img): - os.remove (self.volatile_img) - - retcode = subprocess.call (["tar", "xf", source_template.clean_volatile_img, "-C", self.dir_path]) - if retcode != 0: - raise IOError ("Error while unpacking {0} to {1}".\ - format(source_template.clean_volatile_img, self.volatile_img)) - - def remove_from_disk(self): - if dry_run: - return - - shutil.rmtree (self.dir_path) - - def write_firewall_conf(self, conf): - defaults = self.get_firewall_conf() - for item in defaults.keys(): - if item not in conf: - conf[item] = defaults[item] - - root = lxml.etree.Element( - "QubesFirwallRules", - policy = "allow" if conf["allow"] else "deny", - dns = "allow" if conf["allowDns"] else "deny", - icmp = "allow" if conf["allowIcmp"] else "deny", - yumProxy = "allow" if conf["allowYumProxy"] else "deny" - ) - - for rule in conf["rules"]: - # For backward compatibility - if "proto" not in rule: - if rule["portBegin"] is not None and rule["portBegin"] > 0: - rule["proto"] = "tcp" - else: - rule["proto"] = "any" - element = lxml.etree.Element( - "rule", - address=rule["address"], - proto=str(rule["proto"]), - ) - if rule["netmask"] is not None and rule["netmask"] != 32: - element.set("netmask", str(rule["netmask"])) - if rule["portBegin"] is not None and rule["portBegin"] > 0: - element.set("port", str(rule["portBegin"])) - if rule["portEnd"] is not None and rule["portEnd"] > 0: - element.set("toport", str(rule["portEnd"])) - - root.append(element) - - tree = lxml.etree.ElementTree(root) - - try: - f = open(self.firewall_conf, 'a') # create the file if not exist - f.close() - - with open(self.firewall_conf, 'w') as f: - fcntl.lockf(f, fcntl.LOCK_EX) - tree.write(f, encoding="UTF-8", pretty_print=True) - fcntl.lockf(f, fcntl.LOCK_UN) - f.close() - except EnvironmentError as err: - print >> sys.stderr, "{0}: save error: {1}".format( - os.path.basename(sys.argv[0]), err) - return False - - # Automatically enable/disable 'yum-proxy-setup' service based on allowYumProxy - if conf['allowYumProxy']: - self.services['yum-proxy-setup'] = True - else: - if self.services.has_key('yum-proxy-setup'): - self.services.pop('yum-proxy-setup') - - return True - - def has_firewall(self): - return os.path.exists (self.firewall_conf) - - def get_firewall_defaults(self): - return { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True, "allowYumProxy": False } - - def get_firewall_conf(self): - conf = self.get_firewall_defaults() - - try: - tree = lxml.etree.parse(self.firewall_conf) - root = tree.getroot() - - conf["allow"] = (root.get("policy") == "allow") - conf["allowDns"] = (root.get("dns") == "allow") - conf["allowIcmp"] = (root.get("icmp") == "allow") - conf["allowYumProxy"] = (root.get("yumProxy") == "allow") - - for element in root: - rule = {} - attr_list = 
("address", "netmask", "proto", "port", "toport") - - for attribute in attr_list: - rule[attribute] = element.get(attribute) - - if rule["netmask"] is not None: - rule["netmask"] = int(rule["netmask"]) - else: - rule["netmask"] = 32 - - if rule["port"] is not None: - rule["portBegin"] = int(rule["port"]) - else: - # backward compatibility - rule["portBegin"] = 0 - - # For backward compatibility - if rule["proto"] is None: - if rule["portBegin"] > 0: - rule["proto"] = "tcp" - else: - rule["proto"] = "any" - - if rule["toport"] is not None: - rule["portEnd"] = int(rule["toport"]) - else: - rule["portEnd"] = None - - del(rule["port"]) - del(rule["toport"]) - - conf["rules"].append(rule) - - except EnvironmentError as err: - return conf - except (xml.parsers.expat.ExpatError, - ValueError, LookupError) as err: - print("{0}: load error: {1}".format( - os.path.basename(sys.argv[0]), err)) - return None - - return conf - - def run(self, command, user = None, verbose = True, autostart = False, notify_function = None, passio = False, passio_popen = False, passio_stderr=False, ignore_stderr=False, localcmd = None, wait = False, gui = True): - """command should be in form 'cmdline' - When passio_popen=True, popen object with stdout connected to pipe. - When additionally passio_stderr=True, stderr also is connected to pipe. - When ignore_stderr=True, stderr is connected to /dev/null. - """ - - if user is None: - user = self.default_user - null = None - if not self.is_running() and not self.is_paused(): - if not autostart: - raise QubesException("VM not running") - - try: - if notify_function is not None: - notify_function ("info", "Starting the '{0}' VM...".format(self.name)) - elif verbose: - print >> sys.stderr, "Starting the VM '{0}'...".format(self.name) - xid = self.start(verbose=verbose, start_guid = gui, notify_function=notify_function) - - except (IOError, OSError, QubesException) as err: - raise QubesException("Error while starting the '{0}' VM: {1}".format(self.name, err)) - except (MemoryError) as err: - raise QubesException("Not enough memory to start '{0}' VM! 
Close one or more running VMs and try again.".format(self.name)) - - xid = self.get_xid() - if gui and os.getenv("DISPLAY") is not None and not self.is_guid_running(): - self.start_guid(verbose = verbose, notify_function = notify_function) - - args = [system_path["qrexec_client_path"], "-d", str(xid), "%s:%s" % (user, command)] - if localcmd is not None: - args += [ "-l", localcmd] - if passio: - os.execv(system_path["qrexec_client_path"], args) - exit(1) - - call_kwargs = {} - if ignore_stderr: - null = open("/dev/null", "w") - call_kwargs['stderr'] = null - - if passio_popen: - popen_kwargs={'stdout': subprocess.PIPE} - popen_kwargs['stdin'] = subprocess.PIPE - if passio_stderr: - popen_kwargs['stderr'] = subprocess.PIPE - else: - popen_kwargs['stderr'] = call_kwargs.get('stderr', None) - p = subprocess.Popen (args, **popen_kwargs) - if null: - null.close() - return p - if not wait: - args += ["-e"] - retcode = subprocess.call(args, **call_kwargs) - if null: - null.close() - return retcode - - def attach_network(self, verbose = False, wait = True, netvm = None): - if dry_run: - return - - if not self.is_running(): - raise QubesException ("VM not running!") - - if netvm is None: - netvm = self.netvm - - if netvm is None: - raise QubesException ("NetVM not set!") - - if netvm.qid != 0: - if not netvm.is_running(): - if verbose: - print >> sys.stderr, "--> Starting NetVM {0}...".format(netvm.name) - netvm.start() - - xs_path = '/local/domain/%d/device/vif/0/state' % (self.xid) - if xs.read('', xs_path) is not None: - # TODO: check its state and backend state (this can be stale vif after NetVM restart) - if verbose: - print >> sys.stderr, "NOTICE: Network already attached" - return - - xm_cmdline = ["/usr/sbin/xl", "network-attach", str(self.xid), "script=/etc/xen/scripts/vif-route-qubes", "ip="+self.ip, "backend="+netvm.name ] - retcode = subprocess.call (xm_cmdline) - if retcode != 0: - print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}'!".format(self.name)) - if wait: - tries = 0 - while xs.read('', xs_path) != '4': - tries += 1 - if tries > 50: - raise QubesException ("Network attach timed out!") - time.sleep(0.2) - - def wait_for_session(self, notify_function = None): - #self.run('echo $$ >> /tmp/qubes-session-waiter; [ ! -f /tmp/qubes-session-env ] && exec sleep 365d', ignore_stderr=True, gui=False, wait=True) - - # Note : User root is redefined to SYSTEM in the Windows agent code - p = self.run('QUBESRPC qubes.WaitForSession none', user="root", passio_popen=True, gui=False, wait=True) - p.communicate(input=self.default_user) - - def start_guid(self, verbose = True, notify_function = None): - if verbose: - print >> sys.stderr, "--> Starting Qubes GUId..." - xid = self.get_xid() - - guid_cmd = [system_path["qubes_guid_path"], "-d", str(xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)] - if self.debug: - guid_cmd += ['-v', '-v'] - retcode = subprocess.call (guid_cmd) - if (retcode != 0) : - raise QubesException("Cannot start qubes-guid!") - - if verbose: - print >> sys.stderr, "--> Waiting for qubes-session..." - - self.wait_for_session(notify_function) - - def start_qrexec_daemon(self, verbose = False, notify_function = None): - if verbose: - print >> sys.stderr, "--> Starting the qrexec daemon..." 
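# Illustrative sketch, not part of the patch: run() above hands the command to
# qrexec-client as "<user>:<command>", appending "-l <localcmd>" and "-e"
# (do not wait for exit) when requested.  The client path, xid and command
# below are hypothetical; only the argv layout mirrors the code above.
qrexec_client_path = "/usr/lib/qubes/qrexec-client"   # assumed location
xid, user, command = 12, "user", "firefox"
args = [qrexec_client_path, "-d", str(xid), "%s:%s" % (user, command)]
localcmd, wait = None, False
if localcmd is not None:
    args += ["-l", localcmd]
if not wait:
    args += ["-e"]
# args == ['/usr/lib/qubes/qrexec-client', '-d', '12', 'user:firefox', '-e']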
- xid = self.get_xid() - qrexec_env = os.environ - qrexec_env['QREXEC_STARTUP_TIMEOUT'] = str(self.qrexec_timeout) - retcode = subprocess.call ([system_path["qrexec_daemon_path"], str(xid), self.default_user], env=qrexec_env) - if (retcode != 0) : - self.force_shutdown(xid=xid) - raise OSError ("ERROR: Cannot execute qrexec-daemon!") - - def start(self, debug_console = False, verbose = False, preparing_dvm = False, start_guid = True, notify_function = None): - if dry_run: - return - - # Intentionally not used is_running(): eliminate also "Paused", "Crashed", "Halting" - if self.get_power_state() != "Halted": - raise QubesException ("VM is already running!") - - self.verify_files() - - if self.netvm is not None: - if self.netvm.qid != 0: - if not self.netvm.is_running(): - if verbose: - print >> sys.stderr, "--> Starting NetVM {0}...".format(self.netvm.name) - self.netvm.start(verbose = verbose, start_guid = start_guid, notify_function = notify_function) - - self.reset_volatile_storage(verbose=verbose) - if verbose: - print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type) - - # refresh config file - self.create_config_file() - - mem_required = int(self.memory) * 1024 * 1024 - qmemman_client = QMemmanClient() - try: - got_memory = qmemman_client.request_memory(mem_required) - except IOError as e: - raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e)) - if not got_memory: - qmemman_client.close() - raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name) - - # Bind pci devices to pciback driver - for pci in self.pcidevs: - try: - subprocess.check_call(['sudo', system_path["qubes_pciback_cmd"], pci]) - except subprocess.CalledProcessError: - raise QubesException("Failed to prepare PCI device %s" % pci) - - xl_cmdline = ['sudo', '/usr/sbin/xl', 'create', self.conf_file, '-q', '-p'] - - try: - subprocess.check_call(xl_cmdline) - except: - raise QubesException("Failed to load VM config") - - xid = self.get_xid() - self.xid = xid - - if preparing_dvm: - self.services['qubes-dvm'] = True - if verbose: - print >> sys.stderr, "--> Setting Xen Store info for the VM..." - self.create_xenstore_entries(xid) - - qvm_collection = QubesVmCollection() - qvm_collection.lock_db_for_reading() - qvm_collection.load() - qvm_collection.unlock_db() - - if verbose: - print >> sys.stderr, "--> Updating firewall rules..." - for vm in qvm_collection.values(): - if vm.is_proxyvm() and vm.is_running(): - vm.write_iptables_xenstore_entry() - - if verbose: - print >> sys.stderr, "--> Starting the VM..." 
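# Illustrative sketch, not part of the patch: start() above converts the VM's
# memory property (megabytes) to bytes for the qmemman request, then loads the
# domain paused so xenstore entries can be written before it runs.  The numbers
# and config path are hypothetical; only the arithmetic and the xl invocation
# mirror the code above.
memory_mb = 400
mem_required = int(memory_mb) * 1024 * 1024         # 419430400 bytes requested from qmemman
conf_file = "/var/lib/qubes/appvms/work/work.conf"   # hypothetical path
xl_cmdline = ['sudo', '/usr/sbin/xl', 'create', conf_file, '-q', '-p']
# '-p' keeps the new domain paused; it is unpaused with xc.domain_unpause()
# only after xenstore setup and the firewall-rule refresh have finished.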
- xc.domain_unpause(xid) - -# close() is not really needed, because the descriptor is close-on-exec -# anyway, the reason to postpone close() is that possibly xl is not done -# constructing the domain after its main process exits -# so we close() when we know the domain is up -# the successful unpause is some indicator of it - qmemman_client.close() - - if self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'): - self.start_guid(verbose=verbose,notify_function=notify_function) - - if not preparing_dvm: - self.start_qrexec_daemon(verbose=verbose,notify_function=notify_function) - - if not self._start_guid_first and start_guid and not preparing_dvm and os.path.exists('/var/run/shm.id'): - self.start_guid(verbose=verbose,notify_function=notify_function) - - if preparing_dvm: - if verbose: - print >> sys.stderr, "--> Preparing config template for DispVM" - self.create_config_file(file_path = self.dir_path + '/dvm.conf', prepare_dvm = True) - - # perhaps we should move it before unpause and fork? - # FIXME: this uses obsolete xm api - if debug_console: - from xen.xm import console - if verbose: - print >> sys.stderr, "--> Starting debug console..." - console.execConsole (xid) - - return xid - - def shutdown(self, force=False, xid = None): - if dry_run: - return - - if not self.is_running(): - raise QubesException ("VM already stopped!") - - subprocess.call (['/usr/sbin/xl', 'shutdown', str(xid) if xid is not None else self.name]) - #xc.domain_destroy(self.get_xid()) - - def force_shutdown(self, xid = None): - if dry_run: - return - - if not self.is_running() and not self.is_paused(): - raise QubesException ("VM already stopped!") - - subprocess.call (['/usr/sbin/xl', 'destroy', str(xid) if xid is not None else self.name]) - - def pause(self): - if dry_run: - return - - xc.domain_pause(self.get_xid()) - - def unpause(self): - if dry_run: - return - - xc.domain_unpause(self.get_xid()) - - def get_xml_attrs(self): - attrs = {} - attrs_config = self._get_attrs_config() - for attr in attrs_config: - attr_config = attrs_config[attr] - if 'save' in attr_config: - if 'save_skip' in attr_config and eval(attr_config['save_skip']): - continue - if 'save_attr' in attr_config: - attrs[attr_config['save_attr']] = eval(attr_config['save']) - else: - attrs[attr] = eval(attr_config['save']) - return attrs - - def create_xml_element(self): - # Compatibility hack (Qubes*VM in type vs Qubes*Vm in XML)... - rx_type = re.compile (r"VM") - - attrs = self.get_xml_attrs() - element = lxml.etree.Element( - "Qubes" + rx_type.sub("Vm", self.type), - **attrs) - return element - - -class QubesTemplateVm(QubesVm): - """ - A class that represents an TemplateVM. A child of QubesVm. 
- """ - - # In which order load this VM type from qubes.xml - load_order = 50 - - def _get_attrs_config(self): - attrs_config = super(QubesTemplateVm, self)._get_attrs_config() - attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_templates_dir"], self.name)' - attrs_config['label']['default'] = defaults["template_label"] - - # New attributes - - # Image for template changes - attrs_config['rootcow_img'] = { 'eval': 'os.path.join(self.dir_path, vm_files["rootcow_img"])' } - # Clean image for root-cow and swap (AppVM side) - attrs_config['clean_volatile_img'] = { 'eval': 'os.path.join(self.dir_path, vm_files["clean_volatile_img"])' } - - attrs_config['appmenus_templates_dir'] = { 'eval': 'os.path.join(self.dir_path, vm_files["appmenus_templates_subdir"])' } - return attrs_config - - def __init__(self, **kwargs): - - super(QubesTemplateVm, self).__init__(**kwargs) - - self.appvms = QubesVmCollection() - - @property - def type(self): - return "TemplateVM" - - @property - def updateable(self): - return True - - def is_template(self): - return True - - def get_firewall_defaults(self): - return { "rules": list(), "allow": False, "allowDns": False, "allowIcmp": False, "allowYumProxy": True } - - def get_rootdev(self, source_template=None): - return "'script:origin:{dir}/root.img:{dir}/root-cow.img,xvda,w',".format(dir=self.dir_path) - - def clone_disk_files(self, src_vm, verbose): - if dry_run: - return - - super(QubesTemplateVm, self).clone_disk_files(src_vm=src_vm, verbose=verbose) - - for whitelist in ['vm-' + vm_files["whitelisted_appmenus"], 'netvm-' + vm_files["whitelisted_appmenus"]]: - if os.path.exists(os.path.join(src_vm.dir_path, whitelist)): - if verbose: - print >> sys.stderr, "--> Copying default whitelisted apps list: {0}".\ - format(os.path.join(self.dir_path, whitelist)) - shutil.copy(os.path.join(src_vm.dir_path, whitelist), - os.path.join(self.dir_path, whitelist)) - - if verbose: - print >> sys.stderr, "--> Copying the template's clean volatile image:\n{0} ==>\n{1}".\ - format(src_vm.clean_volatile_img, self.clean_volatile_img) - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", src_vm.clean_volatile_img, self.clean_volatile_img]) - if retcode != 0: - raise IOError ("Error while copying {0} to {1}".\ - format(src_vm.clean_volatile_img, self.clean_volatile_img)) - if verbose: - print >> sys.stderr, "--> Copying the template's volatile image:\n{0} ==>\n{1}".\ - format(self.clean_volatile_img, self.volatile_img) - # We prefer to use Linux's cp, because it nicely handles sparse files - retcode = subprocess.call (["cp", self.clean_volatile_img, self.volatile_img]) - if retcode != 0: - raise IOError ("Error while copying {0} to {1}".\ - format(self.clean_img, self.volatile_img)) - - # Create root-cow.img - self.commit_changes(verbose=verbose) - - def create_appmenus(self, verbose=False, source_template = None): - if source_template is None: - source_template = self.template - - try: - subprocess.check_call ([system_path["qubes_appmenu_create_cmd"], self.appmenus_templates_dir, self.name, "vm-templates"]) - except subprocess.CalledProcessError: - print >> sys.stderr, "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) - - def remove_appmenus(self): - subprocess.check_call ([system_path["qubes_appmenu_remove_cmd"], self.name, "vm-templates"]) - - def pre_rename(self, new_name): - self.remove_appmenus() - - def post_rename(self, old_name): - 
self.create_appmenus(verbose=False) - - old_dirpath = os.path.join(os.path.dirname(self.dir_path), old_name) - self.clean_volatile_img = self.clean_volatile_img.replace(old_dirpath, self.dir_path) - self.rootcow_img = self.rootcow_img.replace(old_dirpath, self.dir_path) - - def remove_from_disk(self): - if dry_run: - return - - self.remove_appmenus() - super(QubesTemplateVm, self).remove_from_disk() - - def verify_files(self): - if dry_run: - return - - - if not os.path.exists (self.dir_path): - raise QubesException ( - "VM directory doesn't exist: {0}".\ - format(self.dir_path)) - - if not os.path.exists (self.root_img): - raise QubesException ( - "VM root image file doesn't exist: {0}".\ - format(self.root_img)) - - if not os.path.exists (self.private_img): - raise QubesException ( - "VM private image file doesn't exist: {0}".\ - format(self.private_img)) - - if not os.path.exists (self.volatile_img): - raise QubesException ( - "VM volatile image file doesn't exist: {0}".\ - format(self.volatile_img)) - - if not os.path.exists (self.clean_volatile_img): - raise QubesException ( - "Clean VM volatile image file doesn't exist: {0}".\ - format(self.clean_volatile_img)) - - if not os.path.exists (self.kernels_dir): - raise QubesException ( - "VM's kernels directory does not exist: {0}".\ - format(self.kernels_dir)) - - return True - - def reset_volatile_storage(self, verbose = False): - assert not self.is_running(), "Attempt to clean volatile image of running Template VM!" - - if verbose: - print >> sys.stderr, "--> Cleaning volatile image: {0}...".format (self.volatile_img) - if dry_run: - return - if os.path.exists (self.volatile_img): - os.remove (self.volatile_img) - - retcode = subprocess.call (["tar", "xf", self.clean_volatile_img, "-C", self.dir_path]) - if retcode != 0: - raise IOError ("Error while unpacking {0} to {1}".\ - format(self.template.clean_volatile_img, self.volatile_img)) - - def commit_changes (self, verbose = False): - - assert not self.is_running(), "Attempt to commit changes on running Template VM!" - - if verbose: - print >> sys.stderr, "--> Commiting template updates... COW: {0}...".format (self.rootcow_img) - - if dry_run: - return - if os.path.exists (self.rootcow_img): - os.rename (self.rootcow_img, self.rootcow_img + '.old') - - f_cow = open (self.rootcow_img, "w") - f_root = open (self.root_img, "r") - f_root.seek(0, os.SEEK_END) - f_cow.truncate (f_root.tell()) # make empty sparse file of the same size as root.img - f_cow.close () - f_root.close() - -class QubesNetVm(QubesVm): - """ - A class that represents a NetVM. A child of QubesCowVM. 
- """ - - # In which order load this VM type from qubes.xml - load_order = 70 - - def _get_attrs_config(self): - attrs_config = super(QubesNetVm, self)._get_attrs_config() - attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_servicevms_dir"], self.name)' - attrs_config['label']['default'] = defaults["servicevm_label"] - attrs_config['memory']['default'] = 200 - - # New attributes - attrs_config['netid'] = { 'save': 'str(self.netid)', 'order': 30, - 'eval': 'value if value is not None else collection.get_new_unused_netid()' } - attrs_config['netprefix'] = { 'eval': '"10.137.{0}.".format(self.netid)' } - attrs_config['dispnetprefix'] = { 'eval': '"10.138.{0}.".format(self.netid)' } - - # Dont save netvm prop - attrs_config['netvm'].pop('save') - attrs_config['uses_default_netvm'].pop('save') - - return attrs_config - - def __init__(self, **kwargs): - super(QubesNetVm, self).__init__(**kwargs) - self.connected_vms = QubesVmCollection() - - self.__network = "10.137.{0}.0".format(self.netid) - self.__netmask = defaults["vm_default_netmask"] - self.__gateway = self.netprefix + "1" - self.__secondary_dns = self.netprefix + "254" - - self.__external_ip_allowed_xids = set() - - @property - def type(self): - return "NetVM" - - def is_netvm(self): - return True - - @property - def gateway(self): - return self.__gateway - - @property - def secondary_dns(self): - return self.__secondary_dns - - @property - def netmask(self): - return self.__netmask - - @property - def network(self): - return self.__network - - def get_ip_for_vm(self, qid): - lo = qid % 253 + 2 - assert lo >= 2 and lo <= 254, "Wrong IP address for VM" - return self.netprefix + "{0}".format(lo) - - def get_ip_for_dispvm(self, dispid): - lo = dispid % 254 + 1 - assert lo >= 1 and lo <= 254, "Wrong IP address for VM" - return self.dispnetprefix + "{0}".format(lo) - - def create_xenstore_entries(self, xid = None): - if dry_run: - return - - if xid is None: - xid = self.xid - - - super(QubesNetVm, self).create_xenstore_entries(xid) - xs.write('', "/local/domain/{0}/qubes-netvm-external-ip".format(xid), '') - self.update_external_ip_permissions(xid) - - def update_external_ip_permissions(self, xid = -1): - if xid < 0: - xid = self.get_xid() - if xid < 0: - return - - command = [ - "/usr/bin/xenstore-chmod", - "/local/domain/{0}/qubes-netvm-external-ip".format(xid) - ] - - command.append("n{0}".format(xid)) - - for id in self.__external_ip_allowed_xids: - command.append("r{0}".format(id)) - - return subprocess.check_call(command) - - def start(self, **kwargs): - if dry_run: - return - - xid=super(QubesNetVm, self).start(**kwargs) - - # Connect vif's of already running VMs - for vm in self.connected_vms.values(): - if not vm.is_running(): - continue - - if 'verbose' in kwargs and kwargs['verbose']: - print >> sys.stderr, "--> Attaching network to '{0}'...".format(vm.name) - - # Cleanup stale VIFs - vm.cleanup_vifs() - - # force frontend to forget about this device - # module actually will be loaded back by udev, as soon as network is attached - vm.run("modprobe -r xen-netfront xennet", user="root") - - try: - vm.attach_network(wait=False) - except QubesException as ex: - print >> sys.stderr, ("WARNING: Cannot attach to network to '{0}': {1}".format(vm.name, ex)) - - return xid - - def shutdown(self, force=False): - if dry_run: - return - - connected_vms = [vm for vm in self.connected_vms.values() if vm.is_running()] - if connected_vms and not force: - raise QubesException("There are other VMs 
connected to this VM: " + str([vm.name for vm in connected_vms])) - - super(QubesNetVm, self).shutdown(force=force) - - def add_external_ip_permission(self, xid): - if int(xid) < 0: - return - self.__external_ip_allowed_xids.add(int(xid)) - self.update_external_ip_permissions() - - def remove_external_ip_permission(self, xid): - self.__external_ip_allowed_xids.discard(int(xid)) - self.update_external_ip_permissions() - - def create_on_disk(self, verbose, source_template = None): - if dry_run: - return - - super(QubesNetVm, self).create_on_disk(verbose, source_template=source_template) - - if os.path.exists(os.path.join(source_template.dir_path, 'netvm-' + vm_files["whitelisted_appmenus"])): - if verbose: - print >> sys.stderr, "--> Creating default whitelisted apps list: {0}".\ - format(self.dir_path + '/' + vm_files["whitelisted_appmenus"]) - shutil.copy(os.path.join(source_template.dir_path, 'netvm-' + vm_files["whitelisted_appmenus"]), - os.path.join(self.dir_path, vm_files["whitelisted_appmenus"])) - - if not self.internal: - self.create_appmenus (verbose=verbose, source_template=source_template) - - def remove_from_disk(self): - if dry_run: - return - - if not self.internal: - self.remove_appmenus() - super(QubesNetVm, self).remove_from_disk() - -class QubesProxyVm(QubesNetVm): - """ - A class that represents a ProxyVM, ex FirewallVM. A child of QubesNetVM. - """ - - def _get_attrs_config(self): - attrs_config = super(QubesProxyVm, self)._get_attrs_config() - attrs_config['uses_default_netvm']['eval'] = 'False' - # Save netvm prop again - attrs_config['netvm']['save'] = 'str(self.netvm.qid) if self.netvm is not None else "none"' - - return attrs_config - - def __init__(self, **kwargs): - super(QubesProxyVm, self).__init__(**kwargs) - self.rules_applied = None - - @property - def type(self): - return "ProxyVM" - - def is_proxyvm(self): - return True - - def _set_netvm(self, new_netvm): - old_netvm = self.netvm - super(QubesProxyVm, self)._set_netvm(new_netvm) - if self.netvm is not None: - self.netvm.add_external_ip_permission(self.get_xid()) - self.write_netvm_domid_entry() - if old_netvm is not None: - old_netvm.remove_external_ip_permission(self.get_xid()) - - def post_vm_net_attach(self, vm): - """ Called after some VM net-attached to this ProxyVm """ - - self.write_iptables_xenstore_entry() - - def post_vm_net_detach(self, vm): - """ Called after some VM net-detached from this ProxyVm """ - - self.write_iptables_xenstore_entry() - - def start(self, **kwargs): - if dry_run: - return - retcode = super(QubesProxyVm, self).start(**kwargs) - if self.netvm is not None: - self.netvm.add_external_ip_permission(self.get_xid()) - self.write_netvm_domid_entry() - return retcode - - def force_shutdown(self, **kwargs): - if dry_run: - return - if self.netvm is not None: - self.netvm.remove_external_ip_permission(kwargs['xid'] if 'xid' in kwargs else self.get_xid()) - super(QubesProxyVm, self).force_shutdown(**kwargs) - - def create_xenstore_entries(self, xid = None): - if dry_run: - return - - if xid is None: - xid = self.xid - - - super(QubesProxyVm, self).create_xenstore_entries(xid) - xs.write('', "/local/domain/{0}/qubes-iptables-error".format(xid), '') - xs.set_permissions('', "/local/domain/{0}/qubes-iptables-error".format(xid), - [{ 'dom': xid, 'write': True }]) - self.write_iptables_xenstore_entry() - - def write_netvm_domid_entry(self, xid = -1): - if not self.is_running(): - return - - if xid < 0: - xid = self.get_xid() - - if self.netvm is None: - xs.write('', 
"/local/domain/{0}/qubes-netvm-domid".format(xid), '') - else: - xs.write('', "/local/domain/{0}/qubes-netvm-domid".format(xid), - "{0}".format(self.netvm.get_xid())) - - def write_iptables_xenstore_entry(self): - xs.rm('', "/local/domain/{0}/qubes-iptables-domainrules".format(self.get_xid())) - iptables = "# Generated by Qubes Core on {0}\n".format(datetime.now().ctime()) - iptables += "*filter\n" - iptables += ":INPUT DROP [0:0]\n" - iptables += ":FORWARD DROP [0:0]\n" - iptables += ":OUTPUT ACCEPT [0:0]\n" - - # Strict INPUT rules - iptables += "-A INPUT -i vif+ -p udp -m udp --dport 68 -j DROP\n" - iptables += "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT\n" - iptables += "-A INPUT -p icmp -j ACCEPT\n" - iptables += "-A INPUT -i lo -j ACCEPT\n" - iptables += "-A INPUT -j REJECT --reject-with icmp-host-prohibited\n" - - iptables += "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT\n" - # Allow dom0 networking - iptables += "-A FORWARD -i vif0.0 -j ACCEPT\n" - # Deny inter-VMs networking - iptables += "-A FORWARD -i vif+ -o vif+ -j DROP\n" - iptables += "COMMIT\n" - xs.write('', "/local/domain/{0}/qubes-iptables-header".format(self.get_xid()), iptables) - - vms = [vm for vm in self.connected_vms.values()] - for vm in vms: - iptables="*filter\n" - conf = vm.get_firewall_conf() - - xid = vm.get_xid() - if xid < 0: # VM not active ATM - continue - - ip = vm.ip - if ip is None: - continue - - # Anti-spoof rules are added by vif-script (vif-route-qubes), here we trust IP address - - accept_action = "ACCEPT" - reject_action = "REJECT --reject-with icmp-host-prohibited" - - if conf["allow"]: - default_action = accept_action - rules_action = reject_action - else: - default_action = reject_action - rules_action = accept_action - - for rule in conf["rules"]: - iptables += "-A FORWARD -s {0} -d {1}".format(ip, rule["address"]) - if rule["netmask"] != 32: - iptables += "/{0}".format(rule["netmask"]) - - if rule["proto"] is not None and rule["proto"] != "any": - iptables += " -p {0}".format(rule["proto"]) - if rule["portBegin"] is not None and rule["portBegin"] > 0: - iptables += " --dport {0}".format(rule["portBegin"]) - if rule["portEnd"] is not None and rule["portEnd"] > rule["portBegin"]: - iptables += ":{0}".format(rule["portEnd"]) - - iptables += " -j {0}\n".format(rules_action) - - if conf["allowDns"] and self.netvm is not None: - # PREROUTING does DNAT to NetVM DNSes, so we need self.netvm. 
properties - iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.gateway) - iptables += "-A FORWARD -s {0} -p udp -d {1} --dport 53 -j ACCEPT\n".format(ip,self.netvm.secondary_dns) - if conf["allowIcmp"]: - iptables += "-A FORWARD -s {0} -p icmp -j ACCEPT\n".format(ip) - if conf["allowYumProxy"]: - iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j ACCEPT\n".format(ip, yum_proxy_ip, yum_proxy_port) - else: - iptables += "-A FORWARD -s {0} -p tcp -d {1} --dport {2} -j DROP\n".format(ip, yum_proxy_ip, yum_proxy_port) - - iptables += "-A FORWARD -s {0} -j {1}\n".format(ip, default_action) - iptables += "COMMIT\n" - xs.write('', "/local/domain/"+str(self.get_xid())+"/qubes-iptables-domainrules/"+str(xid), iptables) - # no need for ending -A FORWARD -j DROP, cause default action is DROP - - self.write_netvm_domid_entry() - - self.rules_applied = None - xs.write('', "/local/domain/{0}/qubes-iptables".format(self.get_xid()), 'reload') - -class QubesDom0NetVm(QubesNetVm): - def __init__(self, **kwargs): - super(QubesDom0NetVm, self).__init__(qid=0, name="dom0", netid=0, - dir_path=None, - private_img = None, - template = None, - label = defaults["template_label"], - **kwargs) - self.xid = 0 - - def is_running(self): - return True - - def get_xid(self): - return 0 - - def get_power_state(self): - return "Running" - - def get_disk_usage(self, file_or_dir): - return 0 - - def get_disk_utilization(self): - return 0 - - def get_disk_utilization_private_img(self): - return 0 - - def get_private_img_sz(self): - return 0 - - @property - def ip(self): - return "10.137.0.2" - - def start(self, **kwargs): - raise QubesException ("Cannot start Dom0 fake domain!") - - def get_xl_dominfo(self): - if dry_run: - return - - domains = xl_ctx.list_domains() - for dominfo in domains: - if dominfo.domid == 0: - return dominfo - return None - - def get_xc_dominfo(self): - if dry_run: - return - - domains = xc.domain_getinfo(0, 1) - return domains[0] - - def create_xml_element(self): - return None - - def verify_files(self): - return True - -class QubesDisposableVm(QubesVm): - """ - A class that represents an DisposableVM. A child of QubesVm. - """ - - # In which order load this VM type from qubes.xml - load_order = 120 - - def _get_attrs_config(self): - attrs_config = super(QubesDisposableVm, self)._get_attrs_config() - - # New attributes - attrs_config['dispid'] = { 'save': 'str(self.dispid)' } - - return attrs_config - - def __init__(self, **kwargs): - - super(QubesDisposableVm, self).__init__(dir_path="/nonexistent", **kwargs) - - assert self.template is not None, "Missing template for DisposableVM!" 
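# Illustrative sketch, not part of the patch: write_iptables_xenstore_entry()
# above emits one FORWARD line per firewall rule of each connected VM.  The
# addresses and ports below are hypothetical; only the string-building mirrors
# the code above.
ip = "10.137.2.5"
rule = {"address": "192.0.2.10", "netmask": 24, "proto": "tcp",
        "portBegin": 443, "portEnd": None}
line = "-A FORWARD -s {0} -d {1}".format(ip, rule["address"])
if rule["netmask"] != 32:
    line += "/{0}".format(rule["netmask"])
if rule["proto"] is not None and rule["proto"] != "any":
    line += " -p {0}".format(rule["proto"])
if rule["portBegin"] is not None and rule["portBegin"] > 0:
    line += " --dport {0}".format(rule["portBegin"])
if rule["portEnd"] is not None and rule["portEnd"] > rule["portBegin"]:
    line += ":{0}".format(rule["portEnd"])
line += " -j ACCEPT\n"
# line == '-A FORWARD -s 10.137.2.5 -d 192.0.2.10/24 -p tcp --dport 443 -j ACCEPT\n'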
- - # Use DispVM icon with the same color - if self._label: - self._label = QubesDispVmLabels[self._label.name] - self.icon_path = self._label.icon_path - - @property - def type(self): - return "DisposableVM" - - def is_disposablevm(self): - return True - - @property - def ip(self): - if self.netvm is not None: - return self.netvm.get_ip_for_dispvm(self.dispid) - else: - return None - - - def get_xml_attrs(self): - # Minimal set - do not inherit rest of attributes - attrs = {} - attrs["qid"] = str(self.qid) - attrs["name"] = self.name - attrs["dispid"] = str(self.dispid) - attrs["template_qid"] = str(self.template.qid) - attrs["label"] = self.label.name - attrs["firewall_conf"] = self.relative_path(self.firewall_conf) - attrs["netvm_qid"] = str(self.netvm.qid) if self.netvm is not None else "none" - return attrs - - def verify_files(self): - return True - -class QubesAppVm(QubesVm): - """ - A class that represents an AppVM. A child of QubesVm. - """ - def _get_attrs_config(self): - attrs_config = super(QubesAppVm, self)._get_attrs_config() - attrs_config['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_appvms_dir"], self.name)' - - return attrs_config - - @property - def type(self): - return "AppVM" - - def is_appvm(self): - return True - - def create_on_disk(self, verbose, source_template = None): - if dry_run: - return - - super(QubesAppVm, self).create_on_disk(verbose, source_template=source_template) - - if not self.internal: - self.create_appmenus (verbose=verbose, source_template=source_template) - - def remove_from_disk(self): - if dry_run: - return - - self.remove_appmenus() - super(QubesAppVm, self).remove_from_disk() - -class QubesHVm(QubesVm): - """ - A class that represents an HVM. A child of QubesVm. 
- """ - - # FIXME: logically should inherit after QubesAppVm, but none of its methods - # are useful for HVM - - def _get_attrs_config(self): - attrs = super(QubesHVm, self)._get_attrs_config() - attrs.pop('kernel') - attrs.pop('kernels_dir') - attrs.pop('kernelopts') - attrs.pop('uses_default_kernel') - attrs.pop('uses_default_kernelopts') - attrs['dir_path']['eval'] = 'value if value is not None else os.path.join(system_path["qubes_appvms_dir"], self.name)' - attrs['volatile_img']['eval'] = 'None' - attrs['config_file_template']['eval'] = 'system_path["config_template_hvm"]' - attrs['drive'] = { 'save': 'str(self.drive)' } - attrs['maxmem'].pop('save') - attrs['timezone'] = { 'default': 'localtime', 'save': 'str(self.timezone)' } - attrs['qrexec_installed'] = { 'default': False, 'save': 'str(self.qrexec_installed)' } - attrs['guiagent_installed'] = { 'default' : False, 'save': 'str(self.guiagent_installed)' } - attrs['_start_guid_first']['eval'] = 'True' - attrs['services']['default'] = "{'meminfo-writer': False}" - - # only standalone HVM supported for now - attrs['template']['eval'] = 'None' - attrs['memory']['default'] = defaults["hvm_memory"] - - return attrs - - def __init__(self, **kwargs): - - super(QubesHVm, self).__init__(**kwargs) - - # Default for meminfo-writer have changed to (correct) False in the - # same version as introduction of guiagent_installed, so for older VMs - # with wrong setting, change is based on 'guiagent_installed' presence - if "guiagent_installed" not in kwargs and \ - (not 'xml_element' in kwargs or kwargs['xml_element'].get('guiagent_installed') is None): - self.services['meminfo-writer'] = False - - # HVM normally doesn't support dynamic memory management - if not ('meminfo-writer' in self.services and self.services['meminfo-writer']): - self.maxmem = self.memory - - # Disable qemu GUID if the user installed qubes gui agent - if self.guiagent_installed: - self._start_guid_first = False - - @property - def type(self): - return "HVM" - - def is_appvm(self): - return True - - def get_clone_attrs(self): - attrs = super(QubesHVm, self).get_clone_attrs() - attrs.remove('kernel') - attrs.remove('uses_default_kernel') - attrs.remove('kernelopts') - attrs.remove('uses_default_kernelopts') - attrs += [ 'timezone' ] - attrs += [ 'qrexec_installed' ] - attrs += [ 'guiagent_installed' ] - return attrs - - def create_on_disk(self, verbose, source_template = None): - if dry_run: - return - - if verbose: - print >> sys.stderr, "--> Creating directory: {0}".format(self.dir_path) - os.mkdir (self.dir_path) - - if verbose: - print >> sys.stderr, "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path) - os.symlink (self.label.icon_path, self.icon_path) - - if verbose: - print >> sys.stderr, "--> Creating appmenus directory: {0}".format(self.appmenus_templates_dir) - os.mkdir (self.appmenus_templates_dir) - shutil.copy (system_path["start_appmenu_template"], self.appmenus_templates_dir) - - if not self.internal: - self.create_appmenus (verbose, source_template=source_template) - - self.create_config_file() - - # create empty disk - f_root = open(self.root_img, "w") - f_root.truncate(defaults["hvm_disk_size"]) - f_root.close() - - # create empty private.img - f_private = open(self.private_img, "w") - f_private.truncate(defaults["hvm_private_img_size"]) - f_root.close() - - def remove_from_disk(self): - if dry_run: - return - - self.remove_appmenus() - super(QubesHVm, self).remove_from_disk() - - def get_disk_utilization_private_img(self): - 
return 0 - - def get_private_img_sz(self): - return 0 - - def resize_private_img(self, size): - raise NotImplementedError("HVM has no private.img") - - def get_config_params(self, source_template=None): - - params = super(QubesHVm, self).get_config_params(source_template=source_template) - - params['volatiledev'] = '' - if self.drive: - type_mode = ":cdrom,r" - drive_path = self.drive - # leave empty to use standard syntax in case of dom0 - backend_domain = "" - if drive_path.startswith("hd:"): - type_mode = ",w" - drive_path = drive_path[3:] - elif drive_path.startswith("cdrom:"): - type_mode = ":cdrom,r" - drive_path = drive_path[6:] - backend_split = re.match(r"^([a-zA-Z0-9-]*):(.*)", drive_path) - if backend_split: - backend_domain = "," + backend_split.group(1) - drive_path = backend_split.group(2) - - # FIXME: os.stat will work only when backend in dom0... - stat_res = None - if backend_domain == "": - stat_res = os.stat(drive_path) - if stat_res and stat.S_ISBLK(stat_res.st_mode): - params['otherdevs'] = "'phy:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain) - else: - params['otherdevs'] = "'script:file:%s,xvdc%s%s'," % (drive_path, type_mode, backend_domain) - else: - params['otherdevs'] = '' - - # Disable currently unused private.img - to be enabled when TemplateHVm done - params['privatedev'] = '' - - if self.timezone.lower() == 'localtime': - params['localtime'] = '1' - params['timeoffset'] = '0' - elif self.timezone.isdigit(): - params['localtime'] = '0' - params['timeoffset'] = self.timezone - else: - print >>sys.stderr, "WARNING: invalid 'timezone' value: %s" % self.timezone - params['localtime'] = '0' - params['timeoffset'] = '0' - return params - - def verify_files(self): - if dry_run: - return - - if not os.path.exists (self.dir_path): - raise QubesException ( - "VM directory doesn't exist: {0}".\ - format(self.dir_path)) - - if self.is_updateable() and not os.path.exists (self.root_img): - raise QubesException ( - "VM root image file doesn't exist: {0}".\ - format(self.root_img)) - - if not os.path.exists (self.private_img): - print >>sys.stderr, "WARNING: Creating empty VM private image file: {0}".\ - format(self.private_img) - f_private = open(self.private_img, "w") - f_private.truncate(defaults["hvm_private_img_size"]) - f_private.close() - - return True - - def reset_volatile_storage(self, **kwargs): - pass - - @property - def vif(self): - if self.xid < 0: - return None - if self.netvm is None: - return None - return "vif{0}.+".format(self.stubdom_xid) - - def run(self, command, **kwargs): - if self.qrexec_installed: - if 'gui' in kwargs and kwargs['gui']==False: - command = "nogui:" + command - return super(QubesHVm, self).run(command, **kwargs) - else: - raise QubesException("Needs qrexec agent installed in VM to use this function. See also qvm-prefs.") - - @property - def stubdom_xid(self): - if self.xid < 0: - return -1 - - stubdom_xid_str = xs.read('', '/local/domain/%d/image/device-model-domid' % self.xid) - if stubdom_xid_str is not None: - return int(stubdom_xid_str) - else: - return -1 - - def start_guid(self, verbose = True, notify_function = None): - # If user force the guiagent, start_guid will mimic a standard QubesVM - if self.guiagent_installed: - super(QubesHVm, self).start_guid(verbose, notify_function) - else: - if verbose: - print >> sys.stderr, "--> Starting Qubes GUId..." 
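# Illustrative sketch, not part of the patch: get_config_params() above parses
# the HVM 'drive' property, which may look like "hd:<backend>:<path>" or
# "cdrom:<path>".  The value below is hypothetical; only the prefix handling
# and backend split mirror the code above.
import re

def parse_drive(drive_path):
    type_mode = ":cdrom,r"
    backend_domain = ""            # empty string means the backend is dom0
    if drive_path.startswith("hd:"):
        type_mode = ",w"
        drive_path = drive_path[3:]
    elif drive_path.startswith("cdrom:"):
        type_mode = ":cdrom,r"
        drive_path = drive_path[6:]
    backend_split = re.match(r"^([a-zA-Z0-9-]*):(.*)", drive_path)
    if backend_split:
        backend_domain = "," + backend_split.group(1)
        drive_path = backend_split.group(2)
    return drive_path, type_mode, backend_domain

# parse_drive("hd:work:/home/user/disk.img") == ('/home/user/disk.img', ',w', ',work')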
- - retcode = subprocess.call ([system_path["qubes_guid_path"], "-d", str(self.stubdom_xid), "-c", self.label.color, "-i", self.label.icon_path, "-l", str(self.label.index)]) - if (retcode != 0) : - raise QubesException("Cannot start qubes-guid!") - - def start_qrexec_daemon(self, **kwargs): - if self.qrexec_installed: - super(QubesHVm, self).start_qrexec_daemon(**kwargs) - - if self._start_guid_first: - if kwargs.get('verbose'): - print >> sys.stderr, "--> Waiting for user '%s' login..." % self.default_user - - self.wait_for_session(notify_function=kwargs.get('notify_function', None)) - - def pause(self): - if dry_run: - return - - xc.domain_pause(self.stubdom_xid) - super(QubesHVm, self).pause() - - def unpause(self): - if dry_run: - return - - xc.domain_unpause(self.stubdom_xid) - super(QubesHVm, self).unpause() - - def is_guid_running(self): - # If user force the guiagent, is_guid_running will mimic a standard QubesVM - if self.guiagent_installed: - return super(QubesHVm, self).is_guid_running() - else: - xid = self.stubdom_xid - if xid < 0: - return False - if not os.path.exists('/var/run/qubes/guid-running.%d' % xid): - return False - return True - -register_qubes_vm_class("QubesTemplateVm", QubesTemplateVm) -register_qubes_vm_class("QubesNetVm", QubesNetVm) -register_qubes_vm_class("QubesProxyVm", QubesProxyVm) -register_qubes_vm_class("QubesDisposableVm", QubesDisposableVm) -register_qubes_vm_class("QubesAppVm", QubesAppVm) -register_qubes_vm_class("QubesHVm", QubesHVm) + QubesVmClasses[vm_class.__name__] = vm_class + # register class as local for this module - to make it easy to import from + # other modules + setattr(sys.modules[__name__], vm_class.__name__, vm_class) class QubesVmCollection(dict): """ @@ -3154,4 +731,10 @@ class QubesDaemonPidfile(object): self.remove_pidfile() return False +modules_dir = os.path.join(os.path.dirname(__file__), 'modules') +for module_file in sorted(os.listdir(modules_dir)): + if not module_file.endswith(".py") or module_file == "__init__.py": + continue + __import__('qubes.modules.%s' % module_file[:-3]) + # vim:sw=4:et: diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index c7e377dc..344dd6e5 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -50,6 +50,7 @@ Requires(postun): systemd-units Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-qubes-dom0 Requires: qubes-qrexec-dom0 Requires: python-lxml +# TODO: R: qubes-gui-dom0 >= 2.1.11 Conflicts: qubes-gui-dom0 < 1.1.13 Requires: xen >= 4.1.0-2 Requires: xen-hvm @@ -75,8 +76,8 @@ ln -sf . 
%{name}-%{version}
 %setup -T -D
 
 %build
-python -m compileall dom0/core dom0/qmemman
-python -O -m compileall dom0/core dom0/qmemman
+python -m compileall dom0/core dom0/core-modules dom0/qmemman
+python -O -m compileall dom0/core dom0/core-modules dom0/qmemman
 for dir in dom0/dispvm dom0/qubes-rpc dom0/qmemman; do
   (cd $dir; make)
 done
@@ -118,6 +119,11 @@
 cp core/__init__.py $RPM_BUILD_ROOT%{python_sitearch}/qubes
 cp core/__init__.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes
 cp qmemman/qmemman*py $RPM_BUILD_ROOT%{python_sitearch}/qubes
 cp qmemman/qmemman*py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes
+mkdir -p $RPM_BUILD_ROOT%{python_sitearch}/qubes/modules
+cp core-modules/0*.py $RPM_BUILD_ROOT%{python_sitearch}/qubes/modules
+cp core-modules/0*.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes/modules
+cp core-modules/__init__.py $RPM_BUILD_ROOT%{python_sitearch}/qubes/modules
+cp core-modules/__init__.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes/modules
 mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/qubes
 cp qmemman/qmemman.conf $RPM_BUILD_ROOT%{_sysconfdir}/qubes/
@@ -359,6 +365,8 @@ fi
 %{python_sitearch}/qubes/__init__.pyc
 %{python_sitearch}/qubes/__init__.pyo
 %{python_sitearch}/qubes/qmemman*.py*
+%{python_sitearch}/qubes/modules/0*.py*
+%{python_sitearch}/qubes/modules/__init__.py*
 /usr/lib/qubes/unbind-pci-device.sh
 /usr/lib/qubes/cleanup-dispvms
 /usr/lib/qubes/convert-apptemplate2vm.sh
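# Illustrative sketch, not part of the patch: the loader appended to
# qubes/__init__.py above imports every 0*.py file from qubes/modules/ in
# lexical order, which is why the new files carry numeric prefixes such as
# 000QubesVm.py (base class first) and 01QubesAppVm.py.  The directory below
# is hypothetical; only the sorted-scan-and-import pattern mirrors the code
# above.
import os

def list_plugin_modules(modules_dir):
    names = []
    for module_file in sorted(os.listdir(modules_dir)):
        if not module_file.endswith(".py") or module_file == "__init__.py":
            continue
        names.append(module_file[:-3])   # e.g. '000QubesVm', '01QubesAppVm'
    return names

# each name would then be imported with __import__('qubes.modules.%s' % name),
# letting register_qubes_vm_class() add the class to QubesVmClasses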