diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index ea3e3b5a..7726b057 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -178,6 +178,11 @@ class QubesVm(object):
                  root_img = None,
                  private_img = None,
                  memory = default_memory,
+                 template_vm = None,
+                 firewall_conf = None,
+                 volatile_img = None,
+                 pcidevs = None,
+                 internal = False,
                  vcpus = None):
@@ -200,9 +205,6 @@ class QubesVm(object):
         self.uses_default_netvm = uses_default_netvm
         self.netvm_vm = netvm_vm
 
-        # Create template_vm property - used in AppVM, NetVM, ProxyVM
-        self.template_vm = None
-
         # We use it in remove from disk to avoid removing rpm files (for templates)
         self.installed_by_rpm = installed_by_rpm
 
@@ -221,6 +223,11 @@ class QubesVm(object):
         self.private_img = dir_path + "/" + (
             private_img if private_img is not None else default_private_img)
 
+        if firewall_conf is None:
+            self.firewall_conf = dir_path + "/" + default_firewall_conf_file
+        else:
+            self.firewall_conf = firewall_conf
+
         self.updateable = updateable
         self.label = label if label is not None else QubesVmLabels["red"]
         if self.dir_path is not None:
@@ -229,10 +236,27 @@ class QubesVm(object):
             self.icon_path = None
 
         # PCI devices - used only by NetVM
-        self.pcidevs = ""
+        if pcidevs is None or pcidevs == "none":
+            self.pcidevs = ""
+        else:
+            self.pcidevs = pcidevs
         self.memory = memory
 
+        self.template_vm = template_vm
+        if template_vm is not None:
+            if updateable:
+                print "ERROR: Template based VM cannot be updateable!"
+                return False
+            if not template_vm.is_template():
+                print "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
+                    format(template_vm.qid)
+                return False
+
+            template_vm.appvms[qid] = self
+        else:
+            assert self.root_img is not None, "Missing root_img for standalone VM!"
+
         # By default allow use all VCPUs
         if vcpus is None:
             qubes_host = QubesHost()
@@ -240,6 +264,9 @@ class QubesVm(object):
         else:
             self.vcpus = vcpus
 
+        # Internal VM (not shown in qubes-manager, doesn't create appmenus entries
+        self.internal = internal
+
         if not dry_run and xend_session.session is not None:
             self.refresh_xend_session()
 
@@ -287,14 +314,17 @@ class QubesVm(object):
         else:
             return False
 
+    def set_updateable(self):
+        if self.is_updateable():
+            return
+
+        raise QubesException ("Change 'updateable' flag is not supported. Please use qvm-create.")
     def set_nonupdateable(self):
         if not self.is_updateable():
             return
 
-        assert not self.is_running()
-        # We can always downgrade a VM to non-updateable...
-        self.updateable = False
+        raise QubesException ("Change 'updateable' flag is not supported. Please use qvm-create.")
 
     def is_template(self):
         return isinstance(self, QubesTemplateVm)
@@ -455,6 +485,30 @@
         else:
             return False
 
+    def is_outdated(self):
+        # Makes sense only on VM based on template
+        if self.template_vm is None:
+            return False
+
+        if not self.is_running():
+            return False
+
+        rootimg_inode = os.stat(self.template_vm.root_img)
+        rootcow_inode = os.stat(self.template_vm.rootcow_img)
+
+        current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format(
+                rootimg_inode[2], rootimg_inode[1],
+                rootcow_inode[2], rootcow_inode[1])
+
+        # Don't know why, but 51712 is xvda
+        # backend node name not available through xenapi :(
+        p = subprocess.Popen (["xenstore-read",
+            "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())],
+            stdout=subprocess.PIPE)
+        used_dmdev = p.communicate()[0].strip()
+
+        return used_dmdev != current_dmdev
+
     def get_disk_usage(self, file_or_dir):
         if not os.path.exists(file_or_dir):
             return 0
@@ -535,6 +589,220 @@
                 "/local/domain/{0}/qubes_secondary_dns".format(xid),
                 self.netvm_vm.secondary_dns])
 
+    def create_config_file(self):
+        assert self.template_vm is not None
+
+        conf_template = None
+        if self.type == "NetVM":
+            conf_template = open (self.template_vm.netvms_conf_file, "r")
+        elif self.updateable:
+            conf_template = open (self.template_vm.standalonevms_conf_file, "r")
+        else:
+            conf_template = open (self.template_vm.appvms_conf_file, "r")
+        if os.path.isfile(self.conf_file):
+            shutil.copy(self.conf_file, self.conf_file + ".backup")
+        conf_appvm = open(self.conf_file, "w")
+        rx_vmname = re.compile (r"%VMNAME%")
+        rx_vmdir = re.compile (r"%VMDIR%")
+        rx_template = re.compile (r"%TEMPLATEDIR%")
+        rx_pcidevs = re.compile (r"%PCIDEVS%")
+        rx_mem = re.compile (r"%MEM%")
+        rx_vcpus = re.compile (r"%VCPUS%")
+
+        for line in conf_template:
+            line = rx_vmname.sub (self.name, line)
+            line = rx_vmdir.sub (self.dir_path, line)
+            line = rx_template.sub (self.template_vm.dir_path, line)
+            line = rx_pcidevs.sub (self.pcidevs, line)
+            line = rx_mem.sub (str(self.memory), line)
+            line = rx_vcpus.sub (str(self.vcpus), line)
+            conf_appvm.write(line)
+
+        conf_template.close()
+        conf_appvm.close()
+
+    def create_on_disk(self, verbose):
+        assert self.template_vm is not None
+
+        if dry_run:
+            return
+
+        if verbose:
+            print "--> Creating directory: {0}".format(self.dir_path)
+        os.mkdir (self.dir_path)
+
+        if verbose:
+            print "--> Creating the VM config file: {0}".format(self.conf_file)
+
+        self.create_config_file()
+
+        template_priv = self.template_vm.private_img
+        if verbose:
+            print "--> Copying the template's private image: {0}".\
+                    format(template_priv)
+
+        # We prefer to use Linux's cp, because it nicely handles sparse files
+        retcode = subprocess.call (["cp", template_priv, self.private_img])
+        if retcode != 0:
+            raise IOError ("Error while copying {0} to {1}".\
+                           format(template_priv, self.private_img))
+
+        if self.is_updateable():
+            template_root = self.template_vm.root_img
+            if verbose:
+                print "--> Copying the template's root image: {0}".\
+                        format(template_root)
+
+            # We prefer to use Linux's cp, because it nicely handles sparse files
+            retcode = subprocess.call (["cp", template_root, self.root_img])
+            if retcode != 0:
+                raise IOError ("Error while copying {0} to {1}".\
+                               format(template_root, self.root_img))
+
+            kernels_dir = self.dir_path + '/' + default_kernels_subdir
+            if verbose:
+                print "--> Copying the template's kernel dir: {0}".\
+                        format(self.template_vm.kernels_dir)
+            shutil.copytree (self.template_vm.kernels_dir, kernels_dir)
+
+
+        # Create volatile.img
+        self.reset_volatile_storage()
+
+    def verify_files(self):
+        if dry_run:
+            return
+
+        if not os.path.exists (self.dir_path):
+            raise QubesException (
+                "VM directory doesn't exist: {0}".\
+                format(self.dir_path))
+
+        if not os.path.exists (self.conf_file):
+            raise QubesException (
+                "VM config file doesn't exist: {0}".\
+                format(self.conf_file))
+
+        if self.is_updateable() and not os.path.exists (self.root_img):
+            raise QubesException (
+                "VM root image file doesn't exist: {0}".\
+                format(self.root_img))
+
+        if not os.path.exists (self.private_img):
+            raise QubesException (
+                "VM private image file doesn't exist: {0}".\
+                format(self.private_img))
+        return True
+
+    def reset_volatile_storage(self):
+        assert not self.is_running(), "Attempt to clean volatile image of running VM!"
+
+        # Only makes sense on template based VM
+        if self.template_vm is None:
+            return
+
+        print "--> Cleaning volatile image: {0}...".format (self.volatile_img)
+        if dry_run:
+            return
+        if os.path.exists (self.volatile_img):
+            os.remove (self.volatile_img)
+
+        retcode = subprocess.call (["tar", "xf", self.template_vm.clean_volatile_img, "-C", self.dir_path])
+        if retcode != 0:
+            raise IOError ("Error while unpacking {0} to {1}".\
+                           format(self.template_vm.clean_volatile_img, self.volatile_img))
+
+    def remove_from_disk(self):
+        if dry_run:
+            return
+
+        shutil.rmtree (self.dir_path)
+
+    def write_firewall_conf(self, conf):
+        root = xml.etree.ElementTree.Element(
+                "QubesFirwallRules",
+                policy = "allow" if conf["allow"] else "deny",
+                dns = "allow" if conf["allowDns"] else "deny",
+                icmp = "allow" if conf["allowIcmp"] else "deny"
+        )
+
+        for rule in conf["rules"]:
+            element = xml.etree.ElementTree.Element(
+                    "rule",
+                    address=rule["address"],
+                    port=str(rule["portBegin"]),
+            )
+            if rule["netmask"] is not None and rule["netmask"] != 32:
+                element.set("netmask", str(rule["netmask"]))
+            if rule["portEnd"] is not None:
+                element.set("toport", str(rule["portEnd"]))
+            root.append(element)
+
+        tree = xml.etree.ElementTree.ElementTree(root)
+
+        try:
+            f = open(self.firewall_conf, 'a') # create the file if not exist
+            f.close()
+
+            with open(self.firewall_conf, 'w') as f:
+                fcntl.lockf(f, fcntl.LOCK_EX)
+                tree.write(f, "UTF-8")
+                fcntl.lockf(f, fcntl.LOCK_UN)
+            f.close()
+        except EnvironmentError as err:
+            print "{0}: save error: {1}".format(
+                    os.path.basename(sys.argv[0]), err)
+            return False
+
+        return True
+
+    def has_firewall(self):
+        return os.path.exists (self.firewall_conf)
+
+    def get_firewall_conf(self):
+        conf = { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True }
+
+        try:
+            tree = xml.etree.ElementTree.parse(self.firewall_conf)
+            root = tree.getroot()
+
+            conf["allow"] = (root.get("policy") == "allow")
+            conf["allowDns"] = (root.get("dns") == "allow")
+            conf["allowIcmp"] = (root.get("icmp") == "allow")
+
+            for element in root:
+                rule = {}
+                attr_list = ("address", "netmask", "port", "toport")
+
+                for attribute in attr_list:
+                    rule[attribute] = element.get(attribute)
+
+                if rule["netmask"] is not None:
+                    rule["netmask"] = int(rule["netmask"])
+                else:
+                    rule["netmask"] = 32
+
+                rule["portBegin"] = int(rule["port"])
+
+                if rule["toport"] is not None:
+                    rule["portEnd"] = int(rule["toport"])
+                else:
+                    rule["portEnd"] = None
+
+                del(rule["port"])
+                del(rule["toport"])
+
+                conf["rules"].append(rule)
+
+        except EnvironmentError as err:
+            return conf
+        except (xml.parsers.expat.ExpatError,
+                ValueError, LookupError) as err:
+            print("{0}: load error: {1}".format(
+                os.path.basename(sys.argv[0]), err))
+            return None
+
+        return conf
+
     def get_total_xen_memory(self):
         hosts = xend_session.session.xenapi.host.get_all()
@@ -550,6 +818,8 @@ class QubesVm(object):
         if self.is_running():
             raise QubesException ("VM is already running!")
 
+        self.reset_volatile_storage()
+
         if verbose:
             print "--> Rereading the VM's conf file ({0})...".format(self.conf_file)
         self.update_xen_storage()
@@ -665,10 +935,13 @@
         attrs["uses_default_netvm"] = str(self.uses_default_netvm)
         attrs["netvm_qid"] = str(self.netvm_vm.qid) if self.netvm_vm is not None else "none"
         attrs["installed_by_rpm"] = str(self.installed_by_rpm)
+        attrs["template_qid"] = str(self.template_vm.qid) if self.template_vm and not self.is_updateable() else "none"
         attrs["updateable"] = str(self.updateable)
         attrs["label"] = self.label.name
         attrs["memory"] = str(self.memory)
+        attrs["pcidevs"] = str(self.pcidevs)
         attrs["vcpus"] = str(self.vcpus)
+        attrs["internal"] = str(self.internal)
         return attrs
 
     def create_xml_element(self):
@@ -948,196 +1221,7 @@
         attrs["rootcow_img"] = self.rootcow_img
         return attrs
 
-class QubesCowVm(QubesVm):
-    """
-    A class that represent a VM that may be based on some template, i.e. doesn't have own root.img
-
-    """
-    def __init__(self, **kwargs):
-        if "dir_path" not in kwargs or kwargs["dir_path"] is None:
-            kwargs["dir_path"] = qubes_appvms_dir + "/" + kwargs["name"]
-
-        if "updateable" not in kwargs or kwargs["updateable"] is None:
-            kwargs["updateable"] = False
-
-        if "template_vm" in kwargs:
-            template_vm = kwargs.pop("template_vm")
-        else:
-            template_vm = None
-
-        super(QubesCowVm, self).__init__(**kwargs)
-        qid = kwargs["qid"]
-        dir_path = kwargs["dir_path"]
-        if not self.is_updateable():
-            # Dirty hack for QubesDom0NetVm...
-            if not isinstance(self, QubesDom0NetVm):
-                assert template_vm is not None, "Missing template_vm for template based VM!"
-                if not template_vm.is_template():
-                    print "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
-                        format(template_vm.qid)
-                    return False
-
-                template_vm.appvms[qid] = self
-        else:
-            assert self.root_img is not None, "Missing root_img for standalone VM!"
-
-        self.template_vm = template_vm
-
-    def set_updateable(self):
-        if self.is_updateable():
-            return
-
-        raise QubesException ("Change 'updateable' flag is not supported. Please use qvm-create.")
-
-    def set_nonupdateable(self):
-        if self.is_updateable():
-            return
-
-        raise QubesException ("Change 'updateable' flag is not supported. Please use qvm-create.")
-
-    def create_config_file(self):
-        conf_template = None
-        if self.type == "NetVM":
-            conf_template = open (self.template_vm.netvms_conf_file, "r")
-        elif self.updateable:
-            conf_template = open (self.template_vm.standalonevms_conf_file, "r")
-        else:
-            conf_template = open (self.template_vm.appvms_conf_file, "r")
-        if os.path.isfile(self.conf_file):
-            shutil.copy(self.conf_file, self.conf_file + ".backup")
-        conf_appvm = open(self.conf_file, "w")
-        rx_vmname = re.compile (r"%VMNAME%")
-        rx_vmdir = re.compile (r"%VMDIR%")
-        rx_template = re.compile (r"%TEMPLATEDIR%")
-        rx_pcidevs = re.compile (r"%PCIDEVS%")
-        rx_mem = re.compile (r"%MEM%")
-        rx_vcpus = re.compile (r"%VCPUS%")
-
-        for line in conf_template:
-            line = rx_vmname.sub (self.name, line)
-            line = rx_vmdir.sub (self.dir_path, line)
-            line = rx_template.sub (self.template_vm.dir_path, line)
-            line = rx_pcidevs.sub (self.pcidevs, line)
-            line = rx_mem.sub (str(self.memory), line)
-            line = rx_vcpus.sub (str(self.vcpus), line)
-            conf_appvm.write(line)
-
-        conf_template.close()
-        conf_appvm.close()
-
-    def create_on_disk(self, verbose):
-        if dry_run:
-            return
-
-        if verbose:
-            print "--> Creating directory: {0}".format(self.dir_path)
-        os.mkdir (self.dir_path)
-
-        if verbose:
-            print "--> Creating the VM config file: {0}".format(self.conf_file)
-
-        self.create_config_file()
-
-        template_priv = self.template_vm.private_img
-        if verbose:
-            print "--> Copying the template's private image: {0}".\
-                    format(template_priv)
-
-        # We prefer to use Linux's cp, because it nicely handles sparse files
-        retcode = subprocess.call (["cp", template_priv, self.private_img])
-        if retcode != 0:
-            raise IOError ("Error while copying {0} to {1}".\
-                           format(template_priv, self.private_img))
-
-        if self.is_updateable():
-            template_root = self.template_vm.root_img
-            if verbose:
-                print "--> Copying the template's root image: {0}".\
-                        format(template_root)
-
-            # We prefer to use Linux's cp, because it nicely handles sparse files
-            retcode = subprocess.call (["cp", template_root, self.root_img])
-            if retcode != 0:
-                raise IOError ("Error while copying {0} to {1}".\
-                               format(template_root, self.root_img))
-
-            kernels_dir = self.dir_path + '/' + default_kernels_subdir
-            if verbose:
-                print "--> Copying the template's kernel dir: {0}".\
-                        format(self.template_vm.kernels_dir)
-            shutil.copytree (self.template_vm.kernels_dir, kernels_dir)
-
-
-        # Create volatile.img
-        self.reset_volatile_storage()
-
-    def verify_files(self):
-        if dry_run:
-            return
-
-        if not os.path.exists (self.dir_path):
-            raise QubesException (
-                "VM directory doesn't exist: {0}".\
-                format(self.dir_path))
-
-        if not os.path.exists (self.conf_file):
-            raise QubesException (
-                "VM config file doesn't exist: {0}".\
-                format(self.conf_file))
-
-        if self.is_updateable() and not os.path.exists (self.root_img):
-            raise QubesException (
-                "VM root image file doesn't exist: {0}".\
-                format(self.root_img))
-
-        if not os.path.exists (self.private_img):
-            raise QubesException (
-                "VM private image file doesn't exist: {0}".\
-                format(self.private_img))
-        return True
-
-    def start(self, debug_console = False, verbose = False, preparing_dvm = False):
-        if dry_run:
-            return
-
-        if self.is_running():
-            raise QubesException("VM is already running!")
-
-        self.reset_volatile_storage()
-
-        return super(QubesCowVm, self).start(debug_console=debug_console, verbose=verbose, preparing_dvm=preparing_dvm)
-
-    def reset_volatile_storage(self):
-        assert not self.is_running(), "Attempt to clean volatile image of running VM!"
-
-        # Only makes sense on template based VM
-        if not self.template_vm:
-            return
-
-        print "--> Cleaning volatile image: {0}...".format (self.volatile_img)
-        if dry_run:
-            return
-        if os.path.exists (self.volatile_img):
-            os.remove (self.volatile_img)
-
-        retcode = subprocess.call (["tar", "xf", self.template_vm.clean_volatile_img, "-C", self.dir_path])
-        if retcode != 0:
-            raise IOError ("Error while unpacking {0} to {1}".\
-                           format(self.template_vm.clean_volatile_img, self.volatile_img))
-
-    def remove_from_disk(self):
-        if dry_run:
-            return
-
-        subprocess.check_call ([qubes_appmenu_remove_cmd, self.name])
-        shutil.rmtree (self.dir_path)
-
-    def get_xml_attrs(self):
-        attrs = super(QubesCowVm, self).get_xml_attrs()
-        attrs["template_qid"] = str(self.template_vm.qid) if not self.is_updateable() else "none"
-        return attrs
-
-class QubesNetVm(QubesCowVm):
+class QubesNetVm(QubesVm):
     """
     A class that represents a NetVM. A child of QubesCowVM.
     """
@@ -1306,8 +1390,12 @@
         qvm_collection.load()
         qvm_collection.unlock_db()
 
-        vms = [vm for vm in qvm_collection.values() if vm.is_appvm()]
+        vms = [vm for vm in qvm_collection.values() if vm.has_firewall()]
         for vm in vms:
+            # Process only VMs connected to this ProxyVM
+            if not vm.netvm_vm or vm.netvm_vm.qid != self.qid:
+                continue
+
             conf = vm.get_firewall_conf()
 
             xid = vm.get_xid()
@@ -1447,19 +1535,10 @@
     """
 
     def __init__(self, **kwargs):
-        template_vm = kwargs.pop("template_vm")
+        template_vm = kwargs["template_vm"]
+        assert template_vm is not None, "Missing template_vm for DisposableVM!"
 
         super(QubesDisposableVm, self).__init__(dir_path="/nonexistent", **kwargs)
 
-        qid = kwargs["qid"]
-
-        assert template_vm is not None, "Missing template_vm for DisposableVM!"
-        if not template_vm.is_template():
-            print "ERROR: template_qid={0} doesn't point to a valid TemplateVM".\
-                format(new_vm.template_vm.qid)
-            return False
-
-        self.template_vm = template_vm
-        template_vm.appvms[qid] = self
 
     @property
     def type(self):
@@ -1477,19 +1556,16 @@
         return True
 
-class QubesAppVm(QubesCowVm):
+class QubesAppVm(QubesVm):
     """
     A class that represents an AppVM. A child of QubesVm.
     """
 
     def __init__(self, **kwargs):
+        if "dir_path" not in kwargs or kwargs["dir_path"] is None:
+            kwargs["dir_path"] = qubes_appvms_dir + "/" + kwargs["name"]
+
         super(QubesAppVm, self).__init__(**kwargs)
 
-        dir_path = self.dir_path
-
-        if "firewall_conf" not in kwargs or kwargs["firewall_conf"] is None:
-            kwargs["firewall_conf"] = dir_path + "/" + default_firewall_conf_file
-
-        self.firewall_conf = kwargs["firewall_conf"]
 
     @property
     def type(self):
@@ -1505,7 +1581,15 @@
             print "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, self.label.icon_path)
         os.symlink (self.label.icon_path, self.icon_path)
 
-        self.create_appmenus (verbose)
+        if not self.internal:
+            self.create_appmenus (verbose)
+
+    def remove_from_disk(self):
+        if dry_run:
+            return
+
+        subprocess.check_call ([qubes_appmenu_remove_cmd, self.name])
+        super(QubesAppVm, self).remove_from_disk()
 
     def create_appmenus(self, verbose):
         if self.template_vm is not None:
@@ -1514,88 +1598,6 @@
             # Only add apps to menu
            subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name])
 
-    def write_firewall_conf(self, conf):
-        root = xml.etree.ElementTree.Element(
-                "QubesFirwallRules",
-                policy = "allow" if conf["allow"] else "deny",
-                dns = "allow" if conf["allowDns"] else "deny",
-                icmp = "allow" if conf["allowIcmp"] else "deny"
-        )
-
-        for rule in conf["rules"]:
-            element = xml.etree.ElementTree.Element(
-                    "rule",
-                    address=rule["address"],
-                    port=str(rule["portBegin"]),
-            )
-            if rule["netmask"] is not None and rule["netmask"] != 32:
-                element.set("netmask", str(rule["netmask"]))
-            if rule["portEnd"] is not None:
-                element.set("toport", str(rule["portEnd"]))
-            root.append(element)
-
-        tree = xml.etree.ElementTree.ElementTree(root)
-
-        try:
-            f = open(self.firewall_conf, 'a') # create the file if not exist
-            f.close()
-
-            with open(self.firewall_conf, 'w') as f:
-                fcntl.lockf(f, fcntl.LOCK_EX)
-                tree.write(f, "UTF-8")
-                fcntl.lockf(f, fcntl.LOCK_UN)
-            f.close()
-        except EnvironmentError as err:
-            print "{0}: save error: {1}".format(
-                    os.path.basename(sys.argv[0]), err)
-            return False
-
-        return True
-
-    def get_firewall_conf(self):
-        conf = { "rules": list(), "allow": True, "allowDns": True, "allowIcmp": True }
-
-        try:
-            tree = xml.etree.ElementTree.parse(self.firewall_conf)
-            root = tree.getroot()
-
-            conf["allow"] = (root.get("policy") == "allow")
-            conf["allowDns"] = (root.get("dns") == "allow")
-            conf["allowIcmp"] = (root.get("icmp") == "allow")
-
-            for element in root:
-                rule = {}
-                attr_list = ("address", "netmask", "port", "toport")
-
-                for attribute in attr_list:
-                    rule[attribute] = element.get(attribute)
-
-                if rule["netmask"] is not None:
-                    rule["netmask"] = int(rule["netmask"])
-                else:
-                    rule["netmask"] = 32
-
-                rule["portBegin"] = int(rule["port"])
-
-                if rule["toport"] is not None:
-                    rule["portEnd"] = int(rule["toport"])
-                else:
-                    rule["portEnd"] = None
-
-                del(rule["port"])
-                del(rule["toport"])
-
-                conf["rules"].append(rule)
-
-        except EnvironmentError as err:
-            return conf
-        except (xml.parsers.expat.ExpatError,
-                ValueError, LookupError) as err:
-            print("{0}: load error: {1}".format(
-                os.path.basename(sys.argv[0]), err))
-            return None
-
-        return conf
 
 class QubesVmCollection(dict):
     """
@@ -1896,8 +1898,8 @@
             kwargs = {}
             common_attr_list = ("qid", "name", "dir_path", "conf_file",
                                 "private_img", "root_img", "template_qid",
-                                "installed_by_rpm", "updateable",
-                                "uses_default_netvm", "label")
+                                "installed_by_rpm", "updateable", "internal",
+                                "uses_default_netvm", "label", "memory", "vcpus", "pcidevs")
 
             for attribute in common_attr_list:
                 kwargs[attribute] = element.get(attribute)
@@ -1909,6 +1911,9 @@
             if "installed_by_rpm" in kwargs:
                 kwargs["installed_by_rpm"] = True if kwargs["installed_by_rpm"] == "True" else False
 
+            if "internal" in kwargs:
+                kwargs["internal"] = True if kwargs["internal"] == "True" else False
+
             if "template_qid" in kwargs:
                 if kwargs["template_qid"] == "none" or kwargs["template_qid"] is None:
                     kwargs.pop("template_qid")
diff --git a/dom0/qvm-tools/qvm-backup-restore b/dom0/qvm-tools/qvm-backup-restore
index 1bb1f9a9..991ebb4f 100755
--- a/dom0/qvm-tools/qvm-backup-restore
+++ b/dom0/qvm-tools/qvm-backup-restore
@@ -127,6 +127,9 @@ def main():
     parser.add_option ("--skip-conflicting", action="store_true", dest="skip_conflicting", default=False,
                        help="Do not restore VMs that are already present on the host")
 
+    parser.add_option ("--force-root", action="store_true", dest="force_root", default=False,
+                       help="Force to run, even with root privileges")
+
     parser.add_option ("--recreate-conf-files", action="store_true", dest="recreate_conf", default=False,
                        help="Recreate conf files after restore")
 
@@ -242,14 +245,23 @@ def main():
 
     print
 
+    if os.geteuid() == 0:
+        print "*** Running this tool as root is strongly discouraged, this will lead you in permissions problems."
+        if options.force_root:
+            print "Continuing as commanded. You have been warned."
+        else:
+            print "Retry as unprivileged user."
+            print "... or use --force-root to continue anyway."
+            exit(1)
+
     if there_are_conflicting_vms:
         print "*** There VMs with conflicting names on the host! ***"
-        if options.skip_conflicting:
-            print "Those VMs will not be restored, the host VMs will not be overwritten!"
-        else:
-            print "Remove VMs with conflicting names from the host before proceeding."
-            print "... or use --skip-conflicting to restore only those VMs that do not exist on the host."
-            exit (1)
+        if options.skip_conflicting:
+            print "Those VMs will not be restored, the host VMs will not be overwritten!"
+        else:
+            print "Remove VMs with conflicting names from the host before proceeding."
+            print "... or use --skip-conflicting to restore only those VMs that do not exist on the host."
+            exit (1)
 
     print "The above VMs will be copied and added to your system."
     print "Exisiting VMs will not be removed."
diff --git a/dom0/qvm-tools/qvm-create b/dom0/qvm-tools/qvm-create
index c4c80b97..c0f3f478 100755
--- a/dom0/qvm-tools/qvm-create
+++ b/dom0/qvm-tools/qvm-create
@@ -63,6 +63,8 @@ def main():
                        help="Initial memory size (in MB)")
     parser.add_option ("-c", "--vcpus", dest="vcpus", default=None,
                        help="VCPUs count")
+    parser.add_option ("-i", "--internal", action="store_true", dest="internal", default=False,
+                       help="Create VM for internal use only (hidden in qubes-manager, no appmenus)")
     parser.add_option ("-q", "--quiet", action="store_false", dest="verbose", default=True)
 
     (options, args) = parser.parse_args ()
@@ -134,6 +136,9 @@ def main():
     else:
         vm = qvm_collection.add_new_appvm(vmname, template_vm, label = label, updateable = options.standalone)
 
+    if options.internal:
+        vm.internal = True
+
     if options.mem is not None:
         vm.memory = options.mem
 
diff --git a/dom0/qvm-tools/qvm-run b/dom0/qvm-tools/qvm-run
index bafe3a79..bfae342f 100755
--- a/dom0/qvm-tools/qvm-run
+++ b/dom0/qvm-tools/qvm-run
@@ -124,6 +124,15 @@ def vm_run_cmd(vm, cmd, options):
             if options.tray:
                 tray_notify_error ("ERROR: Cannot start the GUI daemon for this VM!")
             exit (1)
+
+    if not os.path.exists("/var/run/qubes/qrexec.{0}".format(xid)):
+        retcode = subprocess.call ([qrexec_daemon_path, str(xid)])
+        if (retcode != 0) :
+            print "ERROR: Cannot start qrexec!"
+            if options.tray:
+                tray_notify_error ("ERROR: Cannot start the QRexec daemon for this VM!")
+            exit (1)
+
     actually_execute(str(xid), cmd, options);
 
 def main():
diff --git a/dom0/restore/qvm-create-default-dvm b/dom0/restore/qvm-create-default-dvm
index c854c837..04ed97e1 100755
--- a/dom0/restore/qvm-create-default-dvm
+++ b/dom0/restore/qvm-create-default-dvm
@@ -32,7 +32,7 @@ DVMTMPLDIR="/var/lib/qubes/appvms/$DVMTMPL"
 if ! [ -d "$DVMTMPLDIR" ] ; then
     # unfortunately, currently there are reliability issues with save of a domain
     # with multiple CPUs
-    if ! qvm-create --vcpus=1 -t "$TEMPLATENAME" -l gray "$DVMTMPL" ; then exit 1 ; fi
+    if ! qvm-create --vcpus=1 --internal -t "$TEMPLATENAME" -l gray "$DVMTMPL" ; then exit 1 ; fi
 fi
 if ! /usr/lib/qubes/qubes_prepare_saved_domain.sh \
     "$DVMTMPL" "/var/lib/qubes/appvms/$DVMTMPL/dvm-savefile" $SCRIPTNAME ; then