
Migration to libvirt - DispVM

Move DispVM creation from qubes-restore to qfile-daemon-dvm/QubesDisposableVm.
As the actual restore is handled by libvirt, we don't gain much from a
separate qubes-restore process.
This code still needs some improvements, especially regarding performance.
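
In essence, the restore that qubes-restore used to drive via "xl restore" now happens in-process through the libvirt Python bindings: restore the domain paused, do the dom0-side setup (xenstore, qrexec, GUI daemon), then resume. A minimal sketch of that pattern - the connection URI, file paths, and domain name are illustrative assumptions, not values from this commit:

    import libvirt

    conn = libvirt.open('xen:///')
    # Domain XML for the new DispVM; in this commit it comes from create_config_file()
    domain_config = open('/var/run/qubes/current-dvm-domain.xml').read()
    # Restore paused, so xenstore entries and qrexec can be set up before
    # the guest executes its first instruction
    conn.restoreFlags('/var/run/qubes/current-savefile',
                      domain_config, libvirt.VIR_DOMAIN_SAVE_PAUSED)
    dom = conn.lookupByName('disp1')  # hypothetical DispVM name
    # ... create xenstore entries, start qrexec-daemon and qubes-guid ...
    dom.resume()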
Marek Marczykowski 11 years ago
parent
commit
107ebad9d5

+ 109 - 3
core-modules/01QubesDisposableVm.py

@@ -22,9 +22,14 @@
 #
 #
 
+import os
 import sys
+import libvirt
+import time
 from qubes.qubes import QubesVm,QubesVmLabel,register_qubes_vm_class
 from qubes.qubes import QubesDispVmLabels
+from qubes.qubes import QubesException,dry_run,libvirt_conn
+from qubes.qmemman_client import QMemmanClient
 
 class QubesDisposableVm(QubesVm):
     """
@@ -37,18 +42,32 @@ class QubesDisposableVm(QubesVm):
     def get_attrs_config(self):
         attrs_config = super(QubesDisposableVm, self).get_attrs_config()
 
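+        # Name defaults to "disp<qid>" when not set explicitly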
+        attrs_config['name']['eval'] = '"disp%d" % self._qid if value is None else value'
+
         # New attributes
-        attrs_config['dispid'] = { 'save': lambda: str(self.dispid) }
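+        # dispid defaults to the VM's qid when not given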
+        attrs_config['dispid'] = { 'func': lambda x: self._qid if x is None else int(x),
+            'save': lambda: str(self.dispid) }
         attrs_config['include_in_backups']['func'] = lambda x: False
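+        # Path of the libvirt save image this DispVM is restored from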
+        attrs_config['disp_savefile'] = {
+                'default': '/var/run/qubes/current-savefile',
+                'save': lambda: str(self.disp_savefile) }
 
         return attrs_config
 
     def __init__(self, **kwargs):
 
-        super(QubesDisposableVm, self).__init__(dir_path="/nonexistent", **kwargs)
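+        # When created from a DVM template, inherit its template and dir_path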
+        disp_template = None
+        if 'disp_template' in kwargs:
+            disp_template = kwargs['disp_template']
+            kwargs['template'] = disp_template.template
+            kwargs['dir_path'] = disp_template.dir_path
+        super(QubesDisposableVm, self).__init__(**kwargs)
 
         assert self.template is not None, "Missing template for DisposableVM!"
 
+        if disp_template:
+            self.clone_attrs(disp_template)
+
         # Use DispVM icon with the same color
         if self._label:
             self._label = QubesDispVmLabels[self._label.name]
@@ -68,8 +87,12 @@ class QubesDisposableVm(QubesVm):
         else:
             return None
 
+    def get_clone_attrs(self):
+        attrs = super(QubesDisposableVm, self).get_clone_attrs()
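+        # _label is handled in __init__ (DispVM icon variant), so don't clone it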
+        attrs.remove('_label')
+        return attrs
 
-    def get_xml_attrs(self):
+    def do_not_use_get_xml_attrs(self):
         # Minimal set - do not inherit rest of attributes
         attrs = {}
         attrs["qid"] = str(self.qid)
@@ -84,5 +107,88 @@ class QubesDisposableVm(QubesVm):
     def verify_files(self):
         return True
 
+    # FIXME: source_template unused
+    def get_config_params(self, source_template=None):
+        attrs = super(QubesDisposableVm, self).get_config_params()
+        attrs['privatedev'] = ''
+        return attrs
+
+    def start(self, verbose = False, **kwargs):
+        if dry_run:
+            return
+
+        # Intentionally not using is_running() - this check should also
+        # reject the "Paused", "Crashed" and "Halting" states
+        if self.get_power_state() != "Halted":
+            raise QubesException("VM is already running!")
+
+        # skip netvm state checking - the calling VM has the same netvm,
+        # so it must already be running
+
+        if verbose:
+            print >> sys.stderr, "--> Loading the VM (type = {0})...".format(self.type)
+
+        print >>sys.stderr, "time=%s, creating config file" % (str(time.time()))
+        # refresh config file
+        domain_config = self.create_config_file()
+
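+        # self.memory is in MiB; qmemman expects the request in bytes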
+        mem_required = int(self.memory) * 1024 * 1024
+        print >>sys.stderr, "time=%s, getting %d memory" % (str(time.time()), mem_required)
+        qmemman_client = QMemmanClient()
+        try:
+            got_memory = qmemman_client.request_memory(mem_required)
+        except IOError as e:
+            raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
+        if not got_memory:
+            qmemman_client.close()
+            raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
+
+        # dispvm cannot have PCI devices
+        assert (len(self.pcidevs) == 0), "DispVM cannot have PCI devices"
+
+        print >>sys.stderr, "time=%s, calling restore" % (str(time.time()))
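+        # Restore the domain paused - xenstore entries and qrexec must be set up before it runs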
+        libvirt_conn.restoreFlags(self.disp_savefile,
+                domain_config, libvirt.VIR_DOMAIN_SAVE_PAUSED)
+
+        print >>sys.stderr, "time=%s, done, getting xid" % (str(time.time()))
+        xid = self.get_xid()
+        self.xid = xid
+
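+        # Advertise the qubes-dvm service, so scripts inside the VM know it is a DispVM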
+        self.services['qubes-dvm'] = True
+        if verbose:
+            print >> sys.stderr, "--> Setting Xen Store info for the VM..."
+        self.create_xenstore_entries(xid)
+        print >>sys.stderr, "time=%s, done xenstore" % (str(time.time()))
+
+        # fire hooks
+        for hook in self.hooks_start:
+            hook(self, verbose = verbose, **kwargs)
+
+        if verbose:
+            print >> sys.stderr, "--> Starting the VM..."
+        self.libvirt_domain.resume()
+        print >>sys.stderr, "time=%s, resumed" % (str(time.time()))
+
+        # close() is not really needed, because the descriptor is
+        # close-on-exec anyway. The reason to postpone close() is that
+        # xl may not be done constructing the domain when its main
+        # process exits, so we close() only once we know the domain is
+        # up - the successful unpause is a good indicator of that.
+        qmemman_client.close()
+
+        if self._start_guid_first and kwargs.get('start_guid', True) and os.path.exists('/var/run/shm.id'):
+            self.start_guid(verbose=verbose,
+                    notify_function=kwargs.get('notify_function', None))
+
+        self.start_qrexec_daemon(verbose=verbose,
+                notify_function=kwargs.get('notify_function', None))
+        print >>sys.stderr, "time=%s, qrexec done" % (str(time.time()))
+
+        if not self._start_guid_first and kwargs.get('start_guid', True) and os.path.exists('/var/run/shm.id'):
+            self.start_guid(verbose=verbose,
+                    notify_function=kwargs.get('notify_function', None))
+        print >>sys.stderr, "time=%s, guid done" % (str(time.time()))
+
+        return xid
+
 # register classes
 register_qubes_vm_class(QubesDisposableVm)

+ 1 - 4
dispvm/Makefile

@@ -1,9 +1,6 @@
 CC=gcc
 CFLAGS=-Wall -Wextra -Werror
-all: qubes-restore xenstore-watch
-qubes-restore: qubes-restore.o
-	$(CC) -o qubes-restore qubes-restore.o -lxenstore
-
+all: xenstore-watch
 xenstore-watch: xenstore-watch.o
 	$(CC) -o xenstore-watch xenstore-watch.o -lxenstore
 

+ 29 - 44
dispvm/qfile-daemon-dvm

@@ -30,68 +30,49 @@ from qubes.qubes import QubesVmCollection
 from qubes.qubes import QubesException
 from qubes.qubes import QubesDaemonPidfile
 from qubes.qubes import QubesDispVmLabels
-from qubes.qmemman_client import QMemmanClient
 from qubes.notify import tray_notify,tray_notify_error,tray_notify_init
 
 current_savefile = '/var/run/qubes/current-savefile'
 current_dvm_conf = '/var/run/qubes/current-dvm.conf'
+current_savefile_vmdir = '/var/lib/qubes/dvmdata/vmdir'
 
 class QfileDaemonDvm:
     def __init__(self, name):
         self.name = name
+
+    def get_disp_templ(self):
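+        # current_savefile_vmdir is a symlink to the DVM template's
+        # directory; its last path component is the template's name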
+        vmdir = os.readlink(current_savefile_vmdir)
+        return vmdir.split('/')[-1]
         
     def do_get_dvm(self):
-        qmemman_client = QMemmanClient()
-        if not qmemman_client.request_memory(400*1024*1024):
-            qmemman_client.close()
-            errmsg = 'Not enough memory to create DVM. '
-            errmsg +='Terminate some appVM and retry.'
-            tray_notify_error(errmsg)
-            return None
-
-        tray_notify("Starting new DispVM...", "red")
+        tray_notify("red", "Starting new DispVM...")
 
         qvm_collection = QubesVmCollection()
         qvm_collection.lock_db_for_writing()
+
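+        # Unpack the copy-on-write volatile image in the background, in parallel with loading qubes.xml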
+        tar_process = subprocess.Popen(['tar', '-C', current_savefile_vmdir,
+            '-xSUf', os.path.join(current_savefile_vmdir, 'saved-cows.tar')])
+
         qvm_collection.load()
+        print >>sys.stderr, "time=%s, collection loaded" % (str(time.time()))
 
         vm = qvm_collection.get_vm_by_name(self.name)
         if vm is None:
             sys.stderr.write( 'Domain ' + self.name + ' does not exist ?')
             qvm_collection.unlock_db()
-            qmemman_client.close()
             return None
         label = vm.label
         if len(sys.argv) > 4 and len(sys.argv[4]) > 0:
             assert sys.argv[4] in QubesDispVmLabels.keys(), "Invalid label"
             label = QubesDispVmLabels[sys.argv[4]]
-        print >>sys.stderr, "time=%s, starting qubes-restore" % (str(time.time()))
-        retcode = subprocess.call(['/usr/lib/qubes/qubes-restore',
-            '-s', current_savefile,
-            '-c', current_dvm_conf,
-            '-u', str(vm.default_user),
-            '--',
-            '-c', label.color,
-            '-i', label.icon_path,
-            '-l', str(label.index)])
-        qmemman_client.close()
-        if retcode != 0:
-            tray_notify_error('DisposableVM creation failed, see qubes-restore.log')
-            qvm_collection.unlock_db()
-            return None
-        f = open('/var/run/qubes/dispVM.xid', 'r');
-        disp_xid = f.readline().rstrip('\n')
-        disp_name = f.readline().rstrip('\n')
-        disptempl = f.readline().rstrip('\n')
-        f.close()
-        print >>sys.stderr, "time=%s, adding to qubes.xml" % (str(time.time()))
-        vm_disptempl = qvm_collection.get_vm_by_name(disptempl);
+        disp_templ = self.get_disp_templ()
+        vm_disptempl = qvm_collection.get_vm_by_name(disp_templ)
         if vm_disptempl is None:
-            sys.stderr.write( 'Domain ' + disptempl + ' does not exist ?')
+            sys.stderr.write('Domain ' + disp_templ + ' does not exist?')
             qvm_collection.unlock_db()
             return None
-        dispid=int(disp_name[4:])
-        dispvm=qvm_collection.add_new_disposablevm(disp_name, vm_disptempl.template, label=label, dispid=dispid, netvm=vm_disptempl.netvm)
+        dispvm = qvm_collection.add_new_vm('QubesDisposableVm', disp_template=vm_disptempl, label=label)
+        print >>sys.stderr, "time=%s, VM created" % (str(time.time()))
         # By default inherit firewall rules from calling VM
         if os.path.exists(vm.firewall_conf):
-            disp_firewall_conf = '/var/run/qubes/%s-firewall.xml' % disp_name
+            disp_firewall_conf = '/var/run/qubes/%s-firewall.xml' % dispvm.name
@@ -100,6 +81,14 @@ class QfileDaemonDvm:
         if len(sys.argv) > 5 and len(sys.argv[5]) > 0:
             assert os.path.exists(sys.argv[5]), "Invalid firewall.conf location"
             dispvm.firewall_conf = sys.argv[5]
+        # Wait for tar to finish
+        if tar_process.wait() != 0:
+            sys.stderr.write('Failed to unpack saved-cows.tar')
+            qvm_collection.unlock_db()
+            return None
+        print >>sys.stderr, "time=%s, VM starting" % (str(time.time()))
+        dispvm.start()
+        print >>sys.stderr, "time=%s, VM started" % (str(time.time()))
         qvm_collection.save()
         qvm_collection.unlock_db()
         # Reload firewall rules
@@ -108,7 +97,7 @@ class QfileDaemonDvm:
             if vm.is_proxyvm() and vm.is_running():
                 vm.write_iptables_xenstore_entry()
 
-        return disp_name
+        return dispvm
 
     def dvm_setup_ok(self):
         dvmdata_dir = '/var/lib/qubes/dvmdata/'
@@ -158,17 +147,13 @@ def main():
     tray_notify_init()
     print >>sys.stderr, "time=%s, creating DispVM" % (str(time.time()))
     qfile = QfileDaemonDvm(src_vmname)
-    lockf = open("/var/run/qubes/qfile-daemon-dvm.lock", 'a')
-    fcntl.fcntl(lockf, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
-    fcntl.flock(lockf, fcntl.LOCK_EX)
-    dispname = qfile.get_dvm()
-    lockf.close()
-    if dispname is not None:
+    dispvm = qfile.get_dvm()
+    if dispvm is not None:
         print >>sys.stderr, "time=%s, starting VM process" % (str(time.time()))
-        subprocess.call(['/usr/lib/qubes/qrexec-client', '-d', dispname,
+        subprocess.call(['/usr/lib/qubes/qrexec-client', '-d', dispvm.name,
             user+':exec /usr/lib/qubes/qubes-rpc-multiplexer ' + exec_index + " " + src_vmname])
-        subprocess.call(['/usr/sbin/xl', 'destroy', dispname])
-        qfile.remove_disposable_from_qdb(dispname)
+        dispvm.force_shutdown()
+        qfile.remove_disposable_from_qdb(dispvm.name)
 
 main()
  

+ 9 - 8
dispvm/qubes-prepare-saved-domain.sh

@@ -29,7 +29,7 @@ if ! qvm-start $1 --no-guid --dvm ; then
 	exit 1
 fi
 
-ID=`xl domid $1`
+ID=`virsh -c xen:/// domid $1`
 if [ "$ID" = "" ] ; then 
 	echo "cannot get domain id" >&2
 	exit 1
@@ -43,22 +43,23 @@ xenstore-write /local/domain/$ID/qubes-save-request 1
 xenstore-watch-qubes /local/domain/$ID/device/qubes-used-mem
 xenstore-read /local/domain/$ID/qubes-gateway | \
 	cut -d . -f 3 | tr -d "\n" > $VMDIR/netvm-id.txt
-xl block-detach $1 xvdb
+# FIXME: get connection URI from core scripts
+virsh -c xen:/// detach-disk $1 xvdb
 MEM=$(xenstore-read /local/domain/$ID/device/qubes-used-mem)
 echo "DVM boot complete, memory used=$MEM. Saving image..." >&2
 QMEMMAN_STOP=/var/run/qubes/do-not-membalance
 touch $QMEMMAN_STOP
-xl mem-set $1 $(($MEM/1000))
+virsh -c xen:/// setmem $1 $MEM
+# Add some safety margin
+virsh -c xen:/// setmaxmem $1 $(( MEM + 1024 ))
 sleep 1
 touch $2
-if ! xl save $1 $2 $VMDIR/$1.conf; then 
+if ! virsh -c xen:/// save $1 $2; then
 	rm -f $QMEMMAN_STOP
 	exit 1
 fi
 rm -f $QMEMMAN_STOP
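+# Record which VM directory this savefile was created from (read back by qfile-daemon-dvm)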
+ln -snf $VMDIR /var/lib/qubes/dvmdata/vmdir
 cd $VMDIR
-# Apparently baloon driver isn't effective enough on some kernels - xl
-# restore still needs initial memory amount
-#sed -i -e "s/^memory.*/memory = $((MEM/1000))/" dvm.conf
 tar -Scf saved-cows.tar volatile.img
-echo "DVM savefile created successfully." >&2
+echo "DVM savefile created successfully."

+ 0 - 549
dispvm/qubes-restore.c

@@ -1,549 +0,0 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <signal.h>
-#include <unistd.h>
-#include <sys/time.h>
-#include <sys/wait.h>
-#include <sys/file.h>
-#include <syslog.h>
-#include <xs.h>
-
-int restore_domain(const char *restore_file, const char *conf_file, const char *name) {
-	int pid, status, domid, ret;
-	int pipe_fd[2];
-	char buf[256];
-	char *endptr;
-	switch (pid = fork()) {
-	case -1:
-		perror("fork");
-		exit(1);
-	case 0:
-		close(1);
-		if (dup2(open("/dev/null", O_RDWR), 1)==-1) {
-			perror("dup2 or open");
-			exit(1);
-		}
-		execl("/usr/sbin/xl", "xl", "-v", "restore", "-d", conf_file, restore_file, NULL);
-		perror("execl");
-		exit(1);
-	default:;
-	}
-	if (waitpid(pid, &status, 0) < 0) {
-		perror("waitpid");
-		exit(1);
-	}
-	if (status != 0) {
-		fprintf(stderr, "Error starting VM\n");
-		exit(1);
-	}
-
-	// read domid
-	if (pipe(pipe_fd)==-1) {
-		perror("pipe");
-		exit(1);
-	}
-	switch (pid = fork()) {
-	case -1:
-		perror("fork");
-		exit(1);
-	case 0:
-		close(1);
-		if (dup2(pipe_fd[1], 1) == -1) {
-			perror("dup2");
-			exit(1);
-		}
-		execl("/usr/sbin/xl", "xl", "domid", name, NULL);
-		perror("execl");
-		exit(1);
-	default:;
-	}
-	ret = read(pipe_fd[0], buf, sizeof(buf)-1);
-	if (ret < 0) {
-		perror("read");
-		exit(1);
-	}
-	buf[ret] = 0;
-	domid = strtoul(buf, &endptr, 10);
-	if (domid <= 0 || *endptr != '\n') {
-		fprintf(stderr, "Cannot get DispVM xid\n");
-		exit(1);
-	}
-	if (waitpid(pid, &status, 0) < 0) {
-		perror("waitpid");
-		exit(1);
-	}
-	if (status != 0) {
-		fprintf(stderr, "Error getting DispVM xid\n");
-		exit(1);
-	}
-	return domid;
-}
-
-
-const char *gettime(void)
-{
-	static char retbuf[60];
-	struct timeval tv;
-	gettimeofday(&tv, NULL);
-	snprintf(retbuf, sizeof(retbuf), "%lld.%06lld",
-		 (long long) tv.tv_sec, (long long) tv.tv_usec);
-	return retbuf;
-}
-
-int actually_do_unlink = 1;
-#define FAST_FLAG_PATH "/var/run/qubes/fast-block-attach"
-void set_fast_flag(void)
-{
-	int fd = open(FAST_FLAG_PATH, O_CREAT | O_RDONLY, 0600);
-	if (fd < 0) {
-		perror("set_fast_flag");
-		exit(1);
-	}
-	close(fd);
-}
-
-void rm_fast_flag(void)
-{
-	if (actually_do_unlink)
-		unlink(FAST_FLAG_PATH);
-}
-
-#define BUFSIZE (512*1024)
-void do_read(int fd)
-{
-	static char buf[BUFSIZE];
-	int n;
-	while ((n = read(fd, buf, BUFSIZE))) {
-		if (n < 0) {
-			perror("read savefile");
-			exit(1);
-		}
-	}
-}
-
-void preload_cache(int fd)
-{
-	signal(SIGCHLD, SIG_IGN);
-	switch (fork()) {
-	case -1:
-		perror("fork");
-		exit(1);
-	case 0:
-		actually_do_unlink = 0;
-		do_read(fd);
-		fprintf(stderr, "time=%s, fs cache preload complete\n",
-			gettime());
-		exit(0);
-	default:
-		close(fd);
-	}
-}
-
-void start_rexec(int domid, const char *domain_name, const char *default_user)
-{
-	int pid, status;
-	char dstr[40];
-	snprintf(dstr, sizeof(dstr), "%d", domid);
-	switch (pid = fork()) {
-	case -1:
-		perror("fork");
-		exit(1);
-	case 0:
-		execl("/usr/lib/qubes/qrexec-daemon", "qrexec-daemon",
-		      dstr, domain_name, default_user, NULL);
-		perror("execl");
-		exit(1);
-	default:;
-	}
-	if (waitpid(pid, &status, 0) < 0) {
-		perror("waitpid");
-		exit(1);
-	}
-}
-
-
-void start_guid(int domid, int argc, char **argv)
-{
-	int i;
-	char dstr[40];
-	char *guid_args[argc + 4];
-	snprintf(dstr, sizeof(dstr), "%d", domid);
-	guid_args[0] = "qubes-guid";
-	guid_args[1] = "-d";
-	guid_args[2] = dstr;
-	for (i = 0; i < argc; i++)
-		guid_args[i+3] = argv[i];
-	guid_args[argc+3] = NULL;
-	execv("/usr/bin/qubes-guid", guid_args);
-	perror("execv");
-}
-
-const char *dispname_by_dispid(int dispid)
-{
-	static char retbuf[16];
-	snprintf(retbuf, sizeof(retbuf), "disp%d", dispid);
-	return retbuf;
-}
-
-const char *build_dvm_ip(int netvm, int id)
-{
-	static char buf[256];
-	snprintf(buf, sizeof(buf), "10.138.%d.%d", netvm, (id % 254) + 1);
-	return buf;
-}
-
-#define NAME_PATTERN "/volatile.img"
-// replaces the unique portions of the savefile with per-dvm values
-// returns the name of VM the savefile was taken for 
-// by looking for /.../vmname/volatile.img
-// normally, it should be "templatename-dvm"
-const char *get_vmname_from_savefile(int fd)
-{
-	int buflen;
-	static char buf[4096];
-	char *name;
-	char *slash;
-	if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
-		perror("lseek vm conf");
-		exit(1);
-	}
-	buflen = read(fd, buf, sizeof(buf) - 1);
-	if (buflen < 0) {
-		perror("read vm conf");
-		exit(1);
-	}
-	buf[buflen] = 0;
-	name = strstr(buf, NAME_PATTERN);
-	if (!name) {
-		fprintf(stderr,
-			"cannot find '"NAME_PATTERN"' in savefile\n");
-		exit(1);
-	}
-	*name = 0;
-	slash = name - 1;
-	while (slash >= buf && slash[0] && slash[0] != '/')
-		slash--;
-	if (slash < buf || !*slash) {
-		fprintf(stderr, "cannot find / in savefile\n");
-		exit(1);
-	}
-	return slash + 1;
-}
-
-void fill_field(FILE *conf, const char *field, int dispid, int netvm_id)
-{
-	if (!strcmp(field, "NAME")) {
-		fprintf(conf, "%s", dispname_by_dispid(dispid));
-	} else if (!strcmp(field, "MAC")) {
-		fprintf(conf, "00:16:3e:7c:8b:%02x", dispid);
-	} else if (!strcmp(field, "IP")) {
-		fprintf(conf, "%s", build_dvm_ip(netvm_id, dispid));
-	} else if (!strcmp(field, "UUID")) {
-		// currently not present in conf file
-		fprintf(conf, "064cd14c-95ad-4fc2-a4c9-cf9f522e5b%02x", dispid);
-	} else {
-		fprintf(stderr, "unknown field in vm conf: %s\n", field);
-		exit(1);
-	}
-}
-
-// modify the config file. conf = FILE of the new config,
-// conf_templ - fd of config template
-// pattern - pattern to search for
-// val - string to replace pattern with
-void fix_conffile(FILE *conf, int conf_templ, int dispid, int netvm_id)
-{
-	int buflen = 0, cur_len = 0;
-	char buf[4096];
-	char *bufpos = buf;
-	char *pattern, *patternend;
-
-	/* read config template */
-	if (lseek(conf_templ, 0, SEEK_SET) == (off_t)-1) {
-		perror("lseek vm conf");
-		exit(1);
-	}
-	while ((cur_len = read(conf_templ, buf+buflen, sizeof(buf)-buflen)) > 0) {
-		buflen+=cur_len;
-		if (buflen >= (int)sizeof(buf)) {
-			/* We'll false positive if the file is exactly sizeof(buf) bytes,
-                           as we don't know if there's any more stuff in the file */
-			fprintf(stderr, "vm conf too large\n");
-			exit(1);
-		}
-	}
-	if (cur_len < 0) {
-		perror("read vm conf");
-		exit(1);
-	}
-
-	while ((pattern = index(bufpos, '%'))) {
-		fwrite(bufpos, 1, pattern-bufpos, conf);
-		if (ferror(conf)) {
-			perror("write vm conf");
-			exit(1);
-		}
-		patternend = index(pattern+1, '%');
-		if (!patternend) {
-			fprintf(stderr, "Unmatched '%%' in VM config\n");
-			exit(1);
-		}
-		*patternend = '\0';
-		fill_field(conf, pattern+1, dispid, netvm_id);
-		bufpos = patternend+1;
-	}
-	while ((cur_len = fwrite(bufpos, 1, buflen-(bufpos-buf), conf)) > 0) {
-		bufpos+=cur_len;
-	}
-	if (ferror(conf)) {
-		perror("write vm conf");
-		exit(1);
-	}
-}
-
-
-void unpack_cows(const char *name)
-{
-	char vmdir[4096];
-	char tarfile[4096];
-	int status;
-	snprintf(vmdir, sizeof(vmdir), "/var/lib/qubes/appvms/%s", name);
-	snprintf(tarfile, sizeof(tarfile),
-		 "/var/lib/qubes/appvms/%s/saved-cows.tar", name);
-	switch (fork()) {
-	case -1:
-		perror("fork");
-		exit(1);
-	case 0:
-		execl("/bin/tar", "tar", "-C", vmdir, "-Sxf",
-		      tarfile, NULL);
-		perror("execl");
-		exit(1);
-	default:
-		wait(&status);
-		if (WEXITSTATUS(status)) {
-			fprintf(stderr, "tar exited with status=0x%x\n",
-				status);
-			exit(1);
-		}
-		fprintf(stderr, "time=%s, cows restored\n", gettime());
-
-	}
-}
-
-void write_xs_single(struct xs_handle *xs, int domid, const char *name,
-		     const char *val)
-{
-	char key[256];
-	snprintf(key, sizeof(key), "/local/domain/%d/%s", domid, name);
-	if (!xs_write(xs, XBT_NULL, key, val, strlen(val))) {
-		fprintf(stderr, "xs_write\n");
-		exit(1);
-	}
-}
-
-void perm_xs_single(struct xs_handle *xs, int domid, const char *name,
-		     struct xs_permissions *perms, int nperms)
-{
-	char key[256];
-	snprintf(key, sizeof(key), "/local/domain/%d/%s", domid, name);
-	if (!xs_set_permissions(xs, XBT_NULL, key, perms, nperms)) {
-		fprintf(stderr, "xs_set_permissions\n");
-		exit(1);
-	}
-}
-
-int get_netvm_id_from_name(const char *name)
-{
-	int fd, n;
-	char netvm_id[256];
-	char netvm_id_path[256];
-	snprintf(netvm_id_path, sizeof(netvm_id_path),
-		 "/var/lib/qubes/appvms/%s/netvm-id.txt", name);
-	fd = open(netvm_id_path, O_RDONLY);
-	if (fd < 0) {
-		perror("open netvm_id");
-		exit(1);
-	}
-	n = read(fd, netvm_id, sizeof(netvm_id) - 1);
-	if (n < 0) {
-		perror("read netvm_id");
-		exit(1);
-	}
-	close(fd);
-	netvm_id[n] = 0;
-	n = atoi(netvm_id);
-	return n;
-}
-
-void setup_xenstore(int netvm_id, int domid, int dvmid)
-{
-	char val[256];
-	struct xs_handle *xs = xs_daemon_open();
-	struct xs_permissions perm[1];
-	if (!xs) {
-		perror("xs_daemon_open");
-		exit(1);
-	}
-
-	write_xs_single(xs, domid, "qubes-ip",
-			build_dvm_ip(netvm_id, dvmid));
-	write_xs_single(xs, domid, "qubes-netmask", "255.255.0.0");
-	snprintf(val, sizeof(val), "10.137.%d.1", netvm_id);
-	write_xs_single(xs, domid, "qubes-gateway", val);
-	snprintf(val, sizeof(val), "10.137.%d.254", netvm_id);
-	write_xs_single(xs, domid, "qubes-secondary-dns", val);
-	write_xs_single(xs, domid, "qubes-vm-type", "DisposableVM");
-	write_xs_single(xs, domid, "qubes-restore-complete", "True");
-
-	perm[0].id = domid;
-	perm[0].perms = XS_PERM_NONE;
-	perm_xs_single(xs, domid, "device", perm, 1);
-	perm_xs_single(xs, domid, "memory", perm, 1);
-
-	xs_daemon_close(xs);
-
-}
-
-int get_next_disposable_id(void)
-{
-	int seq = 0;
-	int fd = open("/var/run/qubes/dispVM.seq", O_RDWR);
-	if (fd < 0) {
-		perror("open dispVM.seq");
-		exit(1);
-	}
-	if (flock(fd, LOCK_EX) < 0) {
-		perror("lock dispVM.seq");
-		exit(1);
-	}
-	if (read(fd, &seq, sizeof(seq)) != sizeof(seq)) {
-		perror("read dispVM.seq");
-		exit(1);
-	}
-	seq++;
-	if (seq < 0) {
-		fprintf(stderr, "Invalid data in dispVM.seq");
-		exit(1);
-	}
-	if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
-		perror("seek dispVM.seq");
-		exit(1);
-	}
-	if (write(fd, &seq, sizeof(seq)) != sizeof(seq)) {
-		perror("write dispVM.seq");
-		exit(1);
-	}
-	flock(fd, LOCK_UN);
-	close(fd);
-	return seq;
-}
-
-void write_varrun_domid(int domid, const char *dispname, const char *orig)
-{
-	FILE *f = fopen("/var/run/qubes/dispVM.xid", "w");
-	if (!f) {
-		perror("fopen dispVM.xid");
-		exit(1);
-	}
-	fprintf(f, "%d\n%s\n%s\n", domid, dispname, orig);
-	fclose(f);
-}
-
-
-void redirect_stderr(void)
-{
-	int fd = open("/var/log/qubes/qubes-restore.log",
-		      O_CREAT | O_TRUNC | O_WRONLY, 0600);
-	if (fd < 0) {
-		syslog(LOG_DAEMON | LOG_ERR, "open qubes-restore.log");
-		exit(1);
-	}
-	dup2(fd, 2);
-	if (fd != 2)
-		close(fd);
-}
-
-void usage(char *argv0) {
-	fprintf(stderr,
-			"usage: %s -s savefile -c conf_templ [-u default_user] -- [guid args] \n", argv0);
-}
-
-int main(int argc, char **argv)
-{
-	int conf_templ_fd, domid, dispid, netvm_id;
-	FILE *conf;
-	int opt;
-	const char *name;
-	char confname[256];
-	char *default_user = NULL;
-	char *savefile = NULL;
-	char *conf_templ = NULL;
-	int guid_args_start = 3;
-
-	while ((opt = getopt(argc, argv, "u:c:s:")) != -1) {
-		switch (opt) {
-			case 'u':
-				default_user = optarg;
-				break;
-			case 'c':
-				conf_templ = optarg;
-				break;
-			case 's':
-				savefile = optarg;
-				break;
-			default:
-				usage(argv[0]);
-				break;
-		}
-	}
-	guid_args_start = optind;
-
-	if (!conf_templ || !savefile) {
-		usage(argv[0]);
-		exit(1);
-	}
-	redirect_stderr();
-	fprintf(stderr, "time=%s, starting\n", gettime());
-	set_fast_flag();
-	atexit(rm_fast_flag);
-	conf_templ_fd = open(conf_templ, O_RDONLY);
-	if (conf_templ_fd < 0) {
-		perror("fopen vm conf");
-		exit(1);
-	}
-	dispid = get_next_disposable_id();
-	name = get_vmname_from_savefile(conf_templ_fd);
-	netvm_id = get_netvm_id_from_name(name);
-	snprintf(confname, sizeof(confname), "/tmp/qubes-dvm-%d.xl", dispid);
-	conf = fopen(confname, "w");
-	if (!conf) {
-		perror("fopen new vm conf");
-		exit(1);
-	}
-	fix_conffile(conf, conf_templ_fd, dispid, netvm_id);
-	close(conf_templ_fd);
-	fclose(conf);
-//      printf("name=%s\n", name);
-	unpack_cows(name);
-//      no preloading for now, assume savefile in shm
-//      preload_cache(fd);
-	domid=restore_domain(savefile, confname, dispname_by_dispid(dispid));
-	write_varrun_domid(domid, dispname_by_dispid(dispid), name);
-	fprintf(stderr,
-		"time=%s, created domid=%d, creating xenstore entries\n",
-		gettime(), domid);
-	setup_xenstore(netvm_id, domid, dispid);
-	rm_fast_flag();
-	fprintf(stderr, "time=%s, starting qrexec\n", gettime());
-	start_rexec(domid, dispname_by_dispid(dispid), default_user);
-	fprintf(stderr, "time=%s, starting qubes-guid\n", gettime());
-	start_guid(domid, argc-guid_args_start, argv+guid_args_start);
-	fprintf(stderr, "time=%s, started qubes-guid\n", gettime());
-	return 0;
-}

+ 0 - 2
rpm_spec/core-dom0.spec

@@ -155,7 +155,6 @@ cp qubes-rpc-policy/qubes.NotifyUpdates.policy $RPM_BUILD_ROOT/etc/qubes-rpc/pol
 cp qubes-rpc/qubes.NotifyUpdates $RPM_BUILD_ROOT/etc/qubes-rpc/
 
 cp dispvm/xenstore-watch $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes
-cp dispvm/qubes-restore $RPM_BUILD_ROOT/usr/lib/qubes
 cp dispvm/qubes-prepare-saved-domain.sh  $RPM_BUILD_ROOT/usr/lib/qubes
 cp dispvm/qubes-update-dispvm-savefile-with-progress.sh  $RPM_BUILD_ROOT/usr/lib/qubes
 cp dispvm/qfile-daemon-dvm $RPM_BUILD_ROOT/usr/lib/qubes
@@ -322,7 +321,6 @@ fi
 /usr/share/qubes/xen-vm-template.xml
 /usr/share/qubes/vm-template-hvm.xml
 /usr/bin/xenstore-watch-qubes
-/usr/lib/qubes/qubes-restore
 /usr/lib/qubes/qubes-prepare-saved-domain.sh
 /usr/lib/qubes/qubes-update-dispvm-savefile-with-progress.sh
 /etc/xen/scripts/block.qubes