From 4a0d6b03c666bdcf8efe8ddf796cdfb158f226ff Mon Sep 17 00:00:00 2001
From: Tomasz Sterna
Date: Tue, 19 Apr 2011 00:11:45 +0200
Subject: [PATCH 01/90] Disable unnecessary Upstart, Init and XDG Autostart
 services. #209

Move unneeded /etc/init/*.conf services to /etc/init/*.conf.disabled.
Start CUPS only in AppVM and UtilityVM.
Start XDG Autostart applications only in domains where they make sense.

---
 rpm_spec/core-commonvm.spec | 52 +++++++++++++++++++++++++++++++++++--
 1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec
index 0da0f39c..eb073cd2 100644
--- a/rpm_spec/core-commonvm.spec
+++ b/rpm_spec/core-commonvm.spec
@@ -80,8 +80,56 @@ cp /var/lib/qubes/serial.conf /etc/init/serial.conf

 %post

-# Disable gpk-update-icon
-sed 's/^NotShowIn=KDE;$/\0QUBES;/' -i /etc/xdg/autostart/gpk-update-icon.desktop
+# disable some Upstart services
+for F in plymouth-shutdown prefdm splash-manager start-ttys tty ; do
+    if [ -e /etc/init/$F.conf ]; then
+        mv -f /etc/init/$F.conf /etc/init/$F.conf.disabled
+    fi
+done
+
+remove_ShowIn () {
+    if [ -e /etc/xdg/autostart/$1.desktop ]; then
+        sed -i '/^\(Not\|Only\)ShowIn/d' /etc/xdg/autostart/$1.desktop
+    fi
+}
+
+# don't want it at all
+for F in abrt-applet deja-dup-monitor imsettings-start krb5-auth-dialog pulseaudio restorecond sealertauto ; do
+    if [ -e /etc/xdg/autostart/$F.desktop ]; then
+        remove_ShowIn $F
+        echo 'NotShowIn=QUBES' >> /etc/xdg/autostart/$F.desktop
+    fi
+done
+
+# don't want it in DisposableVM
+for F in gcm-apply ; do
+    if [ -e /etc/xdg/autostart/$F.desktop ]; then
+        remove_ShowIn $F
+        echo 'NotShowIn=DisposableVM' >> /etc/xdg/autostart/$F.desktop
+    fi
+done
+
+# want it in AppVM and StandaloneVM only
+for F in gnome-keyring-gpg gnome-keyring-pkcs11 gnome-keyring-secrets gnome-keyring-ssh gnome-settings-daemon user-dirs-update-gtk gsettings-data-convert ; do
+    if [ -e /etc/xdg/autostart/$F.desktop ]; then
+        remove_ShowIn $F
+        echo 'OnlyShowIn=GNOME;AppVM;StandaloneVM;' >> /etc/xdg/autostart/$F.desktop
+    fi
+done
+
+# remove existing rule to add own later
+for F in gpk-update-icon nm-applet print-applet ; do
+    remove_ShowIn $F
+done
+
+echo 'OnlyShowIn=GNOME;StandaloneVM;TemplateVM;' >> /etc/xdg/autostart/gpk-update-icon.desktop || :
+echo 'OnlyShowIn=GNOME;NetVM;' >> /etc/xdg/autostart/nm-applet.desktop || :
+echo 'OnlyShowIn=GNOME;AppVM;UtilityVM;' >> /etc/xdg/autostart/print-applet.desktop || :
+
+# start cups only in AppVM and UtilityVM
+if [ -e /etc/init.d/cups ] && ! grep -q xenstore-read /etc/init.d/cups ; then
+    sed -i '/echo.*Starting /s#^#\ntype=$(/usr/bin/xenstore-read qubes_vm_type)\nif [ "$type" != "AppVM" -a "$type" != "UtilityVM" ]; then\nreturn 0\nfi\n\n#' /etc/init.d/cups
+fi

 if [ "$1" != 1 ] ; then
 # do this whole %post thing only when updating for the first time...

From 3f310e5f3e7b123566f1be79ed74fcb7d18a76c4 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 01:21:48 +0200
Subject: [PATCH 02/90] Adapt vchan to xen-libs-4.1.0 API.
Add #ifdefs to support new and old API --- dom0/restore/xenfreepages.c | 5 +++++ qrexec/txrx-vchan.c | 9 ++++++++ vchan/init.c | 42 +++++++++++++++++++++++++++++++++++++ vchan/io.c | 2 +- vchan/libvchan.h | 5 +++++ 5 files changed, 62 insertions(+), 1 deletion(-) diff --git a/dom0/restore/xenfreepages.c b/dom0/restore/xenfreepages.c index ced36dbb..90d2a514 100644 --- a/dom0/restore/xenfreepages.c +++ b/dom0/restore/xenfreepages.c @@ -4,8 +4,13 @@ struct xen_sysctl_physinfo xphysinfo; main() { +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_interface *handle = xc_interface_open(NULL, NULL, 0); + if (!handle) { +#else int handle = xc_interface_open(); if (handle == -1) { +#endif perror("xc_interface_open"); exit(1); } diff --git a/qrexec/txrx-vchan.c b/qrexec/txrx-vchan.c index 2a95180f..01e54c5a 100644 --- a/qrexec/txrx-vchan.c +++ b/qrexec/txrx-vchan.c @@ -83,7 +83,11 @@ int buffer_space_vchan_ext() // if the remote domain is destroyed, we get no notification // thus, we check for the status periodically +#ifdef XENCTRL_HAS_XC_INTERFACE +static xc_interface *xc_handle = NULL; +#else static int xc_handle = -1; +#endif void slow_check_for_libvchan_is_eof(struct libvchan *ctrl) { struct evtchn_status evst; @@ -198,8 +202,13 @@ char *peer_client_init(int dom, int port) // now client init should succeed; "while" is redundant while (!(ctrl = libvchan_client_init(dom, port))); +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_handle = xc_interface_open(NULL, 0, 0); + if (!xc_handle) { +#else xc_handle = xc_interface_open(); if (xc_handle < 0) { +#endif perror("xc_interface_open"); exit(1); } diff --git a/vchan/init.c b/vchan/init.c index 4a3da4f2..30cc2001 100644 --- a/vchan/init.c +++ b/vchan/init.c @@ -65,15 +65,25 @@ static int server_interface_init(struct libvchan *ctrl, int devno) struct xs_handle *xs; char buf[64]; char ref[16]; +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_evtchn *evfd; +#else int evfd; +#endif evtchn_port_or_error_t port; xs = xs_domain_open(); if (!xs) { return ret; } +#ifdef XENCTRL_HAS_XC_INTERFACE + evfd = xc_evtchn_open(NULL, 0); + if (!evfd) + goto fail; +#else evfd = xc_evtchn_open(); if (evfd < 0) goto fail; +#endif ctrl->evfd = evfd; // the following hardcoded 0 is the peer domain id port = xc_evtchn_bind_unbound_port(evfd, 0); @@ -98,7 +108,11 @@ static int server_interface_init(struct libvchan *ctrl, int devno) ret = 0; fail2: if (ret) +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_evtchn_close(evfd); +#else close(evfd); +#endif fail: xs_daemon_close(xs); return ret; @@ -152,10 +166,18 @@ static int client_interface_init(struct libvchan *ctrl, int domain, int devno) int ret = -1; unsigned int len; struct xs_handle *xs; +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_interface *xcfd; +#else int xcfd; +#endif char buf[64]; char *ref; +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_evtchn *evfd; +#else int evfd; +#endif int remote_port; xs = xs_daemon_open(); if (!xs) { @@ -181,23 +203,43 @@ static int client_interface_init(struct libvchan *ctrl, int domain, int devno) if (!remote_port) goto fail; free(ref); +#ifdef XENCTRL_HAS_XC_INTERFACE + xcfd = xc_interface_open(NULL, NULL, 0); + if (!xcfd) + goto fail; +#else xcfd = xc_interface_open(); if (xcfd < 0) goto fail; +#endif ctrl->ring = (struct vchan_interface *) xc_map_foreign_range(xcfd, domain, 4096, PROT_READ | PROT_WRITE, ctrl->ring_ref); +#ifdef XENCTRL_HAS_XC_INTERFACE + xc_interface_close(xcfd); +#else close(xcfd); +#endif if (ctrl->ring == 0 || ctrl->ring == MAP_FAILED) goto fail; +#ifdef XENCTRL_HAS_XC_INTERFACE + evfd = xc_evtchn_open(NULL, 0); + if (!evfd) + 
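/* Editor's note, not part of the patch: under XENCTRL_HAS_XC_INTERFACE the
   handles are opaque pointers (xc_interface *, xc_evtchn *) that are NULL
   on failure, while the old API returned plain int file descriptors that
   are negative on failure -- hence each #ifdef pair in this series tests
   the open() result differently. */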
goto fail;
+#else
 	evfd = xc_evtchn_open();
 	if (evfd < 0)
 		goto fail;
+#endif
 	ctrl->evfd = evfd;
 	ctrl->evport = xc_evtchn_bind_interdomain(evfd, domain, remote_port);
 	if (ctrl->evport < 0 || xc_evtchn_notify(evfd, ctrl->evport))
+#ifdef XENCTRL_HAS_XC_INTERFACE
+		xc_evtchn_close(evfd);
+#else
 		close(evfd);
+#endif
 	else
 		ret = 0;
 fail:
diff --git a/vchan/io.c b/vchan/io.c
index 7b524279..6b16f406 100644
--- a/vchan/io.c
+++ b/vchan/io.c
@@ -149,7 +149,7 @@ int libvchan_close(struct libvchan *ctrl)
 /// The fd to use for select() set
 int libvchan_fd_for_select(struct libvchan *ctrl)
 {
-	return ctrl->evfd;
+	return xc_evtchn_fd(ctrl->evfd);
 }

 /// Unmasks event channel; must be called before calling select(), and only then
diff --git a/vchan/libvchan.h b/vchan/libvchan.h
index 652284ba..6a6025fb 100644
--- a/vchan/libvchan.h
+++ b/vchan/libvchan.h
@@ -20,6 +20,7 @@
 */
 #include <stdint.h>
+#include <xenctrl.h>
 typedef uint32_t VCHAN_RING_IDX;

 /// struct vchan_interface is placed in memory shared between domains
@@ -37,7 +38,11 @@ struct libvchan {
 	struct vchan_interface *ring;
 	uint32_t ring_ref;
 	/// descriptor to event channel interface
+#ifdef XENCTRL_HAS_XC_INTERFACE
+	xc_evtchn *evfd;
+#else
 	int evfd;
+#endif
 	int evport;
 	VCHAN_RING_IDX *wr_cons, *wr_prod, *rd_cons, *rd_prod;
 	char *rd_ring, *wr_ring;

From 860bab5662c732631e69665bba64210a76877126 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 01:38:07 +0200
Subject: [PATCH 03/90] Rename xenstore-watch to xenstore-watch-qubes

Xen 4.1.0 provides its own xenstore-watch with different args. We can't
use it by default, because we still support Xen 3.4.

---
 dom0/restore/qubes_prepare_saved_domain.sh | 2 +-
 rpm_spec/core-dom0.spec | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh
index a1bf0770..bd1da7e9 100755
--- a/dom0/restore/qubes_prepare_saved_domain.sh
+++ b/dom0/restore/qubes_prepare_saved_domain.sh
@@ -42,7 +42,7 @@ if [ -n "$ENCODED_SCRIPT" ] ; then
 fi
 #set -x
 xenstore-write /local/domain/$ID/qubes_save_request 1
-xenstore-watch /local/domain/$ID/device/qubes_used_mem
+xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem
 xenstore-read /local/domain/$ID/qubes_gateway | \
 	cut -d .
-f 2 | tr -d "\n" > $VMDIR/netvm_id.txt xm block-detach $1 /dev/xvdb diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 9fc3924c..1ac8c067 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -94,7 +94,7 @@ cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_daemon $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_client $RPM_BUILD_ROOT/usr/lib/qubes/ -cp restore/xenstore-watch restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin +cp restore/xenstore-watch restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes cp restore/qubes_restore restore/xenfreepages $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qubes_prepare_saved_domain.sh $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qfile-daemon-dvm $RPM_BUILD_ROOT/usr/lib/qubes @@ -279,7 +279,7 @@ fi /usr/lib64/pm-utils/sleep.d/01qubes-sync-vms-clock /usr/lib64/pm-utils/sleep.d/01qubes-suspend-netvm /usr/lib64/pm-utils/sleep.d/02qubes-pause-vms -/usr/bin/xenstore-watch +/usr/bin/xenstore-watch-qubes /usr/lib/qubes/qubes_restore /usr/lib/qubes/qubes_prepare_saved_domain.sh /etc/xen/scripts/block.qubes From d3c96d12bf7940eb41acb5e29e4c04a10f9b07f0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 01:42:42 +0200 Subject: [PATCH 04/90] Rename try 2... --- rpm_spec/core-dom0.spec | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 1ac8c067..d3aefce6 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -94,7 +94,8 @@ cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_daemon $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_client $RPM_BUILD_ROOT/usr/lib/qubes/ -cp restore/xenstore-watch restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes +cp restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin +cp restore/xenstore-watch $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes cp restore/qubes_restore restore/xenfreepages $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qubes_prepare_saved_domain.sh $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qfile-daemon-dvm $RPM_BUILD_ROOT/usr/lib/qubes From ae661a614814daaef6bcfdbb5e28d3e8236011a4 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 12:53:57 +0200 Subject: [PATCH 05/90] Down net ifaces on suspend (#146) NetworkManager stop isn't enough --- dom0/pm-utils/01qubes-suspend-netvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/pm-utils/01qubes-suspend-netvm b/dom0/pm-utils/01qubes-suspend-netvm index cf635ae6..dca33246 100755 --- a/dom0/pm-utils/01qubes-suspend-netvm +++ b/dom0/pm-utils/01qubes-suspend-netvm @@ -16,7 +16,7 @@ get_running_netvms() { suspend_net() { for VM in `get_running_netvms`; do - qvm-run -u root --pass_io $VM "service NetworkManager stop" + qvm-run -u root --pass_io $VM 'service NetworkManager stop; for if in `ls /sys/class/net|grep -v "lo\|vif"`; do ip l s $if down; done' done # Ignore exit status from netvm... 
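# Editor's note (not part of the patch): the qvm-run one-liner above first
# stops NetworkManager, then brings down every interface except lo and the
# vif* backends inside each running NetVM, so suspend does not leave
# half-configured links behind.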
 	return 0

From fb4ee67b7b862f3823d2604668732f7399dc4f8a Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 13:06:34 +0200
Subject: [PATCH 06/90] Show progress of qvm-copy-to-vm by default (#221)

---
 appvm/qvm-copy-to-vm | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/appvm/qvm-copy-to-vm b/appvm/qvm-copy-to-vm
index 4817b43b..480f2ca3 100755
--- a/appvm/qvm-copy-to-vm
+++ b/appvm/qvm-copy-to-vm
@@ -20,15 +20,15 @@
 #
 #

-if [ x"$1" = "x--with-progress" ] ; then
-	DO_PROGRESS=1
+if [ x"$1" = "x--without-progress" ] ; then
+	DO_PROGRESS=0
 	shift
 else
-	DO_PROGRESS=0
+	DO_PROGRESS=1
 fi

 if [ $# -lt 2 ] ; then
-	echo usage: $0 '[--with-progress] dest_vmname file [file]+'
+	echo usage: $0 '[--without-progress] dest_vmname file [file]+'
 	exit 1
 fi

From fa99d32d6ecc1ea9011dfd0a4a3aff161a118cd8 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 13:07:13 +0200
Subject: [PATCH 07/90] Fix typo

---
 appvm/qvm-copy-to-vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/appvm/qvm-copy-to-vm b/appvm/qvm-copy-to-vm
index 480f2ca3..eaf8c8f7 100755
--- a/appvm/qvm-copy-to-vm
+++ b/appvm/qvm-copy-to-vm
@@ -49,7 +49,7 @@ while true ; do
 	read agentpid sentsize agentstatus < $PROGRESS_FILE
 	if [ "x"$agentstatus = x ] ; then continue ; fi
 	if ! [ -e /proc/$agentpid ] ; then break ; fi
-	if [ "x"$agentstatus = xdone ] ; then break ; fi
+	if [ "x"$agentstatus = xDONE ] ; then break ; fi
 	CURRSIZE=$(($sentsize/1024))
 	if [ $DO_PROGRESS = 1 ] ; then
 		echo -ne "\r sent $CURRSIZE/$SIZE KB "

From ec3f42798817395b6f6366e6eb5c4fbe2e1dce82 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 13:10:18 +0200
Subject: [PATCH 08/90] Run qubes_core_appvm also in TemplateVM (#222)

---
 appvm/qubes_core_appvm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/appvm/qubes_core_appvm b/appvm/qubes_core_appvm
index 32c0833f..a4c24651 100755
--- a/appvm/qubes_core_appvm
+++ b/appvm/qubes_core_appvm
@@ -26,7 +26,7 @@ start()
 	fi

 	type=$(/usr/bin/xenstore-read qubes_vm_type)
-	if [ "$type" != "AppVM" -a "$type" != "DisposableVM" ]; then
+	if [ "$type" != "AppVM" -a "$type" != "DisposableVM" -a "$type" != "TemplateVM" ]; then
 		# This script runs only on AppVMs
 		return 0
 	fi

From 1e53115eab39548f73ecd0c63e935f4f2162c023 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 19 Apr 2011 13:38:24 +0200
Subject: [PATCH 09/90] Create appmenus not only for AppVM (#225)

Needed also by TemplateVM, and maybe others (service VMs).
For a TemplateVM, a separate appmenus template (apps-template.templates)
is used.
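[Editor's note: a minimal Python sketch, not part of the patch, of the
per-type directory convention that the new optional script argument
introduces below; the mapping is inferred from the usage strings in the
scripts that follow, and the service-VM entries are assumptions.]

    # Resolve a VM's base directory from its type, as
    # create_apps_for_appvm.sh does with its third
    # [appvms|vm-templates|servicevms] argument (default: appvms).
    VM_TYPE_DIRS = {
        "AppVM": "appvms",
        "TemplateVM": "vm-templates",
        "NetVM": "servicevms",   # assumption: service VMs share one dir
        "ProxyVM": "servicevms",
    }

    def vm_dir(vm_name, vm_type="AppVM"):
        return "/var/lib/qubes/{0}/{1}".format(VM_TYPE_DIRS[vm_type], vm_name)

    assert vm_dir("work") == "/var/lib/qubes/appvms/work"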
--- dom0/aux-tools/create_apps_for_appvm.sh | 10 ++++-- dom0/qvm-core/qubes.py | 44 +++++++++++++++++-------- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/dom0/aux-tools/create_apps_for_appvm.sh b/dom0/aux-tools/create_apps_for_appvm.sh index 645d8d0c..4b84b377 100755 --- a/dom0/aux-tools/create_apps_for_appvm.sh +++ b/dom0/aux-tools/create_apps_for_appvm.sh @@ -22,11 +22,15 @@ SRCDIR=$1 VMNAME=$2 -VMDIR=/var/lib/qubes/appvms/$VMNAME +VMTYPE=$3 +if [ -z "$VMTYPE" ]; then + VMTYPE=appvms +fi +VMDIR=/var/lib/qubes/$VMTYPE/$VMNAME APPSDIR=$VMDIR/apps -if [ $# != 2 ]; then - echo "usage: $0 " +if [ $# -lt 2 ]; then + echo "usage: $0 [appvms|vm-templates|servicevms]" exit fi mkdir -p $APPSDIR diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 514deda6..0bf6d995 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -679,6 +679,19 @@ class QubesVm(object): # Create volatile.img self.reset_volatile_storage(source_template = source_template) + def create_appmenus(self, verbose, source_template = None): + if source_template is None: + source_template = self.template_vm + + try: + if source_template is not None: + subprocess.check_call ([qubes_appmenu_create_cmd, source_template.appmenus_templates_dir, self.name]) + else: + # Only add apps to menu + subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name, vmtype]) + except subprocess.CalledProcessError: + print "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) + def verify_files(self): if dry_run: return @@ -1126,13 +1139,29 @@ class QubesTemplateVm(QubesVm): shutil.copytree (src_template_vm.kernels_dir, self.kernels_dir) if verbose: - print "--> Copying the template's appvm templates dir:\n{0} ==>\n{1}".\ + print "--> Copying the template's appmenus templates dir:\n{0} ==>\n{1}".\ format(src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) shutil.copytree (src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) # Create root-cow.img self.commit_changes() + # Create appmenus + self.create_appmenus(verbose, source_template = self) + + def create_appmenus(self, verbose, source_template = None): + if source_template is None: + source_template = self.template_vm + + try: + if source_template is not None: + subprocess.check_call ([qubes_appmenu_create_cmd, source_template.dir_path + "/apps-template.templates", self.name, "vm-templates"]) + else: + # Only add apps to menu + subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name, vmtype]) + except subprocess.CalledProcessError: + print "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) + def verify_files(self): if dry_run: return @@ -1605,19 +1634,6 @@ class QubesAppVm(QubesVm): subprocess.check_call ([qubes_appmenu_remove_cmd, self.name]) super(QubesAppVm, self).remove_from_disk() - def create_appmenus(self, verbose, source_template = None): - if source_template is None: - source_template = self.template_vm - - try: - if source_template is not None: - subprocess.check_call ([qubes_appmenu_create_cmd, source_template.appmenus_templates_dir, self.name]) - else: - # Only add apps to menu - subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name]) - except subprocess.CalledProcessError: - print "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) - class QubesVmCollection(dict): """ From 067165e03066c1f3beb680ca5a877f73743a645c Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 15:56:00 +0200 
Subject: [PATCH 10/90] Link to icon on template clone (#225) --- dom0/qvm-core/qubes.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 0bf6d995..5d9d8259 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1143,6 +1143,11 @@ class QubesTemplateVm(QubesVm): format(src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) shutil.copytree (src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) + icon_path = "/usr/share/qubes/icons/template.png" + if verbose: + print "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path) + os.symlink (icon_path, self.icon_path) + # Create root-cow.img self.commit_changes() From 6eb39106bb51bdacf267cc0688c1e9acb68038a6 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 16:09:11 +0200 Subject: [PATCH 11/90] Include appmenus template for TemplateVM when clonning template files (#225) --- dom0/qvm-core/qubes.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 5d9d8259..1eac9008 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -69,6 +69,7 @@ default_netvms_conf_file = "netvm-template.conf" default_standalonevms_conf_file = "standalone-template.conf" default_templatevm_conf_template = "templatevm.conf" # needed for TemplateVM cloning default_appmenus_templates_subdir = "apps.templates" +default_appmenus_template_templates_subdir = "apps-template.templates" default_kernels_subdir = "kernels" default_firewall_conf_file = "firewall.xml" default_memory = 400 @@ -1031,6 +1032,7 @@ class QubesTemplateVm(QubesVm): self.templatevm_conf_template = self.dir_path + "/" + default_templatevm_conf_template self.kernels_dir = self.dir_path + "/" + default_kernels_subdir self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir + self.appmenus_template_templates_dir = self.dir_path + "/" + default_appmenus_template_templates_subdir self.appvms = QubesVmCollection() @property @@ -1143,6 +1145,11 @@ class QubesTemplateVm(QubesVm): format(src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) shutil.copytree (src_template_vm.appmenus_templates_dir, self.appmenus_templates_dir) + if verbose: + print "--> Copying the template's appmenus (for template) templates dir:\n{0} ==>\n{1}".\ + format(src_template_vm.appmenus_template_templates_dir, self.appmenus_template_templates_dir) + shutil.copytree (src_template_vm.appmenus_template_templates_dir, self.appmenus_template_templates_dir) + icon_path = "/usr/share/qubes/icons/template.png" if verbose: print "--> Creating icon symlink: {0} -> {1}".format(self.icon_path, icon_path) @@ -1152,7 +1159,7 @@ class QubesTemplateVm(QubesVm): self.commit_changes() # Create appmenus - self.create_appmenus(verbose, source_template = self) + self.create_appmenus(verbose, source_template = src_template_vm) def create_appmenus(self, verbose, source_template = None): if source_template is None: @@ -1160,7 +1167,7 @@ class QubesTemplateVm(QubesVm): try: if source_template is not None: - subprocess.check_call ([qubes_appmenu_create_cmd, source_template.dir_path + "/apps-template.templates", self.name, "vm-templates"]) + subprocess.check_call ([qubes_appmenu_create_cmd, source_template.appmenus_template_templates_dir, self.name, "vm-templates"]) else: # Only add apps to menu subprocess.check_call ([qubes_appmenu_create_cmd, "none", self.name, vmtype]) From 
ccecb27b5b08f2fee0dee0a509125edfcf5aff48 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 16:52:31 +0200 Subject: [PATCH 12/90] Use any directory template when creating appmenus (#225) --- dom0/aux-tools/create_apps_for_appvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/aux-tools/create_apps_for_appvm.sh b/dom0/aux-tools/create_apps_for_appvm.sh index 4b84b377..a57525df 100755 --- a/dom0/aux-tools/create_apps_for_appvm.sh +++ b/dom0/aux-tools/create_apps_for_appvm.sh @@ -39,7 +39,7 @@ if [ "$SRCDIR" != "none" ]; then echo "--> Converting Appmenu Templates..." find $SRCDIR -name "*.desktop" -exec /usr/lib/qubes/convert_apptemplate2vm.sh {} $APPSDIR $VMNAME $VMDIR \; - /usr/lib/qubes/convert_dirtemplate2vm.sh $SRCDIR/qubes-vm.directory.template $APPSDIR/$VMNAME-vm.directory $VMNAME $VMDIR + /usr/lib/qubes/convert_dirtemplate2vm.sh $SRCDIR/qubes-*.directory.template $APPSDIR/$VMNAME-vm.directory $VMNAME $VMDIR fi echo "--> Adding Apps to the Menu..." From e7190d0239eeeca2768b2d35425f961da9637f23 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 17:55:06 +0200 Subject: [PATCH 13/90] Clean appmenus on template remove (#225) --- dom0/aux-tools/remove_appvm_appmenus.sh | 6 +++++- dom0/qvm-core/qubes.py | 7 +++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/dom0/aux-tools/remove_appvm_appmenus.sh b/dom0/aux-tools/remove_appvm_appmenus.sh index 748132e3..a05e491f 100755 --- a/dom0/aux-tools/remove_appvm_appmenus.sh +++ b/dom0/aux-tools/remove_appvm_appmenus.sh @@ -1,6 +1,10 @@ #!/bin/sh VMNAME=$1 -VMDIR=/var/lib/qubes/appvms/$VMNAME +VMTYPE=$2 +if [ -z "$VMTYPE" ]; then + VMTYPE=appvms +fi +VMDIR=/var/lib/qubes/$VMTYPE/$VMNAME APPSDIR=$VMDIR/apps if [ $# != 1 ]; then diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 1eac9008..c080231b 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1174,6 +1174,13 @@ class QubesTemplateVm(QubesVm): except subprocess.CalledProcessError: print "Ooops, there was a problem creating appmenus for {0} VM!".format (self.name) + def remove_from_disk(self): + if dry_run: + return + + subprocess.check_call ([qubes_appmenu_remove_cmd, self.name, "vm-templates"]) + super(QubesTemplateVm, self).remove_from_disk() + def verify_files(self): if dry_run: return From 8a9bbbfc981d30a5663c58f1a17c9a504ce38a6d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 19 Apr 2011 18:06:01 +0200 Subject: [PATCH 14/90] Fix usage info (and args check) for remove_appvm_appmenus.sh (#225) --- dom0/aux-tools/remove_appvm_appmenus.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dom0/aux-tools/remove_appvm_appmenus.sh b/dom0/aux-tools/remove_appvm_appmenus.sh index a05e491f..dad609a2 100755 --- a/dom0/aux-tools/remove_appvm_appmenus.sh +++ b/dom0/aux-tools/remove_appvm_appmenus.sh @@ -7,8 +7,8 @@ fi VMDIR=/var/lib/qubes/$VMTYPE/$VMNAME APPSDIR=$VMDIR/apps -if [ $# != 1 ]; then - echo "usage: $0 " +if [ $# -lt 1 ]; then + echo "usage: $0 [appvms|vm-templates|servicevms]" exit fi From 47fea4258c2aa4f6ea1c9fd1991d4676e8eb9dbb Mon Sep 17 00:00:00 2001 From: Tomasz Sterna Date: Wed, 20 Apr 2011 00:56:58 +0200 Subject: [PATCH 15/90] We do not want to have StandaloneVM and UtilityVM types. 
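[Editor's note: a minimal Python sketch, not part of the patch, of the XDG
autostart OnlyShowIn/NotShowIn semantics that all of these spec-file edits
rely on; it assumes the VM's session advertises the VM type (AppVM, NetVM,
UpdateableVM, ...) among its desktop environment names.]

    # OnlyShowIn, when present, is an allow-list and wins; otherwise
    # NotShowIn may veto. Empty entries from a trailing ';' are dropped.
    def should_autostart(desktop_entry_lines, current_desktops):
        only_show_in = None
        not_show_in = set()
        for line in desktop_entry_lines:
            if line.startswith("OnlyShowIn="):
                only_show_in = set(line[len("OnlyShowIn="):].split(";")) - {""}
            elif line.startswith("NotShowIn="):
                not_show_in = set(line[len("NotShowIn="):].split(";")) - {""}
        if only_show_in is not None:
            return bool(only_show_in & set(current_desktops))
        return not (not_show_in & set(current_desktops))

    # gpk-update-icon after this patch: starts in updateable VMs only
    assert should_autostart(["OnlyShowIn=GNOME;UpdateableVM;"], ["UpdateableVM"])
    assert not should_autostart(["OnlyShowIn=GNOME;UpdateableVM;"], ["AppVM"])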
--- rpm_spec/core-commonvm.spec | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec index eb073cd2..bd0a5401 100644 --- a/rpm_spec/core-commonvm.spec +++ b/rpm_spec/core-commonvm.spec @@ -109,27 +109,21 @@ for F in gcm-apply ; do fi done -# want it in AppVM and StandaloneVM only +# want it in AppVM only for F in gnome-keyring-gpg gnome-keyring-pkcs11 gnome-keyring-secrets gnome-keyring-ssh gnome-settings-daemon user-dirs-update-gtk gsettings-data-convert ; do if [ -e /etc/xdg/autostart/$F.desktop ]; then remove_ShowIn $F - echo 'OnlyShowIn=GNOME;AppVM;StandaloneVM;' >> /etc/xdg/autostart/$F.desktop + echo 'OnlyShowIn=GNOME;AppVM;' >> /etc/xdg/autostart/$F.desktop fi done # remove existing rule to add own later -for F in gpk-update-icon nm-applet print-applet ; do +for F in gpk-update-icon nm-applet ; do remove_ShowIn $F done -echo 'OnlyShowIn=GNOME;StandaloneVM;TemplateVM;' >> /etc/xdg/autostart/gpk-update-icon.desktop || : +echo 'OnlyShowIn=GNOME;UpdateableVM;' >> /etc/xdg/autostart/gpk-update-icon.desktop || : echo 'OnlyShowIn=GNOME;NetVM;' >> /etc/xdg/autostart/nm-applet.desktop || : -echo 'OnlyShowIn=GNOME;AppVM;UtilityVM;' >> /etc/xdg/autostart/print-applet.desktop || : - -# start cups only in AppVM and UtilityVM -if [ -e /etc/init.d/cups ] && ! grep -q xenstore-read /etc/init.d/cups ; then - sed -i '/echo.*Starting /s#^#\ntype=$(/usr/bin/xenstore-read qubes_vm_type)\nif [ "$type" != "AppVM" -a "$type" != "UtilityVM" ]; then\nreturn 0\nfi\n\n#' /etc/init.d/cups -fi if [ "$1" != 1 ] ; then # do this whole %post thing only when updating for the first time... From 5001b7c9d71610ea9267a7b07c3625956f51978a Mon Sep 17 00:00:00 2001 From: Tomasz Sterna Date: Wed, 20 Apr 2011 01:01:38 +0200 Subject: [PATCH 16/90] Save VM updatable state in qubes_vm_updateable --- dom0/qvm-core/qubes.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 514deda6..4c2a232b 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -553,6 +553,11 @@ class QubesVm(object): "/local/domain/{0}/qubes_vm_type".format(xid), self.type]) + retcode = subprocess.check_call ([ + "/usr/bin/xenstore-write", + "/local/domain/{0}/qubes_vm_updateable".format(xid), + str(self.updateable)]) + if self.is_netvm(): retcode = subprocess.check_call ([ "/usr/bin/xenstore-write", From 50af1d15b31eeb2df66ac96f0d8fa2e1af00e421 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 20 Apr 2011 20:05:58 +0200 Subject: [PATCH 17/90] Catch more exceptions during VM restore (#212) --- dom0/qvm-tools/qvm-backup-restore | 41 ++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/dom0/qvm-tools/qvm-backup-restore b/dom0/qvm-tools/qvm-backup-restore index 991ebb4f..121956a8 100755 --- a/dom0/qvm-tools/qvm-backup-restore +++ b/dom0/qvm-tools/qvm-backup-restore @@ -318,18 +318,19 @@ def main(): for vm in [ vm for vm in vms_to_restore if vm.is_template()]: print "-> Adding Template VM {0}...".format(vm.name) updateable = vm.updateable - vm = host_collection.add_new_templatevm(vm.name, + try: + vm = host_collection.add_new_templatevm(vm.name, conf_file=vm.conf_file, dir_path=vm.dir_path, installed_by_rpm=False) - vm.updateable = updateable - try: + vm.updateable = updateable vm.verify_files() - except QubesException as err: + except Exception as err: print "ERROR: {0}".format(err) print "*** Skiping VM: {0}".vm.name - host_collection.pop(vm.qid) + if vm: 
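# Editor's note: the try/except added here turns any failure while adding
# or verifying a template into a per-VM skip instead of aborting the whole
# restore run.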
+ host_collection.pop(vm.qid) continue try: @@ -361,34 +362,50 @@ def main(): updateable = vm.updateable - vm = host_collection.add_new_appvm(vm.name, template_vm, + try: + vm = host_collection.add_new_appvm(vm.name, template_vm, conf_file=vm.conf_file, dir_path=vm.dir_path, updateable=updateable, label=vm.label) + except Exception as err: + print "ERROR: {0}".format(err) + print "*** Skiping VM: {0}".format(vm.name) + if vm: + host_collection.pop(vm.qid) + continue if not uses_default_netvm: vm.uses_default_netvm = False vm.netvm_vm = netvm_vm - if template_vm is not None and recreate_conf: - print "--> Recreating config file..." - vm.create_config_file() + try: + if template_vm is not None and recreate_conf: + print "--> Recreating config file..." + vm.create_config_file() + except QubesException as err: + print "ERROR xen config restore: {0}".format(err) + print "*** VM '{0}' will not boot until you manually fix it (or correctly restore this VM)!".format(vm.name) + + try: + vm.create_appmenus(verbose=True) + except QubesException as err: + print "ERROR during appmenu restore: {0}".format(err) + print "*** VM '{0}' will not have appmenus".format(vm.name) - vm.create_appmenus(verbose=True) try: vm.verify_files() except QubesException as err: print "ERROR: {0}".format(err) print "*** Skiping VM: {0}".format(vm.name) host_collection.pop(vm.qid) + continue try: vm.add_to_xen_storage() except (IOError, OSError) as err: print "ERROR: {0}".format(err) - print "*** Skiping VM: {0}".format(vm.name) - host_collection.pop(vm.qid) + print "*** VM '{0}' will not boot until you manually fix it (or correctly restore this VM)!".format(vm.name) backup_collection.unlock_db() From 655f13e2ec70714b316ce13921349bdd547b9cb5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 23 Apr 2011 02:31:54 +0200 Subject: [PATCH 18/90] Configure VM network iface on attach (not only on boot) (#190) --- common/qubes_network.rules | 2 ++ common/setup_ip | 12 ++++++++++++ rpm_spec/core-commonvm.spec | 6 ++++++ 3 files changed, 20 insertions(+) create mode 100644 common/qubes_network.rules create mode 100755 common/setup_ip diff --git a/common/qubes_network.rules b/common/qubes_network.rules new file mode 100644 index 00000000..077c8411 --- /dev/null +++ b/common/qubes_network.rules @@ -0,0 +1,2 @@ + +SUBSYSTEMS=="xen", KERNEL=="eth*", ACTION=="add", RUN+="/usr/lib/qubes/setup_ip" diff --git a/common/setup_ip b/common/setup_ip new file mode 100755 index 00000000..267d7ba3 --- /dev/null +++ b/common/setup_ip @@ -0,0 +1,12 @@ +#!/bin/sh + +ip=`/usr/bin/xenstore-read qubes_ip` +netmask=`/usr/bin/xenstore-read qubes_netmask` +gateway=`/usr/bin/xenstore-read qubes_gateway` +secondary_dns=`/usr/bin/xenstore-read qubes_secondary_dns` +if [ x$ip != x ]; then + /sbin/ifconfig $INTERFACE $ip netmask 255.255.255.255 up + /sbin/route add default dev $INTERFACE + echo "nameserver $gateway" > /etc/resolv.conf + echo "nameserver $secondary_dns" >> /etc/resolv.conf +fi diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec index bd0a5401..d4944b75 100644 --- a/rpm_spec/core-commonvm.spec +++ b/rpm_spec/core-commonvm.spec @@ -74,6 +74,10 @@ mkdir -p $RPM_BUILD_ROOT/usr/bin cp xenstore-watch $RPM_BUILD_ROOT/usr/bin mkdir -p $RPM_BUILD_ROOT/etc cp serial.conf $RPM_BUILD_ROOT/var/lib/qubes/ +mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d +cp qubes_network.rules $RPM_BUILD_ROOT/etc/udev/rules.d/ +mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes/ +cp setup_ip $RPM_BUILD_ROOT/usr/lib/qubes/ %triggerin -- initscripts cp 
/var/lib/qubes/serial.conf /etc/init/serial.conf @@ -220,3 +224,5 @@ rm -rf $RPM_BUILD_ROOT /etc/pki/rpm-gpg/RPM-GPG-KEY-qubes* /sbin/qubes_serial_login /usr/bin/xenstore-watch +/etc/udev/rules.d/qubes_network.rules +/usr/lib/qubes/setup_ip From 98f40281428219954c49d43186e031872d492704 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 23 Apr 2011 02:32:54 +0200 Subject: [PATCH 19/90] Connect vif's to already running VMs on NetVM/ProxyVM startup (#190) Also cleanup stale vifs using "xm network-detach ... -f" Fix iptables rules to support not only first vif of VM --- common/setup_ip | 3 ++- dom0/qvm-core/qubes.py | 57 +++++++++++++++++++++++++++++++++++++----- dom0/qvm-tools/qvm-run | 11 ++++++-- 3 files changed, 62 insertions(+), 9 deletions(-) diff --git a/common/setup_ip b/common/setup_ip index 267d7ba3..aec795e7 100755 --- a/common/setup_ip +++ b/common/setup_ip @@ -5,7 +5,8 @@ netmask=`/usr/bin/xenstore-read qubes_netmask` gateway=`/usr/bin/xenstore-read qubes_gateway` secondary_dns=`/usr/bin/xenstore-read qubes_secondary_dns` if [ x$ip != x ]; then - /sbin/ifconfig $INTERFACE $ip netmask 255.255.255.255 up + /sbin/ifconfig $INTERFACE $ip netmask 255.255.255.255 + /sbin/ifconfig $INTERFACE up /sbin/route add default dev $INTERFACE echo "nameserver $gateway" > /etc/resolv.conf echo "nameserver $secondary_dns" >> /etc/resolv.conf diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index ad70efe9..8d5b7564 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -543,6 +543,28 @@ class QubesVm(object): f_private.truncate (size) f_private.close () + def cleanup_vifs(self): + """ + Xend does not remove vif when backend domain is down, so we must do it + manually + """ + + if not self.is_running(): + return + + p = subprocess.Popen (["/usr/sbin/xm", "network-list", self.name], + stdout=subprocess.PIPE) + result = p.communicate() + for line in result[0].split('\n'): + m = re.match(r"^(\d+)\s*(\d+)", line) + if m: + retcode = subprocess.call(["/usr/sbin/xm", "list", m.group(2)], + stderr=subprocess.PIPE) + if retcode != 0: + # Don't check retcode - it always will fail when backend domain is down + subprocess.call(["/usr/sbin/xm", + "network-detach", self.name, m.group(1), "-f"], stderr=subprocess.PIPE) + def create_xenstore_entries(self, xid): if dry_run: return @@ -1371,6 +1393,29 @@ class QubesNetVm(QubesVm): return subprocess.check_call(command) + def start(self, debug_console = False, verbose = False, preparing_dvm=False): + if dry_run: + return + + xid=super(QubesNetVm, self).start(debug_console=debug_console, verbose=verbose) + + # Connect vif's of already running VMs + for vm in self.connected_vms.values(): + if not vm.is_running(): + continue + + if verbose: + print "--> Attaching network to '{0}'...".format(vm.name) + + # Cleanup stale VIFs + vm.cleanup_vifs() + + xm_cmdline = ["/usr/sbin/xm", "network-attach", vm.name, "script=vif-route-qubes", "ip="+vm.ip, "backend="+self.name ] + retcode = subprocess.call (xm_cmdline) + if retcode != 0: + print ("WARNING: Cannot attach to network to '{0}'!".format(vm.name)) + return xid + def add_external_ip_permission(self, xid): if int(xid) < 0: return @@ -1471,7 +1516,7 @@ class QubesProxyVm(QubesNetVm): continue iptables += "# '{0}' VM:\n".format(vm.name) - iptables += "-A FORWARD ! -s {0}/32 -i vif{1}.0 -j DROP\n".format(vm.ip, xid) + iptables += "-A FORWARD ! 
-s {0}/32 -i vif{1}.+ -j DROP\n".format(vm.ip, xid) accept_action = "ACCEPT" reject_action = "REJECT --reject-with icmp-host-prohibited" @@ -1484,7 +1529,7 @@ class QubesProxyVm(QubesNetVm): rules_action = accept_action for rule in conf["rules"]: - iptables += "-A FORWARD -i vif{0}.0 -d {1}".format(xid, rule["address"]) + iptables += "-A FORWARD -i vif{0}.+ -d {1}".format(xid, rule["address"]) if rule["netmask"] != 32: iptables += "/{0}".format(rule["netmask"]) @@ -1497,12 +1542,12 @@ class QubesProxyVm(QubesNetVm): if conf["allowDns"]: # PREROUTING does DNAT to NetVM DNSes, so we need self.netvm_vm. properties - iptables += "-A FORWARD -i vif{0}.0 -p udp -d {1} --dport 53 -j ACCEPT\n".format(xid,self.netvm_vm.gateway) - iptables += "-A FORWARD -i vif{0}.0 -p udp -d {1} --dport 53 -j ACCEPT\n".format(xid,self.netvm_vm.secondary_dns) + iptables += "-A FORWARD -i vif{0}.+ -p udp -d {1} --dport 53 -j ACCEPT\n".format(xid,self.netvm_vm.gateway) + iptables += "-A FORWARD -i vif{0}.+ -p udp -d {1} --dport 53 -j ACCEPT\n".format(xid,self.netvm_vm.secondary_dns) if conf["allowIcmp"]: - iptables += "-A FORWARD -i vif{0}.0 -p icmp -j ACCEPT\n".format(xid) + iptables += "-A FORWARD -i vif{0}.+ -p icmp -j ACCEPT\n".format(xid) - iptables += "-A FORWARD -i vif{0}.0 -j {1}\n".format(xid, default_action) + iptables += "-A FORWARD -i vif{0}.+ -j {1}\n".format(xid, default_action) iptables += "#End of VM rules\n" iptables += "-A FORWARD -j DROP\n" diff --git a/dom0/qvm-tools/qvm-run b/dom0/qvm-tools/qvm-run index 236f4a8b..e59f7741 100755 --- a/dom0/qvm-tools/qvm-run +++ b/dom0/qvm-tools/qvm-run @@ -166,6 +166,9 @@ def main(): parser.add_option ("--localcmd", action="store", dest="localcmd", default=None, help="With --pass_io, pass stdin/stdout/stderr to the given program") + parser.add_option ("--force", action="store_true", dest="force", default=False, + help="Force operation, even if may damage other VMs (eg shutdown of NetVM)") + (options, args) = parser.parse_args () @@ -218,8 +221,12 @@ def main(): # If stopping NetVM - stop connected VMs too if options.shutdown and vm.is_netvm(): - vms_list += [vm for vm in qvm_collection.get_vms_connected_to(vm.qid) if vm.is_running()] - vms_list.reverse() + connected_vms = [vm for vm in qvm_collection.get_vms_connected_to(vm.qid) if vm.is_running()] + if connected_vms and not options.force: + print "ERROR: There are other VMs connected to this VM, " + print " shutdown them first or use --force option" + print "VMs list: " + str([vm.name for vm in connected_vms]) + exit(1) if takes_cmd_argument: cmd = "{user}:{cmd}".format(user=options.user, cmd=cmdstr) From ac84bbe6219cf1507cec94ad510866907ae04673 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 27 Apr 2011 23:07:38 +0200 Subject: [PATCH 20/90] Remove correct lockfile on qubes_setupdvm stop --- dom0/init.d/qubes_setupdvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/init.d/qubes_setupdvm b/dom0/init.d/qubes_setupdvm index 26390536..ba4e5d02 100755 --- a/dom0/init.d/qubes_setupdvm +++ b/dom0/init.d/qubes_setupdvm @@ -44,7 +44,7 @@ start() stop() { - rm -f /var/lock/subsys/qubes_netvm + rm -f /var/lock/subsys/qubes_setupdvm success echo } From aa7df98b7e7e0874f23ac6d0279f797a61602617 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 29 Apr 2011 01:38:43 +0200 Subject: [PATCH 21/90] Use half of host memory as maxmem by default. Allow to configure it per VM. 
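[Editor's note: a small worked example, not part of the patch, of the new
default computed below in the QubesVm constructor.]

    # maxmem defaults to half of the host's physical memory, in MB.
    def default_maxmem(total_xen_memory_bytes, maxmem=None):
        if maxmem is not None:
            return maxmem
        total_mem_mb = total_xen_memory_bytes / 1024 / 1024
        return total_mem_mb / 2

    # e.g. on a 4 GiB host a VM gets maxmem = 2048 MB unless overridden
    assert default_maxmem(4 * 1024 * 1024 * 1024) == 2048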
--- dom0/qvm-core/qubes.py | 14 +++++++++++--- dom0/qvm-tools/qvm-prefs | 15 +++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 8d5b7564..f67cac83 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -179,6 +179,7 @@ class QubesVm(object): root_img = None, private_img = None, memory = default_memory, + maxmem = None, template_vm = None, firewall_conf = None, volatile_img = None, @@ -246,6 +247,12 @@ class QubesVm(object): self.memory = memory + if maxmem is None: + total_mem_mb = self.get_total_xen_memory()/1024/1024 + self.maxmem = total_mem_mb/2 + else: + self.maxmem = maxmem + self.template_vm = template_vm if template_vm is not None: if updateable: @@ -882,8 +889,7 @@ class QubesVm(object): print "--> Loading the VM (type = {0})...".format(self.type) if not self.is_netvm(): - total_mem_mb = self.get_total_xen_memory()/1024/1024 - xend_session.xend_server.xend.domain.maxmem_set(self.name, total_mem_mb) + xend_session.xend_server.xend.domain.maxmem_set(self.name, self.maxmem) mem_required = self.get_mem_dynamic_max() qmemman_client = QMemmanClient() @@ -993,6 +999,7 @@ class QubesVm(object): attrs["updateable"] = str(self.updateable) attrs["label"] = self.label.name attrs["memory"] = str(self.memory) + attrs["maxmem"] = str(self.maxmem) attrs["pcidevs"] = str(self.pcidevs) attrs["vcpus"] = str(self.vcpus) attrs["internal"] = str(self.internal) @@ -2004,7 +2011,8 @@ class QubesVmCollection(dict): common_attr_list = ("qid", "name", "dir_path", "conf_file", "private_img", "root_img", "template_qid", "installed_by_rpm", "updateable", "internal", - "uses_default_netvm", "label", "memory", "vcpus", "pcidevs") + "uses_default_netvm", "label", "memory", "vcpus", "pcidevs", + "maxmem" ) for attribute in common_attr_list: kwargs[attribute] = element.get(attribute) diff --git a/dom0/qvm-tools/qvm-prefs b/dom0/qvm-tools/qvm-prefs index 8ee3abdf..35b84b78 100755 --- a/dom0/qvm-tools/qvm-prefs +++ b/dom0/qvm-tools/qvm-prefs @@ -49,6 +49,8 @@ def do_list(vm): print fmt.format ("root volatile img", vm.volatile_img) print fmt.format ("private img", vm.private_img) + print fmt.format ("memory", vm.memory) + print fmt.format ("maxmem", vm.maxmem) def set_label(vms, vm, args): @@ -65,6 +67,17 @@ def set_label(vms, vm, args): vm.label = QubesVmLabels[label] subprocess.check_call (["ln", "-sf", vm.label.icon_path, vm.icon_path]) +def set_memory(vms, vm, args): + if len (args) != 1: + print "Missing memory argument!" + + vm.memory = int(args[0]) + +def set_maxmem(vms, vm, args): + if len (args) != 1: + print "Missing maxmem argument!" + + vm.maxmem = int(args[0]) def set_netvm(vms, vm, args): if len (args) != 1: @@ -153,6 +166,8 @@ properties = { "nonupdateable": set_nonupdateable, "label" : set_label, "netvm" : set_netvm, + "maxmem" : set_maxmem, + "memory" : set_memory, } From 1891954f71cafd51242441582e3301b0b6d24859 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 29 Apr 2011 02:26:25 +0200 Subject: [PATCH 22/90] Revert "Run nm-applet as normal user" This reverts commit 2f5b6e6582e71630193d0098d4cc60db019e1e9b. Dbus policy hacking not needed any more. ConsoleKit session is correctly started. 
---
 netvm/dbus-nm-applet.conf | 42 ---------------------------------------
 rpm_spec/core-netvm.spec | 9 ---------
 2 files changed, 51 deletions(-)
 delete mode 100644 netvm/dbus-nm-applet.conf

diff --git a/netvm/dbus-nm-applet.conf b/netvm/dbus-nm-applet.conf
deleted file mode 100644
index 0d0f0822..00000000
--- a/netvm/dbus-nm-applet.conf
+++ /dev/null
@@ -1,42 +0,0 @@
[42 deleted lines of D-Bus policy XML; the markup was lost in text
extraction, only a "512" limit value survives]
diff --git a/rpm_spec/core-netvm.spec b/rpm_spec/core-netvm.spec
index 07200b44..a4c966b9 100644
--- a/rpm_spec/core-netvm.spec
+++ b/rpm_spec/core-netvm.spec
@@ -66,9 +66,6 @@ mkdir -p $RPM_BUILD_ROOT/var/run/qubes
 mkdir -p $RPM_BUILD_ROOT/etc/xen/scripts
 cp ../common/vif-route-qubes $RPM_BUILD_ROOT/etc/xen/scripts

-mkdir -p $RPM_BUILD_ROOT/etc/dbus-1/system.d
-cp ../netvm/dbus-nm-applet.conf $RPM_BUILD_ROOT/etc/dbus-1/system.d/qubes-nm-applet.conf

 %post
 # Create NetworkManager configuration if we do not have it
@@ -91,11 +88,6 @@ if [ "$1" = 0 ] ; then
 	chkconfig qubes_core_netvm off
 fi

-%triggerin -- NetworkManager
-# Fix PolicyKit settings to allow run as normal user not visible to ConsoleKit
-sed 's#$#\0yes#' -i /usr/share/polkit-1/actions/org.freedesktop.NetworkManager.policy

 %clean
 rm -rf $RPM_BUILD_ROOT
@@ -108,4 +100,3 @@ rm -rf $RPM_BUILD_ROOT
 /etc/NetworkManager/dispatcher.d/qubes_nmhook
 /etc/NetworkManager/dispatcher.d/30-qubes_external_ip
 /etc/xen/scripts/vif-route-qubes
-/etc/dbus-1/system.d/qubes-nm-applet.conf

From f49c3a4224d083d4ce0844dc71ca84bf53d08530 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sun, 1 May 2011 00:32:04 +0200
Subject: [PATCH 23/90] Reduce dom0 priority bonus

So that e.g. kcryptd does not kill AppVM performance.

---
 dom0/init.d/qubes_core | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dom0/init.d/qubes_core b/dom0/init.d/qubes_core
index 9ce01bf1..86949f36 100755
--- a/dom0/init.d/qubes_core
+++ b/dom0/init.d/qubes_core
@@ -28,7 +28,7 @@ start()
 	chmod 660 /var/run/xend/xen-api.sock /var/run/xend/xmlrpc.sock
 	chgrp qubes /var/run/xenstored/*
 	chmod 660 /var/run/xenstored/*
-	xm sched-credit -d 0 -w 65535
+	xm sched-credit -d 0 -w 512
 	cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml
 	/usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs &

From 4a76bf2981d671d8dfcb0c19f568caaa2cd2bd65 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sun, 1 May 2011 12:02:27 +0200
Subject: [PATCH 24/90] Call xm to set maxmem, instead of direct call to xend.

The previous approach sometimes hangs, with xend spinning at 100% CPU.
This will also be simpler to port to the xl/libxl interface.
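[Editor's note: a minimal sketch, not part of the patch, of the replacement
call; it shells out to the xm CLI instead of going through xend's XML-RPC
maxmem_set, which per the message above could wedge xend.]

    # Set a domain's memory ceiling via the xm command-line tool.
    import subprocess

    def set_maxmem(vm_name, maxmem_mb):
        subprocess.check_call(['/usr/sbin/xm', 'mem-max',
                               vm_name, str(maxmem_mb)])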
--- dom0/qvm-core/qubes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index f67cac83..4c4ecca4 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -889,7 +889,7 @@ class QubesVm(object): print "--> Loading the VM (type = {0})...".format(self.type) if not self.is_netvm(): - xend_session.xend_server.xend.domain.maxmem_set(self.name, self.maxmem) + subprocess.check_call(['/usr/sbin/xm', 'mem-max', self.name, str(self.maxmem)]) mem_required = self.get_mem_dynamic_max() qmemman_client = QMemmanClient() From b57b41aafa42d90dda7076504a6ebd4991d074af Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 12 May 2011 15:20:26 +0200 Subject: [PATCH 25/90] dom0: qmemman: Support for maxmem != physical memory (#235) --- dom0/qmemman/qmemman.py | 2 ++ dom0/qmemman/qmemman_algo.py | 46 ++++++++++++++++++++++++++++++++---- 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 243ca6fa..8a235422 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -9,6 +9,7 @@ class DomainState: def __init__(self, id): self.meminfo = None #dictionary of memory info read from client self.memory_actual = None #the current memory size + self.memory_maximum = None #the maximum memory size self.mem_used = None #used memory, computed based on meminfo self.id = id #domain id self.last_target = 0 #the last memset target @@ -42,6 +43,7 @@ class SystemState: id = str(domain['domid']) if self.domdict.has_key(id): self.domdict[id].memory_actual = domain['mem_kb']*1024 + self.domdict[id].memory_maximum = domain['maxmem_kb']*1024 #the below works (and is fast), but then 'xm list' shows unchanged memory value def mem_set(self, id, val): diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index 43bbeefc..ee02b6e9 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -61,7 +61,7 @@ def prefmem(domain): #dom0 is special, as it must have large cache, for vbds. 
Thus, give it a special boost if domain.id == '0': return domain.mem_used*CACHE_FACTOR + 350*1024*1024 - return domain.mem_used*CACHE_FACTOR + return min(domain.mem_used*CACHE_FACTOR, domain.memory_maximum) def memory_needed(domain): #do not change @@ -104,8 +104,11 @@ def balloon(memsize, domain_dictionary): #redistribute positive "total_available_memory" of memory between domains, proportionally to prefmem def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pref, total_available_memory): - donors_rq = list() - acceptors_rq = list() + print 'balance_when_enough_memory(', xen_free_memory, total_mem_pref, total_available_memory, ')' + target_memory = {} + # memory not assigned because of static max + left_memory = 0 + acceptors_count = 0 for i in domain_dictionary.keys(): if domain_dictionary[i].meminfo is None: continue @@ -114,10 +117,41 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre target_nonint = prefmem(domain_dictionary[i]) + scale*total_available_memory #prevent rounding errors target = int(0.999*target_nonint) +#do not try to give more memory than static max + if target > domain_dictionary[i].memory_maximum: + left_memory += target-domain_dictionary[i].memory_maximum + target = domain_dictionary[i].memory_maximum + else: +# count domains which can accept more memory + acceptors_count += 1 + target_memory[i] = target +# distribute left memory across all acceptors + while left_memory > 0: + print ' left_memory:', left_memory, 'acceptors_count:', acceptors_count + new_left_memory = 0 + for i in target_memory.keys(): + target = target_memory[i] + if target < domain_dictionary[i].memory_maximum: + memory_bonus = int(0.999*(left_memory/acceptors_count)) + if target+memory_bonus >= domain_dictionary[i].memory_maximum: + new_left_memory += target+memory_bonus - domain_dictionary[i].memory_maximum + target = domain_dictionary[i].memory_maximum + acceptors_count -= 1 + else: + target += memory_bonus + target_memory[i] = target + left_memory = new_left_memory +# split target_memory dictionary to donors and acceptors +# this is needed to first get memory from donors and only then give it to acceptors + donors_rq = list() + acceptors_rq = list() + for i in target_memory.keys(): + target = target_memory[i] if (target < domain_dictionary[i].memory_actual): donors_rq.append((i, target)) else: acceptors_rq.append((i, target)) + # print 'balance(enough): xen_free_memory=', xen_free_memory, 'requests:', donors_rq + acceptors_rq return donors_rq + acceptors_rq @@ -140,7 +174,9 @@ def balance_when_low_on_memory(domain_dictionary, xen_free_memory, total_mem_pre for i in acceptors: scale = 1.0*prefmem(domain_dictionary[i])/total_mem_pref_acceptors target_nonint = domain_dictionary[i].memory_actual + scale*squeezed_mem - acceptors_rq.append((i, int(target_nonint))) +#do not try to give more memory than static max + target = min(int(0.999*target_nonint), domain_dictionary[i].memory_maximum) + acceptors_rq.append((i, target)) # print 'balance(low): xen_free_memory=', xen_free_memory, 'requests:', donors_rq + acceptors_rq return donors_rq + acceptors_rq @@ -171,7 +207,7 @@ def balance(xen_free_memory, domain_dictionary): continue need = memory_needed(domain_dictionary[i]) # print 'domain' , i, 'act/pref', domain_dictionary[i].memory_actual, prefmem(domain_dictionary[i]), 'need=', need - if need < 0: + if need < 0 or domain_dictionary[i].memory_actual >= domain_dictionary[i].memory_maximum: donors.append(i) else: acceptors.append(i) From 
dccc528144f55655a6d6250c9c35fb1c1694b016 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 12 May 2011 17:36:47 +0200
Subject: [PATCH 26/90] dom0: qmemman: distribute memory only if there are
 VMs which can accept it

This prevents a potential infinite loop in qmemman when free memory cannot
be assigned to any VM (because of static max). Practically this will never
happen, because dom0 can always accept memory.

---
 dom0/qmemman/qmemman_algo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py
index ee02b6e9..22c3feb5 100755
--- a/dom0/qmemman/qmemman_algo.py
+++ b/dom0/qmemman/qmemman_algo.py
@@ -126,7 +126,7 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre
 			acceptors_count += 1
 		target_memory[i] = target
 # distribute left memory across all acceptors
-	while left_memory > 0:
+	while left_memory > 0 and acceptors_count > 0:
 		print ' left_memory:', left_memory, 'acceptors_count:', acceptors_count
 		new_left_memory = 0
 		for i in target_memory.keys():

From ee87fff0d75d2ac7bb42d9581d669dfc75678989 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 12 May 2011 18:15:09 +0200
Subject: [PATCH 27/90] dom0: implement QubesVm.get_start_time() (#231)

Needed to check whether a VM was just started again.

---
 dom0/qvm-core/qubes.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index 4c4ecca4..885988a8 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -499,6 +499,20 @@ class QubesVm(object):
         else:
             return False

+    def get_start_time(self):
+        if not self.is_running():
+            return 0
+
+        try:
+            start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time']
+        except XenAPI.Failure:
+            self.refresh_xend_session()
+            if self.session_uuid is None:
+                return "NA"
+            start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time']
+
+        return start_time
+
     def is_outdated(self):
         # Makes sense only on VM based on template
         if self.template_vm is None:

From bb073c3cdb91775eef966d8bc4f9d4299da7f9ca Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 12 May 2011 19:15:24 +0200
Subject: [PATCH 28/90] vm: Remove root password to allow easy escalation from
 UI application (#202)

E.g. gpk-application needs this to work properly when run as a normal user.
When a root password is set, polkit-daemon asks for it (according to the
polkit settings).

---
 rpm_spec/core-commonvm.spec | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec
index d4944b75..9e484d02 100644
--- a/rpm_spec/core-commonvm.spec
+++ b/rpm_spec/core-commonvm.spec
@@ -129,12 +129,12 @@ done
 echo 'OnlyShowIn=GNOME;UpdateableVM;' >> /etc/xdg/autostart/gpk-update-icon.desktop || :
 echo 'OnlyShowIn=GNOME;NetVM;' >> /etc/xdg/autostart/nm-applet.desktop || :

+usermod -p '' root

 if [ "$1" != 1 ] ; then
 # do this whole %post thing only when updating for the first time...
 	exit 0
 fi

-usermod -L root
 if !
[ -f /var/lib/qubes/serial.orig ] ; then cp /etc/init/serial.conf /var/lib/qubes/serial.orig fi From b43baee22007ac8d6dd39d2ca19e4b126fd3ad90 Mon Sep 17 00:00:00 2001 From: Timo Juhani Lindfors Date: Tue, 10 May 2011 14:03:31 +0300 Subject: [PATCH 29/90] Ensure 'make clean' removes xenstore-watch --- common/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/Makefile b/common/Makefile index 4f8df460..3b015060 100644 --- a/common/Makefile +++ b/common/Makefile @@ -6,4 +6,4 @@ meminfo-writer: meminfo-writer.o xenstore-watch: xenstore-watch.o $(CC) -o xenstore-watch xenstore-watch.o -lxenstore clean: - rm -f meminfo-writer *.o *~ + rm -f meminfo-writer xenstore-watch *.o *~ From 6e1978055168a1c715e824f048b56a661eecccc4 Mon Sep 17 00:00:00 2001 From: Timo Juhani Lindfors Date: Tue, 10 May 2011 14:03:32 +0300 Subject: [PATCH 30/90] Ensure 'make clean' descends to u2mfn/ --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index b07baf6a..6b36c883 100644 --- a/Makefile +++ b/Makefile @@ -41,5 +41,6 @@ clean: (cd dom0/restore && make clean) (cd dom0/qmemman && make clean) (cd common && make clean) + (cd u2mfn && make clean) make -C qrexec clean make -C vchan clean From 215e788f6248c4e0632724c53bde01d6717ac717 Mon Sep 17 00:00:00 2001 From: Timo Juhani Lindfors Date: Tue, 10 May 2011 14:03:33 +0300 Subject: [PATCH 31/90] Add _GNU_SOURCE to get O_NOFOLLOW on debian squeeze. --- appvm/unpack.c | 1 + 1 file changed, 1 insertion(+) diff --git a/appvm/unpack.c b/appvm/unpack.c index 1c887713..76cee955 100644 --- a/appvm/unpack.c +++ b/appvm/unpack.c @@ -1,3 +1,4 @@ +#define _GNU_SOURCE /* For O_NOFOLLOW. */ #include #include #include From 773f0f7b7ac50810876c56846af65f73d7e91cbe Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 17 May 2011 10:44:15 +0200 Subject: [PATCH 32/90] dom0: Fix qvm-prefs for standalone VM --- dom0/qvm-tools/qvm-prefs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dom0/qvm-tools/qvm-prefs b/dom0/qvm-tools/qvm-prefs index 35b84b78..ecc5f4cc 100755 --- a/dom0/qvm-tools/qvm-prefs +++ b/dom0/qvm-tools/qvm-prefs @@ -32,7 +32,7 @@ def do_list(vm): print fmt.format ("name", vm.name) print fmt.format ("label", vm.label.name) print fmt.format ("type", vm.type) - if vm.is_appvm(): + if vm.template_vm is not None: print fmt.format ("template", vm.template_vm.name) if vm.netvm_vm is not None: print fmt.format ("netvm", vm.netvm_vm.name) @@ -44,9 +44,9 @@ def do_list(vm): print fmt.format ("root img", vm.root_img) if vm.is_template(): print fmt.format ("root COW img", vm.rootcow_img) - if vm.is_appvm(): + if vm.template_vm is not None: print fmt.format ("root img", vm.template_vm.root_img) - print fmt.format ("root volatile img", vm.volatile_img) + print fmt.format ("root volatile img", vm.volatile_img) print fmt.format ("private img", vm.private_img) print fmt.format ("memory", vm.memory) From b24cf454b34beefb1ad87d3758456a09ccbae2c1 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 17 May 2011 22:24:29 +0200 Subject: [PATCH 33/90] version 1.6.0 --- version_dom0 | 2 +- version_vm | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/version_dom0 b/version_dom0 index d532fd93..dc1e644a 100644 --- a/version_dom0 +++ b/version_dom0 @@ -1 +1 @@ -1.5.22 +1.6.0 diff --git a/version_vm b/version_vm index 07a45d78..dc1e644a 100644 --- a/version_vm +++ b/version_vm @@ -1 +1 @@ -1.5.21 +1.6.0 From e1cea1f50b96ef4bc44ab7bf2de5aff94459cf51 Mon Sep 17 00:00:00 2001 From: Marek 
Marczykowski Date: Fri, 20 May 2011 16:36:48 +0200 Subject: [PATCH 34/90] dom0: tool for sync desktop file templates (#45) --- dom0/qvm-core/qubes.py | 1 + dom0/qvm-tools/qvm-sync-appmenus | 192 +++++++++++++++++++++++++++++++ 2 files changed, 193 insertions(+) create mode 100755 dom0/qvm-tools/qvm-sync-appmenus diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 885988a8..67355b9f 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -47,6 +47,7 @@ if not dry_run: qubes_guid_path = "/usr/bin/qubes_guid" qrexec_daemon_path = "/usr/lib/qubes/qrexec_daemon" +qrexec_client_path = "/usr/lib/qubes/qrexec_client" qubes_base_dir = "/var/lib/qubes" diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus new file mode 100755 index 00000000..c0f46fd4 --- /dev/null +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -0,0 +1,192 @@ +#!/usr/bin/python +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2011 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +import subprocess +import re +import os +import sys +import fnmatch +from optparse import OptionParser +from qubes.qubes import QubesVmCollection,QubesException +from qubes.qubes import qrexec_client_path,default_appmenus_templates_subdir + +#TODO: qrexec_client_path + +# fields required to be present (and verified) in retrieved desktop file +required_fields = [ "Name", "Exec" ] + +# regexps for sanitization of retrieved values +std_re = re.compile(r"^[/a-zA-Z0-9.,&() -]*$") +fields_regexp = { + "Name": std_re, + "GenericName": std_re, + "Comment": std_re, + "Categories": re.compile(r"^[a-zA-Z0-9/.; -]*$"), + "Exec": re.compile(r"^[a-zA-Z0-9%>/:.= -]*$"), +} + +def get_appmenus(xid): + p = subprocess.Popen ([qrexec_client_path, '-d', str(xid), + 'user:grep -H = /usr/share/applications/*.desktop'], stdout=subprocess.PIPE) + untrusted_appmenulist = p.communicate()[0].split('\n') + if p.returncode != 0: + raise QubesException("Error getting application list") + + + row_no = 0 + appmenus = {} + line_rx = re.compile(r"([a-zA-Z0-9-.]+.desktop):([a-zA-Z0-9-]+(?:\[[a-zA-Z@_]+\])?)=(.*)") + ignore_rx = re.compile(r".*([a-zA-Z0-9-.]+.desktop):(#.*|\s+)$") + for untrusted_line in untrusted_appmenulist: + # Ignore blank lines and comments + if len(untrusted_line) == 0 or ignore_rx.match(untrusted_line): + continue + # use search instead of match to skip file path + untrusted_m = line_rx.search(untrusted_line) + if untrusted_m: + untrusted_key = untrusted_m.group(2) + untrusted_value = untrusted_m.group(3) + if fields_regexp.has_key(untrusted_key): + if fields_regexp[untrusted_key].match(untrusted_value): + # now values are sanitized + key = untrusted_key + value = untrusted_value + filename = untrusted_m.group(1) + + if not appmenus.has_key(filename): + appmenus[filename] = {} + + appmenus[filename][key]=value + 
else: + print "Warning: ignoring key %s: %s" % (untrusted_key, untrusted_value) + # else: ignore this key + else: + print "Warning: ignoring line: %s" % (untrusted_line); + + return appmenus + + +def create_template(path, values): + + # check if all required fields are present + for key in required_fields: + if not values.has_key(key): + print "Warning: not creating/updating '%s' because of missing '%s' key" % (path, key) + return + + desktop_file = open(path, "w") + desktop_file.write("[Desktop Entry]\n") + desktop_file.write("Version=1.0\n") + desktop_file.write("Type=Application\n") + desktop_file.write("Terminal=false\n") + desktop_file.write("X-Qubes-VmName=%VMNAME%\n") + desktop_file.write("Icon=%VMDIR%/icon.png\n") + for key in ["Name", "GenericName" ]: + if values.has_key(key): + desktop_file.write("{0}=%VMNAME%: {1}\n".format(key, values[key])) + + for key in [ "Comment", "Categories" ]: + if values.has_key(key): + desktop_file.write("{0}={1}\n".format(key, values[key])) + + desktop_file.write("Exec=qvm-run -q --tray -a %VMNAME% '{0}'\n".format(values['Exec'])) + desktop_file.close() + + +def main(): + + + usage = "usage: %prog [options] \n"\ + "Updates desktop file templates for given StandaloneVM or TemplateVM" + + parser = OptionParser (usage) + parser.add_option ("-v", "--verbose", action="store_true", dest="verbose", default=False) + + (options, args) = parser.parse_args () + if (len (args) != 1): + parser.error ("You must specify at least the VM name!") + + vmname=args[0] + + qvm_collection = QubesVmCollection() + qvm_collection.lock_db_for_reading() + qvm_collection.load() + qvm_collection.unlock_db() + + vm = qvm_collection.get_vm_by_name(vmname) + + if vm is None: + print "ERROR: A VM with the name '{0}' does not exist in the system.".format(vmname) + exit(1) + + if not vm.is_updateable(): + print "ERROR: To sync appmenus for non-updateable VM, do it on template instead" + exit(1) + + if not vm.is_running(): + print "ERROR: Appmenus can be retrieved only from running VM - start it first" + exit(1) + + # Get appmenus from VM + xid = vm.get_xid() + assert xid > 0 + + new_appmenus = get_appmenus(xid) + + if len(new_appmenus) == 0: + print "ERROR: No appmenus received, terminating" + exit(1) + + # Contstruct path to work also for StandaloneVM + appmenus_templates = vm.dir_path + '/' + default_appmenus_templates_subdir + assert os.path.exists(appmenus_templates) + + # Create new/update existing templates + if options.verbose: + print "--> Got {0} appmenus, storing to disk".format(str(len(new_appmenus))) + for appmenu_file in new_appmenus.keys(): + if options.verbose: + if os.path.exists(appmenus_templates + '/' + appmenu_file): + print "---> Updating {0}".format(appmenu_file) + else: + print "---> Creating {0}".format(appmenu_file) + create_template(appmenus_templates + '/' + appmenu_file, new_appmenus[appmenu_file]) + + # Delete appmenus of remove applications + if options.verbose: + print "--> Cleaning old files" + for appmenu_file in os.listdir(appmenus_templates): + if not fnmatch.fnmatch(appmenu_file, '*.desktop'): + continue + + if not new_appmenus.has_key(appmenu_file): + if options.verbose: + print "---> Removing {0}".format(appmenu_file) + os.unlink(appmenus_templates + '/' + appmenu_file) + +main() + + + + + + From dee7c69156368c51b5da8070f7df5339ff6b57e0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 24 May 2011 00:09:44 +0200 Subject: [PATCH 35/90] Create appmenus only for whitelisted apps (if set) (#45) --- dom0/aux-tools/create_apps_for_appvm.sh | 6 
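The part of qvm-sync-appmenus worth generalizing is its treatment of VM-supplied data: every value keeps an untrusted_ prefix until it has matched a per-key whitelist regexp. A standalone sketch of that idiom (regexps trimmed from the script above; the sample inputs are made up):

    #!/usr/bin/python
    import re

    std_re = re.compile(r"^[/a-zA-Z0-9.,&() -]*$")
    fields_regexp = {
        "Name": std_re,
        "Exec": re.compile(r"^[a-zA-Z0-9%>/:.= -]*$"),
    }

    def sanitize(untrusted_key, untrusted_value):
        # unknown keys and non-whitelisted values are dropped, not escaped
        if untrusted_key not in fields_regexp:
            return None
        if not fields_regexp[untrusted_key].match(untrusted_value):
            return None
        return untrusted_key, untrusted_value   # now considered sanitized

    print sanitize("Name", "Firefox Web Browser")   # accepted
    print sanitize("Exec", "firefox; rm -rf ~")     # None: ';' and '~' rejected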
+++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/dom0/aux-tools/create_apps_for_appvm.sh b/dom0/aux-tools/create_apps_for_appvm.sh
index a57525df..8b01596d 100755
--- a/dom0/aux-tools/create_apps_for_appvm.sh
+++ b/dom0/aux-tools/create_apps_for_appvm.sh
@@ -37,7 +37,11 @@ mkdir -p $APPSDIR

 if [ "$SRCDIR" != "none" ]; then
     echo "--> Converting Appmenu Templates..."
-    find $SRCDIR -name "*.desktop" -exec /usr/lib/qubes/convert_apptemplate2vm.sh {} $APPSDIR $VMNAME $VMDIR \;
+    if [ -r "$VMDIR/whitelisted-appmenus.list" ]; then
+        cat $VMDIR/whitelisted-appmenus.list | xargs -I{} /usr/lib/qubes/convert_apptemplate2vm.sh $SRCDIR/{} $APPSDIR $VMNAME $VMDIR
+    else
+        find $SRCDIR -name "*.desktop" $CHECK_WHITELISTED -exec /usr/lib/qubes/convert_apptemplate2vm.sh {} $APPSDIR $VMNAME $VMDIR \;
+    fi
     /usr/lib/qubes/convert_dirtemplate2vm.sh $SRCDIR/qubes-*.directory.template $APPSDIR/$VMNAME-vm.directory $VMNAME $VMDIR
 fi

From df0240c218edeaae29ad97f7e46b039f493cdd83 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 24 May 2011 00:10:17 +0200
Subject: [PATCH 36/90] Remove desktop files after uninstalling them (#45)

---
 dom0/aux-tools/remove_appvm_appmenus.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/dom0/aux-tools/remove_appvm_appmenus.sh b/dom0/aux-tools/remove_appvm_appmenus.sh
index dad609a2..93a1ef8b 100755
--- a/dom0/aux-tools/remove_appvm_appmenus.sh
+++ b/dom0/aux-tools/remove_appvm_appmenus.sh
@@ -14,5 +14,6 @@ fi

 if ls $APPSDIR/*.directory $APPSDIR/*.desktop > /dev/null 2>&1; then
     xdg-desktop-menu uninstall $APPSDIR/*.directory $APPSDIR/*.desktop
+    rm -f $APPSDIR/*.desktop $APPSDIR/*.directory
 fi

From 4f33e17e69d8b61e6cf1e25f30f5104f6bb18f3a Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Tue, 24 May 2011 00:14:03 +0200
Subject: [PATCH 37/90] Set appmenus_templates_dir also for StandaloneVM (#45)

A StandaloneVM also has appmenus templates - retrieved from the VM. The user
can choose some of them for the real menu.
---
 dom0/qvm-core/qubes.py           |  3 +++
 dom0/qvm-tools/qvm-sync-appmenus | 16 ++++++----------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index 67355b9f..83fd4bc2 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -268,6 +268,9 @@ class QubesVm(object):
         else:
             assert self.root_img is not None, "Missing root_img for standalone VM!"
+ if updateable: + self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir + # By default allow use all VCPUs if vcpus is None: qubes_host = QubesHost() diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index c0f46fd4..58e3af7a 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -27,9 +27,7 @@ import sys import fnmatch from optparse import OptionParser from qubes.qubes import QubesVmCollection,QubesException -from qubes.qubes import qrexec_client_path,default_appmenus_templates_subdir - -#TODO: qrexec_client_path +from qubes.qubes import qrexec_client_path # fields required to be present (and verified) in retrieved desktop file required_fields = [ "Name", "Exec" ] @@ -156,32 +154,30 @@ def main(): print "ERROR: No appmenus received, terminating" exit(1) - # Contstruct path to work also for StandaloneVM - appmenus_templates = vm.dir_path + '/' + default_appmenus_templates_subdir - assert os.path.exists(appmenus_templates) + assert os.path.exists(vm.appmenus_templates_dir) # Create new/update existing templates if options.verbose: print "--> Got {0} appmenus, storing to disk".format(str(len(new_appmenus))) for appmenu_file in new_appmenus.keys(): if options.verbose: - if os.path.exists(appmenus_templates + '/' + appmenu_file): + if os.path.exists(appmenus_templates_dir + '/' + appmenu_file): print "---> Updating {0}".format(appmenu_file) else: print "---> Creating {0}".format(appmenu_file) - create_template(appmenus_templates + '/' + appmenu_file, new_appmenus[appmenu_file]) + create_template(appmenus_templates_dir + '/' + appmenu_file, new_appmenus[appmenu_file]) # Delete appmenus of remove applications if options.verbose: print "--> Cleaning old files" - for appmenu_file in os.listdir(appmenus_templates): + for appmenu_file in os.listdir(appmenus_templates_dir): if not fnmatch.fnmatch(appmenu_file, '*.desktop'): continue if not new_appmenus.has_key(appmenu_file): if options.verbose: print "---> Removing {0}".format(appmenu_file) - os.unlink(appmenus_templates + '/' + appmenu_file) + os.unlink(appmenus_templates_dir + '/' + appmenu_file) main() From 086c41cb9f73c92b28944abcffc99c2f40b15f2c Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 1 Jun 2011 23:31:56 +0200 Subject: [PATCH 38/90] dom0 qmemman: watch /local/domain xenstore tree for new/deleted domains This is the place where _running_ domains are placed. --- dom0/qmemman/qmemman_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index 7dcc0325..f6b768b5 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -29,7 +29,7 @@ class WatchType: class XS_Watcher: def __init__(self): self.handle = xen.lowlevel.xs.xs() - self.handle.watch('/vm', WatchType(XS_Watcher.domain_list_changed, None)) + self.handle.watch('/local/domain', WatchType(XS_Watcher.domain_list_changed, None)) self.watch_token_dict = {} def domain_list_changed(self, param): From c789121f84ce6ef09f586febd4a0997eb089edb5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 1 Jun 2011 23:44:06 +0200 Subject: [PATCH 39/90] dom0: migrate from xend to libxl stack - qvm-core This is core part of migration. 
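PATCH 38 repoints the watch at /local/domain, which (as its message notes) is where running domains appear. A minimal sketch of the watch mechanism XS_Watcher builds on, assuming the xen.lowlevel.xs bindings in dom0 and enough privileges to talk to xenstored:

    #!/usr/bin/python
    import xen.lowlevel.xs

    handle = xen.lowlevel.xs.xs()
    # fires once immediately, then on every domain start/stop
    handle.watch('/local/domain', 'domain-list')

    while True:
        path, token = handle.read_watch()   # blocks until a change
        if token == 'domain-list':
            print 'domain list changed:', path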
Things not migrated yet: - DispVM (qubes_restore needs to be almost rewritten) - VM xen config files should be fixed (use "script:" prefix in block device description, perhaps generate this files on VM start) Huge, slow xend not needed any more, now it conflicts with libxl --- dom0/init.d/qubes_core | 12 +- dom0/init.d/qubes_netvm | 4 +- dom0/pm-utils/01qubes-suspend-netvm | 4 +- dom0/qvm-core/qubes.py | 425 +++++++++------------ dom0/qvm-tools/qvm-ls | 9 +- dom0/qvm-tools/qvm-run | 12 +- dom0/restore/qfile-daemon-dvm | 4 +- dom0/restore/qubes_prepare_saved_domain.sh | 6 +- rpm_spec/core-dom0.spec | 10 +- 9 files changed, 214 insertions(+), 272 deletions(-) diff --git a/dom0/init.d/qubes_core b/dom0/init.d/qubes_core index 86949f36..6d4866fd 100755 --- a/dom0/init.d/qubes_core +++ b/dom0/init.d/qubes_core @@ -20,6 +20,7 @@ start() { echo -n $"Executing Qubes Core scripts:" + modprobe evtchn chgrp qubes /etc/xen chmod 710 /etc/xen chgrp qubes /var/run/xend @@ -28,7 +29,16 @@ start() chmod 660 /var/run/xend/xen-api.sock /var/run/xend/xmlrpc.sock chgrp qubes /var/run/xenstored/* chmod 660 /var/run/xenstored/* - xm sched-credit -d 0 -w 512 + chgrp qubes /var/lib/xen + chmod 770 /var/lib/xen + chgrp qubes /var/log/xen + chmod 770 /var/log/xen + chgrp qubes /proc/xen/privcmd + chmod 660 /proc/xen/privcmd + chgrp qubes /dev/xen/evtchn + chmod 660 /dev/xen/evtchn + + xl sched-credit -d 0 -w 512 cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml /usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs & diff --git a/dom0/init.d/qubes_netvm b/dom0/init.d/qubes_netvm index 6c05dd2d..c27aad63 100755 --- a/dom0/init.d/qubes_netvm +++ b/dom0/init.d/qubes_netvm @@ -22,10 +22,10 @@ NETVM=$(qvm-get-default-netvm) get_running_netvms() { # Actually get running VMs with PCI devices attached - RUNNING_VMS=`xm list --state=Running | tail -n +3 | cut -f 1 -d " "` + RUNNING_VMS=`xl list | tail -n +3 | cut -f 1 -d " "` RUNNING_NETVMS="" for VM in $RUNNING_VMS; do - if [ -n "`xm pci-list $VM`" ]; then + if [ -n "`xl pci-list $VM`" ]; then echo "$VM" fi done diff --git a/dom0/pm-utils/01qubes-suspend-netvm b/dom0/pm-utils/01qubes-suspend-netvm index dca33246..d6e362d1 100755 --- a/dom0/pm-utils/01qubes-suspend-netvm +++ b/dom0/pm-utils/01qubes-suspend-netvm @@ -4,10 +4,10 @@ get_running_netvms() { # Actually get running VMs with PCI devices attached - RUNNING_VMS=`xm list --state=Running | tail -n +3 | cut -f 1 -d " "` + RUNNING_VMS=`xl list | tail -n +3 | cut -f 1 -d " "` RUNNING_NETVMS="" for VM in $RUNNING_VMS; do - if [ -n "`xm pci-list $VM`" ]; then + if [ -n "`xl pci-list $VM|tail -n +2`" ]; then echo "$VM" fi done diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 83fd4bc2..b9b16595 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -29,6 +29,8 @@ import xml.parsers.expat import fcntl import re import shutil +import uuid +import time from datetime import datetime from qmemman_client import QMemmanClient @@ -44,6 +46,10 @@ if not dry_run: from xen.xm import XenAPI from xen.xend import sxp + import xen.lowlevel.xc + import xen.lowlevel.xl + import xen.lowlevel.xs + qubes_guid_path = "/usr/bin/qubes_guid" qrexec_daemon_path = "/usr/lib/qubes/qrexec_daemon" @@ -56,6 +62,7 @@ qubes_templates_dir = qubes_base_dir + "/vm-templates" qubes_servicevms_dir = qubes_base_dir + "/servicevms" qubes_store_filename = qubes_base_dir + "/qubes.xml" +qubes_max_xid = 1024 qubes_max_qid = 254 qubes_max_netid = 254 vm_default_netmask = 
"255.255.255.0" @@ -86,41 +93,19 @@ dom0_vm = None qubes_appmenu_create_cmd = "/usr/lib/qubes/create_apps_for_appvm.sh" qubes_appmenu_remove_cmd = "/usr/lib/qubes/remove_appvm_appmenus.sh" -class XendSession(object): - def __init__(self): - self.get_xend_session_old_api() - self.get_xend_session_new_api() - - def get_xend_session_old_api(self): - from xen.xend import XendClient - from xen.util.xmlrpcclient import ServerProxy - self.xend_server = ServerProxy(XendClient.uri) - if self.xend_server is None: - print "get_xend_session_old_api(): cannot open session!" - - - def get_xend_session_new_api(self): - xend_socket_uri = "httpu:///var/run/xend/xen-api.sock" - self.session = XenAPI.Session (xend_socket_uri) - self.session.login_with_password ("", "") - if self.session is None: - print "get_xend_session_new_api(): cannot open session!" - - -if not dry_run: - xend_session = XendSession() - class QubesException (Exception) : pass +if not dry_run: + xc = xen.lowlevel.xc.xc() + xs = xen.lowlevel.xs.xs() + xl_ctx = xen.lowlevel.xl.ctx() class QubesHost(object): def __init__(self): - self.hosts = xend_session.session.xenapi.host.get_all() - self.host_record = xend_session.session.xenapi.host.get_record(self.hosts[0]) - self.host_metrics_record = xend_session.session.xenapi.host_metrics.get_record(self.host_record["metrics"]) + self.physinfo = xc.physinfo() - self.xen_total_mem = long(self.host_metrics_record["memory_total"]) - self.xen_no_cpus = len (self.host_record["host_CPUs"]) + self.xen_total_mem = long(self.physinfo['total_memory']) + self.xen_no_cpus = self.physinfo['nr_cpus'] # print "QubesHost: total_mem = {0}B".format (self.xen_total_mem) # print "QubesHost: free_mem = {0}".format (self.get_free_xen_memory()) @@ -135,9 +120,36 @@ class QubesHost(object): return self.xen_no_cpus def get_free_xen_memory(self): - ret = self.host_metrics_record["memory_free"] + ret = self.physinfo['free_memory'] return long(ret) + # measure cpu usage for all domains at once + def measure_cpu_usage(self, previous=None, previous_time = None, wait_time=1): + if previous is None: + previous_time = time.time() + previous = {} + info = xc.domain_getinfo(0, qubes_max_xid) + for vm in info: + previous[vm['domid']] = {} + previous[vm['domid']]['cpu_time'] = vm['cpu_time']/vm['online_vcpus'] + previous[vm['domid']]['cpu_usage'] = 0 + time.sleep(wait_time) + + current_time = time.time() + current = {} + info = xc.domain_getinfo(0, qubes_max_xid) + for vm in info: + current[vm['domid']] = {} + current[vm['domid']]['cpu_time'] = vm['cpu_time']/vm['online_vcpus'] + if vm['domid'] in previous.keys(): + current[vm['domid']]['cpu_usage'] = \ + float(current[vm['domid']]['cpu_time'] - previous[vm['domid']]['cpu_time']) \ + / long(1000**3) / (current_time-previous_time) * 100 + else: + current[vm['domid']]['cpu_usage'] = 0 + + return (current_time, current) + class QubesVmLabel(object): def __init__(self, name, index, color = None, icon = None): self.name = name @@ -249,7 +261,8 @@ class QubesVm(object): self.memory = memory if maxmem is None: - total_mem_mb = self.get_total_xen_memory()/1024/1024 + host = QubesHost() + total_mem_mb = host.memory_total/1024 self.maxmem = total_mem_mb/2 else: self.maxmem = maxmem @@ -268,6 +281,11 @@ class QubesVm(object): else: assert self.root_img is not None, "Missing root_img for standalone VM!" 
+ if template_vm is not None: + self.kernels_dir = template_vm.kernels_dir + else: + self.kernels_dir = self.dir_path + "/" + default_kernels_subdir + if updateable: self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir @@ -281,8 +299,8 @@ class QubesVm(object): # Internal VM (not shown in qubes-manager, doesn't create appmenus entries self.internal = internal - if not dry_run and xend_session.session is not None: - self.refresh_xend_session() + self.xid = -1 + self.xid = self.get_xid() @property def qid(self): @@ -355,116 +373,81 @@ class QubesVm(object): def is_disposablevm(self): return isinstance(self, QubesDisposableVm) - def add_to_xen_storage(self): + def get_xl_dominfo(self): if dry_run: return - retcode = subprocess.call (["/usr/sbin/xm", "new", "-q", self.conf_file]) - if retcode != 0: - raise OSError ("Cannot add VM '{0}' to Xen Store!".format(self.name)) + domains = xl_ctx.list_domains() + for dominfo in domains: + domname = xl_ctx.domid_to_name(dominfo.domid) + if domname == self.name: + return dominfo + return None - return True - - def remove_from_xen_storage(self): + def get_xc_dominfo(self): if dry_run: return - retcode = subprocess.call (["/usr/sbin/xm", "delete", self.name]) - if retcode != 0: - raise OSError ("Cannot remove VM '{0}' from Xen Store!".format(self.name)) - - self.in_xen_storage = False - - def refresh_xend_session(self): - uuids = xend_session.session.xenapi.VM.get_by_name_label (self.name) - self.session_uuid = uuids[0] if len (uuids) > 0 else None - if self.session_uuid is not None: - self.session_metrics = xend_session.session.xenapi.VM.get_metrics(self.session_uuid) - else: - self.session_metrics = None - - def update_xen_storage(self): - try: - self.remove_from_xen_storage() - except OSError as ex: - print "WARNING: {0}. 
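The measure_cpu_usage() method above replaces per-VM XenAPI queries with one pass over all domains: two xc.domain_getinfo() snapshots, with each domain's cpu_time delta divided by the elapsed wall-clock time. A sketch of the intended call pattern (it mirrors the qvm-ls change later in this patch; assumes a dom0 with the qubes.qubes module installed):

    #!/usr/bin/python
    from qubes.qubes import QubesHost

    host = QubesHost()
    # no previous sample: the method records one itself, sleeps wait_time,
    # then diffs against it
    t1, usage = host.measure_cpu_usage(wait_time=1)
    for domid in usage:
        print 'dom %d: %.1f%%' % (domid, usage[domid]['cpu_usage'])

    # later calls reuse the previous sample, so no extra sleep is needed
    t2, usage2 = host.measure_cpu_usage(previous=usage, previous_time=t1)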
Continuing anyway...".format(str(ex)) - pass - self.add_to_xen_storage() - if not dry_run and xend_session.session is not None: - self.refresh_xend_session() + start_xid = self.xid + if start_xid < 0: + start_xid = 0 + domains = xc.domain_getinfo(start_xid, qubes_max_xid-start_xid) + for dominfo in domains: + domname = xl_ctx.domid_to_name(dominfo['domid']) + if domname == self.name: + return dominfo + return None def get_xid(self): if dry_run: return 666 - try: - xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid)) - except XenAPI.Failure: - self.refresh_xend_session() - xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid)) + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['domid'] + else: + return -1 - return xid + def get_uuid(self): + + dominfo = self.get_xl_dominfo() + if dominfo: + uuid = uuid.UUID(''.join('%02x' % b for b in dominfo.uuid)) + return uuid + else: + return None def get_mem(self): if dry_run: return 666 - try: - mem = int (xend_session.session.xenapi.VM_metrics.get_memory_actual (self.session_metrics)) - except XenAPI.Failure: - self.refresh_xend_session() - mem = int (xend_session.session.xenapi.VM_metrics.get_memory_actual (self.session_metrics)) - - return mem + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['mem_kb'] + else: + return 0 def get_mem_static_max(self): if dry_run: return 666 - try: - mem = int(xend_session.session.xenapi.VM.get_memory_static_max(self.session_uuid)) - except XenAPI.Failure: - self.refresh_xend_session() - mem = int(xend_session.session.xenapi.VM.get_memory_static_max(self.session_uuid)) + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['maxmem_kb'] + else: + return 0 - return mem - - def get_mem_dynamic_max(self): - if dry_run: - return 666 - - try: - mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid)) - except XenAPI.Failure: - self.refresh_xend_session() - mem = int(xend_session.session.xenapi.VM.get_memory_dynamic_max(self.session_uuid)) - - return mem - - - def get_cpu_total_load(self): + def get_per_cpu_time(self): if dry_run: import random return random.random() * 100 - try: - cpus_util = xend_session.session.xenapi.VM_metrics.get_VCPUs_utilisation (self.session_metrics) - except XenAPI.Failure: - self.refresh_xend_session() - cpus_util = xend_session.session.xenapi.VM_metrics.get_VCPUs_utilisation (self.session_metrics) - - if len (cpus_util) == 0: + dominfo = self.get_xc_dominfo() + if dominfo: + return dominfo['cpu_time']/dominfo['online_vcpus'] + else: return 0 - cpu_total_load = 0.0 - for cpu in cpus_util: - cpu_total_load += cpus_util[cpu] - cpu_total_load /= len(cpus_util) - p = 100*cpu_total_load - if p > 100: - p = 100 - return p - def get_disk_utilization_root_img(self): if not os.path.exists(self.root_img): return 0 @@ -481,15 +464,22 @@ class QubesVm(object): if dry_run: return "NA" - try: - power_state = xend_session.session.xenapi.VM.get_power_state (self.session_uuid) - except XenAPI.Failure: - self.refresh_xend_session() - if self.session_uuid is None: - return "NA" - power_state = xend_session.session.xenapi.VM.get_power_state (self.session_uuid) + dominfo = self.get_xc_dominfo() + if dominfo: + if dominfo['paused']: + return "Paused" + elif dominfo['shutdown']: + return "Halted" + elif dominfo['crashed']: + return "Crashed" + elif dominfo['dying']: + return "Dying" + else: + return "Running" + else: + return 'Halted' - return power_state + return "NA" def is_running(self): if self.get_power_state() 
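libxc can only enumerate domains by numeric id, so the new get_xc_dominfo() resolves a name by scanning ids and asking libxl for each one's name. The same logic as a standalone sketch, assuming the xen.lowlevel.xc binding plus the Xen 4.1-era xen.lowlevel.xl binding used throughout this patch; 'netvm' is a hypothetical VM name:

    #!/usr/bin/python
    import xen.lowlevel.xc
    import xen.lowlevel.xl

    xc = xen.lowlevel.xc.xc()
    xl_ctx = xen.lowlevel.xl.ctx()
    qubes_max_xid = 1024

    def find_domain(name):
        for dominfo in xc.domain_getinfo(0, qubes_max_xid):
            if xl_ctx.domid_to_name(dominfo['domid']) == name:
                return dominfo
        return None

    info = find_domain('netvm')
    if info:
        print 'xid=%d, mem=%d kB' % (info['domid'], info['mem_kb'])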
== "Running": @@ -507,13 +497,13 @@ class QubesVm(object): if not self.is_running(): return 0 - try: - start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time'] - except XenAPI.Failure: - self.refresh_xend_session() - if self.session_uuid is None: - return "NA" - start_time = xend_session.session.xenapi.VM_metrics.get_record (self.session_metrics)['start_time'] + dominfo = self.get_xl_dominfo() + + uuid = self.get_uuid() + + xs_trans = xs.transaction_start() + start_time = xs.read(xs_trans, "/vm/%s/start_time" % str(uuid)) + xs.transaction_end() return start_time @@ -527,14 +517,14 @@ class QubesVm(object): rootimg_inode = os.stat(self.template_vm.root_img) rootcow_inode = os.stat(self.template_vm.rootcow_img) - + current_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format( rootimg_inode[2], rootimg_inode[1], rootcow_inode[2], rootcow_inode[1]) # Don't know why, but 51712 is xvda # backend node name not available through xenapi :( - p = subprocess.Popen (["xenstore-read", + p = subprocess.Popen (["xenstore-read", "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())], stdout=subprocess.PIPE) used_dmdev = p.communicate()[0].strip() @@ -577,76 +567,63 @@ class QubesVm(object): if not self.is_running(): return - p = subprocess.Popen (["/usr/sbin/xm", "network-list", self.name], + p = subprocess.Popen (["/usr/sbin/xl", "network-list", self.name], stdout=subprocess.PIPE) result = p.communicate() for line in result[0].split('\n'): m = re.match(r"^(\d+)\s*(\d+)", line) if m: - retcode = subprocess.call(["/usr/sbin/xm", "list", m.group(2)], + retcode = subprocess.call(["/usr/sbin/xl", "list", m.group(2)], stderr=subprocess.PIPE) if retcode != 0: # Don't check retcode - it always will fail when backend domain is down - subprocess.call(["/usr/sbin/xm", - "network-detach", self.name, m.group(1), "-f"], stderr=subprocess.PIPE) + subprocess.call(["/usr/sbin/xl", + "network-detach", self.name, m.group(1)], stderr=subprocess.PIPE) def create_xenstore_entries(self, xid): if dry_run: return + domain_path = xs.get_domain_path(xid) + # Set Xen Store entires with VM networking info: + xs_trans = xs.transaction_start() - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_vm_type".format(xid), - self.type]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_vm_updateable".format(xid), - str(self.updateable)]) + xs.write(xs_trans, "{0}/qubes_vm_type".format(domain_path), + self.type) + xs.write(xs_trans, "{0}/qubes_vm_updateable".format(domain_path), + str(self.updateable)) if self.is_netvm(): - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netvm_gateway".format(xid), - self.gateway]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netvm_secondary_dns".format(xid), - self.secondary_dns]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netvm_netmask".format(xid), - self.netmask]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netvm_network".format(xid), - self.network]) + xs.write(xs_trans, + "{0}/qubes_netvm_gateway".format(domain_path), + self.gateway) + xs.write(xs_trans, + "{0}/qubes_netvm_secondary_dns".format(domain_path), + self.secondary_dns) + xs.write(xs_trans, + "{0}/qubes_netvm_netmask".format(domain_path), + self.netmask) + xs.write(xs_trans, + 
"{0}/qubes_netvm_network".format(domain_path), + self.network) if self.netvm_vm is not None: - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_ip".format(xid), - self.ip]) + xs.write(xs_trans, "{0}/qubes_ip".format(domain_path), self.ip) + xs.write(xs_trans, "{0}/qubes_netmask".format(domain_path), + self.netvm_vm.netmask) + xs.write(xs_trans, "{0}/qubes_gateway".format(domain_path), + self.netvm_vm.gateway) + xs.write(xs_trans, + "{0}/qubes_secondary_dns".format(domain_path), + self.netvm_vm.secondary_dns) - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netmask".format(xid), - self.netvm_vm.netmask]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_gateway".format(xid), - self.netvm_vm.gateway]) - - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_secondary_dns".format(xid), - self.netvm_vm.secondary_dns]) + # Fix permissions + xs.set_permissions(xs_trans, '{0}/device'.format(domain_path), + [{ 'dom': xid }]) + xs.set_permissions(xs_trans, '{0}/memory'.format(domain_path), + [{ 'dom': xid }]) + xs.transaction_end(xs_trans) def create_config_file(self, source_template = None): if source_template is None: @@ -883,13 +860,6 @@ class QubesVm(object): return conf - def get_total_xen_memory(self): - hosts = xend_session.session.xenapi.host.get_all() - host_record = xend_session.session.xenapi.host.get_record(hosts[0]) - host_metrics_record = xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"]) - ret = host_metrics_record["memory_total"] - return long(ret) - def start(self, debug_console = False, verbose = False, preparing_dvm = False): if dry_run: return @@ -898,32 +868,28 @@ class QubesVm(object): raise QubesException ("VM is already running!") self.reset_volatile_storage() - - if verbose: - print "--> Rereading the VM's conf file ({0})...".format(self.conf_file) - self.update_xen_storage() - if verbose: print "--> Loading the VM (type = {0})...".format(self.type) - if not self.is_netvm(): - subprocess.check_call(['/usr/sbin/xm', 'mem-max', self.name, str(self.maxmem)]) - - mem_required = self.get_mem_dynamic_max() + mem_required = int(self.memory) * 1024 * 1024 qmemman_client = QMemmanClient() if not qmemman_client.request_memory(mem_required): qmemman_client.close() raise MemoryError ("ERROR: insufficient memory to start this VM") + xl_cmdline = ['/usr/sbin/xl', 'create', self.conf_file, '-p'] + if not self.is_netvm(): + xl_cmdline += ['maxmem={0}'.format(self.maxmem)] + try: - xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused + subprocess.check_call(xl_cmdline) except XenAPI.Failure: - self.refresh_xend_session() - xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused + raise QubesException("Failed to load VM config") + finally: + qmemman_client.close() # let qmemman_daemon resume balancing - qmemman_client.close() # let qmemman_daemon resume balancing - - xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid)) + xid = self.get_xid() + self.xid = xid if verbose: print "--> Setting Xen Store info for the VM..." 
@@ -937,17 +903,17 @@ class QubesVm(object): actual_ip = "254.254.254.254" else: actual_ip = self.ip - xm_cmdline = ["/usr/sbin/xm", "network-attach", self.name, "script=vif-route-qubes", "ip="+actual_ip] + xl_cmdline = ["/usr/sbin/xl", "network-attach", self.name, "script=/etc/xen/scripts/vif-route-qubes", "ip="+actual_ip] if self.netvm_vm.qid != 0: if not self.netvm_vm.is_running(): self.netvm_vm.start() - retcode = subprocess.call (xm_cmdline + ["backend={0}".format(self.netvm_vm.name)]) + retcode = subprocess.call (xl_cmdline + ["backend={0}".format(self.netvm_vm.name)]) if retcode != 0: self.force_shutdown() raise OSError ("ERROR: Cannot attach to network backend!") else: - retcode = subprocess.call (xm_cmdline) + retcode = subprocess.call (xl_cmdline) if retcode != 0: self.force_shutdown() raise OSError ("ERROR: Cannot attach to network backend!") @@ -965,7 +931,7 @@ class QubesVm(object): if verbose: print "--> Starting the VM..." - xend_session.session.xenapi.VM.unpause (self.session_uuid) + xc.domain_unpause(xid) if not preparing_dvm: if verbose: @@ -976,6 +942,7 @@ class QubesVm(object): raise OSError ("ERROR: Cannot execute qrexec_daemon!") # perhaps we should move it before unpause and fork? + # FIXME: this uses obsolete xm api if debug_console: from xen.xm import console if verbose: @@ -988,11 +955,8 @@ class QubesVm(object): if dry_run: return - try: - xend_session.session.xenapi.VM.hard_shutdown (self.session_uuid) - except XenAPI.Failure: - self.refresh_xend_session() - xend_session.session.xenapi.VM.hard_shutdown (self.session_uuid) + subprocess.call (['/usr/sbin/xl', 'destroy', self.name]) + #xc.domain_destroy(self.get_xid()) def remove_from_disk(self): if dry_run: @@ -1082,7 +1046,6 @@ class QubesTemplateVm(QubesVm): standalonevms_conf_file if standalonevms_conf_file is not None else default_standalonevms_conf_file) self.templatevm_conf_template = self.dir_path + "/" + default_templatevm_conf_template - self.kernels_dir = self.dir_path + "/" + default_kernels_subdir self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir self.appmenus_template_templates_dir = self.dir_path + "/" + default_appmenus_template_templates_subdir self.appvms = QubesVmCollection() @@ -1435,7 +1398,7 @@ class QubesNetVm(QubesVm): # Cleanup stale VIFs vm.cleanup_vifs() - xm_cmdline = ["/usr/sbin/xm", "network-attach", vm.name, "script=vif-route-qubes", "ip="+vm.ip, "backend="+self.name ] + xm_cmdline = ["/usr/sbin/xl", "network-attach", vm.name, "script=vif-route-qubes", "ip="+vm.ip, "backend="+self.name ] retcode = subprocess.call (xm_cmdline) if retcode != 0: print ("WARNING: Cannot attach to network to '{0}'!".format(vm.name)) @@ -1599,46 +1562,10 @@ class QubesDom0NetVm(QubesNetVm): private_img = None, template_vm = None, label = default_template_label) - if not dry_run and xend_session.session is not None: - self.session_hosts = xend_session.session.xenapi.host.get_all() - self.session_cpus = xend_session.session.xenapi.host.get_host_CPUs(self.session_hosts[0]) - def is_running(self): return True - def get_cpu_total_load(self): - if dry_run: - import random - return random.random() * 100 - - cpu_total_load = 0.0 - for cpu in self.session_cpus: - cpu_total_load += xend_session.session.xenapi.host_cpu.get_utilisation(cpu) - cpu_total_load /= len(self.session_cpus) - p = 100*cpu_total_load - if p > 100: - p = 100 - return p - - def get_mem(self): - - # Unfortunately XenAPI provides only info about total memory, not the one actually usable by Dom0... 
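The create_xenstore_entries() rewrite above also changes the failure model: grouping the writes in one xenstore transaction makes the qubes_* keys appear atomically instead of one subprocess call at a time. The core of that pattern in isolation (dom0 only; the xid value is hypothetical):

    #!/usr/bin/python
    import xen.lowlevel.xs

    xs = xen.lowlevel.xs.xs()
    xid = 5                                  # hypothetical running domain
    domain_path = xs.get_domain_path(xid)    # e.g. /local/domain/5

    trans = xs.transaction_start()
    xs.write(trans, '%s/qubes_vm_type' % domain_path, 'AppVM')
    xs.write(trans, '%s/qubes_ip' % domain_path, '10.2.0.5')
    # let the domain itself access entries under its device subtree
    xs.set_permissions(trans, '%s/device' % domain_path, [{'dom': xid}])
    xs.transaction_end(trans)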
- #session = get_xend_session_new_api() - #hosts = session.xenapi.host.get_all() - #metrics = session.xenapi.host.get_metrics(hosts[0]) - #memory_total = int(session.xenapi.metrics.get_memory_total(metrics)) - - # ... so we must read /proc/meminfo, just like free command does - f = open ("/proc/meminfo") - for line in f: - match = re.match(r"^MemTotal\:\s*(\d+) kB", line) - if match is not None: - break - f.close() - assert match is not None - return int(match.group(1))*1024 - def get_xid(self): return 0 diff --git a/dom0/qvm-tools/qvm-ls b/dom0/qvm-tools/qvm-ls index a5b4c006..36abafab 100755 --- a/dom0/qvm-tools/qvm-ls +++ b/dom0/qvm-tools/qvm-ls @@ -21,6 +21,7 @@ # from qubes.qubes import QubesVmCollection +from qubes.qubes import QubesHost from qubes.qubes import QubesException from optparse import OptionParser @@ -59,8 +60,8 @@ fields = { "xid" : {"func" : "vm.get_xid() if vm.is_running() else '-'"}, - "mem" : {"func" : "(str(vm.get_mem()/1024/1024) + ' MB') if vm.is_running() else '-'"}, - "cpu" : {"func" : "round (vm.get_cpu_total_load(), 1) if vm.is_running() else '-'"}, + "mem" : {"func" : "(str(vm.get_mem()/1024) + ' MB') if vm.is_running() else '-'"}, + "cpu" : {"func" : "round (cpu_usages[vm.get_xid()]['cpu_usage'], 1) if vm.is_running() else '-'"}, "disk": {"func" : "str(vm.get_disk_utilization()/(1024*1024)) + ' MB'"}, "state": {"func" : "vm.get_power_state()"}, @@ -114,10 +115,14 @@ def main(): fields_to_display = ["name", "on", "state", "updbl", "type", "template", "netvm", "label" ] + cpu_usages = None + if (options.ids): fields_to_display += ["qid", "xid"] if (options.cpu): + qhost = QubesHost() + (measure_time, cpu_usages) = qhost.measure_cpu_usage() fields_to_display += ["cpu"] if (options.mem): diff --git a/dom0/qvm-tools/qvm-run b/dom0/qvm-tools/qvm-run index e59f7741..936006b6 100755 --- a/dom0/qvm-tools/qvm-run +++ b/dom0/qvm-tools/qvm-run @@ -79,19 +79,19 @@ def vm_run_cmd(vm, cmd, options): if options.shutdown: if options.verbose: print "Shutting down VM: '{0}'...".format(vm.name) - subprocess.call (["/usr/sbin/xm", "shutdown", vm.name]) + subprocess.call (["/usr/sbin/xl", "shutdown", vm.name]) return if options.pause: if options.verbose: print "Pausing VM: '{0}'...".format(vm.name) - subprocess.call (["/usr/sbin/xm", "pause", vm.name]) + subprocess.call (["/usr/sbin/xl", "pause", vm.name]) return if options.unpause: if options.verbose: print "UnPausing VM: '{0}'...".format(vm.name) - subprocess.call (["/usr/sbin/xm", "unpause", vm.name]) + subprocess.call (["/usr/sbin/xl", "unpause", vm.name]) return if options.verbose: @@ -152,13 +152,13 @@ def main(): help="Wait for the VM(s) to shutdown") parser.add_option ("--shutdown", action="store_true", dest="shutdown", default=False, - help="Do 'xm shutdown' for the VM(s) (can be combined this with --all and --wait)") + help="Do 'xl shutdown' for the VM(s) (can be combined this with --all and --wait)") parser.add_option ("--pause", action="store_true", dest="pause", default=False, - help="Do 'xm pause' for the VM(s) (can be combined this with --all and --wait)") + help="Do 'xl pause' for the VM(s) (can be combined this with --all and --wait)") parser.add_option ("--unpause", action="store_true", dest="unpause", default=False, - help="Do 'xm unpause' for the VM(s) (can be combined this with --all and --wait)") + help="Do 'xl unpause' for the VM(s) (can be combined this with --all and --wait)") parser.add_option ("--pass_io", action="store_true", dest="passio", default=False, help="Pass stdin/stdout/stderr from remote 
program") diff --git a/dom0/restore/qfile-daemon-dvm b/dom0/restore/qfile-daemon-dvm index 6a41db24..c166c78d 100755 --- a/dom0/restore/qfile-daemon-dvm +++ b/dom0/restore/qfile-daemon-dvm @@ -130,8 +130,8 @@ def main(): lockf.close() if dispname is not None: subprocess.call(['/usr/lib/qubes/qrexec_client', '-d', dispname, 'directly:user:/usr/lib/qubes/dvm_file_editor']) - subprocess.call(['/usr/sbin/xm', 'destroy', dispname]) + subprocess.call(['/usr/sbin/xl', 'destroy', dispname]) qfile.remove_disposable_from_qdb(dispname) main() - \ No newline at end of file + diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh index bd1da7e9..ff37b829 100755 --- a/dom0/restore/qubes_prepare_saved_domain.sh +++ b/dom0/restore/qubes_prepare_saved_domain.sh @@ -45,15 +45,15 @@ xenstore-write /local/domain/$ID/qubes_save_request 1 xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem xenstore-read /local/domain/$ID/qubes_gateway | \ cut -d . -f 2 | tr -d "\n" > $VMDIR/netvm_id.txt -xm block-detach $1 /dev/xvdb +xl block-detach $1 /dev/xvdb MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem) echo "DVM boot complete, memory used=$MEM. Saving image..." QMEMMAN_STOP=/var/run/qubes/do-not-membalance touch $QMEMMAN_STOP -xm mem-set $1 $(($MEM/1000)) +xl mem-set $1 $(($MEM/1000)) sleep 1 touch $2 -if ! xm save $1 $2 ; then +if ! xl save $1 $2 ; then rm -f $QMEMMAN_STOP exit 1 fi diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 879e296d..04a46724 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -40,7 +40,7 @@ BuildRequires: xen-devel Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-qubes-dom0 Conflicts: qubes-gui-dom0 < 1.1.13 Requires: NetworkManager >= 0.8.1-1 -Requires: xen >= 3.4.3-6 +Requires: xen >= 4.1.0-2 %define _builddir %(pwd)/dom0 %description @@ -159,6 +159,10 @@ chkconfig --level 5 qubes_core on || echo "WARNING: Cannot enable service qubes_ chkconfig --level 5 qubes_netvm on || echo "WARNING: Cannot enable service qubes_netvm!" chkconfig --level 5 qubes_setupdvm on || echo "WARNING: Cannot enable service qubes_setupdvm!" +# Conflicts with libxl stack, so disable it +service xend stop +chkconfig --level 5 xend off + HAD_SYSCONFIG_NETWORK=yes if ! [ -e /etc/sysconfig/network ]; then HAD_SYSCONFIG_NETWORK=no @@ -169,10 +173,6 @@ fi # Load evtchn module - xenstored needs it modprobe evtchn -# Now launch xend - we will need it for subsequent steps -service xenstored start -service xend start - if ! [ -e /var/lib/qubes/qubes.xml ]; then # echo "Initializing Qubes DB..." 
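Across qvm-run, qfile-daemon-dvm and the shell scripts, the migration is mechanical: the same verbs, with /usr/sbin/xm replaced by /usr/sbin/xl. A tiny illustrative wrapper in the style the tools use (the VM name is made up):

    #!/usr/bin/python
    import subprocess

    def xl(*args):
        # mirrors the subprocess.call() pattern used by the tools
        return subprocess.call(['/usr/sbin/xl'] + list(args))

    xl('pause', 'work')       # was: xm pause work
    xl('unpause', 'work')
    xl('shutdown', 'work')    # as in qvm-run --shutdown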
umask 007; sg qubes -c qvm-init-storage From cb1fbfc1451091270134bc3616019d5d26cb578a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 2 Jun 2011 00:07:22 +0200 Subject: [PATCH 40/90] dom0: store xid in QubesVm on get_xid() --- dom0/qvm-core/qubes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index b9b16595..f4dcc43e 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -404,7 +404,8 @@ class QubesVm(object): dominfo = self.get_xc_dominfo() if dominfo: - return dominfo['domid'] + self.xid = dominfo['domid'] + return self.xid else: return -1 From fac1f7f107b92e37c5aa6c827e236ca685576864 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 2 Jun 2011 01:20:01 +0200 Subject: [PATCH 41/90] dom0: Set xid=0 for QubesDom0NetVm --- dom0/qvm-core/qubes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index f4dcc43e..2abcb4c4 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1563,6 +1563,7 @@ class QubesDom0NetVm(QubesNetVm): private_img = None, template_vm = None, label = default_template_label) + self.xid = 0 def is_running(self): return True From cc4df5089da1017a57ebd2424c439ad8107980a1 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 2 Jun 2011 01:20:23 +0200 Subject: [PATCH 42/90] dom0: XC/XL infos for dom0 --- dom0/qvm-core/qubes.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 2abcb4c4..a4678a65 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1589,6 +1589,23 @@ class QubesDom0NetVm(QubesNetVm): def start(self, debug_console = False, verbose = False): raise QubesException ("Cannot start Dom0 fake domain!") + def get_xl_dominfo(self): + if dry_run: + return + + domains = xl_ctx.list_domains() + for dominfo in domains: + if dominfo.domid == 0: + return dominfo + return None + + def get_xc_dominfo(self): + if dry_run: + return + + domains = xc.domain_getinfo(0, 1) + return domains[0] + def create_xml_element(self): return None From f5751bfea7582a608478b296356ac5cafdd47f6a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 4 Jun 2011 02:44:27 +0200 Subject: [PATCH 43/90] dom0: prevent division by zero on calculating cpu usage When VM is starting online_vcpus=0 for short time. 
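This fix and the next one (PATCH 44) harden the same lookup path, as the diffs below show: a starting VM can briefly report online_vcpus == 0, and libxc raises an exception when the cached start xid has already vanished. Both guards combined in a standalone sketch (illustrative; dom0 only):

    #!/usr/bin/python
    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.xc()

    def per_vcpu_cpu_time(start_xid, max_xid=1024):
        try:
            domains = xc.domain_getinfo(start_xid, max_xid - start_xid)
        except xen.lowlevel.xc.Error:
            return None    # assume the domain is simply not running
        result = {}
        for vm in domains:
            # avoid ZeroDivisionError in the short window at VM start
            result[vm['domid']] = vm['cpu_time'] / max(vm['online_vcpus'], 1)
        return result

    print per_vcpu_cpu_time(0)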
---
 dom0/qvm-core/qubes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index a4678a65..d289f9d6 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -140,7 +140,7 @@ class QubesHost(object):
         info = xc.domain_getinfo(0, qubes_max_xid)
         for vm in info:
             current[vm['domid']] = {}
-            current[vm['domid']]['cpu_time'] = vm['cpu_time']/vm['online_vcpus']
+            current[vm['domid']]['cpu_time'] = vm['cpu_time']/max(vm['online_vcpus'],1)
             if vm['domid'] in previous.keys():
                 current[vm['domid']]['cpu_usage'] = \
                     float(current[vm['domid']]['cpu_time'] - previous[vm['domid']]['cpu_time']) \

From 7b2ac4b2795bb7e363d3ccf11d0735f4ed49e626 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sat, 4 Jun 2011 02:46:12 +0200
Subject: [PATCH 44/90] dom0: catch error when no VM found by libxc (assume not running)

---
 dom0/qvm-core/qubes.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index d289f9d6..aaf3c696 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -391,7 +391,11 @@ class QubesVm(object):
         start_xid = self.xid
         if start_xid < 0:
             start_xid = 0
-        domains = xc.domain_getinfo(start_xid, qubes_max_xid-start_xid)
+        try:
+            domains = xc.domain_getinfo(start_xid, qubes_max_xid-start_xid)
+        except xen.lowlevel.xc.Error:
+            return None
+
         for dominfo in domains:
             domname = xl_ctx.domid_to_name(dominfo['domid'])
             if domname == self.name:

From 48d520f8475ed4b3ac11380595c3daf56a39e5ca Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sat, 4 Jun 2011 02:49:50 +0200
Subject: [PATCH 45/90] proxyvm: display the error msg directly, besides writing it to xenstore

---
 proxyvm/bin/qubes_firewall | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/proxyvm/bin/qubes_firewall b/proxyvm/bin/qubes_firewall
index 6f1cc267..2a0963de 100755
--- a/proxyvm/bin/qubes_firewall
+++ b/proxyvm/bin/qubes_firewall
@@ -19,6 +19,9 @@ while true; do
     IPTABLES_SAVE=$(/sbin/iptables-save | sed '/^\*filter/,/^COMMIT/d')
     OUT=`echo -e "$RULES\n$IPTABLES_SAVE" | /sbin/iptables-restore 2>&1 || :`
     /usr/bin/xenstore-write $XENSTORE_ERROR "$OUT"
+    if [ "$OUT" ]; then
+        DISPLAY=:0 /usr/bin/notify-send -t 3000 "Firewall loading error ($HOSTNAME)" "$OUT" || :
+    fi

     if [[ -z "$OUT" ]]; then
         # If OK save it for later

From 9ce2f440c351be03a26ee2379ec2f19764a8f1a4 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sun, 5 Jun 2011 22:58:20 +0200
Subject: [PATCH 46/90] dom0: remove import of old xend libraries

---
 dom0/qvm-core/qubes.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py
index aaf3c696..bca0013d 100755
--- a/dom0/qvm-core/qubes.py
+++ b/dom0/qvm-core/qubes.py
@@ -41,11 +41,6 @@
 dry_run = False

 if not dry_run:
-    # Xen API
-    import xmlrpclib
-    from xen.xm import XenAPI
-    from xen.xend import sxp
-
     import xen.lowlevel.xc
     import xen.lowlevel.xl
     import xen.lowlevel.xs
@@ -888,7 +883,7 @@

         try:
             subprocess.check_call(xl_cmdline)
-        except XenAPI.Failure:
+        except:
             raise QubesException("Failed to load VM config")
         finally:
             qmemman_client.close() # let qmemman_daemon resume balancing

From d3e6e3dec03a55c50edcca05e3ba9ac9abf73ec5 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sun, 5 Jun 2011 23:35:53 +0200
Subject: [PATCH 47/90] dom0: use xen.lowlevel.xs instead of calling xenstore-*

---
 dom0/qvm-core/qubes.py | 34 ++++++++++------------------
 1 file changed, 10 insertions(+), 24
deletions(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index bca0013d..07771975 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -524,10 +524,7 @@ class QubesVm(object): # Don't know why, but 51712 is xvda # backend node name not available through xenapi :( - p = subprocess.Popen (["xenstore-read", - "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())], - stdout=subprocess.PIPE) - used_dmdev = p.communicate()[0].strip() + used_dmdev = xs.read('', "/local/domain/0/backend/vbd/{0}/51712/node".format(self.get_xid())) return used_dmdev != current_dmdev @@ -1356,10 +1353,7 @@ class QubesNetVm(QubesVm): return super(QubesNetVm, self).create_xenstore_entries(xid) - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_netvm_external_ip".format(xid), - ""]) + xs.write('', "/local/domain/{0}/qubes_netvm_external_ip".format(xid), '') self.update_external_ip_permissions(xid) def update_external_ip_permissions(self, xid = -1): @@ -1453,24 +1447,19 @@ class QubesProxyVm(QubesNetVm): return super(QubesProxyVm, self).create_xenstore_entries(xid) - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_iptables_error".format(xid), - ""]) - retcode = subprocess.check_call ([ - "/usr/bin/xenstore-chmod", - "/local/domain/{0}/qubes_iptables_error".format(xid), - "r{0}".format(xid), "w{0}".format(xid)]) + xs_trans = xs.start_transaction() + xs.write(xs_trans, "/local/domain/{0}/qubes_iptables_error".format(xid), '') + xs.set_permissions(xs_trans, "/local/domain/{0}/qubes_iptables_error".format(xid), + [{ 'dom': xid, 'write': True }]) + xs.end_transaction(xs_trans) self.write_iptables_xenstore_entry() def write_netvm_domid_entry(self, xid = -1): if xid < 0: xid = self.get_xid() - return subprocess.check_call ([ - "/usr/bin/xenstore-write", "--", - "/local/domain/{0}/qubes_netvm_domid".format(xid), - "{0}".format(self.netvm_vm.get_xid())]) + xs.write('', "/local/domain/{0}/qubes_netvm_domid".format(xid), + "{0}".format(self.netvm_vm.get_xid())) def write_iptables_xenstore_entry(self): iptables = "# Generated by Qubes Core on {0}\n".format(datetime.now().ctime()) @@ -1545,10 +1534,7 @@ class QubesProxyVm(QubesNetVm): self.write_netvm_domid_entry() self.rules_applied = None - return subprocess.check_call ([ - "/usr/bin/xenstore-write", - "/local/domain/{0}/qubes_iptables".format(self.get_xid()), - iptables]) + xs.write('', "/local/domain/{0}/qubes_iptables".format(self.get_xid()), iptables) def get_xml_attrs(self): attrs = super(QubesProxyVm, self).get_xml_attrs() From 5ebd163fd36360778df90664f5a7e0b7cfe66c5a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 01:17:28 +0200 Subject: [PATCH 48/90] dom0: check RLIMIT_MEMLOCK before starting VM (and fix if possible) --- dom0/qvm-core/qubes.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 07771975..d9e7d101 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -31,6 +31,7 @@ import re import shutil import uuid import time +import resource from datetime import datetime from qmemman_client import QMemmanClient @@ -868,6 +869,14 @@ class QubesVm(object): if verbose: print "--> Loading the VM (type = {0})...".format(self.type) + limit_memlock = resource.getrlimit(resource.RLIMIT_MEMLOCK) + # try to increase limit if needed + if limit_memlock[0] < int(self.memory) * 1024: + # intentionally don't catch exceptions - if it fails - there is no + # 
memory for new VM + resource.setrlimit(resource.RLIMIT_MEMLOCK, + (int(self.memory) * 1024, limit_memlock[1])) + mem_required = int(self.memory) * 1024 * 1024 qmemman_client = QMemmanClient() if not qmemman_client.request_memory(mem_required): From 62111845eaf9a0feddd6c9fe8cecd640a2568fac Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 01:42:04 +0200 Subject: [PATCH 49/90] dom0: set memlock limit to unlimited for qubes users Needed to 'xl create' work --- dom0/misc/limits-qubes.conf | 3 +++ rpm_spec/core-dom0.spec | 3 +++ 2 files changed, 6 insertions(+) create mode 100644 dom0/misc/limits-qubes.conf diff --git a/dom0/misc/limits-qubes.conf b/dom0/misc/limits-qubes.conf new file mode 100644 index 00000000..88198a69 --- /dev/null +++ b/dom0/misc/limits-qubes.conf @@ -0,0 +1,3 @@ +# xl create needs to allocate and mlock all VM memory +@qubes soft memlock unlimited +@qubes hard memlock unlimited diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 04a46724..1ab2d358 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -121,6 +121,8 @@ mkdir -p $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/ cp ../common/qubes_nmhook $RPM_BUILD_ROOT/etc/NetworkManager/dispatcher.d/ mkdir -p $RPM_BUILD_ROOT/etc/sysconfig cp ../common/iptables $RPM_BUILD_ROOT/etc/sysconfig +mkdir -p $RPM_BUILD_ROOT/etc/security/limits.d +cp misc/limits-qubes.conf $RPM_BUILD_ROOT/etc/security/limits.d/99-qubes.conf mkdir -p $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d cp pm-utils/01qubes-sync-vms-clock $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d/ @@ -291,3 +293,4 @@ fi %{_libdir}/libu2mfn.so /etc/sudoers.d/qubes /etc/xdg/autostart/qubes-guid.desktop +/etc/security/limits.d/99-qubes.conf From 6dd0870ca6166014404cec058cdf19466ec645e8 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 01:43:52 +0200 Subject: [PATCH 50/90] dom0: Generate Xen VM config file from common template, on each VM start Do not use many different config templates for different types of VMs. 
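The RLIMIT_MEMLOCK check from PATCH 48 in isolation: 'xl create' needs to allocate and mlock all VM memory, so the soft limit must be high enough before forking it. On Linux this limit is counted in bytes; the sketch below sizes it for the full VM memory, and the 400 MB figure is illustrative:

    #!/usr/bin/python
    import resource

    def ensure_memlock(memory_mb):
        needed = memory_mb * 1024 * 1024
        soft, hard = resource.getrlimit(resource.RLIMIT_MEMLOCK)
        if soft != resource.RLIM_INFINITY and soft < needed:
            # raising soft beyond hard fails for unprivileged processes;
            # intentionally not caught - the VM cannot start without it
            resource.setrlimit(resource.RLIMIT_MEMLOCK, (needed, hard))

    ensure_memlock(400)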
Also regenerate config on each VM start to keep in synchronized with qubes.xml --- dom0/misc/vm-template.conf | 28 +++++++ dom0/qvm-core/qubes.py | 160 +++++++++++-------------------------- 2 files changed, 73 insertions(+), 115 deletions(-) create mode 100644 dom0/misc/vm-template.conf diff --git a/dom0/misc/vm-template.conf b/dom0/misc/vm-template.conf new file mode 100644 index 00000000..df138c85 --- /dev/null +++ b/dom0/misc/vm-template.conf @@ -0,0 +1,28 @@ +# +# This is a Xen VM config file for Qubes VM +# DO NOT EDIT - autogenerated by qubes tools +# + +kernel="{kerneldir}/vmlinuz" +ramdisk="{kerneldir}/initramfs" +extra="ro nomodeset xencons=hvc rd_NO_PLYMOUTH 3 {kernelopts}" +root="/dev/mapper/dmroot" + +memory = {mem} +maxmem = {maxmem} +name = "{name}" + +disk = [ {rootdev} + {privatedev} + {volatiledev} + ] + +vif = [ {netdev} ] + +pci = [ {pcidev} ] + +vcpus = {vcpus} + +on_poweroff = 'destroy' +on_reboot = 'destroy' +on_crash = 'destroy' diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index d9e7d101..15dba02f 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -68,10 +68,6 @@ default_rootcow_img = "root-cow.img" default_volatile_img = "volatile.img" default_clean_volatile_img = "clean-volatile.img.tar" default_private_img = "private.img" -default_appvms_conf_file = "appvm-template.conf" -default_netvms_conf_file = "netvm-template.conf" -default_standalonevms_conf_file = "standalone-template.conf" -default_templatevm_conf_template = "templatevm.conf" # needed for TemplateVM cloning default_appmenus_templates_subdir = "apps.templates" default_appmenus_template_templates_subdir = "apps-template.templates" default_kernels_subdir = "kernels" @@ -623,38 +619,43 @@ class QubesVm(object): [{ 'dom': xid }]) xs.transaction_end(xs_trans) - def create_config_file(self, source_template = None): + def get_rootdev(self, source_template=None): + if self.template_vm: + return "'script:snapshot:{dir}/root.img:{dir}/root-cow.img,xvda,r',".format(dir=self.template_vm.dir_path) + else: + return "'script:file:{dir}/root.img,xvda,w',".format(dir=self.dir_path) + + def get_config_params(self, source_template=None): + args = {} + args['name'] = self.name + args['kerneldir'] = self.kernels_dir + args['vmdir'] = self.dir_path + args['pcidev'] = self.pcidevs + args['mem'] = str(self.memory) + args['maxmem'] = str(self.maxmem) + args['vcpus'] = str(self.vcpus) + args['netdev'] = '' + args['rootdev'] = self.get_rootdev(source_template=source_template) + args['privatedev'] = "'script:file:{dir}/private.img,xvdb,w',".format(dir=self.dir_path) + args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path) + args['kernelopts'] = '' + + return args + + def create_config_file(self, file_path = None, source_template = None): + if file_path is None: + file_path = self.conf_file if source_template is None: source_template = self.template_vm - assert source_template is not None - conf_template = None - if self.type == "NetVM": - conf_template = open (source_template.netvms_conf_file, "r") - elif self.updateable: - conf_template = open (source_template.standalonevms_conf_file, "r") - else: - conf_template = open (source_template.appvms_conf_file, "r") - if os.path.isfile(self.conf_file): - shutil.copy(self.conf_file, self.conf_file + ".backup") + f_conf_template = open('/usr/share/qubes/vm-template.conf', 'r') + conf_template = f_conf_template.read() + f_conf_template.close() + + template_params = self.get_config_params(source_template) conf_appvm = 
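Rendering in the new scheme is plain str.format() over a parameter dict, with subclasses adjusting entries such as rootdev. A trimmed sketch of the mechanism (the real skeleton is dom0/misc/vm-template.conf; the names, paths and sizes here are made up):

    #!/usr/bin/python
    template = (
        'kernel="{kerneldir}/vmlinuz"\n'
        'memory = {mem}\n'
        'maxmem = {maxmem}\n'
        'name = "{name}"\n'
        'disk = [ {rootdev} ]\n'
    )

    args = {
        'name': 'work',
        'kerneldir': '/var/lib/qubes/vm-templates/f14/kernels',
        'mem': '400',
        'maxmem': '4096',
        # snapshot device for a template-based VM, as get_rootdev() builds it
        'rootdev': "'script:snapshot:/v/root.img:/v/root-cow.img,xvda,r',",
    }

    print template.format(**args)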
open(self.conf_file, "w") - rx_vmname = re.compile (r"%VMNAME%") - rx_vmdir = re.compile (r"%VMDIR%") - rx_template = re.compile (r"%TEMPLATEDIR%") - rx_pcidevs = re.compile (r"%PCIDEVS%") - rx_mem = re.compile (r"%MEM%") - rx_vcpus = re.compile (r"%VCPUS%") - for line in conf_template: - line = rx_vmname.sub (self.name, line) - line = rx_vmdir.sub (self.dir_path, line) - line = rx_template.sub (source_template.dir_path, line) - line = rx_pcidevs.sub (self.pcidevs, line) - line = rx_mem.sub (str(self.memory), line) - line = rx_vcpus.sub (str(self.vcpus), line) - conf_appvm.write(line) - - conf_template.close() + conf_appvm.write(conf_template.format(**template_params)) conf_appvm.close() def create_on_disk(self, verbose, source_template = None): @@ -729,11 +730,6 @@ class QubesVm(object): "VM directory doesn't exist: {0}".\ format(self.dir_path)) - if not os.path.exists (self.conf_file): - raise QubesException ( - "VM config file doesn't exist: {0}".\ - format(self.conf_file)) - if self.is_updateable() and not os.path.exists (self.root_img): raise QubesException ( "VM root image file doesn't exist: {0}".\ @@ -869,6 +865,9 @@ class QubesVm(object): if verbose: print "--> Loading the VM (type = {0})...".format(self.type) + # refresh config file + self.create_config_file() + limit_memlock = resource.getrlimit(resource.RLIMIT_MEMLOCK) # try to increase limit if needed if limit_memlock[0] < int(self.memory) * 1024: @@ -884,8 +883,6 @@ class QubesVm(object): raise MemoryError ("ERROR: insufficient memory to start this VM") xl_cmdline = ['/usr/sbin/xl', 'create', self.conf_file, '-p'] - if not self.is_netvm(): - xl_cmdline += ['maxmem={0}'.format(self.maxmem)] try: subprocess.check_call(xl_cmdline) @@ -1016,10 +1013,6 @@ class QubesTemplateVm(QubesVm): if "updateable" not in kwargs or kwargs["updateable"] is None : kwargs["updateable"] = True - appvms_conf_file = kwargs.pop("appvms_conf_file") if "appvms_conf_file" in kwargs else None - netvms_conf_file = kwargs.pop("netvms_conf_file") if "netvms_conf_file" in kwargs else None - standalonevms_conf_file = kwargs.pop("standalonevms_conf_file") if "standalonevms_conf_file" in kwargs else None - if "label" not in kwargs or kwargs["label"] == None: kwargs["label"] = default_template_label @@ -1033,25 +1026,6 @@ class QubesTemplateVm(QubesVm): # Image for template changes self.rootcow_img = self.dir_path + "/" + default_rootcow_img - if appvms_conf_file is not None and os.path.isabs(appvms_conf_file): - self.appvms_conf_file = appvms_conf_file - else: - self.appvms_conf_file = dir_path + "/" + ( - appvms_conf_file if appvms_conf_file is not None else default_appvms_conf_file) - - if netvms_conf_file is not None and os.path.isabs(netvms_conf_file): - self.netvms_conf_file = netvms_conf_file - else: - self.netvms_conf_file = dir_path + "/" + ( - netvms_conf_file if netvms_conf_file is not None else default_netvms_conf_file) - - if standalonevms_conf_file is not None and os.path.isabs(standalonevms_conf_file): - self.standalonevms_conf_file = standalonevms_conf_file - else: - self.standalonevms_conf_file = dir_path + "/" + ( - standalonevms_conf_file if standalonevms_conf_file is not None else default_standalonevms_conf_file) - - self.templatevm_conf_template = self.dir_path + "/" + default_templatevm_conf_template self.appmenus_templates_dir = self.dir_path + "/" + default_appmenus_templates_subdir self.appmenus_template_templates_dir = self.dir_path + "/" + default_appmenus_template_templates_subdir self.appvms = QubesVmCollection() @@ -1073,6 +1047,8 
@@ class QubesTemplateVm(QubesVm): format (appvm.name, self.name)) self.updateable = True + def get_rootdev(self, source_template=None): + return "'script:origin:{dir}/root.img:{dir}/root-cow.img,xvda,w',".format(dir=self.dir_path) def clone_disk_files(self, src_template_vm, verbose): if dry_run: @@ -1086,42 +1062,9 @@ class QubesTemplateVm(QubesVm): os.mkdir (self.dir_path) if verbose: - print "--> Copying the VM config file:\n{0} =*>\n{1}".\ - format(src_template_vm.templatevm_conf_template, self.conf_file) - conf_templatevm_template = open (src_template_vm.templatevm_conf_template, "r") - conf_file = open(self.conf_file, "w") - rx_templatename = re.compile (r"%TEMPLATENAME%") - rx_mem = re.compile (r"%MEM%") - rx_vcpus = re.compile (r"%VCPUS%") - - for line in conf_templatevm_template: - line = rx_templatename.sub (self.name, line) - line = rx_mem.sub (str(self.memory), line) - line = rx_vcpus.sub (str(self.vcpus), line) - conf_file.write(line) - - conf_templatevm_template.close() - conf_file.close() - - if verbose: - print "--> Copying the VM config template :\n{0} ==>\n{1}".\ - format(src_template_vm.templatevm_conf_template, self.templatevm_conf_template) - shutil.copy (src_template_vm.templatevm_conf_template, self.templatevm_conf_template) - - if verbose: - print "--> Copying the VM config template :\n{0} ==>\n{1}".\ - format(src_template_vm.appvms_conf_file, self.appvms_conf_file) - shutil.copy (src_template_vm.appvms_conf_file, self.appvms_conf_file) - - if verbose: - print "--> Copying the VM config template :\n{0} ==>\n{1}".\ - format(src_template_vm.netvms_conf_file, self.netvms_conf_file) - shutil.copy (src_template_vm.netvms_conf_file, self.netvms_conf_file) - - if verbose: - print "--> Copying the VM config template :\n{0} ==>\n{1}".\ - format(src_template_vm.standalonevms_conf_file, self.standalonevms_conf_file) - shutil.copy (src_template_vm.standalonevms_conf_file, self.standalonevms_conf_file) + print "--> Creating VM config file: {0}".\ + format(self.conf_file) + self.create_config_file(source_template=src_template_vm) if verbose: print "--> Copying the template's private image:\n{0} ==>\n{1}".\ @@ -1212,16 +1155,6 @@ class QubesTemplateVm(QubesVm): "VM directory doesn't exist: {0}".\ format(self.dir_path)) - if not os.path.exists (self.conf_file): - raise QubesException ( - "VM config file doesn't exist: {0}".\ - format(self.conf_file)) - - if not os.path.exists (self.appvms_conf_file): - raise QubesException ( - "Appvm template config file doesn't exist: {0}".\ - format(self.appvms_conf_file)) - if not os.path.exists (self.root_img): raise QubesException ( "VM root image file doesn't exist: {0}".\ @@ -1296,9 +1229,6 @@ class QubesTemplateVm(QubesVm): def get_xml_attrs(self): attrs = super(QubesTemplateVm, self).get_xml_attrs() - attrs["appvms_conf_file"] = self.appvms_conf_file - attrs["netvms_conf_file"] = self.netvms_conf_file - attrs["standalonevms_conf_file"] = self.standalonevms_conf_file attrs["clean_volatile_img"] = self.clean_volatile_img attrs["rootcow_img"] = self.rootcow_img return attrs @@ -1357,6 +1287,11 @@ class QubesNetVm(QubesVm): assert lo >= 2 and lo <= 254, "Wrong IP address for VM" return self.netprefix + "{0}".format(lo) + def get_config_params(self, source_template=None): + args = super(QubesNetVm, self).get_config_params(source_template) + args['kernelopts'] = ' swiotlb=force pci=nomsi' + return args + def create_xenstore_entries(self, xid): if dry_run: return @@ -2078,11 +2013,6 @@ class QubesVmCollection(dict): try: kwargs = 
self.parse_xml_element(element) - # Add TemplateVM specific fields - attr_list = ("appvms_conf_file", "netvms_conf_file", "standalonevms_conf_file") - - for attribute in attr_list: - kwargs[attribute] = element.get(attribute) vm = QubesTemplateVm(**kwargs) From 645132f04310bb9e953b2ea678bab66729e47c1a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 01:46:35 +0200 Subject: [PATCH 51/90] dom0: Explicitly set maxmem=mem for NetVM --- dom0/qvm-core/qubes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 15dba02f..b079e681 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1259,6 +1259,8 @@ class QubesNetVm(QubesVm): if "memory" not in kwargs or kwargs["memory"] is None: kwargs["memory"] = 200 + kwargs["maxmem"] = kwargs["memory"] + super(QubesNetVm, self).__init__(**kwargs) self.connected_vms = QubesVmCollection() From ae6d2ac70cbf180453b13c8ef705fab5256ff0f3 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 01:56:37 +0200 Subject: [PATCH 52/90] dom0: include xl.conf in qubes-core-dom0 package Disable autoballoon (qmemman will handle it) and specify lock file location writable by user. --- dom0/misc/xl.conf | 11 +++++++++++ rpm_spec/core-dom0.spec | 3 +++ 2 files changed, 14 insertions(+) create mode 100644 dom0/misc/xl.conf diff --git a/dom0/misc/xl.conf b/dom0/misc/xl.conf new file mode 100644 index 00000000..2c461582 --- /dev/null +++ b/dom0/misc/xl.conf @@ -0,0 +1,11 @@ +## Global XL config file ## + +# automatically balloon down dom0 when xen doesn't have enough free +# memory to create a domain +autoballoon=0 + +# full path of the lockfile used by xl during domain creation +lockfile="/var/run/qubes/xl-lock" + +# default vif script +#vifscript="vif-bridge" diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 1ab2d358..5223a3ca 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -123,6 +123,8 @@ mkdir -p $RPM_BUILD_ROOT/etc/sysconfig cp ../common/iptables $RPM_BUILD_ROOT/etc/sysconfig mkdir -p $RPM_BUILD_ROOT/etc/security/limits.d cp misc/limits-qubes.conf $RPM_BUILD_ROOT/etc/security/limits.d/99-qubes.conf +mkdir -p $RPM_BUILD_ROOT/etc/xen/ +cp misc/xl.conf $RPM_BUILD_ROOT/etc/xen/ mkdir -p $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d cp pm-utils/01qubes-sync-vms-clock $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d/ @@ -294,3 +296,4 @@ fi /etc/sudoers.d/qubes /etc/xdg/autostart/qubes-guid.desktop /etc/security/limits.d/99-qubes.conf +/etc/xen/xl.conf From 0ffb18668138a797f48a16a8479f2e5ab2c23653 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Mon, 6 Jun 2011 02:37:55 +0200 Subject: [PATCH 53/90] vm: add -qubes suffix to xenstore-watch to not conflict with xen standard tool --- proxyvm/bin/qubes_firewall | 2 +- proxyvm/bin/qubes_netwatcher | 4 ++-- rpm_spec/core-commonvm.spec | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/proxyvm/bin/qubes_firewall b/proxyvm/bin/qubes_firewall index 2a0963de..fbac2959 100755 --- a/proxyvm/bin/qubes_firewall +++ b/proxyvm/bin/qubes_firewall @@ -32,5 +32,5 @@ while true; do fi # Wait for changes in xenstore file - /usr/bin/xenstore-watch $XENSTORE_IPTABLES + /usr/bin/xenstore-watch-qubes $XENSTORE_IPTABLES done diff --git a/proxyvm/bin/qubes_netwatcher b/proxyvm/bin/qubes_netwatcher index 9b9f279d..f250cc19 100755 --- a/proxyvm/bin/qubes_netwatcher +++ b/proxyvm/bin/qubes_netwatcher @@ -24,8 +24,8 @@ while true; do /usr/bin/xenstore-write qubes_netvm_external_ip "$CURR_NETCFG" 
fi - /usr/bin/xenstore-watch /local/domain/$NET_DOMID/qubes_netvm_external_ip + /usr/bin/xenstore-watch-qubes /local/domain/$NET_DOMID/qubes_netvm_external_ip else - /usr/bin/xenstore-watch qubes_netvm_domid + /usr/bin/xenstore-watch-qubes qubes_netvm_domid fi done diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec index 9e484d02..74b7a5e8 100644 --- a/rpm_spec/core-commonvm.spec +++ b/rpm_spec/core-commonvm.spec @@ -71,7 +71,7 @@ install -m 644 RPM-GPG-KEY-qubes* $RPM_BUILD_ROOT/etc/pki/rpm-gpg/ mkdir -p $RPM_BUILD_ROOT/sbin cp qubes_serial_login $RPM_BUILD_ROOT/sbin mkdir -p $RPM_BUILD_ROOT/usr/bin -cp xenstore-watch $RPM_BUILD_ROOT/usr/bin +cp xenstore-watch $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes mkdir -p $RPM_BUILD_ROOT/etc cp serial.conf $RPM_BUILD_ROOT/var/lib/qubes/ mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d @@ -223,6 +223,6 @@ rm -rf $RPM_BUILD_ROOT /etc/yum.repos.d/qubes%{dist}.repo /etc/pki/rpm-gpg/RPM-GPG-KEY-qubes* /sbin/qubes_serial_login -/usr/bin/xenstore-watch +/usr/bin/xenstore-watch-qubes /etc/udev/rules.d/qubes_network.rules /usr/lib/qubes/setup_ip From 429c685f1d511845796fd6377f4cb9b5904ccb82 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 7 Jun 2011 12:19:17 +0200 Subject: [PATCH 54/90] dom0: write firewall rules only for running proxyvms --- dom0/qvm-core/qubes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index b079e681..0912f4a1 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -929,7 +929,7 @@ class QubesVm(object): if verbose: print "--> Updating firewall rules..." for vm in qvm_collection.values(): - if vm.is_proxyvm(): + if vm.is_proxyvm() and vm.is_running(): vm.write_iptables_xenstore_entry() if verbose: From bd447308fe03240d781eecb856153b1ac5461dc4 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 7 Jun 2011 15:52:10 +0200 Subject: [PATCH 55/90] dom0/qmemman: distribute memory freed by deleted domain Also wait a moment after domain list change for domain cleanup. Even if this time is not sufficient, memory will be balanced when some domain need it. --- dom0/qmemman/qmemman.py | 1 + dom0/qmemman/qmemman_server.py | 1 + 2 files changed, 2 insertions(+) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 8a235422..b62f08fd 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -28,6 +28,7 @@ class SystemState: def del_domain(self, id): self.domdict.pop(id) + self.do_balance() def get_free_xen_memory(self): return self.xc.physinfo()['free_memory']*1024 diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index f6b768b5..cf28e905 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -33,6 +33,7 @@ class XS_Watcher: self.watch_token_dict = {} def domain_list_changed(self, param): + time.sleep(0.05) curr = self.handle.ls('', '/local/domain') if curr == None: return From 9ed6b94d63aedd54c37ba3570007179a6fd0797d Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 7 Jun 2011 15:56:11 +0200 Subject: [PATCH 56/90] dom0/qmemman: Check for memory_maximum also for dom0 --- dom0/qmemman/qmemman_algo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index 22c3feb5..e64c76de 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -60,7 +60,7 @@ def prefmem(domain): CACHE_FACTOR = 1.3 #dom0 is special, as it must have large cache, for vbds. 
Thus, give it a special boost if domain.id == '0': - return domain.mem_used*CACHE_FACTOR + 350*1024*1024 + return min(domain.mem_used*CACHE_FACTOR + 350*1024*1024, domain.memory_maximum) return min(domain.mem_used*CACHE_FACTOR, domain.memory_maximum) def memory_needed(domain): From 50a910362d98710052b85e15160659e4d9fe42e6 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 7 Jun 2011 15:56:55 +0200 Subject: [PATCH 57/90] dom0/qmemman: Fix distribution memory left because of memory_maximum --- dom0/qmemman/qmemman_algo.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index e64c76de..5f01abf2 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -129,6 +129,7 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre while left_memory > 0 and acceptors_count > 0: print ' left_memory:', left_memory, 'acceptors_count:', acceptors_count new_left_memory = 0 + new_acceptors_count = acceptors_count for i in target_memory.keys(): target = target_memory[i] if target < domain_dictionary[i].memory_maximum: @@ -136,11 +137,12 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre if target+memory_bonus >= domain_dictionary[i].memory_maximum: new_left_memory += target+memory_bonus - domain_dictionary[i].memory_maximum target = domain_dictionary[i].memory_maximum - acceptors_count -= 1 + new_acceptors_count -= 1 else: target += memory_bonus target_memory[i] = target left_memory = new_left_memory + acceptors_count = new_acceptors_count # split target_memory dictionary to donors and acceptors # this is needed to first get memory from donors and only then give it to acceptors donors_rq = list() From c444ebc5f8ebe65358905b1e7f93a453e2987944 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Tue, 7 Jun 2011 16:19:52 +0200 Subject: [PATCH 58/90] dom0/qmemman: different approach of mem-set and maxmem (libxl way) Libxl stores maxmem in xenstore (/local/domain/X/memory/static-max) and sets maxmem and target_mem to actual memory. So qmemman should use xenstore entry as memory_maximum (when exists) and also adjust maxmem when changing domain memory. 
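For illustration, a minimal Python sketch of the flow described above, using the xen.lowlevel bindings the way qmemman does; the helper names and the slack constant are assumptions for the sketch, not code from this patch:

from xen.lowlevel import xc, xs

xc_handle = xc.xc()  # hypervisor control interface
xs_handle = xs.xs()  # xenstore connection

LIBXL_MAXMEM_SLACK_KB = 1024  # assumed stand-in for libxl's LIBXL_MAXMEM_CONSTANT

def read_memory_maximum(domid, dominfo):
    # libxl publishes the static maximum (in kB, as a string) in xenstore;
    # fall back to the hypervisor's maxmem_kb when the entry is absent (xend case)
    val = xs_handle.read('', '/local/domain/%s/memory/static-max' % domid)
    if val:
        return int(val) * 1024
    return dominfo['maxmem_kb'] * 1024

def mem_set(domid, val):
    # raise maxmem together with the ballooning target; both xc calls take kB,
    # and maxmem gets the libxl-style slack on top of the target
    xc_handle.domain_setmaxmem(int(domid), val / 1024 + LIBXL_MAXMEM_SLACK_KB)
    xc_handle.domain_set_target_mem(int(domid), val / 1024)

One detail worth noting: the xenstore value arrives as a string of kB, so it needs converting before it is compared with byte counts elsewhere in qmemman.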
--- dom0/qmemman/qmemman.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index b62f08fd..2c30ebb7 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -44,7 +44,9 @@ class SystemState: id = str(domain['domid']) if self.domdict.has_key(id): self.domdict[id].memory_actual = domain['mem_kb']*1024 - self.domdict[id].memory_maximum = domain['maxmem_kb']*1024 + self.domdict[id].memory_maximum = self.xs.read('', '/local/domain/%s/memory/static-max' % str(id)) + if not self.domdict[id].memory_maximum: + self.domdict[id].memory_maximum = domain['maxmem_kb']*1024 #the below works (and is fast), but then 'xm list' shows unchanged memory value def mem_set(self, id, val): @@ -54,6 +56,7 @@ class SystemState: #can happen in the middle of domain shutdown #apparently xc.lowlevel throws exceptions too try: + self.xc.domain_setmaxmem(int(id), val/1024 + 1024) # LIBXL_MAXMEM_CONSTANT=1024 self.xc.domain_set_target_mem(int(id), val/1024) except: pass From f5e4cf58aa41d312346e8b8d30d36a1774c679a0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:28:08 +0200 Subject: [PATCH 59/90] dom0: include vif in domain config (no need for network-attach) --- dom0/qvm-core/qubes.py | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 0912f4a1..2551fa35 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -634,7 +634,13 @@ class QubesVm(object): args['mem'] = str(self.memory) args['maxmem'] = str(self.maxmem) args['vcpus'] = str(self.vcpus) - args['netdev'] = '' + if self.netvm_vm is not None: + args['netdev'] = "'script=/etc/xen/scripts/vif-route-qubes,ip={ip}".format(ip=self.ip) + if self.netvm_vm.qid != 0: + args['netdev'] += ",backend={0}".format(self.netvm_vm.name) + args['netdev'] += "'" + else: + args['netdev'] = '' args['rootdev'] = self.get_rootdev(source_template=source_template) args['privatedev'] = "'script:file:{dir}/private.img,xvdb,w',".format(dir=self.dir_path) args['volatiledev'] = "'script:file:{dir}/volatile.img,xvdc,w',".format(dir=self.dir_path) @@ -861,6 +867,13 @@ class QubesVm(object): if self.is_running(): raise QubesException ("VM is already running!") + if self.netvm_vm is not None: + if self.netvm_vm.qid != 0: + if not self.netvm_vm.is_running(): + if verbose: + print "--> Starting NetVM {0}...".format(self.netvm_vm.name) + self.netvm_vm.start() + self.reset_volatile_storage() if verbose: print "--> Loading the VM (type = {0})...".format(self.type) @@ -898,29 +911,6 @@ class QubesVm(object): print "--> Setting Xen Store info for the VM..." 
self.create_xenstore_entries(xid) - if self.netvm_vm is not None: - assert self.netvm_vm is not None - if verbose: - print "--> Attaching to the network backend (netvm={0})...".format(self.netvm_vm.name) - if preparing_dvm: - actual_ip = "254.254.254.254" - else: - actual_ip = self.ip - xl_cmdline = ["/usr/sbin/xl", "network-attach", self.name, "script=/etc/xen/scripts/vif-route-qubes", "ip="+actual_ip] - if self.netvm_vm.qid != 0: - if not self.netvm_vm.is_running(): - self.netvm_vm.start() - retcode = subprocess.call (xl_cmdline + ["backend={0}".format(self.netvm_vm.name)]) - if retcode != 0: - self.force_shutdown() - raise OSError ("ERROR: Cannot attach to network backend!") - - else: - retcode = subprocess.call (xl_cmdline) - if retcode != 0: - self.force_shutdown() - raise OSError ("ERROR: Cannot attach to network backend!") - qvm_collection = QubesVmCollection() qvm_collection.lock_db_for_reading() qvm_collection.load() From 1647d03f745ecae68202b38b39aabb80b7c36dd4 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:29:52 +0200 Subject: [PATCH 60/90] dom0: use path given in argument to store VM configuration --- dom0/qvm-core/qubes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 2551fa35..8b97e539 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -659,7 +659,7 @@ class QubesVm(object): f_conf_template.close() template_params = self.get_config_params(source_template) - conf_appvm = open(self.conf_file, "w") + conf_appvm = open(file_path, "w") conf_appvm.write(conf_template.format(**template_params)) conf_appvm.close() From fcd4cd44eb866ab17a034916ecfc6246b27a2f18 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:30:42 +0200 Subject: [PATCH 61/90] dom0: create config template for DispVM Introduction for later patches. --- dom0/qvm-core/qubes.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 8b97e539..56208d6f 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -648,7 +648,7 @@ class QubesVm(object): return args - def create_config_file(self, file_path = None, source_template = None): + def create_config_file(self, file_path = None, source_template = None, prepare_dvm = False): if file_path is None: file_path = self.conf_file if source_template is None: @@ -659,6 +659,10 @@ class QubesVm(object): f_conf_template.close() template_params = self.get_config_params(source_template) + if prepare_dvm: + template_params['name'] = '%NAME%' + template_params['privatedev'] = '' + template_params['netdev'] = re.sub(r"ip=[0-9.]*", "ip=%IP%", template_params['netdev']) conf_appvm = open(file_path, "w") conf_appvm.write(conf_template.format(**template_params)) @@ -934,6 +938,11 @@ class QubesVm(object): self.force_shutdown() raise OSError ("ERROR: Cannot execute qrexec_daemon!") + if preparing_dvm: + if verbose: + print "--> Preparing config template for DispVM" + self.create_config_file(file_path = self.dir_path + '/dvm.conf', prepare_dvm = True) + # perhaps we should move it before unpause and fork? 
# FIXME: this uses obsolete xm api if debug_console: From 81ae4fafcf08eb10987acf16d5060cdd08060dc9 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:33:45 +0200 Subject: [PATCH 62/90] dom0: Use 10.138.x.y for DispVMs and fix gateway/DNS addresses --- dom0/restore/qubes_prepare_saved_domain.sh | 2 +- dom0/restore/qubes_restore.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh index ff37b829..2d17e1a5 100755 --- a/dom0/restore/qubes_prepare_saved_domain.sh +++ b/dom0/restore/qubes_prepare_saved_domain.sh @@ -44,7 +44,7 @@ fi xenstore-write /local/domain/$ID/qubes_save_request 1 xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem xenstore-read /local/domain/$ID/qubes_gateway | \ - cut -d . -f 2 | tr -d "\n" > $VMDIR/netvm_id.txt + cut -d . -f 3 | tr -d "\n" > $VMDIR/netvm_id.txt xl block-detach $1 /dev/xvdb MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem) echo "DVM boot complete, memory used=$MEM. Saving image..." diff --git a/dom0/restore/qubes_restore.c b/dom0/restore/qubes_restore.c index dfe91880..15df3d5b 100644 --- a/dom0/restore/qubes_restore.c +++ b/dom0/restore/qubes_restore.c @@ -258,8 +258,7 @@ char *dispname_by_dispid(int dispid) char *build_dvm_ip(int netvm, int id) { static char buf[256]; - snprintf(buf, sizeof(buf), "10.%d.%d.%d", netvm, id / 254 + 200, - (id % 254) + 1); + snprintf(buf, sizeof(buf), "10.138.%d.%d", netvm, (id % 254) + 1); return buf; } @@ -385,9 +384,9 @@ void setup_xenstore(int netvm_id, int domid, int dvmid, char *name) write_xs_single(xs, domid, "qubes_ip", build_dvm_ip(netvm_id, dvmid)); write_xs_single(xs, domid, "qubes_netmask", "255.255.0.0"); - snprintf(val, sizeof(val), "10.%d.0.1", netvm_id); + snprintf(val, sizeof(val), "10.137.%d.1", netvm_id); write_xs_single(xs, domid, "qubes_gateway", val); - snprintf(val, sizeof(val), "10.%d.255.254", netvm_id); + snprintf(val, sizeof(val), "10.137.%d.254", netvm_id); write_xs_single(xs, domid, "qubes_secondary_dns", val); write_xs_single(xs, domid, "qubes_vm_type", "AppVM"); write_xs_single(xs, domid, "qubes_restore_complete", "True"); From e5df78fe92fb3a81dd4d2c0d6a4b9c3bf05c6ad2 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:36:02 +0200 Subject: [PATCH 63/90] dom0: Migrate qubes_restore (and all DispVM logic) to libxl Detailed changes: - use domain config in a separate file (not embedded in the savefile) - DispVM domain config generated from dvm.conf (introduced by previous patches) by qubes_restore - use an 'xl restore' call to restore the domain (instead of a command to xend) - additional parameter to qubes_restore: the config template - minor changes (xenstore perms, block-detach without /dev/ prefix, etc) --- dom0/restore/qfile-daemon-dvm | 2 + dom0/restore/qubes_prepare_saved_domain.sh | 4 +- dom0/restore/qubes_restore.c | 324 ++++++++++----------- dom0/restore/qvm-create-default-dvm | 6 +- 4 files changed, 172 insertions(+), 164 deletions(-) diff --git a/dom0/restore/qfile-daemon-dvm b/dom0/restore/qfile-daemon-dvm index c166c78d..837db4d4 100755 --- a/dom0/restore/qfile-daemon-dvm +++ b/dom0/restore/qfile-daemon-dvm @@ -31,6 +31,7 @@ from qubes.qubes import QubesDaemonPidfile from qubes.qmemman_client import QMemmanClient current_savefile = '/var/run/qubes/current_savefile' +current_dvm_conf = '/var/run/qubes/current_dvm.conf' notify_object = None class QfileDaemonDvm: @@ -58,6 +59,7 @@ class QfileDaemonDvm: return None retcode =
subprocess.call(['/usr/lib/qubes/qubes_restore', current_savefile, + current_dvm_conf, '-c', vm.label.color, '-i', vm.label.icon, '-l', str(vm.label.index)]) diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh index 2d17e1a5..f45ddf8b 100755 --- a/dom0/restore/qubes_prepare_saved_domain.sh +++ b/dom0/restore/qubes_prepare_saved_domain.sh @@ -45,7 +45,7 @@ xenstore-write /local/domain/$ID/qubes_save_request 1 xenstore-watch-qubes /local/domain/$ID/device/qubes_used_mem xenstore-read /local/domain/$ID/qubes_gateway | \ cut -d . -f 3 | tr -d "\n" > $VMDIR/netvm_id.txt -xl block-detach $1 /dev/xvdb +xl block-detach $1 xvdb MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem) echo "DVM boot complete, memory used=$MEM. Saving image..." QMEMMAN_STOP=/var/run/qubes/do-not-membalance @@ -59,5 +59,7 @@ if ! xl save $1 $2 ; then fi rm -f $QMEMMAN_STOP cd $VMDIR +# Fix start memory +sed -i -e "s/^memory.*/memory = $((MEM/1000))/" dvm.conf tar -Scvf saved_cows.tar volatile.img echo "DVM savefile created successfully." diff --git a/dom0/restore/qubes_restore.c b/dom0/restore/qubes_restore.c index 15df3d5b..698d4a2e 100644 --- a/dom0/restore/qubes_restore.c +++ b/dom0/restore/qubes_restore.c @@ -12,95 +12,73 @@ #include #include -char xmlrpc_header[] = - "POST /RPC2 HTTP/1.0\r\n" - "Host: \r\n" - "User-Agent: xmlrpclib.py/1.0.1 (by www.pythonware.com)\r\n" - "Content-Type: text/xml\r\n" "Content-Length: %d\r\n" "\r\n"; -char xmlrpc_body_restore[] = - "\n" - "\n" - "xend.domain.restore\n" - "\n" - "\n" - "%s\n" - "\n" - "\n" - "0\n" - "\n" "\n" "\n"; - -char xmlrpc_body_setmem[] = - "\n\nxend.domain.setMemoryTarget\n\n\n%d\n\n\n%d\n\n\n\n"; - -void send_raw(int fd, char *body) -{ - char *header; - asprintf(&header, xmlrpc_header, strlen(body)); - if (write(fd, header, strlen(header)) != strlen(header)) { - perror("write xend"); +int restore_domain(char *restore_file, char *conf_file, char *name) { + int pid, status, domid; + int pipe_fd[2]; + char buf[256]; + char *endptr; + switch (pid = fork()) { + case -1: + perror("fork"); exit(1); - } - if (write(fd, body, strlen(body)) != strlen(body)) { - perror("write xend"); - exit(1); - } - shutdown(fd, SHUT_WR); -} - - -void send_req_restore(int fd, char *name) -{ - char *body; - asprintf(&body, xmlrpc_body_restore, name); - send_raw(fd, body); -} - -void send_req_setmem(int fd, int domid, int mem) -{ - char *body; - asprintf(&body, xmlrpc_body_setmem, domid, mem); - send_raw(fd, body); -} - -char *recv_resp(int fd) -{ -#define RESPSIZE 65536 - static char buf[RESPSIZE]; - int total = 0; - int n; - for (;;) { - n = read(fd, buf + total, RESPSIZE - total); - if (n == 0) { - buf[total] = 0; - close(fd); - return buf; - } - if (n < 0) { - perror("xend read"); + case 0: + close(1); + if (dup2(open("/dev/null", O_RDWR), 1)==-1) { + perror("dup2 or open"); exit(1); } - total += n; + execl("/usr/sbin/xl", "xl", "restore", conf_file, restore_file, NULL); + perror("execl"); + exit(1); + default:; } + if (waitpid(pid, &status, 0) < 0) { + perror("waitpid"); + exit(1); + } + if (status != 0) { + fprintf(stderr, "Error starting VM\n"); + exit(1); + } + + // read domid + if (pipe(pipe_fd)==-1) { + perror("pipe"); + exit(1); + } + switch (pid = fork()) { + case -1: + perror("fork"); + exit(1); + case 0: + close(1); + if (dup2(pipe_fd[1], 1) == -1) { + perror("dup2"); + exit(1); + } + execl("/usr/sbin/xl", "xl", "domid", name, NULL); + perror("execl"); + exit(1); + default:; + } + read(pipe_fd[0], buf, sizeof(buf)-1); + 
buf[sizeof(buf)-1] = 0; + domid = strtoul(buf, &endptr, 10); + if (domid <= 0 || *endptr != '\n') { + fprintf(stderr, "Cannot get DispVM xid\n"); + exit(1); + } + if (waitpid(pid, &status, 0) < 0) { + perror("waitpid"); + exit(1); + } + if (status != 0) { + fprintf(stderr, "Error getting DispVM xid\n"); + exit(1); + } + return domid; } -void bad_resp(char *resp) -{ - fprintf(stderr, "Error; Xend response:\n%s\n", resp); - exit(1); -} - -int parse_resp(char *resp) -{ - char *domid; - if (strstr(resp, "")) - bad_resp(resp); - if (!strstr(resp, "domid")) - bad_resp(resp); - domid = strstr(resp, ""); - if (!domid) - bad_resp(resp); - return strtoul(domid + 5, NULL, 0); -} char *gettime() { @@ -161,27 +139,6 @@ void preload_cache(int fd) } } -int xend_connect() -{ - struct sockaddr_un server; - int s; - - s = socket(AF_UNIX, SOCK_STREAM, 0); - if (s < 0) { - perror("socket af_unix"); - exit(1); - } - server.sun_family = AF_UNIX; - strcpy(server.sun_path, "/var/run/xend/xmlrpc.sock"); - if (connect - (s, (struct sockaddr *) &server, - strlen(server.sun_path) + sizeof(server.sun_family))) { - perror("connext xend"); - exit(1); - } - return s; -} - void start_rexec(int domid) { int pid, status; @@ -214,40 +171,13 @@ void start_guid(int domid, int argc, char **argv) guid_args[0] = "qubes_guid"; guid_args[1] = "-d"; guid_args[2] = dstr; - for (i = 2; i < argc; i++) + for (i = 3; i < argc; i++) guid_args[i + 1] = argv[i]; guid_args[argc + 1] = NULL; execv("/usr/bin/qubes_guid", guid_args); perror("execv"); } -// modify the savefile. fd = fd to the open savefile, -// buf - already read 1st page of the savefile -// pattern - pattern to search for -// val - string to replace pattern with -void fix_savefile(int fd, char *buf, char *pattern, char *val) -{ - int i, len = strlen(val), origlen; - char *bracket; - char *loc = strstr(buf + 20, pattern) + strlen(pattern); - if (!loc) - return; - bracket = index(loc, ')'); - if (!bracket) - return; - origlen = (long) bracket - (long) loc; - if (origlen < len) { - fprintf(stderr, "too long string %s\n", val); - exit(1); - } - for (i = 0; i < origlen - len; i++) - loc[i] = ' '; - memcpy(loc + i, val, strlen(val)); - lseek(fd, (long) loc - (long) buf, SEEK_SET); - write(fd, loc, origlen); -} - - char *dispname_by_dispid(int dispid) { static char retbuf[16]; @@ -269,15 +199,17 @@ char *build_dvm_ip(int netvm, int id) // normally, it should be "templatename-dvm" char *get_vmname_from_savefile(int fd) { + int buflen; static char buf[4096]; char *name; char *slash; lseek(fd, 0, SEEK_SET); - if (read(fd, buf, sizeof(buf)) != sizeof(buf)) { - perror("read savefile"); + buflen = read(fd, buf, sizeof(buf) - 1); + if (buflen < 0) { + perror("read vm conf"); exit(1); } - buf[sizeof(buf) - 1] = 0; + buf[buflen] = 0; name = strstr(buf + 20, NAME_PATTERN); if (!name) { fprintf(stderr, @@ -295,25 +227,69 @@ char *get_vmname_from_savefile(int fd) return slash + 1; } -void fix_savefile_all(int fd, int dispid, int netvm_id) +void fill_field(FILE *conf, char *field, int dispid, int netvm_id) { - char val[256]; - char buf[4096]; - lseek(fd, 0, SEEK_SET); - if (read(fd, buf, sizeof(buf)) != sizeof(buf)) { - perror("read savefile"); + if (!strcmp(field, "NAME")) { + fprintf(conf, "%s", dispname_by_dispid(dispid)); + } else if (!strcmp(field, "MAC")) { + fprintf(conf, "00:16:3e:7c:8b:%02x", dispid); + } else if (!strcmp(field, "IP")) { + fprintf(conf, "%s", build_dvm_ip(netvm_id, dispid)); + } else if (!strcmp(field, "UUID")) { + // currently not present in conf file + fprintf(conf, 
"064cd14c-95ad-4fc2-a4c9-cf9f522e5b%02x", dispid); + } else { + fprintf(stderr, "unknown field in vm conf: %s\n", field); exit(1); } - buf[sizeof(buf) - 1] = 0; - snprintf(val, sizeof(val), - "064cd14c-95ad-4fc2-a4c9-cf9f522e5b%02x", dispid); - fix_savefile(fd, buf, "(uuid ", val); - fix_savefile(fd, buf, "(name ", dispname_by_dispid(dispid)); - snprintf(val, sizeof(val), "00:16:3e:7c:8b:%02x", dispid); - fix_savefile(fd, buf, "(mac ", val); - fix_savefile(fd, buf, "(ip ", build_dvm_ip(netvm_id, dispid)); } +// modify the config file. conf = FILE of the new config, +// conf_templ - fd of config template +// pattern - pattern to search for +// val - string to replace pattern with +void fix_conffile(FILE *conf, int conf_templ, int dispid, int netvm_id) +{ + int buflen, cur_len = 0; + char buf[4096]; + char *bufpos = buf; + char *pattern, *patternend; + + /* read config template */ + lseek(conf_templ, 0, SEEK_SET); + while ((cur_len = read(conf_templ, buf+cur_len, sizeof(buf)-cur_len)) > 0) { + buflen+=cur_len; + } + if (cur_len < 0) { + perror("read vm conf"); + exit(1); + } + + while ((pattern = index(bufpos, '%'))) { + fwrite(bufpos, 1, pattern-bufpos, conf); + if (ferror(conf)) { + perror("write vm conf"); + exit(1); + } + patternend = index(pattern+1, '%'); + if (!patternend) { + fprintf(stderr, "Unmatched '%%' in VM config\n"); + exit(1); + } + *patternend = '\0'; + fill_field(conf, pattern+1, dispid, netvm_id); + bufpos = patternend+1; + } + while ((cur_len = fwrite(bufpos, 1, buflen-(bufpos-buf), conf)) > 0) { + bufpos+=cur_len; + } + if (ferror(conf)) { + perror("write vm conf"); + exit(1); + } +} + + void unpack_cows(char *name) { char vmdir[4096]; @@ -354,6 +330,17 @@ void write_xs_single(struct xs_handle *xs, int domid, char *name, } } +void perm_xs_single(struct xs_handle *xs, int domid, char *name, + struct xs_permissions *perms, int nperms) +{ + char key[256]; + snprintf(key, sizeof(key), "/local/domain/%d/%s", domid, name); + if (!xs_set_permissions(xs, XBT_NULL, key, perms, nperms)) { + fprintf(stderr, "xs_set_permissions"); + exit(1); + } +} + int get_netvm_id_from_name(char *name) { int fd, n; @@ -376,6 +363,7 @@ void setup_xenstore(int netvm_id, int domid, int dvmid, char *name) { char val[256]; struct xs_handle *xs = xs_daemon_open(); + struct xs_permissions perm[1]; if (!xs) { perror("xs_daemon_open"); exit(1); @@ -390,6 +378,12 @@ void setup_xenstore(int netvm_id, int domid, int dvmid, char *name) write_xs_single(xs, domid, "qubes_secondary_dns", val); write_xs_single(xs, domid, "qubes_vm_type", "AppVM"); write_xs_single(xs, domid, "qubes_restore_complete", "True"); + + perm[0].id = domid; + perm[0].perms = XS_PERM_NONE; + perm_xs_single(xs, domid, "device", perm, 1); + perm_xs_single(xs, domid, "memory", perm, 1); + xs_daemon_close(xs); } @@ -436,35 +430,41 @@ void redirect_stderr() int main(int argc, char **argv) { - int fd, domid, dispid, netvm_id; - char *resp; + int conf_templ, domid, dispid, netvm_id; + FILE *conf; char *name; - if (argc < 2) { + char confname[256]; + if (argc < 3) { fprintf(stderr, - "usage: %s savefile [guid args] \n", argv[0]); + "usage: %s savefile conf_templ [guid args] \n", argv[0]); exit(1); } redirect_stderr(); fprintf(stderr, "time=%s, starting\n", gettime()); set_fast_flag(); atexit(rm_fast_flag); - fd = open(argv[1], O_RDWR); - if (fd < 0) { - perror("open savefile"); + conf_templ = open(argv[2], O_RDONLY); + if (conf_templ < 0) { + perror("fopen vm conf"); exit(1); } dispid = get_next_disposable_id(); - name = 
get_vmname_from_savefile(fd); + name = get_vmname_from_savefile(conf_templ); netvm_id = get_netvm_id_from_name(name); - fix_savefile_all(fd, dispid, netvm_id); + snprintf(confname, sizeof(confname), "/tmp/qubes-dvm-%d.xl", dispid); + conf = fopen(confname, "w"); + if (!conf) { + perror("fopen new vm conf"); + exit(1); + } + fix_conffile(conf, conf_templ, dispid, netvm_id); + close(conf_templ); + fclose(conf); // printf("name=%s\n", name); unpack_cows(name); // no preloading for now, assume savefile in shm // preload_cache(fd); - fd = xend_connect(); - send_req_restore(fd, argv[1]); - resp = recv_resp(fd); - domid = parse_resp(resp); + domid=restore_domain(argv[1], confname, dispname_by_dispid(dispid)); write_varrun_domid(domid, dispname_by_dispid(dispid), name); fprintf(stderr, "time=%s, created domid=%d, creating xenstore entries\n", diff --git a/dom0/restore/qvm-create-default-dvm b/dom0/restore/qvm-create-default-dvm index 55a0ac37..75ffeeba 100755 --- a/dom0/restore/qvm-create-default-dvm +++ b/dom0/restore/qvm-create-default-dvm @@ -40,12 +40,16 @@ if ! /usr/lib/qubes/qubes_prepare_saved_domain.sh \ fi ROOT=/var/lib/qubes/dvmdata/savefile_root DEFAULT=/var/lib/qubes/dvmdata/default_savefile +DEFAULTCONF=/var/lib/qubes/dvmdata/default_dvm.conf CURRENT=/var/run/qubes/current_savefile +CURRENTCONF=/var/run/qubes/current_dvm.conf SHMDIR=/dev/shm/qubes SHMCOPY=$SHMDIR/current_savefile -rm -f $ROOT $DEFAULT $CURRENT +rm -f $ROOT $DEFAULT $CURRENT $DEFAULTCONF $CURRENTCONF ln -s "/var/lib/qubes/appvms/$DVMTMPL/dvm-savefile" $DEFAULT ln -s "/var/lib/qubes/vm-templates/$TEMPLATENAME/root.img" $ROOT +ln -s $DVMTMPLDIR/dvm.conf $DEFAULTCONF +ln -s $DVMTMPLDIR/dvm.conf $CURRENTCONF if [ -f /var/lib/qubes/dvmdata/dont_use_shm ] ; then ln -s $DEFAULT $CURRENT else From ea69b51a97bc4b96737f103dc3f0fa912a29c307 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:41:22 +0200 Subject: [PATCH 64/90] dom0: use /bin/bash as interpreter of qubes_prepare_saved_domain.sh Required for ex $(( )) construction. /bin/sh may not handle it (when linked to some other shell than bash). --- dom0/restore/qubes_prepare_saved_domain.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh index f45ddf8b..251ce38c 100755 --- a/dom0/restore/qubes_prepare_saved_domain.sh +++ b/dom0/restore/qubes_prepare_saved_domain.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash get_encoded_script() { if ! 
[ -f "$1" ] ; then From 197ccb2e2c4225a94762c9bedc1009a4487b8f6e Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 8 Jun 2011 03:42:51 +0200 Subject: [PATCH 65/90] dom0: remove obsolete code from qubes_restore --- dom0/restore/qubes_restore.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/dom0/restore/qubes_restore.c b/dom0/restore/qubes_restore.c index 698d4a2e..5c5693a4 100644 --- a/dom0/restore/qubes_restore.c +++ b/dom0/restore/qubes_restore.c @@ -469,13 +469,6 @@ int main(int argc, char **argv) fprintf(stderr, "time=%s, created domid=%d, creating xenstore entries\n", gettime(), domid); -#if 0 - fd = xend_connect(); - send_req_setmem(fd, domid, 400); - resp = recv_resp(fd); -// printf("%s\n", resp); - fprintf(stderr, "time=%s, creating xenstore entries\n", gettime()); -#endif setup_xenstore(netvm_id, domid, dispid, name); fprintf(stderr, "time=%s, starting qubes_guid\n", gettime()); rm_fast_flag(); From f1f98d47df17c01d61e7f0d1965e3a1ff9ddde25 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 9 Jun 2011 14:06:24 +0200 Subject: [PATCH 66/90] dom0: Use /var/run/xen-hotplug to store information needed for block devices cleanup. Libxl removes xenstore entries before udev (+scripts) have chance to read it. --- common/block-snapshot | 15 +++++++++++---- dom0/restore/block.qubes | 4 ++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/common/block-snapshot b/common/block-snapshot index 14752da2..d8605bd9 100755 --- a/common/block-snapshot +++ b/common/block-snapshot @@ -7,6 +7,8 @@ dir=$(dirname "$0") . "$dir/block-common.sh" +HOTPLUG_STORE="/var/run/xen-hotplug/${XENBUS_PATH//\//-}" + get_dev() { dev=$1 @@ -89,7 +91,6 @@ create_dm_snapshot_origin() { t=$(xenstore_read_default "$XENBUS_PATH/type" 'MISSING') - case "$command" in add) case $t in @@ -117,14 +118,20 @@ case "$command" in if [ "$t" == "snapshot" ]; then #that's all for snapshot, store name of prepared device xenstore_write "$XENBUS_PATH/node" "/dev/mapper/$dm_devname" + echo "/dev/mapper/$dm_devname" > "$HOTPLUG_STORE-node" write_dev /dev/mapper/$dm_devname elif [ "$t" == "origin" ]; then # for origin - prepare snapshot-origin device and store its name dm_devname=origin-$(stat -c '%D:%i' "$base") create_dm_snapshot_origin $dm_devname "$base" xenstore_write "$XENBUS_PATH/node" "/dev/mapper/$dm_devname" + echo "/dev/mapper/$dm_devname" > "$HOTPLUG_STORE-node" write_dev /dev/mapper/$dm_devname fi + # Save domain name for template commit on device remove + domid=$(xenstore_read "$XENBUS_PATH/frontend-id") + domain=$(xl domname $domid) + echo $domain > "$HOTPLUG_STORE-domain" release_lock "block" exit 0 @@ -134,7 +141,7 @@ case "$command" in remove) case $t in snapshot|origin) - node=$(xenstore_read "$XENBUS_PATH/node") + node=$(cat "$HOTPLUG_STORE-node") if [ -z "$node" ]; then fatal "No device node to remove" @@ -175,13 +182,13 @@ case "$command" in fi done # Commit template changes - domain=$(xenstore_read "$XENBUS_PATH/domain") + domain=$(cat "$HOTPLUG_STORE-domain") if [ "$domain" ]; then # Dont stop on errors /usr/bin/qvm-template-commit "$domain" || true fi fi - + if [ -e $node ]; then log debug "Removing $node" dmsetup remove $node diff --git a/dom0/restore/block.qubes b/dom0/restore/block.qubes index 4aee7f31..be2267f6 100755 --- a/dom0/restore/block.qubes +++ b/dom0/restore/block.qubes @@ -1,5 +1,7 @@ #!/bin/bash +HOTPLUG_STORE="/var/run/xen-hotplug/${XENBUS_PATH//\//-}" + hd_arr[10]=a hd_arr[11]=b hd_arr[12]=c @@ -38,6 +40,8 @@ process() xenstore-write "$XENBUS_PATH/node" "$dev" 
\ "$XENBUS_PATH/physical-device" "7:"$HEXNUMBER \ "$XENBUS_PATH/hotplug-status" connected + echo "$dev" > "$HOTPLUG_STORE-node" + echo "file" > "$HOTPLUG_STORE-type" } #exec 2>>/tmp/block.$$ From 3571a34010d0f74ea02c63531c7f1da1a9205e38 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 9 Jun 2011 14:22:22 +0200 Subject: [PATCH 67/90] dom0: preserve old root-cow - for qvm-revert-template-changes --- dom0/qvm-core/qubes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 56208d6f..594eb27c 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -1217,7 +1217,7 @@ class QubesTemplateVm(QubesVm): if dry_run: return if os.path.exists (self.rootcow_img): - os.remove (self.rootcow_img) + os.rename (self.rootcow_img, self.rootcow_img + '.old') f_cow = open (self.rootcow_img, "w") f_root = open (self.root_img, "r") From 891653a413e61d7903fa5be3ab025c8825b8ddaa Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 12:02:32 +0200 Subject: [PATCH 68/90] dom0: create lockfile for libxl and set dom0 name in xenstore Create lockfile to set it proper permissions. Without it the first use (qvm-start netvm) will create it with root:root and 600. Without xend, no one sets dom0 name... --- dom0/init.d/qubes_core | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dom0/init.d/qubes_core b/dom0/init.d/qubes_core index 6d4866fd..cd3af4dd 100755 --- a/dom0/init.d/qubes_core +++ b/dom0/init.d/qubes_core @@ -37,6 +37,11 @@ start() chmod 660 /proc/xen/privcmd chgrp qubes /dev/xen/evtchn chmod 660 /dev/xen/evtchn + touch /var/run/qubes/xl-lock + chgrp qubes /var/run/qubes/xl-lock + chmod 660 /var/run/qubes/xl-lock + + xenstore-write /local/domain/0/name Domain-0 xl sched-credit -d 0 -w 512 cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml From 925647c7d7ef5294e973b9540397bc3c77ec93bb Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:19:19 +0200 Subject: [PATCH 69/90] dom0: run xl create through sudo This finally solve problem with RLIMIT_MEMLOCK (less important) and is required to attach PCI devices (eg netvm restart) - more important. 
--- dom0/qvm-core/qubes.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 594eb27c..e49928d1 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -31,7 +31,6 @@ import re import shutil import uuid import time -import resource from datetime import datetime from qmemman_client import QMemmanClient @@ -885,21 +884,13 @@ class QubesVm(object): # refresh config file self.create_config_file() - limit_memlock = resource.getrlimit(resource.RLIMIT_MEMLOCK) - # try to increase limit if needed - if limit_memlock[0] < int(self.memory) * 1024: - # intentionally don't catch exceptions - if it fails - there is no - # memory for new VM - resource.setrlimit(resource.RLIMIT_MEMLOCK, - (int(self.memory) * 1024, limit_memlock[1])) - mem_required = int(self.memory) * 1024 * 1024 qmemman_client = QMemmanClient() if not qmemman_client.request_memory(mem_required): qmemman_client.close() raise MemoryError ("ERROR: insufficient memory to start this VM") - xl_cmdline = ['/usr/sbin/xl', 'create', self.conf_file, '-p'] + xl_cmdline = ['sudo', '/usr/sbin/xl', 'create', self.conf_file, '-p'] try: subprocess.check_call(xl_cmdline) From 63dda4de3491fce8fa78e188b1c1e354dd8c6004 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:27:16 +0200 Subject: [PATCH 70/90] dom0: qvm-create: remove obsolete add_to_xen_storage call --- dom0/qvm-tools/qvm-create | 1 - 1 file changed, 1 deletion(-) diff --git a/dom0/qvm-tools/qvm-create b/dom0/qvm-tools/qvm-create index b4479fab..77f246f1 100755 --- a/dom0/qvm-tools/qvm-create +++ b/dom0/qvm-tools/qvm-create @@ -164,7 +164,6 @@ def main(): try: vm.create_on_disk(verbose=options.verbose, source_template=template_vm) - vm.add_to_xen_storage() except (IOError, OSError) as err: print "ERROR: {0}".format(err) From ae52e1829aed3f8469265c56562815a07e2b8fcc Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:29:56 +0200 Subject: [PATCH 71/90] dom0: block-snapshot: enable nullglob to not fail on snapshot/origin remove Normally this should not happen because all domains need at least one snapshot device, but in some rare situations it can be helpful to clean up stale devices. --- common/block-snapshot | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common/block-snapshot b/common/block-snapshot index d8605bd9..bba8ee5c 100755 --- a/common/block-snapshot +++ b/common/block-snapshot @@ -7,6 +7,8 @@ dir=$(dirname "$0") . "$dir/block-common.sh" +shopt -s nullglob + HOTPLUG_STORE="/var/run/xen-hotplug/${XENBUS_PATH//\//-}" get_dev() { dev=$1 From f3b245a45a795afc8fd00d446457216d50777db5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:32:34 +0200 Subject: [PATCH 72/90] dom0: block-snapshot: retrieve domain name from frontend-id only when no "domain" entry in xenstore This enables compatibility with libxl AND xend.
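The hotplug script change below is shell; purely for illustration, the same fallback expressed in Python (backend_domain_name is a hypothetical helper, assuming the xen.lowlevel.xs bindings and the xl binary):

import subprocess
from xen.lowlevel import xs

def backend_domain_name(xs_handle, xenbus_path):
    # xend writes a "domain" entry under the backend path; libxl does not,
    # so fall back to resolving frontend-id through 'xl domname'
    domain = xs_handle.read('', '%s/domain' % xenbus_path)
    if domain:
        return domain
    domid = xs_handle.read('', '%s/frontend-id' % xenbus_path)
    p = subprocess.Popen(['xl', 'domname', domid], stdout=subprocess.PIPE)
    return p.communicate()[0].strip()

Trying the cheap xenstore read first keeps the xend behaviour unchanged and adds the xl lookup only where xend's entry is missing.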
--- common/block-snapshot | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/common/block-snapshot b/common/block-snapshot index bba8ee5c..ad71eeb3 100755 --- a/common/block-snapshot +++ b/common/block-snapshot @@ -131,8 +131,11 @@ case "$command" in write_dev /dev/mapper/$dm_devname fi # Save domain name for template commit on device remove - domid=$(xenstore_read "$XENBUS_PATH/frontend-id") - domain=$(xl domname $domid) + domain=$(xenstore_read_default "$XENBUS_PATH/domain" '') + if [ -z "$domain" ]; then + domid=$(xenstore_read "$XENBUS_PATH/frontend-id") + domain=$(xl domname $domid) + fi echo $domain > "$HOTPLUG_STORE-domain" release_lock "block" From 4ab4783ee23d6e5a8611cc6a9d0936f5e93effa5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:34:17 +0200 Subject: [PATCH 73/90] dom0: block-snapshot: add prepare and cleanup actions "prepare" and "cleanup" actions can be used to setup device manually - not from udev. This is used by qvm-revert-template-changes. --- common/block-snapshot | 71 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 8 deletions(-) diff --git a/common/block-snapshot b/common/block-snapshot index ad71eeb3..719b10aa 100755 --- a/common/block-snapshot +++ b/common/block-snapshot @@ -5,7 +5,12 @@ # This creates dm-snapshot device on given arguments dir=$(dirname "$0") -. "$dir/block-common.sh" +if [ "$1" = "prepare" ] || [ "$1" = "cleanup" ]; then + . "$dir/xen-hotplug-common.sh" + command=$1 +else + . "$dir/block-common.sh" +fi shopt -s nullglob @@ -143,10 +148,58 @@ case "$command" in ;; esac ;; - remove) + prepare) + t=$2 case $t in snapshot|origin) - node=$(cat "$HOTPLUG_STORE-node") + p=$3 + base=${p/:*/} + cow=${p/*:/} + + if [ -L "$base" ]; then + base=$(readlink -f "$base") || fatal "$base link does not exist." + fi + + if [ -L "$cow" ]; then + cow=$(readlink -f "$cow") || fatal "$cow link does not exist." 
+ fi + + # first ensure that snapshot device exists (to write somewhere changes from snapshot-origin) + dm_devname=$(get_dm_snapshot_name "$base" "$cow") + + claim_lock "block" + + # prepare snapshot device + create_dm_snapshot $dm_devname "$base" "$cow" + + if [ "$t" == "snapshot" ]; then + #that's all for snapshot, store name of prepared device + echo "/dev/mapper/$dm_devname" + elif [ "$t" == "origin" ]; then + # for origin - prepare snapshot-origin device and store its name + dm_devname=origin-$(stat -c '%D:%i' "$base") + create_dm_snapshot_origin $dm_devname "$base" + echo "/dev/mapper/$dm_devname" + fi + + release_lock "block" + exit 0 + ;; + esac + ;; + remove|cleanup) + if [ "$command" = "cleanup" ]; then + t=$2 + else + t=$(cat $HOTPLUG_STORE-type) + fi + case $t in + snapshot|origin) + if [ "$command" = "cleanup" ]; then + node=$3 + else + node=$(cat "$HOTPLUG_STORE-node") + fi if [ -z "$node" ]; then fatal "No device node to remove" @@ -186,11 +239,13 @@ case "$command" in dmsetup remove $snap fi done - # Commit template changes - domain=$(cat "$HOTPLUG_STORE-domain") - if [ "$domain" ]; then - # Dont stop on errors - /usr/bin/qvm-template-commit "$domain" || true + if [ "$command" = "remove" ]; then + # Commit template changes + domain=$(cat "$HOTPLUG_STORE-domain") + if [ "$domain" ]; then + # Dont stop on errors + /usr/bin/qvm-template-commit "$domain" || true + fi fi fi From 5cce87c7d2f1dc5b65f147d2920ea5d8826c2492 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:36:20 +0200 Subject: [PATCH 74/90] dom0: Introduce qvm-revert-template-changes tool --- dom0/qvm-tools/qvm-revert-template-changes | 140 +++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100755 dom0/qvm-tools/qvm-revert-template-changes diff --git a/dom0/qvm-tools/qvm-revert-template-changes b/dom0/qvm-tools/qvm-revert-template-changes new file mode 100755 index 00000000..81dfbae7 --- /dev/null +++ b/dom0/qvm-tools/qvm-revert-template-changes @@ -0,0 +1,140 @@ +#!/usr/bin/python2.6 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2011 Marek Marczykowski +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +from qubes.qubes import QubesVmCollection +from qubes.qubes import QubesException +from optparse import OptionParser +import subprocess +import os +import time +import glob + +def main(): + usage = "usage: %prog [options] " + parser = OptionParser (usage) + parser.add_option ("--force", action="store_true", dest="force", default=False, + help="Do not prompt for comfirmation") + + (options, args) = parser.parse_args () + if (len (args) != 1): + parser.error ("You must specify TemplateVM name!") + vmname = args[0] + + if os.geteuid() != 0: + print "ERROR: This tool must be run as root!" 
+ exit(1) + + qvm_collection = QubesVmCollection() + qvm_collection.lock_db_for_reading() + qvm_collection.load() + qvm_collection.unlock_db() + + vm = qvm_collection.get_vm_by_name(vmname) + if vm is None: + print "A VM with the name '{0}' does not exist in the system.".format(vmname) + exit(1) + + if not vm.is_template(): + print "A VM '{0}' is not template.".format(vmname) + exit(1) + + if vm.is_running(): + print "You must stop VM first." + exit(1) + + oldcow_img = vm.rootcow_img + '.old' + oldcow_stat = os.stat(oldcow_img) + oldcow_time_str = time.strftime("%F %T", time.gmtime(oldcow_stat.st_mtime)) + + root_stat = os.stat(vm.root_img) + old_dmdev = "/dev/mapper/snapshot-{0:x}:{1}-{2:x}:{3}".format( + root_stat[2], root_stat[1], + oldcow_stat[2], oldcow_stat[1]) + + snapshots = glob.glob('/dev/mapper/snapshot-{0:x}:{1}-*'.format(root_stat[2], root_stat[1])) + snapshot_present = False + for dev in snapshots: + if dev == old_dmdev: + snapshot_present = True + else: + print "ERROR: You must shutdown all VMs running system older/newer than last good one." + exit(1) + + root_blocks = os.path.getsize(vm.root_img)/512 + if not snapshot_present: + p = subprocess.Popen (["/etc/xen/scripts/block-snapshot", "prepare", + "snapshot", "{0}:{1}".format(vm.root_img, oldcow_img)], + stdout=subprocess.PIPE) + result = p.communicate() + if result[0].strip() != old_dmdev: + print "ERROR: Cannot create snapshot device ({0} != {1})".format( + result[0].strip(), old_dmdev) + exit(1) + + + print "INFO: Reverting template changes done at {0}".format(oldcow_time_str) + if not options.force: + prompt = raw_input ("Do you want to proceed? [y/N] ") + if not (prompt == "y" or prompt == "Y"): + exit (0) + + p = subprocess.Popen(["/sbin/dmsetup", "table", old_dmdev], stdout=subprocess.PIPE) + result = p.communicate() + dm_table = result[0] + dm_table_elements = dm_table.split(' ') + if dm_table_elements[2] != 'snapshot': + print "ERROR: Unexpected device-mapper type ({0}). 
Template changes reverting already running".format(dm_table_elements[2]) + exit(1) + + dm_table_elements[2] = 'snapshot-merge' + dm_table = ' '.join(dm_table_elements) + subprocess.check_call(["/sbin/dmsetup", "reload", old_dmdev, "--table", dm_table]) + # Reload new table into LIVE slot + subprocess.check_call(["/sbin/dmsetup", "suspend", old_dmdev]) + subprocess.check_call(["/sbin/dmsetup", "resume", old_dmdev]) + # Wait to snapshot merge completed + while True: + p = subprocess.Popen(["/sbin/dmsetup", "status", old_dmdev], stdout=subprocess.PIPE) + result = p.communicate() + status_details = result[0].split(' ') + blocks_used = status_details[3].split('/')[0] + if int(blocks_used) == int(status_details[4]): + break + print "\r-> Reverting template changes: {0} of {1} left".format(blocks_used, root_blocks), + time.sleep(1) + print "\r-> Reverting template changes: done".format(blocks_used, root_blocks) + + dm_table_elements[2] = 'snapshot' + dm_table = ' '.join(dm_table_elements) + subprocess.check_call(["/sbin/dmsetup", "reload", old_dmdev, "--table", dm_table]) + # Reload new table into LIVE slot + subprocess.check_call(["/sbin/dmsetup", "suspend", old_dmdev]) + subprocess.check_call(["/sbin/dmsetup", "resume", old_dmdev]) + + subprocess.check_call(["/etc/xen/scripts/block-snapshot", "cleanup", + "snapshot", old_dmdev]) + + os.rename(oldcow_img, vm.rootcow_img) + exit(0) + + +main() From 4cb5838f5b939d568fe98ac2d05f9135721399c5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 18:44:53 +0200 Subject: [PATCH 75/90] dom0: qvm-revert-template-changes message fix --- dom0/qvm-tools/qvm-revert-template-changes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qvm-tools/qvm-revert-template-changes b/dom0/qvm-tools/qvm-revert-template-changes index 81dfbae7..7ba7a697 100755 --- a/dom0/qvm-tools/qvm-revert-template-changes +++ b/dom0/qvm-tools/qvm-revert-template-changes @@ -121,7 +121,7 @@ def main(): break print "\r-> Reverting template changes: {0} of {1} left".format(blocks_used, root_blocks), time.sleep(1) - print "\r-> Reverting template changes: done".format(blocks_used, root_blocks) + print "\r-> Reverting template changes: done ".format(blocks_used, root_blocks) From 7ced90832b83f59d853ced79c8424ce34999109b Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Fri, 10 Jun 2011 19:08:47 +0200 Subject: [PATCH 76/90] dom0: Support for pcidevs in qvm-prefs Can be used to e.g. have two NetVMs, each with one network interface assigned. --- dom0/qvm-tools/qvm-prefs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/dom0/qvm-tools/qvm-prefs b/dom0/qvm-tools/qvm-prefs index ecc5f4cc..c190cbb0 100755 --- a/dom0/qvm-tools/qvm-prefs +++ b/dom0/qvm-tools/qvm-prefs @@ -40,6 +40,7 @@ def do_list(vm): print fmt.format ("installed by RPM?", vm.installed_by_rpm) print fmt.format ("dir", vm.dir_path) print fmt.format ("config", vm.conf_file) + print fmt.format ("pcidevs", vm.pcidevs) if not vm.is_appvm(): print fmt.format ("root img", vm.root_img) if vm.is_template(): @@ -79,6 +80,12 @@ def set_maxmem(vms, vm, args): vm.maxmem = int(args[0]) +def set_pcidevs(vms, vm, args): + if len (args) != 1: + print "Missing memory argument!" + + vm.pcidevs = args[0] + def set_netvm(vms, vm, args): if len (args) != 1: print "Missing netvm name argument!"
@@ -164,6 +171,7 @@ def set_nonupdateable(vms, vm, args): properties = { "updateable": set_updateable, "nonupdateable": set_nonupdateable, + "pcidevs": set_pcidevs, "label" : set_label, "netvm" : set_netvm, "maxmem" : set_maxmem, From 454b678284352916ad933b9c2407cc4505f94fff Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 11 Jun 2011 20:44:26 +0200 Subject: [PATCH 77/90] dom0: cpu load calculation when VM rebooted fix --- dom0/qvm-core/qubes.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index e49928d1..8f333b4d 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -136,6 +136,9 @@ class QubesHost(object): current[vm['domid']]['cpu_usage'] = \ float(current[vm['domid']]['cpu_time'] - previous[vm['domid']]['cpu_time']) \ / long(1000**3) / (current_time-previous_time) * 100 + if current[vm['domid']]['cpu_usage'] < 0: + # VM has been rebooted + current[vm['domid']]['cpu_usage'] = 0 else: current[vm['domid']]['cpu_usage'] = 0 From 57144107243030a79f9c1c6b16663ffba4f494c1 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 11 Jun 2011 22:55:53 +0200 Subject: [PATCH 78/90] dom0: qvm-sync-appmenus: create appmenus dir if needed --- dom0/qvm-tools/qvm-sync-appmenus | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index 58e3af7a..5e2a2946 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -154,7 +154,8 @@ def main(): print "ERROR: No appmenus received, terminating" exit(1) - assert os.path.exists(vm.appmenus_templates_dir) + if not os.path.exists(vm.appmenus_templates_dir): + os.mkdir(vm.appmenus_templates_dir) # Create new/update existing templates if options.verbose: From 9375b8d6ff816ff38e9e4a21caab16cd7d0815e9 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 11 Jun 2011 22:58:00 +0200 Subject: [PATCH 79/90] dom0: qvm-sync-appmenus: add missing object name to vars --- dom0/qvm-tools/qvm-sync-appmenus | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index 5e2a2946..c444bbb8 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -162,23 +162,23 @@ def main(): print "--> Got {0} appmenus, storing to disk".format(str(len(new_appmenus))) for appmenu_file in new_appmenus.keys(): if options.verbose: - if os.path.exists(appmenus_templates_dir + '/' + appmenu_file): + if os.path.exists(vm.appmenus_templates_dir + '/' + appmenu_file): print "---> Updating {0}".format(appmenu_file) else: print "---> Creating {0}".format(appmenu_file) - create_template(appmenus_templates_dir + '/' + appmenu_file, new_appmenus[appmenu_file]) + create_template(vm.appmenus_templates_dir + '/' + appmenu_file, new_appmenus[appmenu_file]) # Delete appmenus of remove applications if options.verbose: print "--> Cleaning old files" - for appmenu_file in os.listdir(appmenus_templates_dir): + for appmenu_file in os.listdir(vm.appmenus_templates_dir): if not fnmatch.fnmatch(appmenu_file, '*.desktop'): continue if not new_appmenus.has_key(appmenu_file): if options.verbose: print "---> Removing {0}".format(appmenu_file) - os.unlink(appmenus_templates_dir + '/' + appmenu_file) + os.unlink(vm.appmenus_templates_dir + '/' + appmenu_file) main() From a4d1a21b4678d531d8d112ff93bf4ad5659a6f9a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sat, 11 Jun 2011 23:07:29 +0200 Subject: [PATCH 
80/90] dom0: qvm-sync-appmenus - copy *directory.template when needed --- dom0/misc/qubes-templatevm.directory.template | 5 +++++ dom0/misc/qubes-vm.directory.template | 5 +++++ dom0/qvm-tools/qvm-sync-appmenus | 11 +++++------ rpm_spec/core-dom0.spec | 4 ++++ 4 files changed, 19 insertions(+), 6 deletions(-) create mode 100644 dom0/misc/qubes-templatevm.directory.template create mode 100644 dom0/misc/qubes-vm.directory.template diff --git a/dom0/misc/qubes-templatevm.directory.template b/dom0/misc/qubes-templatevm.directory.template new file mode 100644 index 00000000..596fffaf --- /dev/null +++ b/dom0/misc/qubes-templatevm.directory.template @@ -0,0 +1,5 @@ +[Desktop Entry] +Encoding=UTF-8 +Type=Directory +Name=Template: %VMNAME% +Icon=/usr/share/qubes/icons/template.png diff --git a/dom0/misc/qubes-vm.directory.template b/dom0/misc/qubes-vm.directory.template new file mode 100644 index 00000000..6825d200 --- /dev/null +++ b/dom0/misc/qubes-vm.directory.template @@ -0,0 +1,5 @@ +[Desktop Entry] +Encoding=UTF-8 +Type=Directory +Name=Domain: %VMNAME% +Icon=%VMDIR%/icon.png diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index c444bbb8..da5f5c97 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -25,6 +25,7 @@ import re import os import sys import fnmatch +import shutil from optparse import OptionParser from qubes.qubes import QubesVmCollection,QubesException from qubes.qubes import qrexec_client_path @@ -156,6 +157,10 @@ def main(): if not os.path.exists(vm.appmenus_templates_dir): os.mkdir(vm.appmenus_templates_dir) + if vm.is_template(): + shutil.copy('/usr/share/qubes/qubes-templatevm.directory.template', vm.appmenus_templates_dir) + else: + shutil.copy('/usr/share/qubes/qubes-vm.directory.template', vm.appmenus_templates_dir) # Create new/update existing templates if options.verbose: @@ -181,9 +186,3 @@ def main(): os.unlink(vm.appmenus_templates_dir + '/' + appmenu_file) main() - - - - - - diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 5223a3ca..5b6e1519 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -111,6 +111,8 @@ mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/dvmdata mkdir -p $RPM_BUILD_ROOT/usr/share/qubes/icons cp icons/*.png $RPM_BUILD_ROOT/usr/share/qubes/icons +cp misc/qubes-vm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ +cp misc/qubes-templatevm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ mkdir -p $RPM_BUILD_ROOT/usr/bin cp ../common/qubes_setup_dnat_to_ns $RPM_BUILD_ROOT/usr/lib/qubes @@ -271,6 +273,8 @@ fi %attr(770,root,qubes) %dir /var/lib/qubes/backup %attr(770,root,qubes) %dir /var/lib/qubes/dvmdata %dir /usr/share/qubes/icons/*.png +/usr/share/qubes/qubes-vm.directory.template +/usr/share/qubes/qubes-templatevm.directory.template /usr/lib/qubes/qubes_setup_dnat_to_ns /usr/lib/qubes/qubes_fix_nm_conf.sh /etc/dhclient.d/qubes_setup_dnat_to_ns.sh From 4634a6897c4257c275087913d1af1cb6c26cb861 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 12 Jun 2011 00:47:01 +0200 Subject: [PATCH 81/90] dom0: qvm-sync-appmenus: support for calling by qrexec_client --- dom0/qvm-tools/qvm-sync-appmenus | 36 ++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index da5f5c97..c3014903 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -44,12 +44,15 @@ fields_regexp = { } def get_appmenus(xid): - p = 
subprocess.Popen ([qrexec_client_path, '-d', str(xid), - 'user:grep -H = /usr/share/applications/*.desktop'], stdout=subprocess.PIPE) - untrusted_appmenulist = p.communicate()[0].split('\n') - if p.returncode != 0: - raise QubesException("Error getting application list") - + untrusted_appmenulist = [] + if xid == -1: + untrusted_appmenulist = sys.stdin.readlines() + else: + p = subprocess.Popen ([qrexec_client_path, '-d', str(xid), + 'user:grep -H = /usr/share/applications/*.desktop'], stdout=subprocess.PIPE) + untrusted_appmenulist = p.communicate()[0].split('\n') + if p.returncode != 0: + raise QubesException("Error getting application list") row_no = 0 appmenus = {} @@ -113,7 +116,7 @@ def create_template(path, values): def main(): - + env_vmname = os.environ.get("QREXEC_REMOTE_DOMAIN") usage = "usage: %prog [options] \n"\ "Updates desktop file templates for given StandaloneVM or TemplateVM" @@ -121,10 +124,13 @@ def main(): parser.add_option ("-v", "--verbose", action="store_true", dest="verbose", default=False) (options, args) = parser.parse_args () - if (len (args) != 1): + if (len (args) != 1) and env_vmname is None: parser.error ("You must specify at least the VM name!") - vmname=args[0] + if env_vmname: + vmname=env_vmname + else: + vmname=args[0] qvm_collection = QubesVmCollection() qvm_collection.lock_db_for_reading() @@ -145,11 +151,15 @@ def main(): print "ERROR: Appmenus can be retrieved only from running VM - start it first" exit(1) - # Get appmenus from VM - xid = vm.get_xid() - assert xid > 0 + new_appmenus = {} + if env_vmname is None: + # Get appmenus from VM + xid = vm.get_xid() + assert xid > 0 - new_appmenus = get_appmenus(xid) + new_appmenus = get_appmenus(xid) + else: + new_appmenus = get_appmenus(-1) if len(new_appmenus) == 0: print "ERROR: No appmenus received, terminating" From 83d211836ad5ab8773a67a9876182dee318ac389 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 12 Jun 2011 00:50:39 +0200 Subject: [PATCH 82/90] dom0+vm: Trigger appmenus sync after yum transaction (#45), NEW QREXEC COMMAND After a yum transaction (install/upgrade/remove), yum-plugin-post-transaction-actions will execute a script which triggers qvm-sync-appmenus in dom0 (through qrexec), as sketched below.
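For illustration, the whole trigger path can be exercised by hand from an updateable VM (a sketch using exactly the pipe path and SYNC keyword introduced below; it is not part of the patch itself):

    # what the yum post-transaction action ends up running inside the VM:
    if [ "$(/usr/bin/xenstore-read qubes_vm_updateable)" = "True" ]; then
        # qrexec_agent maps the literal SYNC keyword to QREXEC_EXECUTE_APPMENUS_SYNC,
        # and the dom0 daemon then runs /usr/bin/qvm-sync-appmenus for this VM
        echo -n SYNC > /var/run/qubes/qrexec_agent
    fi
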
THIS INTRODUCES A NEW PREDEFINED COMMAND IN QREXEC --- common/qubes_trigger_sync_appmenus.action | 1 + common/qubes_trigger_sync_appmenus.sh | 7 +++++++ qrexec/qrexec.h | 3 ++- qrexec/qrexec_agent.c | 3 +++ qrexec/qrexec_daemon.c | 7 ++++++- rpm_spec/core-commonvm.spec | 7 +++++++ 6 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 common/qubes_trigger_sync_appmenus.action create mode 100755 common/qubes_trigger_sync_appmenus.sh diff --git a/common/qubes_trigger_sync_appmenus.action b/common/qubes_trigger_sync_appmenus.action new file mode 100644 index 00000000..ad56a8f2 --- /dev/null +++ b/common/qubes_trigger_sync_appmenus.action @@ -0,0 +1 @@ +*:any:/usr/lib/qubes/qubes_trigger_sync_appmenus.sh diff --git a/common/qubes_trigger_sync_appmenus.sh b/common/qubes_trigger_sync_appmenus.sh new file mode 100755 index 00000000..fc5301a4 --- /dev/null +++ b/common/qubes_trigger_sync_appmenus.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +UPDATEABLE=`/usr/bin/xenstore-read qubes_vm_updateable` + +if [ "$UPDATEABLE" = "True" ]; then + echo -n SYNC > /var/run/qubes/qrexec_agent +fi diff --git a/qrexec/qrexec.h b/qrexec/qrexec.h index d0769366..bd996c48 100644 --- a/qrexec/qrexec.h +++ b/qrexec/qrexec.h @@ -51,7 +51,8 @@ enum { enum { QREXEC_EXECUTE_FILE_COPY=0x700, - QREXEC_EXECUTE_FILE_COPY_FOR_DISPVM + QREXEC_EXECUTE_FILE_COPY_FOR_DISPVM, + QREXEC_EXECUTE_APPMENUS_SYNC }; struct server_header { diff --git a/qrexec/qrexec_agent.c b/qrexec/qrexec_agent.c index 0e244678..06a89103 100644 --- a/qrexec/qrexec_agent.c +++ b/qrexec/qrexec_agent.c @@ -482,6 +482,9 @@ void handle_trigger_io() else if (!strcmp(buf, "DVMR")) s_hdr.client_id = QREXEC_EXECUTE_FILE_COPY_FOR_DISPVM; + else if (!strcmp(buf, "SYNC")) + s_hdr.client_id = + QREXEC_EXECUTE_APPMENUS_SYNC; if (s_hdr.client_id) { s_hdr.type = MSG_AGENT_TO_SERVER_TRIGGER_EXEC; write_all_vchan_ext(&s_hdr, sizeof s_hdr); diff --git a/qrexec/qrexec_daemon.c b/qrexec/qrexec_daemon.c index 22a163e1..dafce73c 100644 --- a/qrexec/qrexec_daemon.c +++ b/qrexec/qrexec_daemon.c @@ -361,6 +361,10 @@ void handle_execute_predefined_command(int req) rcmd = "directly:user:/usr/lib/qubes/qfile-agent-dvm"; lcmd = "/usr/lib/qubes/qfile-daemon-dvm"; break; + case QREXEC_EXECUTE_APPMENUS_SYNC: + rcmd = "user:grep -H = /usr/share/applications/*.desktop"; + lcmd = "/usr/bin/qvm-sync-appmenus"; + break; default: /* cannot happen, already sanitized */ fprintf(stderr, "got trigger exec no %d\n", req); exit(1); @@ -402,7 +406,8 @@ void sanitize_message_from_agent(struct server_header *untrusted_header) case MSG_AGENT_TO_SERVER_TRIGGER_EXEC: untrusted_cmd = untrusted_header->client_id; if (untrusted_cmd != QREXEC_EXECUTE_FILE_COPY && - untrusted_cmd != QREXEC_EXECUTE_FILE_COPY_FOR_DISPVM) { + untrusted_cmd != QREXEC_EXECUTE_FILE_COPY_FOR_DISPVM && + untrusted_cmd != QREXEC_EXECUTE_APPMENUS_SYNC) { fprintf(stderr, "received MSG_AGENT_TO_SERVER_TRIGGER_EXEC cmd %d ?\n", untrusted_cmd); diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec index 74b7a5e8..993b277f 100644 --- a/rpm_spec/core-commonvm.spec +++ b/rpm_spec/core-commonvm.spec @@ -33,6 +33,7 @@ License: GPL URL: http://www.qubes-os.org Requires: /usr/bin/xenstore-read Requires: fedora-release +Requires: yum-plugin-post-transaction-actions BuildRequires: xen-devel %define _builddir %(pwd)/common @@ -78,6 +79,10 @@ mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d cp qubes_network.rules $RPM_BUILD_ROOT/etc/udev/rules.d/ mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes/ cp setup_ip $RPM_BUILD_ROOT/usr/lib/qubes/ +mkdir -p
$RPM_BUILD_ROOT/etc/yum/post-actions +cp qubes_trigger_sync_appmenus.action $RPM_BUILD_ROOT/etc/yum/post-actions/ +mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes +cp qubes_trigger_sync_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes/ %triggerin -- initscripts cp /var/lib/qubes/serial.conf /etc/init/serial.conf @@ -226,3 +231,5 @@ rm -rf $RPM_BUILD_ROOT /usr/bin/xenstore-watch-qubes /etc/udev/rules.d/qubes_network.rules /usr/lib/qubes/setup_ip +/etc/yum/post-actions/qubes_trigger_sync_appmenus.action +/usr/lib/qubes/qubes_trigger_sync_appmenus.sh From b75f89038b1cad6d5ecec855fa371d6fde6dfdc0 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 12 Jun 2011 01:01:20 +0200 Subject: [PATCH 83/90] dom0: qvm-sync-appmenus output error messages to stderr --- dom0/qvm-tools/qvm-sync-appmenus | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/dom0/qvm-tools/qvm-sync-appmenus b/dom0/qvm-tools/qvm-sync-appmenus index c3014903..daa70a79 100755 --- a/dom0/qvm-tools/qvm-sync-appmenus +++ b/dom0/qvm-tools/qvm-sync-appmenus @@ -79,10 +79,10 @@ def get_appmenus(xid): appmenus[filename][key]=value else: - print "Warning: ignoring key %s: %s" % (untrusted_key, untrusted_value) + print >>sys.stderr, "Warning: ignoring key %s: %s" % (untrusted_key, untrusted_value) # else: ignore this key else: - print "Warning: ignoring line: %s" % (untrusted_line); + print >>sys.stderr, "Warning: ignoring line: %s" % (untrusted_line); return appmenus @@ -92,7 +92,7 @@ def create_template(path, values): # check if all required fields are present for key in required_fields: if not values.has_key(key): - print "Warning: not creating/updating '%s' because of missing '%s' key" % (path, key) + print >>sys.stderr, "Warning: not creating/updating '%s' because of missing '%s' key" % (path, key) return desktop_file = open(path, "w") @@ -140,15 +140,15 @@ def main(): vm = qvm_collection.get_vm_by_name(vmname) if vm is None: - print "ERROR: A VM with the name '{0}' does not exist in the system.".format(vmname) + print >>sys.stderr, "ERROR: A VM with the name '{0}' does not exist in the system.".format(vmname) exit(1) if not vm.is_updateable(): - print "ERROR: To sync appmenus for non-updateable VM, do it on template instead" + print >>sys.stderr, "ERROR: To sync appmenus for non-updateable VM, do it on template instead" exit(1) if not vm.is_running(): - print "ERROR: Appmenus can be retrieved only from running VM - start it first" + print >>sys.stderr, "ERROR: Appmenus can be retrieved only from running VM - start it first" exit(1) new_appmenus = {} @@ -159,10 +159,11 @@ def main(): new_appmenus = get_appmenus(xid) else: + options.verbose = False new_appmenus = get_appmenus(-1) if len(new_appmenus) == 0: - print "ERROR: No appmenus received, terminating" + print >>sys.stderr, "ERROR: No appmenus received, terminating" exit(1) if not os.path.exists(vm.appmenus_templates_dir): From 6d9fdf4729a723117151bbee5b782e1dc30474b5 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 12 Jun 2011 01:29:35 +0200 Subject: [PATCH 84/90] dom0: Add shortcut qubes-appmenu-select ("Add more shortcuts...") for each VM (#45) --- dom0/aux-tools/create_apps_for_appvm.sh | 1 + dom0/misc/qubes-appmenu-select.template | 10 ++++++++++ rpm_spec/core-dom0.spec | 2 ++ 3 files changed, 13 insertions(+) create mode 100644 dom0/misc/qubes-appmenu-select.template diff --git a/dom0/aux-tools/create_apps_for_appvm.sh b/dom0/aux-tools/create_apps_for_appvm.sh index 8b01596d..c727d534 100755 --- a/dom0/aux-tools/create_apps_for_appvm.sh +++ 
b/dom0/aux-tools/create_apps_for_appvm.sh @@ -42,6 +42,7 @@ if [ "$SRCDIR" != "none" ]; then else find $SRCDIR -name "*.desktop" $CHECK_WHITELISTED -exec /usr/lib/qubes/convert_apptemplate2vm.sh {} $APPSDIR $VMNAME $VMDIR \; fi + /usr/lib/qubes/convert_apptemplate2vm.sh /usr/share/qubes/qubes-appmenu-select.template $APPSDIR $VMNAME $VMDIR /usr/lib/qubes/convert_dirtemplate2vm.sh $SRCDIR/qubes-*.directory.template $APPSDIR/$VMNAME-vm.directory $VMNAME $VMDIR fi diff --git a/dom0/misc/qubes-appmenu-select.template b/dom0/misc/qubes-appmenu-select.template new file mode 100644 index 00000000..2275f779 --- /dev/null +++ b/dom0/misc/qubes-appmenu-select.template @@ -0,0 +1,10 @@ +[Desktop Entry] +Version=1.0 +Type=Application +Exec=qubes-appmenu-select %VMNAME% +Icon=/usr/share/qubes/icons/qubes.png +Terminal=false +Name=%VMNAME%: Add more shortcuts... +GenericName=%VMNAME%: Add more shortcuts... +StartupNotify=false +Categories=System; diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 5b6e1519..47252bec 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -113,6 +113,7 @@ mkdir -p $RPM_BUILD_ROOT/usr/share/qubes/icons cp icons/*.png $RPM_BUILD_ROOT/usr/share/qubes/icons cp misc/qubes-vm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ cp misc/qubes-templatevm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ +cp misc/qubes-appmenu-select.template $RPM_BUILD_ROOT/usr/share/qubes/ mkdir -p $RPM_BUILD_ROOT/usr/bin cp ../common/qubes_setup_dnat_to_ns $RPM_BUILD_ROOT/usr/lib/qubes @@ -275,6 +276,7 @@ fi %dir /usr/share/qubes/icons/*.png /usr/share/qubes/qubes-vm.directory.template /usr/share/qubes/qubes-templatevm.directory.template +/usr/share/qubes/qubes-appmenu-select.template /usr/lib/qubes/qubes_setup_dnat_to_ns /usr/lib/qubes/qubes_fix_nm_conf.sh /etc/dhclient.d/qubes_setup_dnat_to_ns.sh From b2a0a091686bdccd9580ab6f902b83d05a977004 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Sun, 12 Jun 2011 02:27:30 +0200 Subject: [PATCH 85/90] version 1.6.1 --- version_dom0 | 2 +- version_vm | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/version_dom0 b/version_dom0 index dc1e644a..9c6d6293 100644 --- a/version_dom0 +++ b/version_dom0 @@ -1 +1 @@ -1.6.0 +1.6.1 diff --git a/version_vm b/version_vm index dc1e644a..9c6d6293 100644 --- a/version_vm +++ b/version_vm @@ -1 +1 @@ -1.6.0 +1.6.1 From d9d7a69c273f72bff242559ba18a35bd6c99d5ba Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Wed, 22 Jun 2011 00:44:48 +0200 Subject: [PATCH 86/90] dom0+vm: Tools for downloading dom0 updates by VM (#198) Four main parts: - scripts for providing the rpmdb and yum repos to the VM (chosen by qvm-set-updatevm) - VM script for downloading updates (qubes_download_dom0_updates.sh) - qfile-dom0-unpacker, which receives the updates, checks their signatures and places them in the dom0 local yum repo - qvm-dom0-upgrade, which calls all of the above and finally runs gpk-update-viewer Besides qvm-dom0-upgrade, updates are checked every 6h and the user is prompted whether to download them. On the dom0 side, gpk-update-icon (disabled for now) should notice the new updates in the "local" repo. The overall flow is sketched below.
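For orientation, the intended end-to-end flow can be written as a rough shell sketch from dom0's point of view (the command names are the ones added by this patch, but the exact invocations here are illustrative rather than verbatim):

    # 1. dom0: share the rpmdb and yum repo definitions with the chosen UpdateVM
    /usr/lib/qubes/sync_rpmdb_updatevm.sh
    # 2. UpdateVM: check for updates, download them, send them back over qrexec
    qvm-run -u root --pass_io "$(qvm-get-updatevm)" \
        "/usr/lib/qubes/qubes_download_dom0_updates.sh --doit"
    # 3. dom0: qfile-daemon accepts the @dom0updates transfer, qfile-dom0-unpacker
    #    drops the files into /var/lib/qubes/updates/rpm, signatures are checked
    #    with 'rpm -K', and the local repo metadata is rebuilt so yum can see them:
    /usr/bin/createrepo -q /var/lib/qubes/updates
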
--- appvm/Makefile | 4 +- {appvm => common}/copy_file.c | 0 {appvm => common}/crc32.c | 0 {appvm => common}/crc32.h | 0 {appvm => common}/filecopy.h | 0 common/qubes_download_dom0_updates.sh | 51 +++++++++++++ {appvm => common}/unpack.c | 0 dom0/aux-tools/.gitignore | 1 + dom0/aux-tools/Makefile | 4 + dom0/aux-tools/qfile-dom0-unpacker.c | 86 ++++++++++++++++++++++ dom0/aux-tools/sync_rpmdb_updatevm.sh | 10 +++ dom0/misc/qubes_sync_rpmdb_updatevm.action | 1 + dom0/qubes-cached.repo | 5 ++ dom0/qvm-core/qubes.py | 31 +++++++- dom0/qvm-tools/qvm-dom0-upgrade | 20 +++++ dom0/qvm-tools/qvm-get-updatevm | 39 ++++++++++ dom0/qvm-tools/qvm-set-updatevm | 46 ++++++++++++ dom0/restore/qfile-daemon | 40 ++++++++++ rpm_spec/core-commonvm.spec | 3 + rpm_spec/core-dom0.spec | 20 +++++ 20 files changed, 358 insertions(+), 3 deletions(-) rename {appvm => common}/copy_file.c (100%) rename {appvm => common}/crc32.c (100%) rename {appvm => common}/crc32.h (100%) rename {appvm => common}/filecopy.h (100%) create mode 100755 common/qubes_download_dom0_updates.sh rename {appvm => common}/unpack.c (100%) create mode 100644 dom0/aux-tools/.gitignore create mode 100644 dom0/aux-tools/Makefile create mode 100644 dom0/aux-tools/qfile-dom0-unpacker.c create mode 100755 dom0/aux-tools/sync_rpmdb_updatevm.sh create mode 100644 dom0/misc/qubes_sync_rpmdb_updatevm.action create mode 100644 dom0/qubes-cached.repo create mode 100755 dom0/qvm-tools/qvm-dom0-upgrade create mode 100755 dom0/qvm-tools/qvm-get-updatevm create mode 100755 dom0/qvm-tools/qvm-set-updatevm diff --git a/appvm/Makefile b/appvm/Makefile index df9989e2..d1e1040f 100644 --- a/appvm/Makefile +++ b/appvm/Makefile @@ -5,9 +5,9 @@ dvm_file_editor: dvm_file_editor.o ../common/ioall.o $(CC) -pie -g -o $@ $^ qfile-agent-dvm: qfile-agent-dvm.o ../common/ioall.o ../common/gui-fatal.o $(CC) -pie -g -o $@ $^ -qfile-agent: qfile-agent.o ../common/ioall.o ../common/gui-fatal.o copy_file.o crc32.o +qfile-agent: qfile-agent.o ../common/ioall.o ../common/gui-fatal.o ../common/copy_file.o ../common/crc32.o $(CC) -pie -g -o $@ $^ -qfile-unpacker: qfile-unpacker.o ../common/ioall.o ../common/gui-fatal.o copy_file.o unpack.o crc32.o +qfile-unpacker: qfile-unpacker.o ../common/ioall.o ../common/gui-fatal.o ../common/copy_file.o ../common/unpack.o ../common/crc32.o $(CC) -pie -g -o $@ $^ clean: diff --git a/appvm/copy_file.c b/common/copy_file.c similarity index 100% rename from appvm/copy_file.c rename to common/copy_file.c diff --git a/appvm/crc32.c b/common/crc32.c similarity index 100% rename from appvm/crc32.c rename to common/crc32.c diff --git a/appvm/crc32.h b/common/crc32.h similarity index 100% rename from appvm/crc32.h rename to common/crc32.h diff --git a/appvm/filecopy.h b/common/filecopy.h similarity index 100% rename from appvm/filecopy.h rename to common/filecopy.h diff --git a/common/qubes_download_dom0_updates.sh b/common/qubes_download_dom0_updates.sh new file mode 100755 index 00000000..488eecb7 --- /dev/null +++ b/common/qubes_download_dom0_updates.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +DOM0_UPDATES_DIR=/var/lib/qubes/dom0-updates + +DOIT=0 +GUI=1 +while [ -n "$1" ]; do + if [ "x--doit" = "x$1" ]; then + DOIT=1 + elif [ "x--nogui" = "x$1" ]; then + GUI=0 + fi + shift +done + +if ! [ -d "$DOM0_UPDATES_DIR" ]; then + echo "Dom0 updates dir does not exist: $DOM0_UPDATES_DIR" + exit 1 +fi + +mkdir -p $DOM0_UPDATES_DIR/etc +cp /etc/yum.conf $DOM0_UPDATES_DIR/etc/ + +echo "Checking for updates..."
+PKGLIST=`yum --installroot $DOM0_UPDATES_DIR check-update -q | cut -f 1 -d ' '` + +if [ -z "$PKGLIST" ]; then + # No new updates + exit 0 +fi + +if [ "$DOIT" != "1" ]; then + zenity --question --title="Qubes Dom0 updates" \ + --text="Updates for dom0 available. Do you want to download them now?" || exit 0 +fi + +mkdir -p "$DOM0_UPDATES_DIR/packages" + +set -e + +if [ "$GUI" = 1 ]; then + ( echo "1" + yumdownloader --destdir "$DOM0_UPDATES_DIR/packages" --installroot "$DOM0_UPDATES_DIR" $PKGLIST + echo 100 ) | zenity --progress --pulsate --auto-close --auto-kill \ + --text="Downloading updates for Dom0, please wait..." --title="Qubes Dom0 updates" +else + yumdownloader --destdir "$DOM0_UPDATES_DIR/packages" --installroot "$DOM0_UPDATES_DIR" $PKGLIST +fi + +# qvm-copy-to-vm works only from user +su -c "qvm-copy-to-vm @dom0updates $DOM0_UPDATES_DIR/packages/*.rpm" user diff --git a/appvm/unpack.c b/common/unpack.c similarity index 100% rename from appvm/unpack.c rename to common/unpack.c diff --git a/dom0/aux-tools/.gitignore b/dom0/aux-tools/.gitignore new file mode 100644 index 00000000..bc50fb19 --- /dev/null +++ b/dom0/aux-tools/.gitignore @@ -0,0 +1 @@ +qfile-dom0-unpacker diff --git a/dom0/aux-tools/Makefile b/dom0/aux-tools/Makefile new file mode 100644 index 00000000..6e2b40d1 --- /dev/null +++ b/dom0/aux-tools/Makefile @@ -0,0 +1,4 @@ +CC=gcc +CFLAGS=-g -Wall -I../../common -fPIC -pie +qfile-dom0-unpacker: qfile-dom0-unpacker.o ../../common/ioall.o ../../common/gui-fatal.o ../../common/copy_file.o ../../common/unpack.o ../../common/crc32.o + $(CC) -pie -g -o $@ $^ diff --git a/dom0/aux-tools/qfile-dom0-unpacker.c b/dom0/aux-tools/qfile-dom0-unpacker.c new file mode 100644 index 00000000..8ad8261a --- /dev/null +++ b/dom0/aux-tools/qfile-dom0-unpacker.c @@ -0,0 +1,86 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "filecopy.h" +int prepare_creds_return_uid(char *username) +{ + struct passwd *pwd; + pwd = getpwnam(username); + if (!pwd) { + perror("getpwnam"); + exit(1); + } + setenv("HOME", pwd->pw_dir, 1); + setenv("USER", username, 1); + setgid(pwd->pw_gid); + initgroups(username, pwd->pw_gid); + setfsuid(pwd->pw_uid); + return pwd->pw_uid; +} + +void wait_for_child(int statusfd) +{ + int status; + if (read(statusfd, &status, sizeof status)!=sizeof status) + gui_fatal("File copy error: Internal error reading status from unpacker"); + errno = status; + switch (status) { + case LEGAL_EOF: break; + case 0: gui_fatal("File copy: Connection terminated unexpectedly"); break; + case EINVAL: gui_fatal("File copy: Corrupted data from packer"); break; + case EEXIST: gui_fatal("File copy: not overwriting existing file.
Clean ~/incoming, and retry copy"); break; + default: gui_fatal("File copy"); + } +} + +extern void do_unpack(int); + +int main(int argc, char ** argv) +{ + char *incoming_dir; + int pipefds[2]; + int uid; + + if (argc < 3) { + fprintf(stderr, "Invalid parameters, usage: %s user dir\n", argv[0]); + exit(1); + } + + pipe(pipefds); + + uid = prepare_creds_return_uid(argv[1]); + + incoming_dir = argv[2]; + mkdir(incoming_dir, 0700); + if (chdir(incoming_dir)) + gui_fatal("Error chdir to %s", incoming_dir); + switch (fork()) { + case -1: + perror("fork"); + exit(1); + case 0: + if (chroot(incoming_dir)) //impossible + gui_fatal("Error chroot to %s", incoming_dir); + setuid(uid); + close(pipefds[0]); + do_unpack(pipefds[1]); + exit(0); + default:; + } + + setuid(uid); + close(pipefds[1]); + wait_for_child(pipefds[0]); + + return 0; +} diff --git a/dom0/aux-tools/sync_rpmdb_updatevm.sh b/dom0/aux-tools/sync_rpmdb_updatevm.sh new file mode 100755 index 00000000..19c98214 --- /dev/null +++ b/dom0/aux-tools/sync_rpmdb_updatevm.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +UPDATEVM=`qvm-get-updatevm` + +if [ -n "$UPDATEVM" ]; then + qvm-run -u root --pass_io --localcmd='tar c /var/lib/rpm /etc/yum.repos.d' "$UPDATEVM" 'tar x -C /var/lib/qubes/dom0-updates' +fi + +# Ignore errors (eg VM not running) +exit 0 diff --git a/dom0/misc/qubes_sync_rpmdb_updatevm.action b/dom0/misc/qubes_sync_rpmdb_updatevm.action new file mode 100644 index 00000000..d56c2af3 --- /dev/null +++ b/dom0/misc/qubes_sync_rpmdb_updatevm.action @@ -0,0 +1 @@ +*:any:/usr/lib/qubes/sync_rpmdb_updatevm.sh diff --git a/dom0/qubes-cached.repo b/dom0/qubes-cached.repo new file mode 100644 index 00000000..948d4876 --- /dev/null +++ b/dom0/qubes-cached.repo @@ -0,0 +1,5 @@ +[qubes-dom0-cached] +name = Qubes OS Repository for Dom0 +baseurl = file:///var/lib/qubes/updates +gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-qubes-1-primary +gpgcheck = 1 diff --git a/dom0/qvm-core/qubes.py b/dom0/qvm-core/qubes.py index 8f333b4d..36fdafaf 100755 --- a/dom0/qvm-core/qubes.py +++ b/dom0/qvm-core/qubes.py @@ -74,6 +74,8 @@ default_firewall_conf_file = "firewall.xml" default_memory = 400 default_servicevm_vcpus = 1 +dom0_update_check_interval = 6*3600 + # do not allow to start a new AppVM if Dom0 mem was to be less than this dom0_min_memory = 700*1024*1024 @@ -937,6 +939,13 @@ class QubesVm(object): print "--> Preparing config template for DispVM" self.create_config_file(file_path = self.dir_path + '/dvm.conf', prepare_dvm = True) + if qvm_collection.updatevm_qid == self.qid: + # Sync RPMDB + subprocess.call(["/usr/lib/qubes/sync_rpmdb_updatevm.sh"]) + # Start polling + subprocess.call([qrexec_client_path, '-d', xid, '-e', + "while true; do sleep %d; /usr/lib/qubes/qubes_download_dom0_updates.sh; done" % dom0_update_check_interval]) + # perhaps we should move it before unpause and fork? 
# FIXME: this uses obsolete xm api if debug_console: @@ -1609,6 +1618,7 @@ class QubesVmCollection(dict): self.default_netvm_qid = None self.default_fw_netvm_qid = None self.default_template_qid = None + self.updatevm_qid = None self.qubes_store_filename = store_filename def values(self): @@ -1769,6 +1779,15 @@ class QubesVmCollection(dict): else: return self[self.default_fw_netvm_qid] + def set_updatevm_vm(self, vm): + self.updatevm_qid = vm.qid + + def get_updatevm_vm(self): + if self.updatevm_qid is None: + return None + else: + return self[self.updatevm_qid] + def get_vm_by_name(self, name): for vm in self.values(): if (vm.name == name): @@ -1872,7 +1891,10 @@ class QubesVmCollection(dict): if self.default_netvm_qid is not None else "None", default_fw_netvm=str(self.default_fw_netvm_qid) \ - if self.default_fw_netvm_qid is not None else "None" + if self.default_fw_netvm_qid is not None else "None", + + updatevm=str(self.updatevm_qid) \ + if self.updatevm_qid is not None else "None" ) for vm in self.values(): @@ -2002,6 +2024,13 @@ class QubesVmCollection(dict): if default_fw_netvm != "None" else None #assert self.default_netvm_qid is not None + updatevm = element.get("updatevm") + if updatevm is not None: + self.updatevm_qid = int(updatevm) \ + if updatevm != "None" else None + #assert self.default_netvm_qid is not None + + # Then, read in the TemplateVMs, because a reference to template VM # is needed to create each AppVM for element in tree.findall("QubesTemplateVm"): diff --git a/dom0/qvm-tools/qvm-dom0-upgrade b/dom0/qvm-tools/qvm-dom0-upgrade new file mode 100755 index 00000000..2d47edaa --- /dev/null +++ b/dom0/qvm-tools/qvm-dom0-upgrade @@ -0,0 +1,20 @@ +#!/bin/bash + +UPDATEVM=`qvm-get-updatevm` +if [ -z "$UPDATEVM" ]; then + echo "UpdateVM not set, exiting" + exit 1 +fi + +echo "Checking for dom0 updates" + +# Start VM if not running already +qvm-run -a $UPDATEVM true || exit 1 +/usr/lib/qubes/sync_rpmdb_updatevm.sh || exit 1 +qvm-run -u root --pass_io $UPDATEVM "/usr/lib/qubes/qubes_download_dom0_updates.sh --doit $@" || exit 1 +yum check-update +if [ $? -ne 100 ]; then + exit 0 +fi +gpk-update-viewer + diff --git a/dom0/qvm-tools/qvm-get-updatevm b/dom0/qvm-tools/qvm-get-updatevm new file mode 100755 index 00000000..e746587b --- /dev/null +++ b/dom0/qvm-tools/qvm-get-updatevm @@ -0,0 +1,39 @@ +#!/usr/bin/python2.6 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# + +from qubes.qubes import QubesVmCollection +from optparse import OptionParser; + +def main(): + qvm_collection = QubesVmCollection() + qvm_collection.lock_db_for_reading() + qvm_collection.load() + qvm_collection.unlock_db() + updatevm = qvm_collection.get_updatevm_vm() + if updatevm is None: + print "" + else: + print updatevm.name + + + +main() diff --git a/dom0/qvm-tools/qvm-set-updatevm b/dom0/qvm-tools/qvm-set-updatevm new file mode 100755 index 00000000..4fb3a6d4 --- /dev/null +++ b/dom0/qvm-tools/qvm-set-updatevm @@ -0,0 +1,46 @@ +#!/usr/bin/python2.6 +# +# The Qubes OS Project, http://www.qubes-os.org +# +# Copyright (C) 2010 Joanna Rutkowska +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + +from qubes.qubes import QubesVmCollection +from optparse import OptionParser; + +def main(): + usage = "usage: %prog " + parser = OptionParser (usage) + (options, args) = parser.parse_args () + if (len (args) != 1): + parser.error ("Missing argument!") + vmname = args[0] + + qvm_collection = QubesVmCollection() + qvm_collection.lock_db_for_writing() + qvm_collection.load() + vm = qvm_collection.get_vm_by_name(vmname) + if vm is None or vm.qid not in qvm_collection: + print "A VM with the name '{0}' does not exist in the system.".format(vmname) + exit(1) + + qvm_collection.set_updatevm_vm(vm) + qvm_collection.save() + qvm_collection.unlock_db() + +main() diff --git a/dom0/restore/qfile-daemon b/dom0/restore/qfile-daemon index 6b589279..aca6f25f 100755 --- a/dom0/restore/qfile-daemon +++ b/dom0/restore/qfile-daemon @@ -22,8 +22,13 @@ import os import sys import subprocess +import shutil +import glob from qubes.qubes import QubesVmCollection +updates_dir = "/var/lib/qubes/updates" +updates_rpm_dir = updates_dir + "/rpm" + def is_copy_allowed(vm): # if vm.copy_allowed: # return True @@ -33,6 +38,36 @@ def is_copy_allowed(vm): retcode = subprocess.call(['/usr/bin/kdialog', '--yesno', q, '--title', 'File transfer confirmation']) return retcode == 0 +def dom0updates_fatal(msg): + print >> sys.stderr, msg + shutil.rmtree(updates_rpm_dir) + exit(1) + +def handle_dom0updates(updatevm): + source=os.getenv("QREXEC_REMOTE_DOMAIN") + if source != updatevm.name: + print >> sys.stderr, 'Domain ' + source + ' not allowed to send dom0 updates' + exit(1) + # Clean old packages + if os.path.exists(updates_rpm_dir): + shutil.rmtree(updates_rpm_dir) + subprocess.check_call(["/usr/lib/qubes/qfile-dom0-unpacker", os.getlogin(), updates_rpm_dir]) + # Verify received files + for f in os.listdir(updates_rpm_dir): + if glob.fnmatch.fnmatch(f, "*.rpm"): + p = subprocess.Popen (["/bin/rpm", "-K", updates_rpm_dir + "/" + f], + stdout=subprocess.PIPE) + output = p.communicate()[0] + if p.returncode != 0: + dom0updates_fatal('Error while verifying %s signature: %s' % (f, output)) + if output.find("pgp") < 0: + dom0updates_fatal('Domain ' + source + '
sent unsigned rpm: ' + f) + else: + dom0updates_fatal('Domain ' + source + ' sent unexpected file: ' + f) + # After updates received - create repo metadata + subprocess.check_call(["/usr/bin/createrepo", "-q", "/var/lib/qubes/updates"]) + exit(0) + def main(): FILECOPY_VMNAME_SIZE = 32 blob=os.read(0, FILECOPY_VMNAME_SIZE) @@ -42,6 +77,11 @@ def main(): qvm_collection.lock_db_for_reading() qvm_collection.load() qvm_collection.unlock_db() + + if vmname == '@dom0updates': + updatevm = qvm_collection.get_updatevm_vm() + handle_dom0updates(updatevm) + # handle_dom0updates never returns vm = qvm_collection.get_vm_by_name(vmname) # we do not want to flood dom0 with error windows; so just log to stderr diff --git a/rpm_spec/core-commonvm.spec b/rpm_spec/core-commonvm.spec index 993b277f..d754c361 100644 --- a/rpm_spec/core-commonvm.spec +++ b/rpm_spec/core-commonvm.spec @@ -79,10 +79,12 @@ mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d cp qubes_network.rules $RPM_BUILD_ROOT/etc/udev/rules.d/ mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes/ cp setup_ip $RPM_BUILD_ROOT/usr/lib/qubes/ +cp qubes_download_dom0_updates.sh $RPM_BUILD_ROOT/usr/lib/qubes/ mkdir -p $RPM_BUILD_ROOT/etc/yum/post-actions cp qubes_trigger_sync_appmenus.action $RPM_BUILD_ROOT/etc/yum/post-actions/ mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes cp qubes_trigger_sync_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes/ +mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/dom0-updates %triggerin -- initscripts cp /var/lib/qubes/serial.conf /etc/init/serial.conf @@ -233,3 +235,4 @@ rm -rf $RPM_BUILD_ROOT /usr/lib/qubes/setup_ip /etc/yum/post-actions/qubes_trigger_sync_appmenus.action /usr/lib/qubes/qubes_trigger_sync_appmenus.sh +/usr/lib/qubes/qubes_download_dom0_updates.sh diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 47252bec..7960b923 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -39,6 +39,7 @@ URL: http://www.qubes-os.org BuildRequires: xen-devel Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-qubes-dom0 Conflicts: qubes-gui-dom0 < 1.1.13 +Requires: yum-plugin-post-transaction-actions Requires: NetworkManager >= 0.8.1-1 Requires: xen >= 4.1.0-2 %define _builddir %(pwd)/dom0 @@ -50,6 +51,7 @@ The Qubes core files for installation on Dom0.
python -m compileall qvm-core qmemman python -O -m compileall qvm-core qmemman make -C restore +make -C aux-tools make -C ../common make -C ../vchan make -C ../u2mfn @@ -89,10 +91,12 @@ cp aux-tools/convert_dirtemplate2vm.sh $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/create_apps_for_appvm.sh $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/remove_appvm_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/reset_vm_configs.py $RPM_BUILD_ROOT/usr/lib/qubes +cp aux-tools/sync_rpmdb_updatevm.sh $RPM_BUILD_ROOT/usr/lib/qubes/ cp qmemman/server.py $RPM_BUILD_ROOT/usr/lib/qubes/qmemman_daemon.py cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_daemon $RPM_BUILD_ROOT/usr/lib/qubes/ cp ../qrexec/qrexec_client $RPM_BUILD_ROOT/usr/lib/qubes/ +cp aux-tools/qfile-dom0-unpacker $RPM_BUILD_ROOT/usr/lib/qubes/ cp restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin cp restore/xenstore-watch $RPM_BUILD_ROOT/usr/bin/xenstore-watch-qubes @@ -101,6 +105,12 @@ cp restore/qubes_prepare_saved_domain.sh $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qfile-daemon-dvm $RPM_BUILD_ROOT/usr/lib/qubes cp restore/qfile-daemon $RPM_BUILD_ROOT/usr/lib/qubes +mkdir -p $RPM_BUILD_ROOT/etc/yum.real.repos.d +cp qubes-cached.repo $RPM_BUILD_ROOT/etc/yum.real.repos.d/ + +mkdir -p $RPM_BUILD_ROOT/etc/yum/post-actions +cp misc/qubes_sync_rpmdb_updatevm.action $RPM_BUILD_ROOT/etc/yum/post-actions/ + mkdir -p $RPM_BUILD_ROOT/var/lib/qubes mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/vm-templates mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/appvms @@ -109,6 +119,8 @@ mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/servicevms mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/backup mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/dvmdata +mkdir -p $RPM_BUILD_ROOT/var/lib/qubes/updates + mkdir -p $RPM_BUILD_ROOT/usr/share/qubes/icons cp icons/*.png $RPM_BUILD_ROOT/usr/share/qubes/icons cp misc/qubes-vm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ @@ -158,6 +170,9 @@ fi sed 's/^net.ipv4.ip_forward.*/net.ipv4.ip_forward = 1/' -i /etc/sysctl.conf +sed '/^reposdir=/d' -i /etc/yum.conf +echo reposdir=/etc/yum.real.repos.d >> /etc/yum.conf + chkconfig --add qubes_core || echo "WARNING: Cannot add service qubes_core!" chkconfig --add qubes_netvm || echo "WARNING: Cannot add service qubes_netvm!" chkconfig --add qubes_setupdvm || echo "WARNING: Cannot add service qubes_setupdvm!" 
@@ -267,12 +282,15 @@ fi /usr/lib/qubes/meminfo-writer /usr/lib/qubes/qfile-daemon-dvm* /usr/lib/qubes/qfile-daemon +/usr/lib/qubes/sync_rpmdb_updatevm.sh +%attr(4750,root,qubes) /usr/lib/qubes/qfile-dom0-unpacker %attr(770,root,qubes) %dir /var/lib/qubes %attr(770,root,qubes) %dir /var/lib/qubes/vm-templates %attr(770,root,qubes) %dir /var/lib/qubes/appvms %attr(770,root,qubes) %dir /var/lib/qubes/servicevms %attr(770,root,qubes) %dir /var/lib/qubes/backup %attr(770,root,qubes) %dir /var/lib/qubes/dvmdata %attr(770,root,qubes) %dir /var/lib/qubes/updates %dir /usr/share/qubes/icons/*.png /usr/share/qubes/qubes-vm.directory.template /usr/share/qubes/qubes-templatevm.directory.template @@ -299,7 +317,9 @@ fi %attr(770,root,qubes) %dir /var/run/qubes %{_libdir}/libvchan.so %{_libdir}/libu2mfn.so +/etc/yum.real.repos.d/qubes-cached.repo /etc/sudoers.d/qubes /etc/xdg/autostart/qubes-guid.desktop /etc/security/limits.d/99-qubes.conf /etc/xen/xl.conf +/etc/yum/post-actions/qubes_sync_rpmdb_updatevm.action From 151b15bb8cba982aec3dba44c5cfff27fbd1b09a Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 23 Jun 2011 14:39:17 +0200 Subject: [PATCH 87/90] dom0: Edit xl.conf in %post instead of overriding file (rpm file conflict) --- dom0/misc/xl.conf | 11 ----------- rpm_spec/core-dom0.spec | 7 ++++--- 2 files changed, 4 insertions(+), 14 deletions(-) delete mode 100644 dom0/misc/xl.conf diff --git a/dom0/misc/xl.conf b/dom0/misc/xl.conf deleted file mode 100644 index 2c461582..00000000 --- a/dom0/misc/xl.conf +++ /dev/null @@ -1,11 +0,0 @@ -## Global XL config file ## - -# automatically balloon down dom0 when xen doesn't have enough free -# memory to create a domain -autoballoon=0 - -# full path of the lockfile used by xl during domain creation -lockfile="/var/run/qubes/xl-lock" - -# default vif script -#vifscript="vif-bridge" diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 7960b923..0d0b7d8f 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -138,8 +138,6 @@ mkdir -p $RPM_BUILD_ROOT/etc/sysconfig cp ../common/iptables $RPM_BUILD_ROOT/etc/sysconfig mkdir -p $RPM_BUILD_ROOT/etc/security/limits.d cp misc/limits-qubes.conf $RPM_BUILD_ROOT/etc/security/limits.d/99-qubes.conf -mkdir -p $RPM_BUILD_ROOT/etc/xen/ -cp misc/xl.conf $RPM_BUILD_ROOT/etc/xen/ mkdir -p $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d cp pm-utils/01qubes-sync-vms-clock $RPM_BUILD_ROOT/usr/lib64/pm-utils/sleep.d/ @@ -170,6 +168,10 @@ fi sed 's/^net.ipv4.ip_forward.*/net.ipv4.ip_forward = 1/' -i /etc/sysctl.conf +sed '/^autoballoon=/d;/^lockfile=/d' -i /etc/xen/xl.conf +echo 'autoballoon=0' >> /etc/xen/xl.conf +echo 'lockfile="/var/run/qubes/xl-lock"' >> /etc/xen/xl.conf + sed '/^reposdir=/d' -i /etc/yum.conf echo reposdir=/etc/yum.real.repos.d >> /etc/yum.conf @@ -321,5 +323,4 @@ fi /etc/sudoers.d/qubes /etc/xdg/autostart/qubes-guid.desktop /etc/security/limits.d/99-qubes.conf -/etc/xen/xl.conf /etc/yum/post-actions/qubes_sync_rpmdb_updatevm.action From 21222cc8597a69ecf914c01de0b89835ace8ef1f Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 23 Jun 2011 20:04:27 +0200 Subject: [PATCH 88/90] dom0: start xenstored service in %post This is required by qvm-init-storage and, in general, for the qvm-* tools to work properly.
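The scriptlet change below starts xenstored unconditionally; a slightly more defensive variant (hypothetical, not what this patch does) would tolerate the service already running and follow the spec's existing warning style:

    # start xenstored only if it is not already up; do not abort %post on failure
    service xenstored status >/dev/null 2>&1 || service xenstored start || \
        echo "WARNING: Cannot start xenstored!"
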
--- rpm_spec/core-dom0.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 0d0b7d8f..c3e5d6cb 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -196,6 +196,7 @@ fi # Load evtchn module - xenstored needs it modprobe evtchn +service xenstored start if ! [ -e /var/lib/qubes/qubes.xml ]; then # echo "Initializing Qubes DB..." From 0f28db380ed073522287d9e3832665ac42ae3a55 Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 23 Jun 2011 22:01:52 +0200 Subject: [PATCH 89/90] dom0: QubesVm has no add_to_xen_storage() --- dom0/qvm-tools/qvm-add-template | 8 -------- 1 file changed, 8 deletions(-) diff --git a/dom0/qvm-tools/qvm-add-template b/dom0/qvm-tools/qvm-add-template index a6a58a12..e9f5aed8 100755 --- a/dom0/qvm-tools/qvm-add-template +++ b/dom0/qvm-tools/qvm-add-template @@ -64,14 +64,6 @@ def main(): qvm_collection.pop(vm.qid) exit (1) - try: - vm.add_to_xen_storage() - - except (IOError, OSError) as err: - print "ERROR: {0}".format(err) - qvm_collection.pop(vm.qid) - exit (1) - qvm_collection.save() qvm_collection.unlock_db() From 42cab54520b57444110b147105bc6a8d9119a29b Mon Sep 17 00:00:00 2001 From: Marek Marczykowski Date: Thu, 23 Jun 2011 23:23:45 +0200 Subject: [PATCH 90/90] dom0: include missing vm-template.conf in rpm package --- rpm_spec/core-dom0.spec | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index c3e5d6cb..3759e4e2 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -126,6 +126,7 @@ cp icons/*.png $RPM_BUILD_ROOT/usr/share/qubes/icons cp misc/qubes-vm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ cp misc/qubes-templatevm.directory.template $RPM_BUILD_ROOT/usr/share/qubes/ cp misc/qubes-appmenu-select.template $RPM_BUILD_ROOT/usr/share/qubes/ +cp misc/vm-template.conf $RPM_BUILD_ROOT/usr/share/qubes/ mkdir -p $RPM_BUILD_ROOT/usr/bin cp ../common/qubes_setup_dnat_to_ns $RPM_BUILD_ROOT/usr/lib/qubes @@ -298,6 +299,7 @@ fi /usr/share/qubes/qubes-vm.directory.template /usr/share/qubes/qubes-templatevm.directory.template /usr/share/qubes/qubes-appmenu-select.template +/usr/share/qubes/vm-template.conf /usr/lib/qubes/qubes_setup_dnat_to_ns /usr/lib/qubes/qubes_fix_nm_conf.sh /etc/dhclient.d/qubes_setup_dnat_to_ns.sh