From 421b13b6c459c502d2ea9c45b0eb5c8c5c220e47 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:17:45 +0200
Subject: [PATCH 01/10] dom0/qmemman: minor fix in mem_set()

Set target/maxmem in a more logical order (the balloon driver can fail to
balloon the first time, but will retry anyway). Force sizes to be integers.
---
 dom0/qmemman/qmemman.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py
index cf6325a7..09b45af3 100755
--- a/dom0/qmemman/qmemman.py
+++ b/dom0/qmemman/qmemman.py
@@ -58,15 +58,15 @@ class SystemState:
     def mem_set(self, id, val):
         print 'mem-set domain', id, 'to', val
         self.domdict[id].last_target = val
-        self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024))
         #can happen in the middle of domain shutdown
         #apparently xc.lowlevel throws exceptions too
         try:
-            self.xc.domain_setmaxmem(int(id), val/1024 + 1024) # LIBXL_MAXMEM_CONSTANT=1024
-            self.xc.domain_set_target_mem(int(id), val/1024)
+            self.xc.domain_setmaxmem(int(id), int(val/1024) + 1024) # LIBXL_MAXMEM_CONSTANT=1024
+            self.xc.domain_set_target_mem(int(id), int(val/1024))
         except:
             pass
-
+        self.xs.write('', '/local/domain/' + id + '/memory/target', str(int(val/1024)))
+
     def mem_set_obsolete(self, id, val):
         uuid = self.domdict[id].uuid
         if val >= 2**31:

From 892a6bbc130a919c1b56f355596791858423293d Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:20:03 +0200
Subject: [PATCH 02/10] dom0/guihelpers: interface for notifying qubes-manager
 (#615)

---
 dom0/qvm-core/guihelpers.py | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/dom0/qvm-core/guihelpers.py b/dom0/qvm-core/guihelpers.py
index dbda6022..f4c8ff3d 100644
--- a/dom0/qvm-core/guihelpers.py
+++ b/dom0/qvm-core/guihelpers.py
@@ -24,8 +24,11 @@ import sys
 from optparse import OptionParser
 from PyQt4.QtCore import *
 from PyQt4.QtGui import *
+import dbus
+from dbus import DBusException
 
 app = None
+system_bus = None
 
 def prepare_app():
     global app
@@ -53,3 +56,29 @@ def ask(text, title="Question", yestoall=False):
     else:
         #?!
         return 127
+
+def notify_error_qubes_manager(name, message):
+    global system_bus
+    if system_bus is None:
+        system_bus = dbus.SystemBus()
+
+    try:
+        qubes_manager = system_bus.get_object('org.qubesos.QubesManager',
+                '/org/qubesos/QubesManager')
+        qubes_manager.notify_error(name, message, dbus_interface='org.qubesos.QubesManager')
+    except DBusException:
+        # ignore the case when no qubes-manager is running
+        pass
+
+def clear_error_qubes_manager(name, message):
+    global system_bus
+    if system_bus is None:
+        system_bus = dbus.SystemBus()
+
+    try:
+        qubes_manager = system_bus.get_object('org.qubesos.QubesManager',
+                '/org/qubesos/QubesManager')
+        qubes_manager.clear_error_exact(name, message, dbus_interface='org.qubesos.QubesManager')
+    except DBusException:
+        # ignore the case when no qubes-manager is running
+        pass

From b4070a99a3b792c0251865dd824c566e32b14623 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:23:43 +0200
Subject: [PATCH 03/10] dom0/qmemman: check if donors have returned memory
 before distributing it to other VMs (#563)

When a VM doesn't return memory to Xen, mark it as suspicious and abort the
balance, so that some Xen free_memory margin is always kept. VMs marked as
suspicious will be re-evaluated before the next balance; those that still
haven't returned the memory will be skipped in the balance process.
---
 dom0/qmemman/qmemman.py      | 49 ++++++++++++++++++++++++++++++
 dom0/qmemman/qmemman_algo.py |  4 +++
 2 files changed, 53 insertions(+)

diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py
index 09b45af3..9a3c5c42 100755
--- a/dom0/qmemman/qmemman.py
+++ b/dom0/qmemman/qmemman.py
@@ -5,6 +5,9 @@ import time
 import qmemman_algo
 import os
 
+no_progress_msg="VM refused to give back requested memory"
+slow_memset_react_msg="VM didn't give back all requested memory"
+
 class DomainState:
     def __init__(self, id):
         self.meminfo = None #dictionary of memory info read from client
@@ -13,6 +16,8 @@ class DomainState:
         self.mem_used = None #used memory, computed based on meminfo
         self.id = id #domain id
         self.last_target = 0 #the last memset target
+        self.no_progress = False #did not react to memset at all
+        self.slow_memset_react = False #slow reaction to memset (still above target after a few tries)
 
 class SystemState:
     def __init__(self):
@@ -54,6 +59,17 @@ class SystemState:
 # in fact, the only possible case of nonexisting memory/static-max is dom0
 # see #307
 
+    def clear_outdated_error_markers(self):
+        # Clear outdated errors
+        for i in self.domdict.keys():
+            if self.domdict[i].slow_memset_react and \
+                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                self.domdict[i].slow_memset_react = False
+
+            if self.domdict[i].no_progress and \
+                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                self.domdict[i].no_progress = False
+
     #the below works (and is fast), but then 'xm list' shows unchanged memory value
     def mem_set(self, id, val):
         print 'mem-set domain', id, 'to', val
@@ -156,6 +172,7 @@ class SystemState:
         if os.path.isfile('/var/run/qubes/do-not-membalance'):
             return
         self.refresh_memactual()
+        self.clear_outdated_error_markers()
         xenfree = self.get_free_xen_memory()
         memset_reqs = qmemman_algo.balance(xenfree - self.XEN_FREE_MEM_LEFT, self.domdict)
         if not self.is_balance_req_significant(memset_reqs, xenfree):
@@ -163,8 +180,40 @@ class SystemState:
 
         self.print_stats(xenfree, memset_reqs)
 
+        prev_memactual = {}
+        for i in self.domdict.keys():
+            prev_memactual[i] = self.domdict[i].memory_actual
         for rq in memset_reqs:
             dom, mem = rq
+            # Force to always have at least 0.9*self.XEN_FREE_MEM_LEFT (some
+            # margin for rounding errors). Before giving memory to a
+            # domain, ensure that the others have given it back.
+            # If not, wait a little.
+            ntries = 5
+            while self.get_free_xen_memory() - (mem - self.domdict[dom].memory_actual) < 0.9*self.XEN_FREE_MEM_LEFT:
+                time.sleep(self.BALOON_DELAY)
+                ntries -= 1
+                if ntries <= 0:
+                    # Waiting hasn't helped; find which domain got stuck and
+                    # abort the balance (after distributing what we have)
+                    self.refresh_memactual()
+                    for rq2 in memset_reqs:
+                        dom2, mem2 = rq2
+                        if dom2 == dom:
+                            # All donors have been processed
+                            break
+                        # allow some small margin
+                        if self.domdict[dom2].memory_actual > self.domdict[dom2].last_target + self.XEN_FREE_MEM_LEFT/4:
+                            # VM didn't react to the memory request at all, remove it from donors
+                            if prev_memactual[dom2] == self.domdict[dom2].memory_actual:
+                                print 'dom %s didnt react to memory request (holds %d, requested balloon down to %d)' % (dom2, self.domdict[dom2].memory_actual, mem2)
+                                self.domdict[dom2].no_progress = True
+                            else:
+                                print 'dom %s still holds more memory than assigned (%d > %d)' % (dom2, self.domdict[dom2].memory_actual, mem2)
+                                self.domdict[dom2].slow_memset_react = True
+                    self.mem_set(dom, self.get_free_xen_memory() + self.domdict[dom].memory_actual - self.XEN_FREE_MEM_LEFT)
+                    return
+
             self.mem_set(dom, mem)
 
 #        for i in self.domdict.keys():

diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py
index b61d9d17..3f807c53 100755
--- a/dom0/qmemman/qmemman_algo.py
+++ b/dom0/qmemman/qmemman_algo.py
@@ -117,6 +117,8 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre
     for i in domain_dictionary.keys():
         if domain_dictionary[i].meminfo is None:
             continue
+        if domain_dictionary[i].no_progress:
+            continue
         #distribute total_available_memory proportionally to mempref
         scale = 1.0*prefmem(domain_dictionary[i])/total_mem_pref
         target_nonint = prefmem(domain_dictionary[i]) + scale*total_available_memory
@@ -212,6 +214,8 @@ def balance(xen_free_memory, domain_dictionary):
     for i in domain_dictionary.keys():
         if domain_dictionary[i].meminfo is None:
             continue
+        if domain_dictionary[i].no_progress:
+            continue
         need = memory_needed(domain_dictionary[i])
 #        print 'domain' , i, 'act/pref', domain_dictionary[i].memory_actual, prefmem(domain_dictionary[i]), 'need=', need
         if need < 0 or domain_dictionary[i].memory_actual >= domain_dictionary[i].memory_maximum:

From e70b690150b655cfd90b556ba7074dcaab4388e1 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:27:36 +0200
Subject: [PATCH 04/10] dom0/qmemman: notify qubes-manager about misbehaving
 VMs (#615)

---
 dom0/qmemman/qmemman.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py
index 9a3c5c42..7b6f687e 100755
--- a/dom0/qmemman/qmemman.py
+++ b/dom0/qmemman/qmemman.py
@@ -4,6 +4,7 @@ import string
 import time
 import qmemman_algo
 import os
+from guihelpers import notify_error_qubes_manager, clear_error_qubes_manager
 
 no_progress_msg="VM refused to give back requested memory"
 slow_memset_react_msg="VM didn't give back all requested memory"
@@ -64,10 +65,14 @@ class SystemState:
         for i in self.domdict.keys():
             if self.domdict[i].slow_memset_react and \
                     self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
+                clear_error_qubes_manager(dom_name, slow_memset_react_msg)
                 self.domdict[i].slow_memset_react = False
 
             if self.domdict[i].no_progress and \
                     self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
+                clear_error_qubes_manager(dom_name, no_progress_msg)
                 self.domdict[i].no_progress = False
 
     #the below works (and is fast), but then 'xm list' shows unchanged memory value
@@ -208,9 +213,13 @@ class SystemState:
                             if prev_memactual[dom2] == self.domdict[dom2].memory_actual:
                                 print 'dom %s didnt react to memory request (holds %d, requested balloon down to %d)' % (dom2, self.domdict[dom2].memory_actual, mem2)
                                 self.domdict[dom2].no_progress = True
+                                dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
+                                notify_error_qubes_manager(dom_name, no_progress_msg)
                             else:
                                 print 'dom %s still holds more memory than assigned (%d > %d)' % (dom2, self.domdict[dom2].memory_actual, mem2)
                                 self.domdict[dom2].slow_memset_react = True
+                                dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
+                                notify_error_qubes_manager(dom_name, slow_memset_react_msg)
                     self.mem_set(dom, self.get_free_xen_memory() + self.domdict[dom].memory_actual - self.XEN_FREE_MEM_LEFT)
                     return
 

From 64369bf02aadccc26cd163c9fe5f831761cf5f86 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:30:00 +0200
Subject: [PATCH 05/10] dom0/qvm-run: Notify qubes-manager about failed VM
 start (#615)

---
 dom0/qvm-tools/qvm-run | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dom0/qvm-tools/qvm-run b/dom0/qvm-tools/qvm-run
index d83236cd..8292e0cd 100755
--- a/dom0/qvm-tools/qvm-run
+++ b/dom0/qvm-tools/qvm-run
@@ -23,6 +23,7 @@
 
 from qubes.qubes import QubesVmCollection
 from qubes.qubes import QubesException
+from qubes.guihelpers import notify_error_qubes_manager
 from optparse import OptionParser
 import subprocess
 import socket
@@ -89,6 +90,7 @@ def vm_run_cmd(vm, cmd, options):
     except QubesException as err:
         if options.tray:
             tray_notify_error(str(err))
+        notify_error_qubes_manager(vm.name, str(err))
         print >> sys.stderr, "ERROR: %s" % str(err)
         exit(1)
 

From 9a1a9c8b1f22c5b0be2bb5bbafe8d487d9b9fd72 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:30:47 +0200
Subject: [PATCH 06/10] vm/qubes-update-proxy: update URL whitelist

---
 network/filter-qubes-yum | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/network/filter-qubes-yum b/network/filter-qubes-yum
index b244f3cf..ee5c777f 100644
--- a/network/filter-qubes-yum
+++ b/network/filter-qubes-yum
@@ -1,4 +1,4 @@
-.*/repodata/[A-Za-z0-9-]*\(primary\|filelist\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
+.*/repodata/[A-Za-z0-9-]*\(primary\|filelist\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\|updateinfo\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
 .*/repodata/repomd\.xml$
 .*\.rpm$
 .*\.drpm$

From b834e2c5a7ff47d6bc2c51fa90202ad9e2f80d88 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:31:32 +0200
Subject: [PATCH 07/10] vm/spec: disable pam_systemd globally (#607)

All /etc/pam.d/ files containing pam_systemd.so are autogenerated by
authconfig, so although "removing" the pam_systemd.so file is not an elegant
solution, it seems to be much more reliable.
---
 rpm_spec/core-vm.spec | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/rpm_spec/core-vm.spec b/rpm_spec/core-vm.spec
index 3cb6a6b2..71316f76 100644
--- a/rpm_spec/core-vm.spec
+++ b/rpm_spec/core-vm.spec
@@ -179,6 +179,9 @@ install -D u2mfn/libu2mfn.so $RPM_BUILD_ROOT/%{_libdir}/libu2mfn.so
 %triggerin -- initscripts
 cp /usr/lib/qubes/serial.conf /etc/init/serial.conf
 
+%triggerin -- systemd
+mv -f /%{_lib}/security/pam_systemd.so /%{_lib}/security/pam_systemd.so.disabled
+
 %post
 
 # disable some Upstart services
@@ -246,6 +249,12 @@
 if ! [ -e /lib/firmware/updates ]; then
   ln -s /lib/modules/firmware /lib/firmware/updates
 fi
+
+# Disable pam_systemd - we (hopefully) don't need it, but it causes some minor
+# problems (http://wiki.qubes-os.org/trac/ticket/607)
+# /etc/pam.d/common-* are automatically (re)generated by authconfig, so their
+# modification would not be persistent -> it must be done this way
+mv -f /%{_lib}/security/pam_systemd.so /%{_lib}/security/pam_systemd.so.disabled
 if ! grep -q '/etc/yum\.conf\.d/qubes-proxy\.conf'; then
   echo >> /etc/yum.conf
   echo '# Yum does not support inclusion of config dir...' >> /etc/yum.conf
@@ -320,6 +329,7 @@ if [ "$1" = 0 ] ; then
     mv /var/lib/qubes/fstab.orig /etc/fstab
     mv /var/lib/qubes/removed-udev-scripts/* /etc/udev/rules.d/
     mv /var/lib/qubes/serial.orig /etc/init/serial.conf
+    mv /%{_lib}/security/pam_systemd.so.disabled /%{_lib}/security/pam_systemd.so
 fi
 
 %postun

From c4888add664b866c62713fea9bf1ce1a1721889e Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:33:22 +0200
Subject: [PATCH 08/10] vm: disable D-Bus activation of NetworkManager (#610)

---
 rpm_spec/core-vm.spec | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/rpm_spec/core-vm.spec b/rpm_spec/core-vm.spec
index 71316f76..de058384 100644
--- a/rpm_spec/core-vm.spec
+++ b/rpm_spec/core-vm.spec
@@ -559,6 +559,10 @@ done
 
 /bin/systemctl enable qubes-update-check.timer 2> /dev/null
 
+# Disable D-Bus activation of NetworkManager - in an AppVM it causes problems (e.g. PackageKit timeouts)
+/bin/systemctl disable NetworkManager.service 2> /dev/null
+/bin/systemctl mask dbus-org.freedesktop.NetworkManager.service 2> /dev/null
+
 # Install overriden services only when original exists
 for srv in cups NetworkManager ntpd; do
     if [ -f /lib/systemd/system/$srv.service ]; then

From 8b2be6b6934d263175a148d9ebf01e18b0bcea10 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Thu, 5 Jul 2012 01:40:38 +0200
Subject: [PATCH 09/10] dom0/spec: remove some udev rules from system (#605)

---
 rpm_spec/core-dom0.spec | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec
index 918164f9..95aa56d2 100644
--- a/rpm_spec/core-dom0.spec
+++ b/rpm_spec/core-dom0.spec
@@ -265,6 +265,10 @@ if [ "x"$HAD_SYSCONFIG_NETWORK = "xno" ]; then
     rm -f /etc/sysconfig/network
 fi
 
+# Remove unnecessary udev rules that cause problems in dom0 (#605)
+mkdir -p /var/lib/qubes/removed-udev-scripts
+mv -f /lib/udev/rules.d/69-xorg-vmmouse.rules /var/lib/qubes/removed-udev-scripts/
+
 %clean
 rm -rf $RPM_BUILD_ROOT
 
@@ -287,6 +291,8 @@
 %triggerin -- xen-runtime
 sed -i 's/\/block /\/block.qubes /' /etc/udev/rules.d/xen-backend.rules
 
+%triggerin -- xorg-x11-drv-vmmouse
+mv -f /lib/udev/rules.d/69-xorg-vmmouse.rules /var/lib/qubes/removed-udev-scripts/
 
 %preun
 if [ "$1" = 0 ] ; then

From ee3cf6b583368308c052ba14443ccf1ac23ca500 Mon Sep 17 00:00:00 2001
From: Marek Marczykowski
Date: Sat, 30 Jun 2012 00:58:06 +0200
Subject: [PATCH 10/10] dom0/qvm-block: fix error handling (#614)

---
 dom0/qvm-core/qubesutils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dom0/qvm-core/qubesutils.py b/dom0/qvm-core/qubesutils.py
index 83bdc407..b29559ba 100644
--- a/dom0/qvm-core/qubesutils.py
+++ b/dom0/qvm-core/qubesutils.py
@@ -341,7 +341,7 @@ def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=Fa
             return
         elif int(be_state) > 4:
             # Error
-            error = xs.read('/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
+            error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
             if error is None:
                 raise QubesException("Error while connecting block device: " + error)
             else: