diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py
index cf6325a7..7b6f687e 100755
--- a/dom0/qmemman/qmemman.py
+++ b/dom0/qmemman/qmemman.py
@@ -4,6 +4,10 @@ import string
 import time
 import qmemman_algo
 import os
+from guihelpers import notify_error_qubes_manager, clear_error_qubes_manager
+
+no_progress_msg = "VM refused to give back requested memory"
+slow_memset_react_msg = "VM didn't give back all requested memory"
 
 class DomainState:
     def __init__(self, id):
@@ -13,6 +17,8 @@ class DomainState:
         self.mem_used = None    #used memory, computed based on meminfo
         self.id = id            #domain id
         self.last_target = 0    #the last memset target
+        self.no_progress = False          #no reaction to memset
+        self.slow_memset_react = False    #slow reaction to memset (still above target after a few tries)
 
 class SystemState:
     def __init__(self):
@@ -54,19 +60,34 @@ class SystemState:
         # in fact, the only possible case of nonexisting memory/static-max is dom0
         # see #307
 
+    def clear_outdated_error_markers(self):
+        # Clear outdated errors
+        for i in self.domdict.keys():
+            if self.domdict[i].slow_memset_react and \
+                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
+                clear_error_qubes_manager(dom_name, slow_memset_react_msg)
+                self.domdict[i].slow_memset_react = False
+
+            if self.domdict[i].no_progress and \
+                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
+                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
+                clear_error_qubes_manager(dom_name, no_progress_msg)
+                self.domdict[i].no_progress = False
+
     #the below works (and is fast), but then 'xm list' shows unchanged memory value
     def mem_set(self, id, val):
         print 'mem-set domain', id, 'to', val
         self.domdict[id].last_target = val
-        self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024))
         #can happen in the middle of domain shutdown
         #apparently xc.lowlevel throws exceptions too
         try:
-            self.xc.domain_setmaxmem(int(id), val/1024 + 1024) # LIBXL_MAXMEM_CONSTANT=1024
-            self.xc.domain_set_target_mem(int(id), val/1024)
+            self.xc.domain_setmaxmem(int(id), int(val/1024) + 1024) # LIBXL_MAXMEM_CONSTANT=1024
+            self.xc.domain_set_target_mem(int(id), int(val/1024))
         except:
             pass
-
+        self.xs.write('', '/local/domain/' + id + '/memory/target', str(int(val/1024)))
+
     def mem_set_obsolete(self, id, val):
         uuid = self.domdict[id].uuid
         if val >= 2**31:
@@ -156,6 +177,7 @@ class SystemState:
         if os.path.isfile('/var/run/qubes/do-not-membalance'):
             return
         self.refresh_memactual()
+        self.clear_outdated_error_markers()
         xenfree = self.get_free_xen_memory()
         memset_reqs = qmemman_algo.balance(xenfree - self.XEN_FREE_MEM_LEFT, self.domdict)
         if not self.is_balance_req_significant(memset_reqs, xenfree):
@@ -163,8 +185,44 @@
 
         self.print_stats(xenfree, memset_reqs)
 
+        prev_memactual = {}
+        for i in self.domdict.keys():
+            prev_memactual[i] = self.domdict[i].memory_actual
         for rq in memset_reqs:
             dom, mem = rq
+            # Always force at least 0.9*self.XEN_FREE_MEM_LEFT to remain
+            # free (margin for rounding errors). Before giving memory to a
+            # domain, ensure that the others have given it back.
+            # If not, wait a little.
+            ntries = 5
+            while self.get_free_xen_memory() - (mem - self.domdict[dom].memory_actual) < 0.9*self.XEN_FREE_MEM_LEFT:
+                time.sleep(self.BALOON_DELAY)
+                ntries -= 1
+                if ntries <= 0:
+                    # Waiting hasn't helped; find which domain got stuck and
+                    # abort balancing (after distributing what we have)
+                    self.refresh_memactual()
+                    for rq2 in memset_reqs:
+                        dom2, mem2 = rq2
+                        if dom2 == dom:
+                            # All donors have been processed
+                            break
+                        # allow some small margin
+                        if self.domdict[dom2].memory_actual > self.domdict[dom2].last_target + self.XEN_FREE_MEM_LEFT/4:
+                            # VM didn't react to the memory request at all, remove it from the donors
+                            if prev_memactual[dom2] == self.domdict[dom2].memory_actual:
+                                print "dom %s didn't react to memory request (holds %d, requested balloon down to %d)" % (dom2, self.domdict[dom2].memory_actual, mem2)
+                                self.domdict[dom2].no_progress = True
+                                dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
+                                notify_error_qubes_manager(dom_name, no_progress_msg)
+                            else:
+                                print "dom %s still holds more memory than assigned (%d > %d)" % (dom2, self.domdict[dom2].memory_actual, mem2)
+                                self.domdict[dom2].slow_memset_react = True
+                                dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
+                                notify_error_qubes_manager(dom_name, slow_memset_react_msg)
+                    self.mem_set(dom, self.get_free_xen_memory() + self.domdict[dom].memory_actual - self.XEN_FREE_MEM_LEFT)
+                    return
+
             self.mem_set(dom, mem)
 
 #        for i in self.domdict.keys():
diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py
index b61d9d17..3f807c53 100755
--- a/dom0/qmemman/qmemman_algo.py
+++ b/dom0/qmemman/qmemman_algo.py
@@ -117,6 +117,8 @@ def balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pre
     for i in domain_dictionary.keys():
         if domain_dictionary[i].meminfo is None:
             continue
+        if domain_dictionary[i].no_progress:
+            continue
         #distribute total_available_memory proportionally to mempref
         scale = 1.0*prefmem(domain_dictionary[i])/total_mem_pref
         target_nonint = prefmem(domain_dictionary[i]) + scale*total_available_memory
@@ -212,6 +214,8 @@ def balance(xen_free_memory, domain_dictionary):
     for i in domain_dictionary.keys():
         if domain_dictionary[i].meminfo is None:
             continue
+        if domain_dictionary[i].no_progress:
+            continue
         need = memory_needed(domain_dictionary[i])
 #        print 'domain' , i, 'act/pref', domain_dictionary[i].memory_actual, prefmem(domain_dictionary[i]), 'need=', need
         if need < 0 or domain_dictionary[i].memory_actual >= domain_dictionary[i].memory_maximum:
diff --git a/dom0/qvm-core/guihelpers.py b/dom0/qvm-core/guihelpers.py
index dbda6022..f4c8ff3d 100644
--- a/dom0/qvm-core/guihelpers.py
+++ b/dom0/qvm-core/guihelpers.py
@@ -24,8 +24,11 @@ import sys
 from optparse import OptionParser
 from PyQt4.QtCore import *
 from PyQt4.QtGui import *
+import dbus
+from dbus import DBusException
 
 app = None
+system_bus = None
 
 def prepare_app():
     global app
@@ -53,3 +56,29 @@ def ask(text, title="Question", yestoall=False):
     else:
         #?!
         return 127
+
+def notify_error_qubes_manager(name, message):
+    global system_bus
+    if system_bus is None:
+        system_bus = dbus.SystemBus()
+
+    try:
+        qubes_manager = system_bus.get_object('org.qubesos.QubesManager',
+                '/org/qubesos/QubesManager')
+        qubes_manager.notify_error(name, message, dbus_interface='org.qubesos.QubesManager')
+    except DBusException:
+        # ignore the case when no qubes-manager is running
+        pass
+
+def clear_error_qubes_manager(name, message):
+    global system_bus
+    if system_bus is None:
+        system_bus = dbus.SystemBus()
+
+    try:
+        qubes_manager = system_bus.get_object('org.qubesos.QubesManager',
+                '/org/qubesos/QubesManager')
+        qubes_manager.clear_error_exact(name, message, dbus_interface='org.qubesos.QubesManager')
+    except DBusException:
+        # ignore the case when no qubes-manager is running
+        pass
diff --git a/dom0/qvm-core/qubesutils.py b/dom0/qvm-core/qubesutils.py
index 83bdc407..b29559ba 100644
--- a/dom0/qvm-core/qubesutils.py
+++ b/dom0/qvm-core/qubesutils.py
@@ -341,7 +341,7 @@ def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=Fa
         return
     elif int(be_state) > 4:
         # Error
-        error = xs.read('/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
+        error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
         if error is None:
             raise QubesException("Error while connecting block device: " + error)
         else:
diff --git a/dom0/qvm-tools/qvm-run b/dom0/qvm-tools/qvm-run
index d83236cd..8292e0cd 100755
--- a/dom0/qvm-tools/qvm-run
+++ b/dom0/qvm-tools/qvm-run
@@ -23,6 +23,7 @@
 
 from qubes.qubes import QubesVmCollection
 from qubes.qubes import QubesException
+from qubes.guihelpers import notify_error_qubes_manager
 from optparse import OptionParser
 import subprocess
 import socket
@@ -89,6 +90,7 @@ def vm_run_cmd(vm, cmd, options):
     except QubesException as err:
         if options.tray:
             tray_notify_error(str(err))
+            notify_error_qubes_manager(vm.name, str(err))
         print >> sys.stderr, "ERROR: %s" % str(err)
         exit(1)
diff --git a/network/filter-qubes-yum b/network/filter-qubes-yum
index b244f3cf..ee5c777f 100644
--- a/network/filter-qubes-yum
+++ b/network/filter-qubes-yum
@@ -1,4 +1,4 @@
-.*/repodata/[A-Za-z0-9-]*\(primary\|filelist\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
+.*/repodata/[A-Za-z0-9-]*\(primary\|filelist\|comps\(-[a-z0-9]*\)\?\|other\|prestodelta\|updateinfo\)\.\(sqlite\|xml\)\(\.bz2\|\.gz\)\?$
 .*/repodata/repomd\.xml$
 .*\.rpm$
 .*\.drpm$
diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec
index 918164f9..95aa56d2 100644
--- a/rpm_spec/core-dom0.spec
+++ b/rpm_spec/core-dom0.spec
@@ -265,6 +265,10 @@ if [ "x"$HAD_SYSCONFIG_NETWORK = "xno" ]; then
     rm -f /etc/sysconfig/network
 fi
 
+# Remove unnecessary udev rules that cause problems in dom0 (#605)
+mkdir -p /var/lib/qubes/removed-udev-scripts
+mv -f /lib/udev/rules.d/69-xorg-vmmouse.rules /var/lib/qubes/removed-udev-scripts/
+
 %clean
 rm -rf $RPM_BUILD_ROOT
 
@@ -287,6 +291,8 @@ fi
 
 %triggerin -- xen-runtime
 sed -i 's/\/block /\/block.qubes /' /etc/udev/rules.d/xen-backend.rules
+%triggerin -- xorg-x11-drv-vmmouse
+mv -f /lib/udev/rules.d/69-xorg-vmmouse.rules /var/lib/qubes/removed-udev-scripts/
 
 %preun
 if [ "$1" = 0 ] ; then
diff --git a/rpm_spec/core-vm.spec b/rpm_spec/core-vm.spec
index 3cb6a6b2..de058384 100644
--- a/rpm_spec/core-vm.spec
+++ b/rpm_spec/core-vm.spec
@@ -179,6 +179,9 @@ install -D u2mfn/libu2mfn.so $RPM_BUILD_ROOT/%{_libdir}/libu2mfn.so
 
 %triggerin -- initscripts
 cp /usr/lib/qubes/serial.conf /etc/init/serial.conf
+%triggerin -- systemd
+mv -f /%{_lib}/security/pam_systemd.so /%{_lib}/security/pam_systemd.so.disabled
+
 %post
 
 # disable some Upstart services
@@ -246,6 +249,12 @@ if ! [ -e /lib/firmware/updates ]; then
     ln -s /lib/modules/firmware /lib/firmware/updates
 fi
 
+# Disable pam_systemd - we (hopefully) don't need it, but it causes some minor
+# problems (http://wiki.qubes-os.org/trac/ticket/607)
+# /etc/pam.d/common-* are automatically (re)generated by authconfig, so any
+# modification there will not be persistent -> must be done this way
+mv -f /%{_lib}/security/pam_systemd.so /%{_lib}/security/pam_systemd.so.disabled
+
 if ! grep -q '/etc/yum\.conf\.d/qubes-proxy\.conf'; then
     echo >> /etc/yum.conf
     echo '# Yum does not support inclusion of config dir...' >> /etc/yum.conf
@@ -320,6 +329,7 @@ if [ "$1" = 0 ] ; then
     mv /var/lib/qubes/fstab.orig /etc/fstab
     mv /var/lib/qubes/removed-udev-scripts/* /etc/udev/rules.d/
     mv /var/lib/qubes/serial.orig /etc/init/serial.conf
+    mv /%{_lib}/security/pam_systemd.so.disabled /%{_lib}/security/pam_systemd.so
 fi
 
 %postun
@@ -549,6 +559,10 @@ done
 
 /bin/systemctl enable qubes-update-check.timer 2> /dev/null
 
+# Disable D-Bus activation of NetworkManager - in an AppVM it causes problems (e.g. PackageKit timeouts)
+/bin/systemctl disable NetworkManager.service 2> /dev/null
+/bin/systemctl mask dbus-org.freedesktop.NetworkManager.service 2> /dev/null
+
 # Install overriden services only when original exists
 for srv in cups NetworkManager ntpd; do
     if [ -f /lib/systemd/system/$srv.service ]; then
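
For reference, the qubes-manager notification added in guihelpers.py above boils down to a single D-Bus method call on the system bus. Below is a minimal standalone sketch of the same call pattern: the bus name, object path, interface, and notify_error method are taken from the patch itself, while the helper name report_vm_error and the VM name 'work' are purely illustrative, and a running qubes-manager exporting that interface is assumed.

import dbus
from dbus import DBusException

def report_vm_error(vm_name, message):
    # Look up qubes-manager on the system bus and deliver the error;
    # if no qubes-manager is running, silently skip it, as the patch does.
    try:
        bus = dbus.SystemBus()
        manager = bus.get_object('org.qubesos.QubesManager',
                '/org/qubesos/QubesManager')
        manager.notify_error(vm_name, message,
                dbus_interface='org.qubesos.QubesManager')
    except DBusException:
        pass

report_vm_error('work', "VM didn't give back all requested memory")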