From 62487c0f1e575434ba768508cfc943b3c275d987 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Mon, 30 Aug 2010 11:40:19 +0200 Subject: [PATCH 01/24] Memory management across VMs, first release --- appvm/qubes_core | 1 + common/meminfo-writer | 4 + dom0/init.d/qubes_core | 2 + dom0/pendrive_swapper/qfilexchgd | 15 +-- dom0/qmemman/qmemman.py | 185 +++++++++++++++++++++++++++++++ dom0/qmemman/qmemman_algo.py | 101 +++++++++++++++++ dom0/qmemman/qmemman_client.py | 16 +++ dom0/qmemman/qmemman_server.py | 123 ++++++++++++++++++++ dom0/qmemman/server.py | 4 + dom0/qvm-core/qubes.py | 29 +++-- rpm_spec/core-appvm.spec | 2 + rpm_spec/core-dom0.spec | 11 +- 12 files changed, 475 insertions(+), 18 deletions(-) create mode 100755 common/meminfo-writer create mode 100755 dom0/qmemman/qmemman.py create mode 100755 dom0/qmemman/qmemman_algo.py create mode 100755 dom0/qmemman/qmemman_client.py create mode 100755 dom0/qmemman/qmemman_server.py create mode 100755 dom0/qmemman/server.py diff --git a/appvm/qubes_core b/appvm/qubes_core index c8dd1509..c291ec78 100755 --- a/appvm/qubes_core +++ b/appvm/qubes_core @@ -87,6 +87,7 @@ start() fi fi + /usr/lib/qubes/meminfo-writer & [ -x /rw/config/rc.local ] && /rw/config/rc.local success echo "" diff --git a/common/meminfo-writer b/common/meminfo-writer new file mode 100755 index 00000000..fdbfd29f --- /dev/null +++ b/common/meminfo-writer @@ -0,0 +1,4 @@ +#!/bin/sh +while sleep 1 ; do + xenstore-write memory/meminfo "`cat /proc/meminfo`" +done diff --git a/dom0/init.d/qubes_core b/dom0/init.d/qubes_core index 61452d76..e4bb5c3e 100755 --- a/dom0/init.d/qubes_core +++ b/dom0/init.d/qubes_core @@ -56,6 +56,8 @@ start() xm mem-set 0 1600 cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml setup_dvm_files + /usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs & + /usr/lib/qubes/meminfo-writer & touch /var/lock/subsys/qubes_core success echo diff --git a/dom0/pendrive_swapper/qfilexchgd b/dom0/pendrive_swapper/qfilexchgd index f08a235b..3773d0d2 100755 --- a/dom0/pendrive_swapper/qfilexchgd +++ b/dom0/pendrive_swapper/qfilexchgd @@ -30,6 +30,7 @@ import time from qubes.qubes import QubesVmCollection from qubes.qubes import QubesException from qubes.qubes import QubesDaemonPidfile +from qubes.qmemman_client import QMemmanClient filename_seq = 50 pen_cmd = '/usr/lib/qubes/qubes_pencmd' @@ -187,13 +188,11 @@ class DomainState: def handle_transfer_disposable(self, transaction_seq): - mem_for_dvm = 400 - xenfreepages_s = subprocess.Popen(["/usr/lib/qubes/xenfreepages"],stdout=subprocess.PIPE).stdout.readline() - xenfree_mb = int(xenfreepages_s)*4096/1024/1024 - if xenfree_mb < mem_for_dvm: - errmsg = 'Not enough memory to create DVM: ' - errmsg +='have ' + str(xenfree_mb) + 'MB, need ' - errmsg +=str(mem_for_dvm) + 'MB. Terminate some appVM and retry.' + qmemman_client = QMemmanClient() + if not qmemman_client.request_memory(400*1024*1024): + qmemman_client.close() + errmsg = 'Not enough memory to create DVM. ' + errmsg +='Terminate some appVM and retry.' 
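# A sketch (not part of the patch) of the request protocol behind
# QMemmanClient.request_memory used above, following qmemman_client.py and
# the server handler later in this series; request_memory_sketch is a
# hypothetical name. The client sends a decimal byte count, the daemon
# replies "OK" or "FAIL", and balancing stays paused until the client
# closes the socket.
import socket

def request_memory_sketch(amount):
    sock = socket.socket(socket.AF_UNIX)
    sock.connect("/var/run/qubes/qmemman.sock")
    sock.send(str(amount) + "\n")        # e.g. 400*1024*1024 for a DVM
    ok = sock.recv(1024).strip() == "OK"
    return sock, ok  # close the socket only after the VM has been started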
subprocess.call(['/usr/bin/kdialog', '--sorry', errmsg]) return False @@ -205,12 +204,14 @@ class DomainState: if vm is None: logproc( 'Domain ' + vmname + ' does not exist ?') qvm_collection.unlock_db() + qmemman_client.close() return False retcode = subprocess.call(['/usr/lib/qubes/qubes_restore', current_savefile, '-c', vm.label.color, '-i', vm.label.icon, '-l', str(vm.label.index)]) + qmemman_client.close() if retcode != 0: subprocess.call(['/usr/bin/kdialog', '--sorry', 'DisposableVM creation failed, see qubes_restore.log']) qvm_collection.unlock_db() diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py new file mode 100755 index 00000000..55e75e73 --- /dev/null +++ b/dom0/qmemman/qmemman.py @@ -0,0 +1,185 @@ +import xmlrpclib +from xen.xm import XenAPI +import xen.lowlevel.xc +import string +import time +import qmemman_algo +import os + +class XendSession(object): + def __init__(self): +# self.get_xend_session_old_api() + self.get_xend_session_new_api() + +# def get_xend_session_old_api(self): +# from xen.xend import XendClient +# from xen.util.xmlrpcclient import ServerProxy +# self.xend_server = ServerProxy(XendClient.uri) +# if self.xend_server is None: +# print "get_xend_session_old_api(): cannot open session!" + + + def get_xend_session_new_api(self): + xend_socket_uri = "httpu:///var/run/xend/xen-api.sock" + self.session = XenAPI.Session (xend_socket_uri) + self.session.login_with_password ("", "") + if self.session is None: + print "get_xend_session_new_api(): cannot open session!" + +class DomainState: + def __init__(self, id): + self.meminfo = None + self.memory_actual = None + self.mem_used = None + self.uuid = None + self.id = id + self.meminfo_updated = False + +class SystemState: + def __init__(self): + self.xend_session = XendSession() + self.domdict = {} + self.xc = xen.lowlevel.xc.xc() + self.BALOON_DELAY = 0.1 + + def add_domain(self, id): + self.domdict[id] = DomainState(id) + + def del_domain(self, id): + self.domdict.pop(id) + + def get_free_xen_memory(self): + return self.xc.physinfo()['free_memory']*1024 +# hosts = self.xend_session.session.xenapi.host.get_all() +# host_record = self.xend_session.session.xenapi.host.get_record(hosts[0]) +# host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"]) +# ret = host_metrics_record["memory_free"] +# return long(ret) + + def refresh_memactual(self): + update_uuid_info = False + for domain in self.xc.domain_getinfo(): + id = str(domain['domid']) + if self.domdict.has_key(id): + self.domdict[id].memory_actual = domain['mem_kb']*1024 + if self.domdict[id].uuid is None: + update_uuid_info = True + if not update_uuid_info: + return + dom_recs = self.xend_session.session.xenapi.VM.get_all_records() +# dom_metrics_recs = self.xend_session.session.xenapi.VM_metrics.get_all_records() + for dom_ref, dom_rec in dom_recs.items(): +# dom_metrics_rec = dom_metrics_recs[dom_rec['metrics']] + id = dom_rec['domid'] +# mem = int(dom_metrics_rec['memory_actual'])/1024 + if (self.domdict.has_key(id)): +# self.domdict[id].memory_actual = mem + self.domdict[id].uuid = dom_rec['uuid'] + + def parse_meminfo(self, meminfo): + dict = {} + l1 = string.split(meminfo,"\n") + for i in l1: + l2 = string.split(i) + if len(l2) >= 2: + dict[string.rstrip(l2[0], ":")] = l2[1] + + try: + for i in ('MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'): + val = int(dict[i])*1024 + if (val < 0): + return None + dict[i] = val + except: + return None + + if dict['SwapTotal'] < dict['SwapFree']: + return 
None + return dict + +#the below works (and is fast), but then 'xm list' shows unchanged memory value + def mem_set_alternative(self, id, val): + os.system('xenstore-write /local/domain/' + id + '/memory/target ' + str(val/1024)) + self.xc.domain_set_target_mem(int(id), val/1024) + + def mem_set(self, id, val): + uuid = self.domdict[id].uuid + print 'mem-set domain', id, 'to', val + self.xend_session.session.xenapi.VM.set_memory_dynamic_max_live(uuid, val) + self.xend_session.session.xenapi.VM.set_memory_dynamic_min_live(uuid, val) + + def do_balloon(self, memsize): + MAX_TRIES = 20 + niter = 0 + prev_memory_actual = None + for i in self.domdict.keys(): + self.domdict[i].no_progress = False + while True: + xenfree = self.get_free_xen_memory() + print 'got xenfree=', xenfree + if xenfree >= memsize: + return True + self.refresh_memactual() + if prev_memory_actual is not None: + for i in prev_memory_actual.keys(): + if prev_memory_actual[i] == self.domdict[i].memory_actual: + self.domdict[i].no_progress = True + print 'domain', i, 'stuck at', self.domdict[i].memory_actual + memset_reqs = qmemman_algo.balloon(memsize-xenfree, self.domdict) + print 'requests:', memset_reqs + if niter > MAX_TRIES or len(memset_reqs) == 0: + return False + prev_memory_actual = {} + for i in memset_reqs: + dom, mem = i + self.mem_set(dom, mem) + prev_memory_actual[dom] = self.domdict[dom].memory_actual + time.sleep(self.BALOON_DELAY) + niter = niter + 1 + + def refresh_meminfo(self, domid, val): + self.domdict[domid].meminfo = self.parse_meminfo(val) + self.domdict[domid].meminfo_updated = True + + def adjust_inflates_to_xenfree(self, reqs, idx): + i = idx + memory_needed = 0 + while i < len(reqs): + dom, mem = reqs[i] + memory_needed += mem - self.domdict[dom].memory_actual + i = i + 1 + scale = 1.0*self.get_free_xen_memory()/memory_needed + dom, mem = reqs[idx] + scaled_req = self.domdict[dom].memory_actual + scale*(mem - self.domdict[dom].memory_actual) + return int(scaled_req) + + def do_balance(self): + if os.path.isfile('/etc/do-not-membalance'): + return + self.refresh_memactual() + xenfree = self.get_free_xen_memory() + memset_reqs = qmemman_algo.balance(xenfree, self.domdict) + wait_before_first_inflate = False + i = 0 + while i < len(memset_reqs): + dom, mem = memset_reqs[i] + memory_change = mem - self.domdict[dom].memory_actual + if abs(memory_change) < 100*1024*1024: + i = i + 1 + continue + if memory_change < 0: + wait_before_first_inflate = True + else: + if wait_before_first_inflate: + time.sleep(self.BALOON_DELAY) + wait_before_first_inflate = False + #the following is called before _each_ inflate, to account for possibility that + #previously triggered memory release is in progress + mem = self.adjust_inflates_to_xenfree(memset_reqs, i) + self.mem_set(dom, mem) + i = i + 1 + +# for i in self.domdict.keys(): +# print 'domain ', i, ' meminfo=', self.domdict[i].meminfo, 'actual mem', self.domdict[i].memory_actual +# print 'domain ', i, 'actual mem', self.domdict[i].memory_actual +# print 'xen free mem', self.get_free_xen_memory() diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py new file mode 100755 index 00000000..45cf6a59 --- /dev/null +++ b/dom0/qmemman/qmemman_algo.py @@ -0,0 +1,101 @@ +def is_suspicious(dom): + ret = False + if dom.meminfo['SwapTotal'] < dom.meminfo['SwapFree']: + ret = True + if dom.memory_actual < dom.meminfo['MemFree'] + dom.meminfo['Cached'] + dom.meminfo['Buffers']: + ret = True + if ret: + print 'suspicious meminfo for domain', dom.id, 'mem 
actual', dom.memory_actual, dom.meminfo + return ret + +def recalc_mem_used(domdict): + for domid in domdict.keys(): + dom = domdict[domid] + if dom.meminfo_updated: + dom.meminfo_updated = False + if is_suspicious(dom): + dom.meminfo = None + dom.mem_used = None + else: + dom.mem_used = dom.memory_actual - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree'] + +def prefmem(dom): + if dom.meminfo_updated: + raise AssertionError('meminfo_updated=True in prefmem') + CACHE_FACTOR = 1.3 +#dom0 is special, as it must have large cache, for vbds. Thus, give it a special boost + if dom.id == '0': + return dom.mem_used*CACHE_FACTOR + 350*1024*1024 + return dom.mem_used*CACHE_FACTOR + +def memneeded(dom): +#do not change +#in balance(), "distribute totalsum proportionally to mempref" relies on this exact formula + ret = prefmem(dom) - dom.memory_actual + return ret + + +def balloon(memsize, domdict): + REQ_SAFETY_NET_FACTOR = 1.05 + donors = list() + request = list() + available = 0 + recalc_mem_used(domdict) + for i in domdict.keys(): + if domdict[i].meminfo is None: + continue + if domdict[i].no_progress: + continue + need = memneeded(domdict[i]) + if need < 0: + print 'balloon: dom' , i, 'has actual memory', domdict[i].memory_actual + donors.append((i,-need)) + available-=need + print 'req=', memsize, 'avail=', available, 'donors', donors + if available Loading the VM (type = {0})...".format(self.type) - mem_required = self.get_mem_static_max() - dom0_mem = dom0_vm.get_mem() - dom0_mem_new = dom0_mem - mem_required + self.get_free_xen_memory() - if verbose: - print "--> AppVM required mem : {0}".format(mem_required) - print "--> Dom0 mem after launch : {0}".format(dom0_mem_new) - - if dom0_mem_new < dom0_min_memory: - raise MemoryError ("ERROR: starting this VM would cause Dom0 memory to go below {0}B".format(dom0_min_memory)) + mem_required = self.get_mem_dynamic_max() + qmemman_client = QMemmanClient() + if not qmemman_client.request_memory(mem_required): + qmemman_client.close() + raise MemoryError ("ERROR: insufficient memory to start this VM") try: xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused @@ -490,6 +499,8 @@ class QubesVm(object): self.refresh_xend_session() xend_session.session.xenapi.VM.start (self.session_uuid, True) # Starting a VM paused + qmemman_client.close() # let qmemman_daemon resume balancing + xid = int (xend_session.session.xenapi.VM.get_domid (self.session_uuid)) if verbose: diff --git a/rpm_spec/core-appvm.spec b/rpm_spec/core-appvm.spec index a4444f0b..2949a059 100644 --- a/rpm_spec/core-appvm.spec +++ b/rpm_spec/core-appvm.spec @@ -65,6 +65,7 @@ cp qubes_timestamp qvm-copy-to-vm qvm-open-in-dvm $RPM_BUILD_ROOT/usr/bin mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes cp qubes_add_pendrive_script qubes_penctl qvm-copy-to-vm.kde $RPM_BUILD_ROOT/usr/lib/qubes ln -s /usr/bin/qvm-open-in-dvm $RPM_BUILD_ROOT/usr/lib/qubes/qvm-dvm-transfer +cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes mkdir -p $RPM_BUILD_ROOT/%{kde_service_dir} cp qvm-copy.desktop qvm-dvm.desktop $RPM_BUILD_ROOT/%{kde_service_dir} mkdir -p $RPM_BUILD_ROOT/etc/udev/rules.d @@ -187,6 +188,7 @@ rm -rf $RPM_BUILD_ROOT /usr/lib/qubes/qvm-copy-to-vm.kde %attr(4755,root,root) /usr/bin/qvm-open-in-dvm /usr/lib/qubes/qvm-dvm-transfer +/usr/lib/qubes/meminfo-writer %{kde_service_dir}/qvm-copy.desktop %{kde_service_dir}/qvm-dvm.desktop %attr(4755,root,root) /usr/lib/qubes/qubes_penctl diff --git 
a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index 456d959c..f4af95d8 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -44,8 +44,8 @@ Requires: python, xen-runtime, pciutils, python-inotify, python-daemon, kernel-q The Qubes core files for installation on Dom0. %build -python -m compileall qvm-core -python -O -m compileall qvm-core +python -m compileall qvm-core qmemman +python -O -m compileall qvm-core qmemman make -C restore %install @@ -67,6 +67,8 @@ cp qvm-core/qubes.py $RPM_BUILD_ROOT%{python_sitearch}/qubes cp qvm-core/qubes.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes cp qvm-core/__init__.py $RPM_BUILD_ROOT%{python_sitearch}/qubes cp qvm-core/__init__.py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes +cp qmemman/qmemman*py $RPM_BUILD_ROOT%{python_sitearch}/qubes +cp qmemman/qmemman*py[co] $RPM_BUILD_ROOT%{python_sitearch}/qubes mkdir -p $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/patch_appvm_initramfs.sh $RPM_BUILD_ROOT/usr/lib/qubes @@ -77,6 +79,8 @@ cp aux-tools/convert_dirtemplate2vm.sh $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/create_apps_for_appvm.sh $RPM_BUILD_ROOT/usr/lib/qubes cp aux-tools/remove_appvm_appmenus.sh $RPM_BUILD_ROOT/usr/lib/qubes cp pendrive_swapper/qubes_pencmd $RPM_BUILD_ROOT/usr/lib/qubes +cp qmemman/server.py $RPM_BUILD_ROOT/usr/lib/qubes/qmemman_daemon.py +cp ../common/meminfo-writer $RPM_BUILD_ROOT/usr/lib/qubes/ cp restore/xenstore-watch restore/qvm-create-default-dvm $RPM_BUILD_ROOT/usr/bin cp restore/qubes_restore restore/xenfreepages $RPM_BUILD_ROOT/usr/lib/qubes @@ -195,6 +199,7 @@ fi %{python_sitearch}/qubes/__init__.py %{python_sitearch}/qubes/__init__.pyc %{python_sitearch}/qubes/__init__.pyo +%{python_sitearch}/qubes/qmemman*.py* /usr/lib/qubes/patch_appvm_initramfs.sh /usr/lib/qubes/unbind_pci_device.sh /usr/lib/qubes/unbind_all_network_devices @@ -203,6 +208,8 @@ fi /usr/lib/qubes/create_apps_for_appvm.sh /usr/lib/qubes/remove_appvm_appmenus.sh /usr/lib/qubes/qubes_pencmd +/usr/lib/qubes/qmemman_daemon.py* +/usr/lib/qubes/meminfo-writer %attr(770,root,qubes) %dir /var/lib/qubes %attr(770,root,qubes) %dir /var/lib/qubes/vm-templates %attr(770,root,qubes) %dir /var/lib/qubes/appvms From f3561b7aad137ef10c8c045db1b48fefb8ece2be Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Mon, 30 Aug 2010 11:43:30 +0200 Subject: [PATCH 02/24] Fix restore completion detection in appvm/qubes_core --- appvm/qubes_core | 7 +------ dom0/restore/qubes_restore.c | 1 + 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/appvm/qubes_core b/appvm/qubes_core index c291ec78..1b7aa3f9 100755 --- a/appvm/qubes_core +++ b/appvm/qubes_core @@ -35,13 +35,8 @@ start() (read a b c d ; xenstore-write device/qubes_used_mem $c) # we're still running in DispVM template echo "Waiting for save/restore..." - # WARNING: Nergalism! - # Apparently it has been determined that DomU kernel - # dmesg's "using vcpu" after restore - while ! dmesg -c | grep "using vcpu" ; do usleep 10 ; done - # we're now after restore in a new instance of a DispVM # ... wait until qubes_restore.c (in Dom0) recreates VM-specific keys - while ! xenstore-read qubes_vm_type 2>/dev/null ; do + while ! xenstore-read qubes_restore_complete 2>/dev/null ; do usleep 10 done echo Back to life. 
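The loop above polls xenstore until dom0's qubes_restore writes the qubes_restore_complete key added below. For illustration only, an equivalent of the VM-side wait in Python, assuming the same xenstore-read CLI the script uses:

import subprocess, time

def wait_for_restore_complete(poll_sec=0.01):
    # dom0 recreates the VM-specific keys after restore, then writes
    # qubes_restore_complete; until then xenstore-read exits non-zero
    with open("/dev/null", "w") as devnull:
        while subprocess.call(["xenstore-read", "qubes_restore_complete"],
                              stdout=devnull, stderr=devnull) != 0:
            time.sleep(poll_sec)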
diff --git a/dom0/restore/qubes_restore.c b/dom0/restore/qubes_restore.c index 9ab3550e..0374eff3 100644 --- a/dom0/restore/qubes_restore.c +++ b/dom0/restore/qubes_restore.c @@ -336,6 +336,7 @@ void setup_xenstore(int domid, char *name) snprintf(val, sizeof(val), "10.%s.255.254", netvm_id); write_xs_single(xs, domid, "qubes_secondary_dns", val); write_xs_single(xs, domid, "qubes_vm_type", "AppVM"); + write_xs_single(xs, domid, "qubes_restore_complete", "True"); xs_daemon_close(xs); } From 2eba4c1c15e81792674b18d0453faee3edb1b048 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Mon, 30 Aug 2010 12:01:42 +0200 Subject: [PATCH 03/24] I will test before commit. I will test before commit. I will... --- dom0/qmemman/qmemman_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index e5f5874f..302333ec 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -75,6 +75,7 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler): """ def handle(self): + global additional_balance_delay # self.request is the TCP socket connected to the client while True: self.data = self.request.recv(1024).strip() @@ -105,9 +106,11 @@ def start_server(): server.serve_forever() def start_balancer(): + global additional_balance_delay while True: time.sleep(1) - if additional_balance_delay == 0: + if additional_balance_delay != 0: + print 'waiting additional_balance_delay to allow VM to start' time.sleep(additional_balance_delay) additional_balance_delay = 0 global_lock.acquire() From eb6755e93c22258e759072db653629bc48e5fcf8 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Mon, 30 Aug 2010 14:50:48 +0200 Subject: [PATCH 04/24] qmemman: fix locking We want balance() to wait on a lock even after balloon() has finished, until socket client has closed. --- dom0/qmemman/qmemman_server.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index 302333ec..73dd205f 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -76,21 +76,25 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler): def handle(self): global additional_balance_delay + got_lock = False # self.request is the TCP socket connected to the client while True: self.data = self.request.recv(1024).strip() if len(self.data) == 0: print 'EOF' + if got_lock: + global_lock.release() return - if self.data == "DONE": + if got_lock: + print 'Second request over qmemman.sock ?' return global_lock.acquire() + got_lock = True if system_state.do_balloon(int(self.data)): resp = "OK\n" additional_balance_delay = 5 else: resp = "FAIL\n" - global_lock.release() self.request.send(resp) From de2619fbedaf7f76aa64bb5d8f3d0c576ce18eee Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Tue, 31 Aug 2010 15:53:24 +0200 Subject: [PATCH 05/24] qmemman: wrap xenapi.memset within try/except It can fail e.g. 
when a domain is being shutdown with a pretty message like File "/usr/lib64/python2.6/site-packages/xen/xend/XendDomainInfo.py", line 1322, in setMemoryTarget (target * 1024)) Error: (1, 'Operation not permitted') --- dom0/qmemman/qmemman.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 55e75e73..915e61f3 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -105,8 +105,12 @@ class SystemState: def mem_set(self, id, val): uuid = self.domdict[id].uuid print 'mem-set domain', id, 'to', val - self.xend_session.session.xenapi.VM.set_memory_dynamic_max_live(uuid, val) - self.xend_session.session.xenapi.VM.set_memory_dynamic_min_live(uuid, val) + try: + self.xend_session.session.xenapi.VM.set_memory_dynamic_max_live(uuid, val) + self.xend_session.session.xenapi.VM.set_memory_dynamic_min_live(uuid, val) +#can happen in the middle of domain shutdown + except XenAPI.Failure: + pass def do_balloon(self, memsize): MAX_TRIES = 20 From c66e0848f388fbd7443df1964930a5f92c67cc29 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Tue, 31 Aug 2010 16:19:01 +0200 Subject: [PATCH 06/24] qmemman: limit domain memory to 2G to workaround for xen xml-rpc limitation File "/usr/lib64/python2.6/xmlrpclib.py", line 710, in dump_int raise OverflowError, "int exceeds XML-RPC limits" OverflowError: int exceeds XML-RPC limits How crappy. --- dom0/qmemman/qmemman.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 915e61f3..46dfe75d 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -104,6 +104,9 @@ class SystemState: def mem_set(self, id, val): uuid = self.domdict[id].uuid + if val >= 2**31: + print 'limiting memory from ', val, 'to maxint because of xml-rpc lameness' + val = 2**31 - 1 print 'mem-set domain', id, 'to', val try: self.xend_session.session.xenapi.VM.set_memory_dynamic_max_live(uuid, val) From 80771763cfc3f450c9cd57b342dccbf8392dbc6c Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Wed, 1 Sep 2010 10:39:39 +0200 Subject: [PATCH 07/24] qmemman: limit total memory transfer, not each one --- dom0/qmemman/qmemman.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 46dfe75d..7df89eb2 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -160,20 +160,29 @@ class SystemState: scaled_req = self.domdict[dom].memory_actual + scale*(mem - self.domdict[dom].memory_actual) return int(scaled_req) + def is_balance_req_significant(self, memset_reqs): + total_memory_transfer = 0 + MIN_TOTAL_MEMORY_TRANSFER = 150*1024*1024 + for rq in memset_reqs: + dom, mem = rq + memory_change = mem - self.domdict[dom].memory_actual + total_memory_transfer += abs(memory_change) + return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER + def do_balance(self): if os.path.isfile('/etc/do-not-membalance'): return self.refresh_memactual() xenfree = self.get_free_xen_memory() memset_reqs = qmemman_algo.balance(xenfree, self.domdict) + if not self.is_balance_req_significant(memset_reqs): + return + wait_before_first_inflate = False i = 0 while i < len(memset_reqs): dom, mem = memset_reqs[i] memory_change = mem - self.domdict[dom].memory_actual - if abs(memory_change) < 100*1024*1024: - i = i + 1 - continue if memory_change < 0: wait_before_first_inflate = True else: From 10408d61db2e87b3b1667ec50de1bf39e1e83d35 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: 
Wed, 1 Sep 2010 12:40:02 +0200 Subject: [PATCH 08/24] qmemman: when low on memory, do not make a VM go below prefmem Now the balance() has two different cases: enough memory and low_on_memory. In the former, distribute memory proportionally; in the latter, don't, as doing so would make a VM go below prefmem. --- dom0/qmemman/qmemman_algo.py | 77 +++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 22 deletions(-) diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index 45cf6a59..61e58b68 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -64,25 +64,12 @@ def balloon(memsize, domdict): return request # REQ_SAFETY_NET_FACTOR is a bit greater that 1. So that if the domain yields a bit less than requested, due # to e.g. rounding errors, we will not get stuck. The surplus will return to the VM during "balance" call. - -def balance(xenfree, domdict): - total_memneeded = 0 - total_mem_pref = 0 - recalc_mem_used(domdict) -#pass 1: compute the above "total" values - for i in domdict.keys(): - if domdict[i].meminfo is None: - continue - need = memneeded(domdict[i]) - print 'domain' , i, 'act/pref', domdict[i].memory_actual, prefmem(domdict[i]), 'need=', need - total_memneeded += need - total_mem_pref += prefmem(domdict[i]) - totalsum = xenfree - total_memneeded -#pass 2: redistribute "totalsum" of memory between domains, proportionally to prefmem - donors = list() - acceptors = list() +#redistribute positive "totalsum" of memory between domains, proportionally to prefmem +def balance_when_enough_memory(domdict, xenfree, total_mem_pref, totalsum): + donors_rq = list() + acceptors_rq = list() for i in domdict.keys(): if domdict[i].meminfo is None: continue @@ -92,10 +79,56 @@ #prevent rounding errors target = int(0.995*target_nonint) if (target < domdict[i].memory_actual): - donors.append((i, target)) + donors_rq.append((i, target)) else: - acceptors.append((i, target)) - print 'balance: xenfree=', xenfree, 'requests:', donors + acceptors - return donors + acceptors + acceptors_rq.append((i, target)) + print 'balance(enough): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq + return donors_rq + acceptors_rq + +#when not enough mem to make everyone be above prefmem, make donors be at prefmem, and +#redistribute anything left between acceptors +def balance_when_low_on_memory(domdict, xenfree, total_mem_pref_acceptors, donors, acceptors): + donors_rq = list() + acceptors_rq = list() + squeezed_mem = xenfree + for i in donors: + avail = -memneeded(domdict[i]) + if avail < 10*1024*1024: + #probably we have already tried making it exactly at prefmem, give up + continue + squeezed_mem -= avail + donors_rq.append((i, prefmem(domdict[i]))) + for i in acceptors: + scale = 1.0*prefmem(domdict[i])/total_mem_pref_acceptors + target_nonint = domdict[i].memory_actual + scale*squeezed_mem + acceptors_rq.append((i, int(target_nonint))) + print 'balance(low): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq + return donors_rq + acceptors_rq + +def balance(xenfree, domdict): + total_memneeded = 0 + total_mem_pref = 0 + total_mem_pref_acceptors = 0 - \ No newline at end of file + recalc_mem_used(domdict) + donors = list() + acceptors = list() +#pass 1: compute the above "total" values + for i in domdict.keys(): + if domdict[i].meminfo is None: + continue + need = memneeded(domdict[i]) + print 'domain' , i, 'act/pref', domdict[i].memory_actual, prefmem(domdict[i]), 'need=', need + if need < 0:
donors.append(i) + else: + acceptors.append(i) + total_mem_pref_acceptors += prefmem(domdict[i]) + total_memneeded += need + total_mem_pref += prefmem(domdict[i]) + + totalsum = xenfree - total_memneeded + if totalsum > 0: + return balance_when_enough_memory(domdict, xenfree, total_mem_pref, totalsum) + else: + return balance_when_low_on_memory(domdict, xenfree, total_mem_pref_acceptors, donors, acceptors) From 7dcb7cb196fab579d2832f0ef4804859e11e73ea Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Fri, 3 Sep 2010 16:19:48 +0200 Subject: [PATCH 09/24] qmemman: don't use xenapi, use hypercalls to do mem-set --- dom0/qmemman/qmemman.py | 47 ++++++----------------------------------- 1 file changed, 6 insertions(+), 41 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 7df89eb2..69521470 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -1,45 +1,23 @@ -import xmlrpclib -from xen.xm import XenAPI import xen.lowlevel.xc +import xen.lowlevel.xs import string import time import qmemman_algo import os -class XendSession(object): - def __init__(self): -# self.get_xend_session_old_api() - self.get_xend_session_new_api() - -# def get_xend_session_old_api(self): -# from xen.xend import XendClient -# from xen.util.xmlrpcclient import ServerProxy -# self.xend_server = ServerProxy(XendClient.uri) -# if self.xend_server is None: -# print "get_xend_session_old_api(): cannot open session!" - - - def get_xend_session_new_api(self): - xend_socket_uri = "httpu:///var/run/xend/xen-api.sock" - self.session = XenAPI.Session (xend_socket_uri) - self.session.login_with_password ("", "") - if self.session is None: - print "get_xend_session_new_api(): cannot open session!" - class DomainState: def __init__(self, id): self.meminfo = None self.memory_actual = None self.mem_used = None - self.uuid = None self.id = id self.meminfo_updated = False class SystemState: def __init__(self): - self.xend_session = XendSession() self.domdict = {} self.xc = xen.lowlevel.xc.xc() + self.xs = xen.lowlevel.xs.xs() self.BALOON_DELAY = 0.1 def add_domain(self, id): @@ -57,24 +35,10 @@ class SystemState: # return long(ret) def refresh_memactual(self): - update_uuid_info = False for domain in self.xc.domain_getinfo(): id = str(domain['domid']) if self.domdict.has_key(id): self.domdict[id].memory_actual = domain['mem_kb']*1024 - if self.domdict[id].uuid is None: - update_uuid_info = True - if not update_uuid_info: - return - dom_recs = self.xend_session.session.xenapi.VM.get_all_records() -# dom_metrics_recs = self.xend_session.session.xenapi.VM_metrics.get_all_records() - for dom_ref, dom_rec in dom_recs.items(): -# dom_metrics_rec = dom_metrics_recs[dom_rec['metrics']] - id = dom_rec['domid'] -# mem = int(dom_metrics_rec['memory_actual'])/1024 - if (self.domdict.has_key(id)): -# self.domdict[id].memory_actual = mem - self.domdict[id].uuid = dom_rec['uuid'] def parse_meminfo(self, meminfo): dict = {} @@ -98,11 +62,12 @@ class SystemState: return dict #the below works (and is fast), but then 'xm list' shows unchanged memory value - def mem_set_alternative(self, id, val): - os.system('xenstore-write /local/domain/' + id + '/memory/target ' + str(val/1024)) + def mem_set(self, id, val): + print 'mem-set domain', id, 'to', val + self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024)) self.xc.domain_set_target_mem(int(id), val/1024) - def mem_set(self, id, val): + def mem_set_obsolete(self, id, val): uuid = self.domdict[id].uuid if val >= 2**31: print 'limiting memory 
from ', val, 'to maxint because of xml-rpc lameness' From 1c337db989e4a3abe63b54233c854e4eaff72b1f Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Fri, 3 Sep 2010 16:23:09 +0200 Subject: [PATCH 10/24] qmemman: make meminfo-writer a C program --- common/Makefile | 7 +++++ common/meminfo-writer | 4 --- common/meminfo-writer.c | 62 ++++++++++++++++++++++++++++++++++++++++ rpm_spec/core-appvm.spec | 1 + rpm_spec/core-dom0.spec | 1 + 5 files changed, 71 insertions(+), 4 deletions(-) create mode 100644 common/Makefile delete mode 100755 common/meminfo-writer create mode 100644 common/meminfo-writer.c diff --git a/common/Makefile b/common/Makefile new file mode 100644 index 00000000..0bef1ae7 --- /dev/null +++ b/common/Makefile @@ -0,0 +1,7 @@ +CC=gcc +CFLAGS=-Wall -g +all: meminfo-writer +meminfo-writer: meminfo-writer.o + $(CC) -g -o meminfo-writer meminfo-writer.o -lxenstore +clean: + rm -f meminfo-writer *.o *~ diff --git a/common/meminfo-writer b/common/meminfo-writer deleted file mode 100755 index fdbfd29f..00000000 --- a/common/meminfo-writer +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -while sleep 1 ; do - xenstore-write memory/meminfo "`cat /proc/meminfo`" -done diff --git a/common/meminfo-writer.c b/common/meminfo-writer.c new file mode 100644 index 00000000..5e019039 --- /dev/null +++ b/common/meminfo-writer.c @@ -0,0 +1,62 @@ +/* + * The Qubes OS Project, http://www.qubes-os.org + * + * Copyright (C) 2010 Rafal Wojtczuk + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +int main() +{ + struct xs_handle *xs; + int fd, n; + char buf[4096]; + + openlog("meminfo-writer", LOG_CONS | LOG_PID, LOG_DAEMON); + xs = xs_domain_open(); + if (!xs) { + syslog(LOG_DAEMON | LOG_ERR, "xs_domain_open"); + exit(1); + } + for (;;) { + fd = open("/proc/meminfo", O_RDONLY); + if (fd < 0) { + syslog(LOG_DAEMON | LOG_ERR, + "error opening /proc/meminfo ?"); + exit(1); + } + n = read(fd, buf, sizeof(buf)); + if (n <= 0) { + syslog(LOG_DAEMON | LOG_ERR, + "error reading /proc/meminfo ?"); + exit(1); + } + close(fd); + if (!xs_write(xs, XBT_NULL, "memory/meminfo", buf, n)) { + syslog(LOG_DAEMON | LOG_ERR, + "error writing xenstore ?"); + exit(1); + } + sleep(1); + } +} diff --git a/rpm_spec/core-appvm.spec b/rpm_spec/core-appvm.spec index 2949a059..7974e23d 100644 --- a/rpm_spec/core-appvm.spec +++ b/rpm_spec/core-appvm.spec @@ -52,6 +52,7 @@ fi %build make clean all +make -C ../common %install diff --git a/rpm_spec/core-dom0.spec b/rpm_spec/core-dom0.spec index f4af95d8..25b3e407 100644 --- a/rpm_spec/core-dom0.spec +++ b/rpm_spec/core-dom0.spec @@ -47,6 +47,7 @@ The Qubes core files for installation on Dom0. 
python -m compileall qvm-core qmemman python -O -m compileall qvm-core qmemman make -C restore +make -C ../common %install From 22df51742511e2a4bf2f0e143e0a2dd21ba6b231 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Mon, 6 Sep 2010 10:46:36 +0200 Subject: [PATCH 11/24] qmemman: detect domain list change by watching /vm, not /local/domain The latter triggers on every memory/meminfo key update, which needlessly adds xenstore requests. --- dom0/qmemman/qmemman_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index 73dd205f..4a95fee3 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -30,7 +30,7 @@ class WatchType: class XS_Watcher: def __init__(self): self.handle = xen.lowlevel.xs.xs() - self.handle.watch('/local/domain', WatchType(XS_Watcher.dom_list_change, None)) + self.handle.watch('/vm', WatchType(XS_Watcher.dom_list_change, None)) self.watch_token_dict = {} def dom_list_change(self, param): From 11abef3439fd24fcf6d1c7289b3d5078303dd781 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Tue, 7 Sep 2010 13:10:48 +0200 Subject: [PATCH 12/24] qmemman: xc.domain_set_target_mem can throw exceptions, too --- dom0/qmemman/qmemman.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 69521470..8538cfa5 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -65,7 +65,12 @@ class SystemState: def mem_set(self, id, val): print 'mem-set domain', id, 'to', val self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024)) - self.xc.domain_set_target_mem(int(id), val/1024) +#can happen in the middle of domain shutdown +#apparently xc.lowlevel throws exceptions too + try: + self.xc.domain_set_target_mem(int(id), val/1024) + except: + pass def mem_set_obsolete(self, id, val): uuid = self.domdict[id].uuid From 5be12f8459600a21e2b382eac6d1f6eea384c728 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Tue, 7 Sep 2010 16:00:14 +0200 Subject: [PATCH 13/24] qmemman: switch off memory balancing when doing xm save Apparently, it interferes: INFO (XendCheckpoint:417) ERROR Internal error: Could not get vcpu context INFO (XendCheckpoint:417) ERROR Internal error: Failed to map/save the p2m frame list --- dom0/qmemman/qmemman.py | 2 +- dom0/restore/qubes_prepare_saved_domain.sh | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 8538cfa5..d0cb14ff 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -140,7 +140,7 @@ class SystemState: return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER def do_balance(self): - if os.path.isfile('/etc/do-not-membalance'): + if os.path.isfile('/var/run/qubes/do-not-membalance'): return self.refresh_memactual() xenfree = self.get_free_xen_memory() diff --git a/dom0/restore/qubes_prepare_saved_domain.sh b/dom0/restore/qubes_prepare_saved_domain.sh index 7c9ca5c4..e1200e5c 100755 --- a/dom0/restore/qubes_prepare_saved_domain.sh +++ b/dom0/restore/qubes_prepare_saved_domain.sh @@ -48,10 +48,16 @@ xenstore-read /local/domain/$ID/qubes_gateway | \ xm block-detach $1 /dev/xvdb MEM=$(xenstore-read /local/domain/$ID/device/qubes_used_mem) echo MEM=$MEM +QMEMMAN_STOP=/var/run/qubes/do-not-membalance +touch $QMEMMAN_STOP xm mem-set $1 $(($MEM/1000)) sleep 1 touch $2 -if ! xm save $1 $2 ; then exit 1 ; fi +if ! 
xm save $1 $2 ; then + rm -f $QMEMMAN_STOP + exit 1 +fi +rm -f $QMEMMAN_STOP cd $VMDIR tar -Scvf saved_cows.tar root-cow.img swap-cow.img From 87d1e973c7e82001fb09a1ab6254780fd2104ab9 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 10:29:35 +0200 Subject: [PATCH 14/24] qmemman: print balance stats only when updating --- dom0/qmemman/qmemman.py | 7 +++++++ dom0/qmemman/qmemman_algo.py | 6 +++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index d0cb14ff..aca537d3 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -139,6 +139,12 @@ class SystemState: total_memory_transfer += abs(memory_change) return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER + def print_stats(self, xenfree, memset_reqs): + for i in self.domdict.keys(): + if self.domdict[i].meminfo is not None: + print 'dom' , i, 'act/pref', self.domdict[i].memory_actual, qmemman_algo.prefmem(self.domdict[i]) + print 'xenfree=', xenfree, 'balance req:', memset_reqs + def do_balance(self): if os.path.isfile('/var/run/qubes/do-not-membalance'): return @@ -148,6 +154,7 @@ class SystemState: if not self.is_balance_req_significant(memset_reqs): return + self.print_stats(xenfree, memset_reqs) wait_before_first_inflate = False i = 0 while i < len(memset_reqs): diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index 61e58b68..b0c9cd1e 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -82,7 +82,7 @@ def balance_when_enough_memory(domdict, xenfree, total_mem_pref, totalsum): donors_rq.append((i, target)) else: acceptors_rq.append((i, target)) - print 'balance(enough): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq +# print 'balance(enough): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq return donors_rq + acceptors_rq #when not enough mem to make everyone be above prefmem, make donors be at prefmem, and @@ -102,7 +102,7 @@ def balance_when_low_on_memory(domdict, xenfree, total_mem_pref_acceptors, donor scale = 1.0*prefmem(domdict[i])/total_mem_pref_acceptors target_nonint = domdict[i].memory_actual + scale*squeezed_mem acceptors_rq.append((i, int(target_nonint))) - print 'balance(low): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq +# print 'balance(low): xenfree=', xenfree, 'requests:', donors_rq + acceptors_rq return donors_rq + acceptors_rq def balance(xenfree, domdict): @@ -118,7 +118,7 @@ def balance(xenfree, domdict): if domdict[i].meminfo is None: continue need = memneeded(domdict[i]) - print 'domain' , i, 'act/pref', domdict[i].memory_actual, prefmem(domdict[i]), 'need=', need +# print 'domain' , i, 'act/pref', domdict[i].memory_actual, prefmem(domdict[i]), 'need=', need if need < 0: donors.append(i) else: From 5a33ed71ceb2e788038ac77ef0136433d14a1895 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 10:36:13 +0200 Subject: [PATCH 15/24] qmemman: use the fact that balloon driver retries Apparently even if there is not enough xen memory to balloon up, balloon driver will try to fulfill the request later, when some memory is freed. Thus, in do_balloon, do not limit mem_set to the available memory. 
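For reference, a condensed sketch (not the patch itself) of the mem_set this change relies on, combining patches 09 and 12: record the balloon target in xenstore (in kB) and issue the hypercall; the in-VM balloon driver keeps retrying whenever Xen is short on free memory at that moment. Handles and names follow qmemman.py.

def mem_set_sketch(xs, xc, dom, val):
    # xs: xen.lowlevel.xs handle, xc: xen.lowlevel.xc handle, val in bytes
    xs.write('', '/local/domain/' + dom + '/memory/target', str(val/1024))
    try:
        xc.domain_set_target_mem(int(dom), val/1024)
    except Exception:
        pass  # the domain may be in the middle of shutdown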
--- dom0/qmemman/qmemman.py | 34 ++++++---------------------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index aca537d3..8ec73715 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -12,6 +12,7 @@ class DomainState: self.mem_used = None self.id = id self.meminfo_updated = False + self.last_target = 0 class SystemState: @@ -64,6 +65,7 @@ class SystemState: #the below works (and is fast), but then 'xm list' shows unchanged memory value def mem_set(self, id, val): print 'mem-set domain', id, 'to', val + self.domdict[id].last_target = val self.xs.write('', '/local/domain/' + id + '/memory/target', str(val/1024)) #can happen in the middle of domain shutdown #apparently xc.lowlevel throws exceptions too @@ -118,24 +120,12 @@ class SystemState: self.domdict[domid].meminfo = self.parse_meminfo(val) self.domdict[domid].meminfo_updated = True - def adjust_inflates_to_xenfree(self, reqs, idx): - i = idx - memory_needed = 0 - while i < len(reqs): - dom, mem = reqs[i] - memory_needed += mem - self.domdict[dom].memory_actual - i = i + 1 - scale = 1.0*self.get_free_xen_memory()/memory_needed - dom, mem = reqs[idx] - scaled_req = self.domdict[dom].memory_actual + scale*(mem - self.domdict[dom].memory_actual) - return int(scaled_req) - def is_balance_req_significant(self, memset_reqs): total_memory_transfer = 0 MIN_TOTAL_MEMORY_TRANSFER = 150*1024*1024 for rq in memset_reqs: dom, mem = rq - memory_change = mem - self.domdict[dom].memory_actual + memory_change = mem - self.domdict[dom].last_target total_memory_transfer += abs(memory_change) return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER @@ -155,22 +145,10 @@ return self.print_stats(xenfree, memset_reqs) - wait_before_first_inflate = False - i = 0 - while i < len(memset_reqs): - dom, mem = memset_reqs[i] - memory_change = mem - self.domdict[dom].memory_actual - if memory_change < 0: - wait_before_first_inflate = True - else: - if wait_before_first_inflate: - time.sleep(self.BALOON_DELAY) - wait_before_first_inflate = False - #the following is called before _each_ inflate, to account for possibility that - #previously triggered memory release is in progress - mem = self.adjust_inflates_to_xenfree(memset_reqs, i) + + for rq in memset_reqs: + dom, mem = rq self.mem_set(dom, mem) - i = i + 1 # for i in self.domdict.keys(): # print 'domain ', i, ' meminfo=', self.domdict[i].meminfo, 'actual mem', self.domdict[i].memory_actual From 24b3baf063a3ec1eac3a122476afecef8a885771 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 11:08:20 +0200 Subject: [PATCH 16/24] qmemman: use 'MemTotal' from /proc/meminfo to calculate used memory Previously, memory_actual (retrieved from xen) was used; it can be inconsistent. 'MemTotal' can be spoofed, but anyway we rely on other fields from /proc/meminfo.
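The used-memory formula this patch switches to, shown as a standalone helper for clarity (a sketch, not part of the patch); all values are bytes computed from the VM-reported /proc/meminfo fields, and swap usage counts as used memory:

def used_memory(mi):
    return (mi['MemTotal'] - mi['MemFree'] - mi['Cached'] - mi['Buffers']
            + mi['SwapTotal'] - mi['SwapFree'])

For example, a VM reporting 400MB MemTotal, 100MB MemFree, 150MB of Buffers plus Cached, and no swap activity counts as using 150MB.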
--- dom0/qmemman/qmemman.py | 2 +- dom0/qmemman/qmemman_algo.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index 8ec73715..b352d95a 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -50,7 +50,7 @@ class SystemState: dict[string.rstrip(l2[0], ":")] = l2[1] try: - for i in ('MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'): + for i in ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'): val = int(dict[i])*1024 if (val < 0): return None diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index b0c9cd1e..c34a1f95 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -2,7 +2,7 @@ def is_suspicious(dom): ret = False if dom.meminfo['SwapTotal'] < dom.meminfo['SwapFree']: ret = True - if dom.memory_actual < dom.meminfo['MemFree'] + dom.meminfo['Cached'] + dom.meminfo['Buffers']: + if dom.meminfo['MemTotal'] < dom.meminfo['MemFree'] + dom.meminfo['Cached'] + dom.meminfo['Buffers']: ret = True if ret: print 'suspicious meminfo for domain', dom.id, 'mem actual', dom.memory_actual, dom.meminfo @@ -17,7 +17,7 @@ def recalc_mem_used(domdict): dom.meminfo = None dom.mem_used = None else: - dom.mem_used = dom.memory_actual - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree'] + dom.mem_used = dom.meminfo['MemTotal'] - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree'] def prefmem(dom): if dom.meminfo_updated: From 9c609a23bf43759dad929419095a8c659418d1da Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 11:24:04 +0200 Subject: [PATCH 17/24] qmemman: move /proc/meminfo parsing to qmemman_algo Just cosmetics, to make code layout more coherent. 
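For reference, the contract of the parse_meminfo function moved below: it takes the raw text of /proc/meminfo and returns a dict with the six tracked fields scaled from kB to bytes, or None on missing, negative, or inconsistent (SwapTotal < SwapFree) data. A hypothetical sample input:

sample = ("MemTotal: 409600 kB\nMemFree: 102400 kB\nBuffers: 51200 kB\n"
          "Cached: 102400 kB\nSwapTotal: 0 kB\nSwapFree: 0 kB\n")
# parse_meminfo(sample)['MemTotal'] == 409600*1024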
--- dom0/qmemman/qmemman.py | 25 +----------------- dom0/qmemman/qmemman_algo.py | 49 +++++++++++++++++++++++++----------- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index b352d95a..a76b0bfa 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -11,7 +11,6 @@ class DomainState: self.memory_actual = None self.mem_used = None self.id = id - self.meminfo_updated = False self.last_target = 0 class SystemState: @@ -41,27 +40,6 @@ class SystemState: if self.domdict.has_key(id): self.domdict[id].memory_actual = domain['mem_kb']*1024 - def parse_meminfo(self, meminfo): - dict = {} - l1 = string.split(meminfo,"\n") - for i in l1: - l2 = string.split(i) - if len(l2) >= 2: - dict[string.rstrip(l2[0], ":")] = l2[1] - - try: - for i in ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'): - val = int(dict[i])*1024 - if (val < 0): - return None - dict[i] = val - except: - return None - - if dict['SwapTotal'] < dict['SwapFree']: - return None - return dict - #the below works (and is fast), but then 'xm list' shows unchanged memory value def mem_set(self, id, val): print 'mem-set domain', id, 'to', val @@ -117,8 +95,7 @@ class SystemState: niter = niter + 1 def refresh_meminfo(self, domid, val): - self.domdict[domid].meminfo = self.parse_meminfo(val) - self.domdict[domid].meminfo_updated = True + qmemman_algo.refresh_meminfo_for_domain(self.domdict[domid], val) def is_balance_req_significant(self, memset_reqs): total_memory_transfer = 0 diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index c34a1f95..306e08fb 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -1,3 +1,26 @@ +import string + +def parse_meminfo(self, meminfo): + dict = {} + l1 = string.split(meminfo,"\n") + for i in l1: + l2 = string.split(i) + if len(l2) >= 2: + dict[string.rstrip(l2[0], ":")] = l2[1] + + try: + for i in ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'): + val = int(dict[i])*1024 + if (val < 0): + return None + dict[i] = val + except: + return None + + if dict['SwapTotal'] < dict['SwapFree']: + return None + return dict + def is_suspicious(dom): ret = False if dom.meminfo['SwapTotal'] < dom.meminfo['SwapFree']: @@ -8,20 +31,18 @@ def is_suspicious(dom): print 'suspicious meminfo for domain', dom.id, 'mem actual', dom.memory_actual, dom.meminfo return ret -def recalc_mem_used(domdict): - for domid in domdict.keys(): - dom = domdict[domid] - if dom.meminfo_updated: - dom.meminfo_updated = False - if is_suspicious(dom): - dom.meminfo = None - dom.mem_used = None - else: - dom.mem_used = dom.meminfo['MemTotal'] - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree'] - +def refresh_meminfo_for_domain(dom, xenstore_key): + meminfo = parse_meminfo(xenstore_key) + dom.meminfo = meminfo + if meminfo is None: + return + if is_suspicious(dom): + dom.meminfo = None + dom.mem_used = None + else: + dom.mem_used = dom.meminfo['MemTotal'] - dom.meminfo['MemFree'] - dom.meminfo['Cached'] - dom.meminfo['Buffers'] + dom.meminfo['SwapTotal'] - dom.meminfo['SwapFree'] + def prefmem(dom): - if dom.meminfo_updated: - raise AssertionError('meminfo_updated=True in prefmem') CACHE_FACTOR = 1.3 #dom0 is special, as it must have large cache, for vbds. 
Thus, give it a special boost if dom.id == '0': @@ -40,7 +61,6 @@ def balloon(memsize, domdict): donors = list() request = list() available = 0 - recalc_mem_used(domdict) for i in domdict.keys(): if domdict[i].meminfo is None: continue @@ -110,7 +130,6 @@ def balance(xenfree, domdict): total_mem_pref = 0 total_mem_pref_acceptors = 0 - recalc_mem_used(domdict) donors = list() acceptors = list() #pass 1: compute the above "total" values From 7545789a26786d45158e476b80dec48c3e5fecc2 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 11:30:02 +0200 Subject: [PATCH 18/24] qmemman: now parse_meminfo takes a single argument --- dom0/qmemman/qmemman_algo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_algo.py b/dom0/qmemman/qmemman_algo.py index 306e08fb..06037206 100755 --- a/dom0/qmemman/qmemman_algo.py +++ b/dom0/qmemman/qmemman_algo.py @@ -1,6 +1,6 @@ import string -def parse_meminfo(self, meminfo): +def parse_meminfo(meminfo): dict = {} l1 = string.split(meminfo,"\n") for i in l1: From f4e46b63a4d6d6cf4d4a75c5f65b512cf98c9915 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 12:33:48 +0200 Subject: [PATCH 19/24] qmemman: in client code, set FD_CLOEXEC on qmmemman.socket --- dom0/qmemman/qmemman_client.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dom0/qmemman/qmemman_client.py b/dom0/qmemman/qmemman_client.py index b4bc77e6..e1d5234c 100755 --- a/dom0/qmemman/qmemman_client.py +++ b/dom0/qmemman/qmemman_client.py @@ -1,9 +1,14 @@ import socket - +import fcntl class QMemmanClient: def request_memory(self, amount): self.sock = socket.socket(socket.AF_UNIX) + + flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD) + flags |= fcntl.FD_CLOEXEC + fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags) + self.sock.connect("/var/run/qubes/qmemman.sock") self.sock.send(str(amount)+"\n") self.received = self.sock.recv(1024).strip() From 51e14fc8bbe877f155c1a544446879a506989c4c Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 12:36:18 +0200 Subject: [PATCH 20/24] qmemman: trigger do_balance() on receiving /proc/meminfo data --- dom0/qmemman/qmemman.py | 1 + dom0/qmemman/qmemman_server.py | 17 ----------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index a76b0bfa..eb9b4328 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -96,6 +96,7 @@ class SystemState: def refresh_meminfo(self, domid, val): qmemman_algo.refresh_meminfo_for_domain(self.domdict[domid], val) + self.do_balance() def is_balance_req_significant(self, memset_reqs): total_memory_transfer = 0 diff --git a/dom0/qmemman/qmemman_server.py b/dom0/qmemman/qmemman_server.py index 4a95fee3..52e35664 100755 --- a/dom0/qmemman/qmemman_server.py +++ b/dom0/qmemman/qmemman_server.py @@ -9,7 +9,6 @@ from qmemman import SystemState system_state = SystemState() global_lock = thread.allocate_lock() -additional_balance_delay = 0 def only_in_first_list(l1, l2): ret=[] @@ -75,7 +74,6 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler): """ def handle(self): - global additional_balance_delay got_lock = False # self.request is the TCP socket connected to the client while True: @@ -92,7 +90,6 @@ class QMemmanReqHandler(SocketServer.BaseRequestHandler): got_lock = True if system_state.do_balloon(int(self.data)): resp = "OK\n" - additional_balance_delay = 5 else: resp = "FAIL\n" self.request.send(resp) @@ -109,22 +106,8 @@ def start_server(): 
os.umask(077) server.serve_forever() -def start_balancer(): - global additional_balance_delay - while True: - time.sleep(1) - if additional_balance_delay != 0: - print 'waiting additional_balance_delay to allow VM to start' - time.sleep(additional_balance_delay) - additional_balance_delay = 0 - global_lock.acquire() - if additional_balance_delay == 0: - system_state.do_balance() - global_lock.release() - class QMemmanServer: @staticmethod def main(): thread.start_new_thread(start_server, tuple([])) - thread.start_new_thread(start_balancer, tuple([])) XS_Watcher().watch_loop() From f6e3607d2d87bb3cb05636cb275d8acbbc98d0fa Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Thu, 9 Sep 2010 17:51:53 +0200 Subject: [PATCH 21/24] qmemman: offload some processing to meminfo-writer Make meminfo-writer compute used memory, and report to qmemman only if it has changed significantly enough. As it is written in C, its code is much faster than qmemman-server; also in the idle case, it saves on xenstore communication overhead. This allows sending updates up to 10 times per second, with CPU load on the VM below 0.1%. --- appvm/qubes_core | 5 +- common/meminfo-writer.c | 156 +++++++++++++++++++++++++++------------- dom0/init.d/qubes_core | 6 +- 3 files changed, 116 insertions(+), 51 deletions(-) diff --git a/appvm/qubes_core b/appvm/qubes_core index 1b7aa3f9..aeaf53c4 100755 --- a/appvm/qubes_core +++ b/appvm/qubes_core @@ -82,7 +82,10 @@ fi fi - /usr/lib/qubes/meminfo-writer & + MEM_CHANGE_THRESHOLD_KB=30000 + MEMINFO_DELAY_USEC=100000 + /usr/lib/qubes/meminfo-writer $MEM_CHANGE_THRESHOLD_KB $MEMINFO_DELAY_USEC & + [ -x /rw/config/rc.local ] && /rw/config/rc.local success echo "" diff --git a/common/meminfo-writer.c b/common/meminfo-writer.c index 5e019039..d97c68df 100644 --- a/common/meminfo-writer.c +++ b/common/meminfo-writer.c @@ -1,62 +1,120 @@ -/* - * The Qubes OS Project, http://www.qubes-os.org - * - * Copyright (C) 2010 Rafal Wojtczuk - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- * - */ -#include -#include -#include -#include #include +#include +#include #include #include #include -int main() -{ - struct xs_handle *xs; - int fd, n; - char buf[4096]; +#include - openlog("meminfo-writer", LOG_CONS | LOG_PID, LOG_DAEMON); +unsigned long prev_used_mem; +int used_mem_change_threshold; +int delay; + +char *parse(char *buf) +{ + char *ptr = buf; + char name[256]; + static char outbuf[4096]; + int val; + int len; + int MemTotal=0, MemFree=0, Buffers=0, Cached=0, SwapTotal=0, SwapFree=0; + unsigned long long key; + long used_mem, used_mem_diff; + int nitems = 0; + + while (nitems != 6) { + sscanf(ptr, "%s %d kB\n%n", name, &val, &len); + key = *(unsigned long long *) ptr; + if (key == *(unsigned long long *) "MemTotal:") { + MemTotal = val; + nitems++; + } else if (key == *(unsigned long long *) "MemFree:") { + MemFree = val; + nitems++; + } else if (key == *(unsigned long long *) "Buffers:") { + Buffers = val; + nitems++; + } else if (key == *(unsigned long long *) "Cached: ") { + Cached = val; + nitems++; + } else if (key == *(unsigned long long *) "SwapTotal:") { + SwapTotal = val; + nitems++; + } else if (key == *(unsigned long long *) "SwapFree:") { + SwapFree = val; + nitems++; + } + + ptr += len; + } + + used_mem = + MemTotal - Buffers - Cached - MemFree + SwapTotal - SwapFree; + if (used_mem < 0) + return NULL; + + used_mem_diff = used_mem - prev_used_mem; + prev_used_mem = used_mem; + if (used_mem_diff < 0) + used_mem_diff = -used_mem_diff; + if (used_mem_diff > used_mem_change_threshold) { + sprintf(outbuf, + "MemTotal: %d kB\nMemFree: %d kB\nBuffers: %d kB\nCached: %d kB\n" + "SwapTotal: %d kB\nSwapFree: %d kB\n", MemTotal, + MemFree, Buffers, Cached, SwapTotal, SwapFree); + return outbuf; + } + return NULL; +} + +void usage() +{ + fprintf(stderr, + "usage: meminfo_writer threshold_in_kb delay_in_us\n"); + exit(1); +} + +void send_to_qmemman(struct xs_handle *xs, char *data) +{ + if (!xs_write(xs, XBT_NULL, "memory/meminfo", data, strlen(data))) { + syslog(LOG_DAEMON | LOG_ERR, "error writing xenstore ?"); + exit(1); + } +} + +int +main(int argc, char **argv) +{ + char buf[4096]; + int n; + char *meminfo_data; + int fd; + struct xs_handle *xs; + + if (argc != 3) + usage(); + used_mem_change_threshold = atoi(argv[1]); + delay = atoi(argv[2]); + if (!used_mem_change_threshold || !delay) + usage(); + + fd = open("/proc/meminfo", O_RDONLY); + if (fd < 0) { + perror("open meminfo"); + exit(1); + } xs = xs_domain_open(); if (!xs) { - syslog(LOG_DAEMON | LOG_ERR, "xs_domain_open"); + perror("xs_domain_open"); exit(1); } for (;;) { - fd = open("/proc/meminfo", O_RDONLY); - if (fd < 0) { - syslog(LOG_DAEMON | LOG_ERR, - "error opening /proc/meminfo ?"); - exit(1); - } n = read(fd, buf, sizeof(buf)); - if (n <= 0) { - syslog(LOG_DAEMON | LOG_ERR, - "error reading /proc/meminfo ?"); - exit(1); - } - close(fd); - if (!xs_write(xs, XBT_NULL, "memory/meminfo", buf, n)) { - syslog(LOG_DAEMON | LOG_ERR, - "error writing xenstore ?"); - exit(1); - } - sleep(1); + buf[n] = 0; + meminfo_data = parse(buf); + if (meminfo_data) + send_to_qmemman(xs, meminfo_data); + usleep(delay); + lseek(fd, 0, SEEK_SET); } } diff --git a/dom0/init.d/qubes_core b/dom0/init.d/qubes_core index e4bb5c3e..90f7cad7 100755 --- a/dom0/init.d/qubes_core +++ b/dom0/init.d/qubes_core @@ -57,7 +57,11 @@ start() cp /var/lib/qubes/qubes.xml /var/lib/qubes/backup/qubes-$(date +%F-%T).xml setup_dvm_files /usr/lib/qubes/qmemman_daemon.py >/var/log/qubes/qmemman.log 2>/var/log/qubes/qmemman.errs & - 
/usr/lib/qubes/meminfo-writer & + + MEM_CHANGE_THRESHOLD_KB=30000 + MEMINFO_DELAY_USEC=100000 + /usr/lib/qubes/meminfo-writer $MEM_CHANGE_THRESHOLD_KB $MEMINFO_DELAY_USEC & + touch /var/lock/subsys/qubes_core success echo From 0c1f21a28ec44f527237be9f810284de265c8522 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Fri, 10 Sep 2010 11:35:30 +0200 Subject: [PATCH 22/24] qmemman: when a AppVM is low on memory, allow small adjustments A small AppVM (say, with 100MB total) can go below prefmem, and still not be assigned memory, because of the MIN_TOTAL_MEMORY_TRANSFER threshold. So, if AppVM is below prefmem, allow for smaller mem-sets. --- common/meminfo-writer.c | 12 +++++++----- dom0/qmemman/qmemman.py | 8 +++++++- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/common/meminfo-writer.c b/common/meminfo-writer.c index d97c68df..2f6f2174 100644 --- a/common/meminfo-writer.c +++ b/common/meminfo-writer.c @@ -17,7 +17,8 @@ char *parse(char *buf) static char outbuf[4096]; int val; int len; - int MemTotal=0, MemFree=0, Buffers=0, Cached=0, SwapTotal=0, SwapFree=0; + int MemTotal = 0, MemFree = 0, Buffers = 0, Cached = 0, SwapTotal = + 0, SwapFree = 0; unsigned long long key; long used_mem, used_mem_diff; int nitems = 0; @@ -54,10 +55,12 @@ char *parse(char *buf) return NULL; used_mem_diff = used_mem - prev_used_mem; - prev_used_mem = used_mem; if (used_mem_diff < 0) used_mem_diff = -used_mem_diff; - if (used_mem_diff > used_mem_change_threshold) { + if (used_mem_diff > used_mem_change_threshold + || (used_mem > prev_used_mem && used_mem * 13 / 10 > MemTotal + && used_mem_diff > used_mem_change_threshold/2)) { + prev_used_mem = used_mem; sprintf(outbuf, "MemTotal: %d kB\nMemFree: %d kB\nBuffers: %d kB\nCached: %d kB\n" "SwapTotal: %d kB\nSwapFree: %d kB\n", MemTotal, @@ -82,8 +85,7 @@ void send_to_qmemman(struct xs_handle *xs, char *data) } } -int -main(int argc, char **argv) +int main(int argc, char **argv) { char buf[4096]; int n; diff --git a/dom0/qmemman/qmemman.py b/dom0/qmemman/qmemman.py index eb9b4328..cbfc0543 100755 --- a/dom0/qmemman/qmemman.py +++ b/dom0/qmemman/qmemman.py @@ -101,10 +101,16 @@ class SystemState: def is_balance_req_significant(self, memset_reqs): total_memory_transfer = 0 MIN_TOTAL_MEMORY_TRANSFER = 150*1024*1024 + MIN_MEM_CHANGE_WHEN_UNDER_PREF = 15*1024*1024 for rq in memset_reqs: dom, mem = rq - memory_change = mem - self.domdict[dom].last_target + last_target = self.domdict[dom].last_target + memory_change = mem - last_target total_memory_transfer += abs(memory_change) + pref = qmemman_algo.prefmem(self.domdict[dom]) + if last_target > 0 and last_target < pref and memory_change > MIN_MEM_CHANGE_WHEN_UNDER_PREF: + print 'dom', dom, 'is below pref, allowing balance' + return True return total_memory_transfer > MIN_TOTAL_MEMORY_TRANSFER def print_stats(self, xenfree, memset_reqs): From 673608158135279ed106511c34a95ed1047da342 Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Fri, 10 Sep 2010 11:38:06 +0200 Subject: [PATCH 23/24] Compile meminfo-writer with -O3 --- common/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/Makefile b/common/Makefile index 0bef1ae7..85888a90 100644 --- a/common/Makefile +++ b/common/Makefile @@ -1,5 +1,5 @@ CC=gcc -CFLAGS=-Wall -g +CFLAGS=-Wall -g -O3 all: meminfo-writer meminfo-writer: meminfo-writer.o $(CC) -g -o meminfo-writer meminfo-writer.o -lxenstore From d91c03358cb63a7dd3ca4ca3e2fb73ce2f8160cd Mon Sep 17 00:00:00 2001 From: Rafal Wojtczuk Date: Fri, 10 Sep 2010 14:53:41 +0200 
Subject: [PATCH 24/24] qmemman: save a syscall in meminfo-writer via use of "pread" --- common/meminfo-writer.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/common/meminfo-writer.c b/common/meminfo-writer.c index 2f6f2174..49c8b6a5 100644 --- a/common/meminfo-writer.c +++ b/common/meminfo-writer.c @@ -111,12 +111,11 @@ int main(int argc, char **argv) exit(1); } for (;;) { - n = read(fd, buf, sizeof(buf)); + n = pread(fd, buf, sizeof(buf), 0); buf[n] = 0; meminfo_data = parse(buf); if (meminfo_data) send_to_qmemman(xs, meminfo_data); usleep(delay); - lseek(fd, 0, SEEK_SET); } }
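To close the series, a recap sketch of the preference rule the balancer is built around (qmemman_algo.prefmem); prefmem_sketch is a hypothetical standalone version. Each domain is kept roughly 30% above its measured usage, with a fixed boost for dom0, which serves the vbds and needs a large cache.

def prefmem_sketch(mem_used, is_dom0=False):
    CACHE_FACTOR = 1.3               # headroom over used memory
    target = mem_used * CACHE_FACTOR
    if is_dom0:
        target += 350*1024*1024      # dom0 cache boost
    return target

# e.g. an AppVM using 400MB is preferred at ~520MB; dom0 at ~870MB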