Merge remote-tracking branch 'qubesos/pr/179'

* qubesos/pr/179:
  qmemman: request VMs balloon down with 16MB safety margin
  qmemman: clear "not responding" flags when VM require more memory
  qmemman: slightly improve logging
  qmemman: reformat code, especially comments
Marek Marczykowski-Górecki 2018-01-15 04:21:40 +01:00
commit 21760d8ff0
2 changed files with 166 additions and 135 deletions
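
Of the changes above, the 16MB safety margin is the most visible: mem_set keeps working in bytes, from Xen's point of view, but the balloon target written to the VM's xenstore memory/target key is now 16MB lower, since the VM sees about that much less than Xen does (see the mem_set hunk below). A minimal sketch of the new target computation; the helper name is illustrative, the commit inlines the arithmetic:

    # Sketch of the adjustment mem_set now makes (helper name is hypothetical).
    # qmemman tracks memory in bytes from Xen's view; the balloon driver is
    # given a KiB target reduced by a 16MB safety margin.
    def xenstore_balloon_target_kb(val_bytes):
        return int(val_bytes / 1024 - 16 * 1024)

    # e.g. a 400MB target (Xen view) becomes 409600 - 16384 = 393216 KiB
    assert xenstore_balloon_target_kb(400 * 1024 * 1024) == 393216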

--- a/qubes/qmemman/__init__.py
+++ b/qubes/qmemman/__init__.py

@@ -39,13 +39,14 @@ class DomainState:
     def __init__(self, id):
         self.memory_current = 0  # the current memory size
         self.memory_actual = None  # the current memory allocation (what VM
-                                   # is using or can use at any time)
-        self.memory_maximum = None  #the maximum memory size
-        self.mem_used = None  #used memory, computed based on meminfo
-        self.id = id  #domain id
-        self.last_target = 0  #the last memset target
-        self.no_progress = False  #no react to memset
-        self.slow_memset_react = False  #slow react to memset (after few tries still above target)
+        # is using or can use at any time)
+        self.memory_maximum = None  # the maximum memory size
+        self.mem_used = None  # used memory, computed based on meminfo
+        self.id = id  # domain id
+        self.last_target = 0  # the last memset target
+        self.no_progress = False  # no react to memset
+        self.slow_memset_react = False  # slow react to memset (after few
+        # tries still above target)
 
     def __repr__(self):
         return self.__dict__.__repr__()
@@ -61,9 +62,11 @@ class SystemState(object):
         self.BALOON_DELAY = 0.1
         self.XEN_FREE_MEM_LEFT = 50*1024*1024
         self.XEN_FREE_MEM_MIN = 25*1024*1024
-        # Overhead of per-page Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
+        # Overhead of per-page Xen structures, taken from OpenStack
+        # nova/virt/xenapi/driver.py
         # see https://wiki.openstack.org/wiki/XenServer/Overhead
-        # we divide total and free physical memory by this to get "assignable" memory
+        # we divide total and free physical memory by this to get
+        # "assignable" memory
         self.MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
         try:
             self.ALL_PHYS_MEM = int(self.xc.physinfo()['total_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
@@ -108,7 +111,7 @@
                 xen_free, assigned_but_unused, self.domdict))
         return xen_free - assigned_but_unused
 
-    #refresh information on memory assigned to all domains
+    # refresh information on memory assigned to all domains
     def refresh_memactual(self):
         for domain in self.xc.domain_getinfo():
             id = str(domain['domid'])
@@ -125,47 +128,49 @@
                 self.domdict[id].memory_maximum = int(self.domdict[id].memory_maximum)*1024
             else:
                 self.domdict[id].memory_maximum = self.ALL_PHYS_MEM
-                # the previous line used to be
-                # self.domdict[id].memory_maximum = domain['maxmem_kb']*1024
-                # but domain['maxmem_kb'] changes in self.mem_set as well, and this results in
-                # the memory never increasing
-                # in fact, the only possible case of nonexisting memory/static-max is dom0
-                # see #307
+                # the previous line used to be
+                # self.domdict[id].memory_maximum = domain[
+                #     'maxmem_kb']*1024
+                # but domain['maxmem_kb'] changes in self.mem_set as well,
+                # and this results in the memory never increasing
+                # in fact, the only possible case of nonexisting
+                # memory/static-max is dom0
+                # see #307
 
     def clear_outdated_error_markers(self):
         # Clear outdated errors
         for i in self.domdict.keys():
-            if self.domdict[i].slow_memset_react and \
-                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
-                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
-                if dom_name is not None:
-                    # TODO: report it somewhere, qubesd or elsewhere
-                    pass
+            # clear markers excluding VM from memory balance, if:
+            # - VM have responded to previous request (with some safety margin)
+            # - VM request more memory than it has assigned
+            # The second condition avoids starving a VM, even when there is
+            # some free memory available
+            if self.domdict[i].memory_actual <= \
+                    self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/2 or \
+                    self.domdict[i].memory_actual < \
+                    qubes.qmemman.algo.prefmem(self.domdict[i]):
                 self.domdict[i].slow_memset_react = False
-            if self.domdict[i].no_progress and \
-                    self.domdict[i].memory_actual <= self.domdict[i].last_target + self.XEN_FREE_MEM_LEFT/4:
-                dom_name = self.xs.read('', '/local/domain/%s/name' % str(i))
-                if dom_name is not None:
-                    # TODO: report it somewhere, qubesd or elsewhere
-                    pass
                 self.domdict[i].no_progress = False
 
-    #the below works (and is fast), but then 'xm list' shows unchanged memory value
+    # the below works (and is fast), but then 'xm list' shows unchanged
+    # memory value
     def mem_set(self, id, val):
         self.log.info('mem-set domain {} to {}'.format(id, val))
         self.domdict[id].last_target = val
-        #can happen in the middle of domain shutdown
-        #apparently xc.lowlevel throws exceptions too
+        # can happen in the middle of domain shutdown
+        # apparently xc.lowlevel throws exceptions too
         try:
             self.xc.domain_setmaxmem(int(id), int(val/1024) + 1024)  # LIBXL_MAXMEM_CONSTANT=1024
-            self.xc.domain_set_target_mem(int(id), int(val/1024))
+            self.xc.domain_set_target_mem(int(id), int(val / 1024))
         except:
             pass
-        self.xs.write('', '/local/domain/' + id + '/memory/target', str(int(val/1024)))
+        # VM sees about 16MB memory less, so adjust for it here - qmemman
+        # handle Xen view of memory
+        self.xs.write('', '/local/domain/' + id + '/memory/target',
+                      str(int(val/1024 - 16 * 1024)))
 
-    #this is called at the end of ballooning, when we have Xen free mem already
-    #make sure that past mem_set will not decrease Xen free mem
+    # this is called at the end of ballooning, when we have Xen free mem already
+    # make sure that past mem_set will not decrease Xen free mem
     def inhibit_balloon_up(self):
         self.log.debug('inhibit_balloon_up()')
         for i in self.domdict.keys():
@@ -175,7 +180,8 @@
                     'Preventing balloon up to {}'.format(dom.last_target))
                 self.mem_set(i, dom.memory_actual)
 
-    #perform memory ballooning, across all domains, to add "memsize" to Xen free memory
+    # perform memory ballooning, across all domains, to add "memsize" to Xen
+    # free memory
     def do_balloon(self, memsize):
         self.log.info('do_balloon(memsize={!r})'.format(memsize))
         CHECK_PERIOD_S = 3
@@ -213,7 +219,8 @@
             if prev_memory_actual is not None:
                 for i in prev_memory_actual.keys():
                     if prev_memory_actual[i] == self.domdict[i].memory_actual:
-                        #domain not responding to memset requests, remove it from donors
+                        # domain not responding to memset requests, remove it
+                        # from donors
                         self.domdict[i].no_progress = True
                         self.log.info('domain {} stuck at {}'.format(i, self.domdict[i].memory_actual))
         memset_reqs = qubes.qmemman.algo.balloon(memsize + self.XEN_FREE_MEM_LEFT - xenfree, self.domdict)
@@ -238,8 +245,8 @@
                     self.domdict[domid], untrusted_meminfo_key)
             self.do_balance()
 
-    #is the computed balance request big enough ?
-    #so that we do not trash with small adjustments
+    # is the computed balance request big enough ?
+    # so that we do not trash with small adjustments
     def is_balance_req_significant(self, memset_reqs, xenfree):
         self.log.debug(
             'is_balance_req_significant(memset_reqs={}, xenfree={})'.format(
@@ -261,7 +268,8 @@
             total_memory_transfer += abs(memory_change)
             pref = qubes.qmemman.algo.prefmem(self.domdict[dom])
 
-            if last_target > 0 and last_target < pref and memory_change > MIN_MEM_CHANGE_WHEN_UNDER_PREF:
+            if 0 < last_target < pref and \
+                    memory_change > MIN_MEM_CHANGE_WHEN_UNDER_PREF:
                 self.log.info(
                     'dom {} is below pref, allowing balance'.format(dom))
                 return True
@@ -274,9 +282,14 @@
     def print_stats(self, xenfree, memset_reqs):
         for i in self.domdict.keys():
             if self.domdict[i].mem_used is not None:
-                self.log.info('stat: dom {!r} act={} pref={}'.format(i,
-                    self.domdict[i].memory_actual,
-                    qubes.qmemman.algo.prefmem(self.domdict[i])))
+                self.log.info('stat: dom {!r} act={} pref={} last_target={}'
+                              '{}{}'.format(i,
+                    self.domdict[i].memory_actual,
+                    qubes.qmemman.algo.prefmem(self.domdict[i]),
+                    self.domdict[i].last_target,
+                    ' no_progress' if self.domdict[i].no_progress else '',
+                    ' slow_memset_react'
+                    if self.domdict[i].slow_memset_react else ''))
 
         self.log.info('stat: xenfree={} memset_reqs={}'.format(xenfree, memset_reqs))
@@ -303,7 +316,7 @@
             dom, mem = rq
             # Force to always have at least 0.9*self.XEN_FREE_MEM_LEFT (some
             # margin for rounding errors). Before giving memory to
-            # domain, ensure that others have gived it back.
+            # domain, ensure that others have gave it back.
             # If not - wait a little.
             ntries = 5
             while self.get_free_xen_memory() - (mem - self.domdict[dom].memory_actual) < 0.9*self.XEN_FREE_MEM_LEFT:
@@ -318,11 +331,12 @@
                 for rq2 in memset_reqs:
                     dom2, mem2 = rq2
                     if dom2 == dom:
-                        # All donors have been procesed
-                        break
+                        # All donors have been processed
+                        break
                     # allow some small margin
                     if self.domdict[dom2].memory_actual > self.domdict[dom2].last_target + self.XEN_FREE_MEM_LEFT/4:
-                        # VM didn't react to memory request at all, remove from donors
+                        # VM didn't react to memory request at all,
+                        # remove from donors
                         if prev_memactual[dom2] == self.domdict[dom2].memory_actual:
                             self.log.warning(
                                 'dom {!r} didnt react to memory request'
@@ -331,10 +345,6 @@
                                     self.domdict[dom2].memory_actual,
                                     mem2))
                             self.domdict[dom2].no_progress = True
-                            dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
-                            if dom_name is not None:
-                                # TODO: report it somewhere, qubesd or elsewhere
-                                pass
                         else:
                             self.log.warning('dom {!r} still hold more'
                                 ' memory than have assigned ({} > {})'
@@ -342,10 +352,6 @@
                                     self.domdict[dom2].memory_actual,
                                     mem2))
                             self.domdict[dom2].slow_memset_react = True
-                            dom_name = self.xs.read('', '/local/domain/%s/name' % str(dom2))
-                            if dom_name is not None:
-                                # TODO: report it somewhere, qubesd or elsewhere
-                                pass
                 self.mem_set(dom, self.get_free_xen_memory() + self.domdict[dom].memory_actual - self.XEN_FREE_MEM_LEFT)
         return
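
The reworked clear_outdated_error_markers above now clears both exclusion flags when a VM has either met its last target (with a safety margin) or asks for more memory than it currently holds, so a flagged VM cannot be starved while free memory exists. The same predicate restated standalone; names are illustrative, and pref stands in for qubes.qmemman.algo.prefmem():

    XEN_FREE_MEM_LEFT = 50 * 1024 * 1024

    def may_clear_markers(memory_actual, last_target, pref):
        # VM responded to the previous request (with some safety margin),
        # or it wants more memory than it currently has assigned
        return (memory_actual <= last_target + XEN_FREE_MEM_LEFT / 2
                or memory_actual < pref)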

--- a/qubes/qmemman/algo.py
+++ b/qubes/qmemman/algo.py

@@ -26,14 +26,14 @@ import string
 # This are only defaults - can be overridden by QMemmanServer with values from
 # config file
 CACHE_FACTOR = 1.3
-MIN_PREFMEM = 200*1024*1024
-DOM0_MEM_BOOST = 350*1024*1024
+MIN_PREFMEM = 200 * 1024 * 1024
+DOM0_MEM_BOOST = 350 * 1024 * 1024
 
 log = logging.getLogger('qmemman.daemon.algo')
 
-#untrusted meminfo size is taken from xenstore key, thus its size is limited
-#so splits do not require excessive memory
+# untrusted meminfo size is taken from xenstore key, thus its size is limited
+# so splits do not require excessive memory
 def sanitize_and_parse_meminfo(untrusted_meminfo):
     if not untrusted_meminfo:
         return None
@@ -62,22 +62,21 @@ def sanitize_and_parse_meminfo(untrusted_meminfo):
     # sanitize end
         meminfo = untrusted_dict
         return (meminfo['MemTotal'] -
-            meminfo['MemFree'] - meminfo['Cached'] - meminfo['Buffers'] +
-            meminfo['SwapTotal'] - meminfo['SwapFree']) * 1024
+                meminfo['MemFree'] - meminfo['Cached'] - meminfo['Buffers'] +
+                meminfo['SwapTotal'] - meminfo['SwapFree']) * 1024
 
     return None
 
 def is_meminfo_suspicious(untrusted_meminfo):
     log.debug('is_meminfo_suspicious('
-        'untrusted_meminfo={!r})'.format(untrusted_meminfo))
+              'untrusted_meminfo={!r})'.format(untrusted_meminfo))
     ret = False
 
     # check whether the required keys exist and are not negative
     try:
         for i in ('MemTotal', 'MemFree', 'Buffers', 'Cached',
-            'SwapTotal', 'SwapFree'):
+                  'SwapTotal', 'SwapFree'):
             val = int(untrusted_meminfo[i])
             if val < 0:
                 ret = True
@@ -88,8 +87,9 @@ def is_meminfo_suspicious(untrusted_meminfo):
     if untrusted_meminfo['SwapTotal'] < untrusted_meminfo['SwapFree']:
         ret = True
     if untrusted_meminfo['MemTotal'] < \
-        untrusted_meminfo['MemFree'] + \
-        untrusted_meminfo['Cached'] + untrusted_meminfo['Buffers']:
+            untrusted_meminfo['MemFree'] + \
+            untrusted_meminfo['Cached'] + untrusted_meminfo[
+                'Buffers']:
         ret = True
 
     # we could also impose some limits on all the above values
     # but it has little purpose - all the domain can gain by passing e.g.
@@ -97,7 +97,8 @@ def is_meminfo_suspicious(untrusted_meminfo):
     # it can be achieved with legal values, too, and it will not allow to
     # starve existing domains, by design
     if ret:
-        log.warning('suspicious meminfo untrusted_meminfo={!r}'.format(untrusted_meminfo))
+        log.warning('suspicious meminfo untrusted_meminfo={!r}'.format(
+            untrusted_meminfo))
     return ret
@@ -107,20 +108,26 @@ def refresh_meminfo_for_domain(domain, untrusted_xenstore_key):
 
 def prefmem(domain):
-    #dom0 is special, as it must have large cache, for vbds. Thus, give it a special boost
+    # dom0 is special, as it must have large cache, for vbds. Thus, give it
+    # a special boost
     if domain.id == '0':
-        return min(domain.mem_used*CACHE_FACTOR + DOM0_MEM_BOOST, domain.memory_maximum)
-    return max(min(domain.mem_used*CACHE_FACTOR, domain.memory_maximum), MIN_PREFMEM)
+        return min(domain.mem_used * CACHE_FACTOR + DOM0_MEM_BOOST,
+                   domain.memory_maximum)
+    return max(min(domain.mem_used * CACHE_FACTOR, domain.memory_maximum),
+               MIN_PREFMEM)
 
 def memory_needed(domain):
-    #do not change
-    #in balance(), "distribute total_available_memory proportionally to mempref" relies on this exact formula
+    # do not change
+    # in balance(), "distribute total_available_memory proportionally to
+    # mempref" relies on this exact formula
     ret = prefmem(domain) - domain.memory_actual
     return ret
 
-#prepare list of (domain, memory_target) pairs that need to be passed
-#to "xm memset" equivalent in order to obtain "memsize" of memory
-#return empty list when the request cannot be satisfied
+# prepare list of (domain, memory_target) pairs that need to be passed
+# to "xm memset" equivalent in order to obtain "memsize" of memory
+# return empty list when the request cannot be satisfied
 def balloon(memsize, domain_dictionary):
     log.debug('balloon(memsize={!r}, domain_dictionary={!r})'.format(
         memsize, domain_dictionary))
@@ -137,31 +144,35 @@ def balloon(memsize, domain_dictionary):
         if need < 0:
             log.info('balloon: dom {} has actual memory {}'.format(i,
                 domain_dictionary[i].memory_actual))
-            donors.append((i,-need))
-            available-=need
+            donors.append((i, -need))
+            available -= need
     log.info('req={} avail={} donors={!r}'.format(memsize, available, donors))
-    if available<memsize:
+    if available < memsize:
         return ()
 
-    scale = 1.0*memsize/available
+    scale = 1.0 * memsize / available
     for donors_iter in donors:
-        id, mem = donors_iter
-        memborrowed = mem*scale*REQ_SAFETY_NET_FACTOR
-        log.info('borrow {} from {}'.format(memborrowed, id))
-        memtarget = int(domain_dictionary[id].memory_actual - memborrowed)
-        request.append((id, memtarget))
+        dom_id, mem = donors_iter
+        memborrowed = mem * scale * REQ_SAFETY_NET_FACTOR
+        log.info('borrow {} from {}'.format(memborrowed, dom_id))
+        memtarget = int(domain_dictionary[dom_id].memory_actual - memborrowed)
+        request.append((dom_id, memtarget))
     return request
 
-# REQ_SAFETY_NET_FACTOR is a bit greater that 1. So that if the domain yields a bit less than requested, due
-# to e.g. rounding errors, we will not get stuck. The surplus will return to the VM during "balance" call.
-#redistribute positive "total_available_memory" of memory between domains, proportionally to prefmem
+# REQ_SAFETY_NET_FACTOR is a bit greater that 1. So that if the domain
+# yields a bit less than requested, due to e.g. rounding errors, we will not
+# get stuck. The surplus will return to the VM during "balance" call.
+# redistribute positive "total_available_memory" of memory between domains,
+# proportionally to prefmem
 def balance_when_enough_memory(domain_dictionary,
         xen_free_memory, total_mem_pref, total_available_memory):
     log.info('balance_when_enough_memory(xen_free_memory={!r}, '
-        'total_mem_pref={!r}, total_available_memory={!r})'.format(
-        xen_free_memory, total_mem_pref, total_available_memory))
+             'total_mem_pref={!r}, total_available_memory={!r})'.format(
+                 xen_free_memory, total_mem_pref, total_available_memory))
 
     target_memory = {}
     # memory not assigned because of static max
@@ -172,20 +183,21 @@
             continue
         if domain_dictionary[i].no_progress:
             continue
-        #distribute total_available_memory proportionally to mempref
-        scale = 1.0*prefmem(domain_dictionary[i])/total_mem_pref
-        target_nonint = prefmem(domain_dictionary[i]) + scale*total_available_memory
-        #prevent rounding errors
-        target = int(0.999*target_nonint)
-        #do not try to give more memory than static max
+        # distribute total_available_memory proportionally to mempref
+        scale = 1.0 * prefmem(domain_dictionary[i]) / total_mem_pref
+        target_nonint = prefmem(
+            domain_dictionary[i]) + scale * total_available_memory
+        # prevent rounding errors
+        target = int(0.999 * target_nonint)
+        # do not try to give more memory than static max
         if target > domain_dictionary[i].memory_maximum:
-            left_memory += target-domain_dictionary[i].memory_maximum
+            left_memory += target - domain_dictionary[i].memory_maximum
             target = domain_dictionary[i].memory_maximum
         else:
-            #count domains which can accept more memory
+            # count domains which can accept more memory
             acceptors_count += 1
         target_memory[i] = target
 
-    #distribute left memory across all acceptors
+    # distribute left memory across all acceptors
     while left_memory > 0 and acceptors_count > 0:
         log.info('left_memory={} acceptors_count={}'.format(
             left_memory, acceptors_count))
@@ -195,9 +207,10 @@
         for i in target_memory.keys():
             target = target_memory[i]
             if target < domain_dictionary[i].memory_maximum:
-                memory_bonus = int(0.999*(left_memory/acceptors_count))
-                if target+memory_bonus >= domain_dictionary[i].memory_maximum:
-                    new_left_memory += target+memory_bonus - domain_dictionary[i].memory_maximum
+                memory_bonus = int(0.999 * (left_memory / acceptors_count))
+                if target + memory_bonus >= domain_dictionary[i].memory_maximum:
+                    new_left_memory += target + memory_bonus - \
+                        domain_dictionary[i].memory_maximum
                     target = domain_dictionary[i].memory_maximum
                     new_acceptors_count -= 1
                 else:
@@ -205,82 +218,92 @@
             target_memory[i] = target
         left_memory = new_left_memory
         acceptors_count = new_acceptors_count
-    # split target_memory dictionary to donors and acceptors
-    # this is needed to first get memory from donors and only then give it to acceptors
+    # split target_memory dictionary to donors and acceptors
+    # this is needed to first get memory from donors and only then give it
+    # to acceptors
     donors_rq = list()
     acceptors_rq = list()
     for i in target_memory.keys():
         target = target_memory[i]
-        if (target < domain_dictionary[i].memory_actual):
+        if target < domain_dictionary[i].memory_actual:
             donors_rq.append((i, target))
         else:
             acceptors_rq.append((i, target))
 
-    # print 'balance(enough): xen_free_memory=', xen_free_memory, 'requests:', donors_rq + acceptors_rq
+    # print 'balance(enough): xen_free_memory=', xen_free_memory, \
+    #     'requests:', donors_rq + acceptors_rq
     return donors_rq + acceptors_rq
 
-#when not enough mem to make everyone be above prefmem, make donors be at prefmem, and
-#redistribute anything left between acceptors
+# when not enough mem to make everyone be above prefmem, make donors be at
+# prefmem, and redistribute anything left between acceptors
 def balance_when_low_on_memory(domain_dictionary,
         xen_free_memory, total_mem_pref_acceptors, donors, acceptors):
-    log.debug('balance_when_low_on_memory(xen_free_memory={!r}, '
+    log.info('balance_when_low_on_memory(xen_free_memory={!r}, '
         'total_mem_pref_acceptors={!r}, donors={!r}, acceptors={!r})'.format(
-        xen_free_memory, total_mem_pref_acceptors, donors, acceptors))
+            xen_free_memory, total_mem_pref_acceptors, donors, acceptors))
     donors_rq = list()
     acceptors_rq = list()
     squeezed_mem = xen_free_memory
     for i in donors:
         avail = -memory_needed(domain_dictionary[i])
-        if avail < 10*1024*1024:
-            #probably we have already tried making it exactly at prefmem, give up
+        if avail < 10 * 1024 * 1024:
+            # probably we have already tried making it exactly at prefmem,
+            # give up
             continue
         squeezed_mem -= avail
         donors_rq.append((i, prefmem(domain_dictionary[i])))
-    #the below can happen if initially xen free memory is below 50M
+    # the below can happen if initially xen free memory is below 50M
     if squeezed_mem < 0:
         return donors_rq
     for i in acceptors:
-        scale = 1.0*prefmem(domain_dictionary[i])/total_mem_pref_acceptors
-        target_nonint = domain_dictionary[i].memory_actual + scale*squeezed_mem
-        #do not try to give more memory than static max
-        target = min(int(0.999*target_nonint), domain_dictionary[i].memory_maximum)
+        scale = 1.0 * prefmem(domain_dictionary[i]) / total_mem_pref_acceptors
+        target_nonint = \
+            domain_dictionary[i].memory_actual + scale * squeezed_mem
+        # do not try to give more memory than static max
+        target = \
+            min(int(0.999 * target_nonint), domain_dictionary[i].memory_maximum)
         acceptors_rq.append((i, target))
-    # print 'balance(low): xen_free_memory=', xen_free_memory, 'requests:', donors_rq + acceptors_rq
+    # print 'balance(low): xen_free_memory=', xen_free_memory, 'requests:',
+    # donors_rq + acceptors_rq
     return donors_rq + acceptors_rq
 
-#redistribute memory across domains
-#called when one of domains update its 'meminfo' xenstore key
-#return the list of (domain, memory_target) pairs to be passed to
-#"xm memset" equivalent
+# redistribute memory across domains
+# called when one of domains update its 'meminfo' xenstore key
+# return the list of (domain, memory_target) pairs to be passed to
+# "xm memset" equivalent
 def balance(xen_free_memory, domain_dictionary):
     log.debug('balance(xen_free_memory={!r}, domain_dictionary={!r})'.format(
         xen_free_memory, domain_dictionary))
 
-    #sum of all memory requirements - in other words, the difference between
-    #memory required to be added to domains (acceptors) to make them be at their
-    #preferred memory, and memory that can be taken from domains (donors) that
-    #can provide memory. So, it can be negative when plenty of memory.
+    # sum of all memory requirements - in other words, the difference between
+    # memory required to be added to domains (acceptors) to make them be
+    # at their preferred memory, and memory that can be taken from domains
+    # (donors) that can provide memory. So, it can be negative when plenty
+    # of memory.
     total_memory_needed = 0
-    #sum of memory preferences of all domains
+    # sum of memory preferences of all domains
     total_mem_pref = 0
-    #sum of memory preferences of all domains that require more memory
+    # sum of memory preferences of all domains that require more memory
     total_mem_pref_acceptors = 0
 
-    donors = list() # domains that can yield memory
+    donors = list()  # domains that can yield memory
     acceptors = list()  # domains that require more memory
-    #pass 1: compute the above "total" values
+    # pass 1: compute the above "total" values
     for i in domain_dictionary.keys():
         if domain_dictionary[i].mem_used is None:
             continue
         if domain_dictionary[i].no_progress:
             continue
         need = memory_needed(domain_dictionary[i])
-        # print 'domain' , i, 'act/pref', domain_dictionary[i].memory_actual, prefmem(domain_dictionary[i]), 'need=', need
-        if need < 0 or domain_dictionary[i].memory_actual >= domain_dictionary[i].memory_maximum:
+        # print 'domain' , i, 'act/pref', \
+        #     domain_dictionary[i].memory_actual, prefmem(domain_dictionary[i]), \
+        #     'need=', need
+        if need < 0 or domain_dictionary[i].memory_actual >= \
+                domain_dictionary[i].memory_maximum:
             donors.append(i)
         else:
             acceptors.append(i)
@@ -290,6 +313,8 @@ def balance(xen_free_memory, domain_dictionary):
     total_available_memory = xen_free_memory - total_memory_needed
     if total_available_memory > 0:
-        return balance_when_enough_memory(domain_dictionary, xen_free_memory, total_mem_pref, total_available_memory)
+        return balance_when_enough_memory(domain_dictionary, xen_free_memory,
+            total_mem_pref, total_available_memory)
     else:
-        return balance_when_low_on_memory(domain_dictionary, xen_free_memory, total_mem_pref_acceptors, donors, acceptors)
+        return balance_when_low_on_memory(domain_dictionary, xen_free_memory,
+            total_mem_pref_acceptors, donors, acceptors)
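
For reference, the proportional split inside balance_when_enough_memory is untouched by this merge: each domain is targeted at its prefmem plus a prefmem-proportional share of the surplus, scaled by 0.999 to absorb rounding errors. A toy run with made-up numbers:

    # Toy illustration of the split in balance_when_enough_memory;
    # domain names and sizes are made up.
    prefs = {'work': 400 * 1024 * 1024, 'personal': 600 * 1024 * 1024}
    total_available = 500 * 1024 * 1024  # xen_free_memory - total_memory_needed
    total_pref = sum(prefs.values())

    for dom, pref in prefs.items():
        scale = 1.0 * pref / total_pref
        target = int(0.999 * (pref + scale * total_available))
        print(dom, target)  # 'work' gets 40% of the surplus, 'personal' 60%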