Merge remote-tracking branch 'origin/pr/7' into HEAD

* origin/pr/7:
  Properly account for Xen memory overhead to fix large VMs

QubesOS/qubes-issues#1136
commit 88b4a84cce
Author: Marek Marczykowski-Górecki
Date:   2015-10-10 05:22:13 +02:00
GPG Key ID: 063938BA42CFA724 (no known key found for this signature in database)
3 changed files with 28 additions and 24 deletions


@@ -1738,6 +1738,26 @@ class QubesVm(object):
                 self.force_shutdown()
                 raise OSError("ERROR: Cannot execute qubesdb-daemon!")
 
+    def request_memory(self, mem_required = None):
+        # Overhead of per-VM/per-vcpu Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
+        # see https://wiki.openstack.org/wiki/XenServer/Overhead
+        # add an extra MB because Nova rounds up to MBs
+        MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
+        MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
+        if mem_required is None:
+            mem_required = int(self.memory) * 1024 * 1024
+        if qmemman_present:
+            qmemman_client = QMemmanClient()
+            try:
+                mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE + self.vcpus * MEM_OVERHEAD_PER_VCPU
+                got_memory = qmemman_client.request_memory(mem_required_with_overhead)
+            except IOError as e:
+                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
+            if not got_memory:
+                qmemman_client.close()
+                raise MemoryError("ERROR: insufficient memory to start VM '%s'" % self.name)
+            return qmemman_client
+
     def start(self, verbose = False, preparing_dvm = False, start_guid = True,
               notify_function = None, mem_required = None):
         self.log.debug('start('
@@ -1765,17 +1785,7 @@ class QubesVm(object):
 
         self._update_libvirt_domain()
 
-        if mem_required is None:
-            mem_required = int(self.memory) * 1024 * 1024
-        if qmemman_present:
-            qmemman_client = QMemmanClient()
-            try:
-                got_memory = qmemman_client.request_memory(mem_required)
-            except IOError as e:
-                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
-            if not got_memory:
-                qmemman_client.close()
-                raise MemoryError("ERROR: insufficient memory to start VM '%s'" % self.name)
+        qmemman_client = self.request_memory(mem_required)
 
         # Bind pci devices to pciback driver
         for pci in self.pcidevs:

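For a concrete sense of the new formula: the request sent to qmemman now adds a flat 4 MiB per VM plus 1.5 MiB per vcpu on top of the configured memory. A minimal standalone sketch of that arithmetic (the 4 GiB size and 2-vcpu count are made up for illustration, not taken from the diff):

    # Standalone sketch of the overhead arithmetic introduced above.
    MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024      # 4 MiB flat per VM
    MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2    # 1.5 MiB per vcpu
    mem_required = 4096 * 1024 * 1024              # a hypothetical 4 GiB VM
    vcpus = 2
    mem_with_overhead = mem_required + MEM_OVERHEAD_BASE + vcpus * MEM_OVERHEAD_PER_VCPU
    # -> 4096 MiB + 4 MiB + 2 * 1.5 MiB = 4103 MiB requested from qmemman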

@@ -178,17 +178,7 @@ class QubesDisposableVm(QubesVm):
         # refresh config file
         domain_config = self.create_config_file()
 
-        if qmemman_present:
-            mem_required = int(self.memory) * 1024 * 1024
-            print >>sys.stderr, "time=%s, getting %d memory" % (str(time.time()), mem_required)
-            qmemman_client = QMemmanClient()
-            try:
-                got_memory = qmemman_client.request_memory(mem_required)
-            except IOError as e:
-                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
-            if not got_memory:
-                qmemman_client.close()
-                raise MemoryError("ERROR: insufficient memory to start VM '%s'" % self.name)
+        qmemman_client = self.request_memory()
 
         # dispvm cannot have PCI devices
         assert (len(self.pcidevs) == 0), "DispVM cannot have PCI devices"

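Note that the DispVM call site passes no argument: request_memory() falls back to int(self.memory) * 1024 * 1024 when mem_required is None, so the disposable VM asks for its configured size plus overhead. A minimal sketch of that fallback (the class and the 400 MiB value are hypothetical stand-ins):

    # Hypothetical stand-in showing the default-argument fallback used above.
    class _Sketch(object):
        memory = 400  # MiB, as self.memory is stored
        def request_memory(self, mem_required=None):
            if mem_required is None:
                mem_required = int(self.memory) * 1024 * 1024
            return mem_required

    assert _Sketch().request_memory() == 400 * 1024 * 1024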

@@ -56,7 +56,11 @@ class SystemState(object):
         self.BALOON_DELAY = 0.1
         self.XEN_FREE_MEM_LEFT = 50*1024*1024
         self.XEN_FREE_MEM_MIN = 25*1024*1024
-        self.ALL_PHYS_MEM = self.xc.physinfo()['total_memory']*1024
+        # Overhead of per-page Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
+        # see https://wiki.openstack.org/wiki/XenServer/Overhead
+        # we divide total and free physical memory by this to get "assignable" memory
+        self.MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
+        self.ALL_PHYS_MEM = int(self.xc.physinfo()['total_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
 
     def add_domain(self, id):
         self.log.debug('add_domain(id={!r})'.format(id))
@@ -67,7 +71,7 @@ class SystemState(object):
         self.domdict.pop(id)
 
     def get_free_xen_memory(self):
-        return self.xc.physinfo()['free_memory']*1024
+        return int(self.xc.physinfo()['free_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
 #        hosts = self.xend_session.session.xenapi.host.get_all()
 #        host_record = self.xend_session.session.xenapi.host.get_record(hosts[0])
 #        host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
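The per-page factor works from the other side: rather than inflating each VM's request, it shrinks what qmemman believes the host can hand out. A rough sketch with made-up numbers (the 16 GiB host is hypothetical; the 1.00781 ratio is the OpenStack figure cited in the diff):

    # Standalone sketch: assignable memory on a hypothetical 16 GiB host.
    MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
    total_kib = 16 * 1024 * 1024                   # physinfo() reports KiB
    assignable = int(total_kib * 1024 * MEM_OVERHEAD_FACTOR)
    # -> roughly 15.88 GiB; the ~127 MiB withheld accounts for Xen's
    #    per-page bookkeeping structures, so they are never promised to VMs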