diff --git a/core-modules/000QubesVm.py b/core-modules/000QubesVm.py
index b9b32bdd..1e087196 100644
--- a/core-modules/000QubesVm.py
+++ b/core-modules/000QubesVm.py
@@ -1738,6 +1738,26 @@ class QubesVm(object):
             self.force_shutdown()
             raise OSError("ERROR: Cannot execute qubesdb-daemon!")
 
+    def request_memory(self, mem_required = None):
+        # Overhead of per-VM/per-vcpu Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
+        # see https://wiki.openstack.org/wiki/XenServer/Overhead
+        # add an extra MB because Nova rounds up to MBs
+        MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024
+        MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2
+        if mem_required is None:
+            mem_required = int(self.memory) * 1024 * 1024
+        if qmemman_present:
+            qmemman_client = QMemmanClient()
+            try:
+                mem_required_with_overhead = mem_required + MEM_OVERHEAD_BASE + self.vcpus * MEM_OVERHEAD_PER_VCPU
+                got_memory = qmemman_client.request_memory(mem_required_with_overhead)
+            except IOError as e:
+                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
+            if not got_memory:
+                qmemman_client.close()
+                raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
+            return qmemman_client
+
     def start(self, verbose = False, preparing_dvm = False, start_guid = True,
             notify_function = None, mem_required = None):
         self.log.debug('start('
@@ -1765,17 +1785,7 @@ class QubesVm(object):
 
         self._update_libvirt_domain()
 
-        if mem_required is None:
-            mem_required = int(self.memory) * 1024 * 1024
-        if qmemman_present:
-            qmemman_client = QMemmanClient()
-            try:
-                got_memory = qmemman_client.request_memory(mem_required)
-            except IOError as e:
-                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
-            if not got_memory:
-                qmemman_client.close()
-                raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
+        qmemman_client = self.request_memory(mem_required)
 
         # Bind pci devices to pciback driver
         for pci in self.pcidevs:
diff --git a/core-modules/01QubesDisposableVm.py b/core-modules/01QubesDisposableVm.py
index 68b4319f..a758dd74 100644
--- a/core-modules/01QubesDisposableVm.py
+++ b/core-modules/01QubesDisposableVm.py
@@ -173,17 +173,7 @@ class QubesDisposableVm(QubesVm):
         # refresh config file
         domain_config = self.create_config_file()
 
-        if qmemman_present:
-            mem_required = int(self.memory) * 1024 * 1024
-            print >>sys.stderr, "time=%s, getting %d memory" % (str(time.time()), mem_required)
-            qmemman_client = QMemmanClient()
-            try:
-                got_memory = qmemman_client.request_memory(mem_required)
-            except IOError as e:
-                raise IOError("ERROR: Failed to connect to qmemman: %s" % str(e))
-            if not got_memory:
-                qmemman_client.close()
-                raise MemoryError ("ERROR: insufficient memory to start VM '%s'" % self.name)
+        qmemman_client = self.request_memory()
 
         # dispvm cannot have PCI devices
         assert (len(self.pcidevs) == 0), "DispVM cannot have PCI devices"
diff --git a/qmemman/qmemman.py b/qmemman/qmemman.py
index 3de81d75..d40c0b0a 100755
--- a/qmemman/qmemman.py
+++ b/qmemman/qmemman.py
@@ -56,7 +56,11 @@ class SystemState(object):
         self.BALOON_DELAY = 0.1
         self.XEN_FREE_MEM_LEFT = 50*1024*1024
         self.XEN_FREE_MEM_MIN = 25*1024*1024
-        self.ALL_PHYS_MEM = self.xc.physinfo()['total_memory']*1024
+        # Overhead of per-page Xen structures, taken from OpenStack nova/virt/xenapi/driver.py
+        # see https://wiki.openstack.org/wiki/XenServer/Overhead
+        # we divide total and free physical memory by this to get "assignable" memory
+        self.MEM_OVERHEAD_FACTOR = 1.0 / 1.00781
+        self.ALL_PHYS_MEM = int(self.xc.physinfo()['total_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
 
     def add_domain(self, id):
         self.log.debug('add_domain(id={!r})'.format(id))
@@ -67,7 +71,7 @@ class SystemState(object):
         self.domdict.pop(id)
 
     def get_free_xen_memory(self):
-        return self.xc.physinfo()['free_memory']*1024
+        return int(self.xc.physinfo()['free_memory']*1024 * self.MEM_OVERHEAD_FACTOR)
 #        hosts = self.xend_session.session.xenapi.host.get_all()
 #        host_record = self.xend_session.session.xenapi.host.get_record(hosts[0])
 #        host_metrics_record = self.xend_session.session.xenapi.host_metrics.get_record(host_record["metrics"])
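
Note (not part of the patch): a minimal sketch of the overhead arithmetic introduced above, using the same constants as request_memory(); the 4096 MB / 2-vcpu VM size is only an example.

# Illustration only -- reproduces the per-VM/per-vcpu constants from the patch above.
MEM_OVERHEAD_BASE = (3 + 1) * 1024 * 1024      # per-VM Xen structures, +1 MB for Nova's MB rounding
MEM_OVERHEAD_PER_VCPU = 3 * 1024 * 1024 / 2    # 1.5 MB per vcpu

def mem_required_with_overhead(mem_bytes, vcpus):
    # Amount qmemman is actually asked for when starting such a VM
    return mem_bytes + MEM_OVERHEAD_BASE + vcpus * MEM_OVERHEAD_PER_VCPU

# A 4096 MB VM with 2 vcpus requests 4096 + 4 + 2*1.5 = 4103 MB from qmemman:
print mem_required_with_overhead(4096 * 1024 * 1024, 2) / (1024 * 1024)   # 4103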