Use maxmem=0 to disable qmemman, add more automation to it

Use maxmem=0 for disabling dynamic memory balance, instead of cryptic
service.meminfo-writer feature. Under the hood, meminfo-writer service
is also set based on maxmem property (directly in qubesdb, not
vm.features dict).
Having this as a property (not a "feature") allows sensible handling
of the default value. Specifically, disable it automatically if
otherwise it would crash a VM. This is the case for:
 - domain with PCI devices (PoD is not supported by Xen then)
 - domain without balloon driver and/or meminfo-writer service

The check for the latter is heuristic (assume presence of 'qrexec' also
can indicate balloon driver support), but it is true for currently
supported systems.

This also allows more reliable control of libvirt config: do not set
memory != maxmem, unless qmemman is enabled.

memory != maxmem only makes sense if qmemman for given domain is
enabled.  Besides wasting some domain resources for extra page tables
etc, for HVM domains this is harmful, because maxmem-memory difference
is made of a Populate-on-Demand pool, which - when depleted - will kill
the domain. This means a domain without a balloon driver will die as soon
as it tries to use more than its initial memory - but without a balloon driver
it sees maxmem memory and doesn't know about the lower limit.

Fixes QubesOS/qubes-issues#4135
This commit is contained in:
Marek Marczykowski-Górecki 2018-11-03 05:13:23 +01:00
parent 328697730b
commit 4dc8631010
No known key found for this signature in database
GPG Key ID: 063938BA42CFA724
5 changed files with 76 additions and 15 deletions

View File

@ -39,6 +39,10 @@ class ServicesExtension(qubes.ext.Extension):
vm.untrusted_qdb.write('/qubes-service/{}'.format(service),
str(int(bool(value))))
# always set meminfo-writer according to maxmem
vm.untrusted_qdb.write('/qubes-service/meminfo-writer',
'1' if vm.maxmem > 0 else '0')
@qubes.ext.handler('domain-feature-set:*')
def on_domain_feature_set(self, vm, event, feature, value, oldvalue=None):
'''Update /qubes-service/ QubesDB tree in runtime'''

View File

@ -224,6 +224,7 @@ class TC_20_Services(qubes.tests.QubesTestCase):
self.features = {}
self.vm.configure_mock(**{
'template': None,
'maxmem': 1024,
'is_running.return_value': True,
'features.get.side_effect': self.features.get,
'features.items.side_effect': self.features.items,
@ -239,6 +240,7 @@ class TC_20_Services(qubes.tests.QubesTestCase):
self.ext.on_domain_qdb_create(self.vm, 'domain-qdb-create')
self.assertEqual(sorted(self.vm.untrusted_qdb.mock_calls), [
('write', ('/qubes-service/meminfo-writer', '1'), {}),
('write', ('/qubes-service/test1', '1'), {}),
('write', ('/qubes-service/test2', '0'), {}),
])

View File

@ -665,7 +665,7 @@ class TC_90_QubesVM(QubesVMTestsMixin, qubes.tests.QubesTestCase):
expected = '''<domain type="xen">
<name>test-inst-test</name>
<uuid>7db78950-c467-4863-94d1-af59806384ea</uuid>
<memory unit="MiB">500</memory>
<memory unit="MiB">400</memory>
<currentMemory unit="MiB">400</currentMemory>
<vcpu placement="static">2</vcpu>
<cpu mode='host-passthrough'>
@ -766,6 +766,7 @@ class TC_90_QubesVM(QubesVMTestsMixin, qubes.tests.QubesTestCase):
vm = self.get_vm(uuid=my_uuid)
vm.netvm = None
vm.virt_mode = 'hvm'
vm.features['qrexec'] = True
with unittest.mock.patch('qubes.config.qubes_base_dir',
'/tmp/qubes-test'):
kernel_dir = '/tmp/qubes-test/vm-kernels/dummy'
@ -906,6 +907,7 @@ class TC_90_QubesVM(QubesVMTestsMixin, qubes.tests.QubesTestCase):
vm = self.get_vm(uuid=my_uuid)
vm.netvm = netvm
vm.virt_mode = 'hvm'
vm.features['qrexec'] = True
with self.subTest('ipv4_only'):
libvirt_xml = vm.create_config_file()
self.assertXMLEqual(lxml.etree.XML(libvirt_xml),
@ -969,6 +971,7 @@ class TC_90_QubesVM(QubesVMTestsMixin, qubes.tests.QubesTestCase):
'/qubes-iptables-error': '',
'/qubes-iptables-header': iptables_header,
'/qubes-service/qubes-update-check': '0',
'/qubes-service/meminfo-writer': '1',
})
@unittest.mock.patch('qubes.utils.get_timezone')
@ -1029,6 +1032,7 @@ class TC_90_QubesVM(QubesVMTestsMixin, qubes.tests.QubesTestCase):
'/qubes-iptables-error': '',
'/qubes-iptables-header': iptables_header,
'/qubes-service/qubes-update-check': '0',
'/qubes-service/meminfo-writer': '1',
'/qubes-ip': '10.137.0.3',
'/qubes-netmask': '255.255.255.255',
'/qubes-gateway': '10.137.0.2',

View File

@ -69,13 +69,21 @@ def _setter_kernel(self, prop, value):
def _setter_positive_int(self, prop, value):
''' Helper for setting a positive int. Checks that the int is >= 0 '''
''' Helper for setting a positive int. Checks that the int is > 0 '''
# pylint: disable=unused-argument
value = int(value)
if value <= 0:
raise ValueError('Value must be positive')
return value
def _setter_non_negative_int(self, prop, value):
''' Helper for setting a positive int. Checks that the int is >= 0 '''
# pylint: disable=unused-argument
value = int(value)
if value < 0:
raise ValueError('Value must be positive or zero')
return value
def _setter_default_user(self, prop, value):
''' Helper for setting default user '''
@ -122,6 +130,25 @@ def _default_with_template(prop, default):
return _func
def _default_maxmem(self):
    '''Compute the default ``maxmem`` for a qube.

    Returns 0 (qmemman disabled) whenever memory balancing would be
    unsafe for this qube; otherwise an OS-dependent cap, limited to
    half of the physical RAM, resolved through the template chain.
    '''
    # Any reason to _not_ enable qmemman disables it outright.
    if not self.is_memory_balancing_possible():
        return 0
    # Linux specific cap: max memory can't scale beyond 10.79*init_mem
    # see https://groups.google.com/forum/#!topic/qubes-devel/VRqkFj1IOtA
    if self.features.get('os', None) == 'Linux':
        os_cap = self.memory * 10
    else:
        os_cap = 4000
    # Never default to more than half of the host's physical RAM.
    half_ram = int(self.app.host.memory_total / 1024 / 2)
    return _default_with_template('maxmem', min(os_cap, half_ram))(self)
class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
'''Base functionality of Qubes VM shared between all VMs.
@ -448,12 +475,12 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
'template\'s value by default.')
maxmem = qubes.property('maxmem', type=int,
setter=_setter_positive_int,
default=_default_with_template('maxmem', (lambda self:
int(min(self.app.host.memory_total / 1024 / 2, 4000)))),
setter=_setter_non_negative_int,
default=_default_maxmem,
doc='''Maximum amount of memory available for this VM (for the purpose
of the memory balancer). TemplateBasedVMs use its '
'template\'s value by default.''')
of the memory balancer). Set to 0 to disable memory balancing for
this qube. TemplateBasedVMs use its template\'s value by default
(unless memory balancing not supported for this qube).''')
stubdom_mem = qubes.property('stubdom_mem', type=int,
setter=_setter_positive_int,
@ -743,11 +770,6 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
assert hasattr(self, 'qid')
assert hasattr(self, 'name')
# Linux specific cap: max memory can't scale beyond 10.79*init_mem
# see https://groups.google.com/forum/#!topic/qubes-devel/VRqkFj1IOtA
if self.maxmem > self.memory * 10:
self.maxmem = self.memory * 10
if xml is None:
# new qube, disable updates check if requested for new qubes
# SEE: 1637 when features are done, migrate to plugin
@ -1350,6 +1372,34 @@ class QubesVM(qubes.vm.mix.net.NetVMMixin, qubes.vm.BaseVM):
return stdouterr
def is_memory_balancing_possible(self):
    '''Check whether dynamic memory balancing (qmemman) can be enabled
    safely for this domain.

    Reasons to keep it disabled:
     - the domain has persistent PCI devices assigned (Populate-on-Demand
       is not supported by Xen together with PCI passthrough)
     - the balloon driver and/or meminfo-writer service is likely absent
       in the guest

    There is no reliable way to detect the balloon driver directly; the
    heuristic for HVM domains is: no 'qrexec' feature means no qubes
    tools at all, and tools that advertise supported services but not
    meminfo-writer cannot participate in balancing either.  PV and PVH
    require OS support that does include the balloon driver, so they
    always pass this check.

    :returns: bool
    '''
    # PCI passthrough rules out PoD, which balancing relies on.
    if list(self.devices['pci'].persistent()):
        return False
    if self.virt_mode == 'hvm':
        # Gather features of this VM and its whole template chain, to
        # see if the guest announces any supported service at all.
        features_set = set(self.features)
        template = getattr(self, 'template', None)
        while template is not None:
            features_set.update(template.features)
            template = getattr(template, 'template', None)
        supported_services = any(f.startswith('supported-service.')
            for f in features_set)
        # No qrexec -> assume no qubes tools, hence no balloon driver.
        # Services advertised but meminfo-writer missing -> balancing
        # would not work either.
        if (not self.features.check_with_template('qrexec', False) or
                (supported_services and
                 not self.features.check_with_template(
                     'supported-service.meminfo-writer', False))):
            return False
    return True
def request_memory(self, mem_required=None):
# overhead of per-qube/per-vcpu Xen structures,
# taken from OpenStack nova/virt/xenapi/driver.py

View File

@ -2,11 +2,12 @@
{% block basic %}
<name>{{ vm.name }}</name>
<uuid>{{ vm.uuid }}</uuid>
{% if vm.virt_mode == 'hvm' and vm.devices['pci'].persistent() | list %}
{% if ((vm.virt_mode == 'hvm' and vm.devices['pci'].persistent() | list)
or vm.maxmem == 0) -%}
<memory unit="MiB">{{ vm.memory }}</memory>
{% else %}
{% else -%}
<memory unit="MiB">{{ vm.maxmem }}</memory>
{% endif %}
{% endif -%}
<currentMemory unit="MiB">{{ vm.memory }}</currentMemory>
<vcpu placement="static">{{ vm.vcpus }}</vcpu>
{% endblock %}