#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2010 Rafal Wojtczuk <rafal@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
#
# pylint: disable=global-statement

import configparser
import socketserver
import logging
import logging.handlers
import os
import socket
import sys
import threading
from dataclasses import dataclass
from typing import Callable, Any

import xen.lowlevel.xs  # pylint: disable=import-error

import qubes.qmemman
import qubes.qmemman.algo
import qubes.tools
import qubes.utils

SOCK_PATH = '/var/run/qubes/qmemman.sock'

system_state = qubes.qmemman.SystemState()
global_lock = threading.Lock()
# If XSWatcher handled a meminfo event before @introduceDomain, it would use
# an incomplete domain list and could redistribute memory that is already
# allocated to some VM, but not yet used by it (see #1389).
# To prevent that, system_state must be updated (the domain list refreshed)
# before other changes are processed: whenever some process requests memory
# for a new VM, it sets this flag before releasing the lock, and XSWatcher
# checks the flag before handling any other event.
force_refresh_domain_list = False


def only_in_first_list(list1, list2):
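    """Return the items of list1 that are absent from list2, in order."""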
    ret = []
    for i in list1:
        if i not in list2:
            ret.append(i)
    return ret


def get_domain_meminfo_key(domain_id):
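    """Return the xenstore path of the given domain's meminfo entry."""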
    return '/local/domain/'+domain_id+'/memory/meminfo'


@dataclass
class WatchType:
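    """Token registered with a xenstore watch: the XSWatcher method to
    dispatch to, and the argument to pass it (see XSWatcher.watch_loop())."""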
    func: Callable
    param: Any


class XSWatcher:
    def __init__(self):
        self.log = logging.getLogger('qmemman.daemon.xswatcher')
        self.log.debug('XSWatcher()')

        self.handle = xen.lowlevel.xs.xs()
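        # @introduceDomain and @releaseDomain are xenstore special watch
        # paths, fired whenever a domain is created or destroyed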
        self.handle.watch('@introduceDomain', WatchType(
            XSWatcher.domain_list_changed, False))
        self.handle.watch('@releaseDomain', WatchType(
            XSWatcher.domain_list_changed, False))
        self.watch_token_dict = {}

    def domain_list_changed(self, refresh_only=False):
        """
        Check if any domain was created/destroyed. If it was, update
        the appropriate list. Then redistribute memory.

        :param refresh_only: If True, only refresh the domain list, do not
        redistribute memory. In this mode, the caller must already hold
        global_lock.
        """
        self.log.debug('domain_list_changed(refresh_only={!r})'.format(
            refresh_only))

        got_lock = False
        if not refresh_only:
            self.log.debug('acquiring global_lock')
            global_lock.acquire()
            got_lock = True
            self.log.debug('global_lock acquired')
        try:
            curr = self.handle.ls('', '/local/domain')
            if curr is None:
                return

            # check if domain is really there, it may happen that some empty
            # directories are left in xenstore
            curr = list(filter(
                lambda x:
                    self.handle.read('',
                        '/local/domain/{}/domid'.format(x)
                    ) is not None,
                curr
            ))
            self.log.debug('curr={!r}'.format(curr))

            for i in only_in_first_list(curr, self.watch_token_dict.keys()):
                # new domain has been created
                watch = WatchType(XSWatcher.meminfo_changed, i)
                self.watch_token_dict[i] = watch
                self.handle.watch(get_domain_meminfo_key(i), watch)
                system_state.add_domain(i)

            for i in only_in_first_list(self.watch_token_dict.keys(), curr):
                # domain destroyed
                self.handle.unwatch(get_domain_meminfo_key(i),
                    self.watch_token_dict[i])
                self.watch_token_dict.pop(i)
                system_state.del_domain(i)
        except:  # pylint: disable=bare-except
            self.log.exception('Updating domain list failed')
        finally:
            if got_lock:
                global_lock.release()
                self.log.debug('global_lock released')

        if not refresh_only:
            try:
                system_state.do_balance()
            except:  # pylint: disable=bare-except
                self.log.exception('do_balance() failed')

    def meminfo_changed(self, domain_id):
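        """Handle a change of a domain's meminfo entry in xenstore.

        The entry is written by the VM itself, hence the untrusted_ prefix;
        system_state.refresh_meminfo() is responsible for sanitizing it.
        """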
        self.log.debug('meminfo_changed(domain_id={!r})'.format(domain_id))
        untrusted_meminfo_key = self.handle.read(
            '', get_domain_meminfo_key(domain_id))
        if untrusted_meminfo_key is None or untrusted_meminfo_key == b'':
            return

        self.log.debug('acquiring global_lock')
        global_lock.acquire()
        self.log.debug('global_lock acquired')
        try:
            global force_refresh_domain_list
            if force_refresh_domain_list:
                self.domain_list_changed(refresh_only=True)
                force_refresh_domain_list = False
            if domain_id not in self.watch_token_dict:
                # domain just destroyed
                return

            system_state.refresh_meminfo(domain_id, untrusted_meminfo_key)
        except:  # pylint: disable=bare-except
            self.log.exception('Updating meminfo for %s failed', domain_id)
        finally:
            global_lock.release()
            self.log.debug('global_lock released')

    def watch_loop(self):
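        """Block on xenstore forever, dispatching each fired watch to the
        handler stored in its WatchType token."""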
        self.log.debug('watch_loop()')
        while True:
            result = self.handle.read_watch()
            self.log.debug('watch_loop result={!r}'.format(result))
            token = result[1]
            token.func(self, token.param)


class QMemmanReqHandler(socketserver.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """

    def handle(self):
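        # Protocol: the client sends the amount of memory (in bytes) it
        # needs for a new VM and keeps the connection open; while it is
        # open, global_lock is held and balancing stays paused. The reply
        # is b"OK\n" or b"FAIL\n"; disconnecting releases the lock.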
        self.log = logging.getLogger('qmemman.daemon.reqhandler')

        got_lock = False
        try:
            # self.request is the Unix stream socket connected to the client
            while True:
                self.data = self.request.recv(1024).strip()
                self.log.debug('data={!r}'.format(self.data))
                if len(self.data) == 0:
                    self.log.info('client disconnected, resuming membalance')
                    if got_lock:
                        global force_refresh_domain_list
                        force_refresh_domain_list = True
                    return

                # XXX something is wrong here: return without release?
                if got_lock:
                    self.log.warning('Second request over qmemman.sock?')
                    return

                self.log.debug('acquiring global_lock')
                global_lock.acquire()
                self.log.debug('global_lock acquired')

                got_lock = True
                if system_state.do_balloon(int(self.data.decode('ascii'))):
                    resp = b"OK\n"
                else:
                    resp = b"FAIL\n"
                self.log.debug('resp={!r}'.format(resp))
                self.request.send(resp)
        except BaseException as e:
            self.log.exception(
                "exception while handling request: {!r}".format(e))
        finally:
            if got_lock:
                global_lock.release()
                self.log.debug('global_lock released')


parser = qubes.tools.QubesArgumentParser(want_app=False)

parser.add_argument('--config', '-c', metavar='FILE',
    action='store', default='/etc/qubes/qmemman.conf',
    help='qmemman config file')

parser.add_argument('--foreground',
    action='store_true', default=False,
    help='do not close stdio')


def main():
    args = parser.parse_args()

    # setup logging
    ha_syslog = logging.handlers.SysLogHandler('/dev/log')
    ha_syslog.setFormatter(
        logging.Formatter('%(name)s[%(process)d]: %(message)s'))
    logging.root.addHandler(ha_syslog)

    if args.foreground:
        ha_stderr = logging.StreamHandler(sys.stderr)
        ha_stderr.setFormatter(
            logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s'))
        logging.root.addHandler(ha_stderr)

    sys.stdin.close()

    log = logging.getLogger('qmemman.daemon')
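
    # An example /etc/qubes/qmemman.conf, for illustration only (hypothetical
    # values, not necessarily the shipped defaults):
    #
    #   [global]
    #   vm-min-mem = 200M
    #   dom0-mem-boost = 350M
    #   cache-margin-factor = 1.3
    #   log-level = 30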

    config = configparser.ConfigParser({
        'vm-min-mem': str(qubes.qmemman.algo.MIN_PREFMEM),
        'dom0-mem-boost': str(qubes.qmemman.algo.DOM0_MEM_BOOST),
        'cache-margin-factor': str(qubes.qmemman.algo.CACHE_FACTOR)
    })
    config.read(args.config)

    if config.has_section('global'):
        qubes.qmemman.algo.MIN_PREFMEM = \
            qubes.utils.parse_size(config.get('global', 'vm-min-mem'))
        qubes.qmemman.algo.DOM0_MEM_BOOST = \
            qubes.utils.parse_size(config.get('global', 'dom0-mem-boost'))
        qubes.qmemman.algo.CACHE_FACTOR = \
            config.getfloat('global', 'cache-margin-factor')
    loglevel = config.getint('global', 'log-level', fallback=30)
    logging.root.setLevel(loglevel)

    log.info('MIN_PREFMEM={algo.MIN_PREFMEM}'
        ' DOM0_MEM_BOOST={algo.DOM0_MEM_BOOST}'
        ' CACHE_FACTOR={algo.CACHE_FACTOR}'.format(
            algo=qubes.qmemman.algo))

    try:
        os.unlink(SOCK_PATH)
    except FileNotFoundError:
        pass

    log.debug('instantiating server')
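    # umask(0) lets the socket be created world-accessible (presumably so
    # unprivileged dom0 clients can connect); a restrictive 0o077 umask is
    # restored right after the server binds it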
    os.umask(0)

    # Initialize the connection to Xen and to XenStore
    system_state.init()

    server = socketserver.UnixStreamServer(SOCK_PATH, QMemmanReqHandler)
    os.umask(0o077)

    # notify systemd
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        log.debug('notifying systemd')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        if notify_socket.startswith('@'):
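            # a leading '@' denotes a Linux abstract-namespace socket; its
            # real address starts with a NUL byte instead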
            notify_socket = '\0%s' % notify_socket[1:]
        sock.connect(notify_socket)
        sock.sendall(b"READY=1")
        sock.close()
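    # the request server runs in a background thread; the xenstore watch
    # loop takes over the main thread and never returns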
    threading.Thread(target=server.serve_forever).start()
    XSWatcher().watch_loop()