From a7bfc03509e60f9715a154463c562b7e48b248b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
Date: Mon, 16 Jul 2018 01:54:31 +0200
Subject: [PATCH] backup: avoid deadlock on logging during restore

When both threads and processes are used, the logging module can
deadlock. Work around this by re-initializing the locks in the logging
module after fork().

See https://bugs.python.org/issue6721 for details. Revert this commit
once the Python issue is fixed (in the Python version used for backup
restore).
---
 qubesadmin/backup/restore.py | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/qubesadmin/backup/restore.py b/qubesadmin/backup/restore.py
index c1d092a..877b2e1 100644
--- a/qubesadmin/backup/restore.py
+++ b/qubesadmin/backup/restore.py
@@ -248,6 +248,30 @@ def launch_scrypt(action, input_name, output_name, passphrase):
     p.pty = pty
     return p
 
+def _fix_logging_lock_after_fork():
+    """
+    HACK:
+    This runs in a child process; the parent might have held a lock
+    when fork() was called (and will release it only in the parent
+    process). This specifically applies to the logging module and
+    results in a deadlock (if one is unlucky). "Fix" this by
+    reinitializing the lock on every registered logging handler
+    just after the fork() call, until fixed upstream:
+
+    https://bugs.python.org/issue6721
+    """
+    if not hasattr(logging, '_handlerList'):
+        return
+
+    # pylint: disable=protected-access
+    for handler_ref in logging._handlerList:
+        handler = handler_ref()
+        if handler is None:
+            continue
+        if handler.lock:
+            handler.lock = type(handler.lock)()
+
+
 class ExtractWorker3(Process):
     '''Process for handling inner tar layer of backup archive'''
     # pylint: disable=too-many-instance-attributes
@@ -362,6 +386,7 @@ class ExtractWorker3(Process):
 
     def run(self):
         try:
+            _fix_logging_lock_after_fork()
             self.__run__()
         except Exception:
             # Cleanup children
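
Note (outside the patch above): a minimal sketch, assuming Python 3.7+
and fork-based multiprocessing, of how the same lock re-initialization
could be registered once with os.register_at_fork() instead of being
called at the top of run(). It relies on the same logging internals
(logging._handlerList, Handler.lock) that the patch uses; newer CPython
versions perform a comparable re-initialization inside the logging
module itself, which is why the commit message asks for the change to
be reverted once the upstream issue is fixed.

    import logging
    import os

    def _reinit_logging_locks_in_child():
        # Recreate each handler's lock in the child: a lock held by the
        # parent at fork() time would otherwise never be released here.
        # pylint: disable=protected-access
        for handler_ref in getattr(logging, '_handlerList', []):
            handler = handler_ref()
            if handler is not None and handler.lock is not None:
                handler.createLock()  # fresh, unlocked lock for this handler

    # Hypothetical wiring: run automatically in every forked child
    # (os.register_at_fork is available on Python 3.7+).
    if hasattr(os, 'register_at_fork'):
        os.register_at_fork(after_in_child=_reinit_logging_locks_in_child)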