backup: avoid deadlock on logging during restore

When both threads and processes are used, the logging module can
deadlock. Work around this by re-initializing the locks in the logging
module. See https://bugs.python.org/issue6721 for details.
Revert this commit when the Python issue is fixed (in the Python
version used for backup restore).
Marek Marczykowski-Górecki 5 years ago
parent commit a7bfc03509
1 changed file with 25 additions and 0 deletions:
    qubesadmin/backup/restore.py
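
For context, the hazard the message describes can be reproduced with a
minimal, timing-dependent sketch (not part of the commit; the logger name
and timings are arbitrary). A background thread holds a logging handler's
lock at the instant fork() happens, and the child then blocks forever when
it tries to log, because the thread that owns the lock does not exist in
the child. It hangs only when fork() lands inside the locked region, so it
may take several runs:

    import logging
    import os
    import threading
    import time

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("demo")

    def spam():
        # hammer the handler lock from a background thread
        while True:
            log.info("logging from thread")

    threading.Thread(target=spam, daemon=True).start()
    time.sleep(0.1)

    pid = os.fork()
    if pid == 0:
        # child: if fork() landed while spam() held the handler lock,
        # this call blocks forever -- the lock's owner is not running here
        log.info("logging from child")
        os._exit(0)
    os.waitpid(pid, 0)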

+ 25 - 0
qubesadmin/backup/restore.py

@@ -248,6 +248,30 @@ def launch_scrypt(action, input_name, output_name, passphrase):
     p.pty = pty
     return p
 
+def _fix_logging_lock_after_fork():
+    """
+    HACK:
+    This runs in a child process. The parent might have held some lock
+    when fork() was called (a lock that will be released only in the
+    parent process). This specifically applies to the logging module
+    and results in a deadlock (if one is unlucky). "Fix" this by
+    reinitializing the lock on every registered logging handler
+    just after the fork() call, until fixed upstream:
+
+    https://bugs.python.org/issue6721
+    """
+    if not hasattr(logging, '_handlerList'):
+        return
+
+    # pylint: disable=protected-access
+    # logging._handlerList holds weak references to every live handler
+    for handler_ref in logging._handlerList:
+        handler = handler_ref()
+        if handler is None:
+            continue
+        if handler.lock:
+            # swap the possibly-held lock for a fresh one of the same
+            # type (threading.RLock by default)
+            handler.lock = type(handler.lock)()
+
+
 class ExtractWorker3(Process):
     '''Process for handling inner tar layer of backup archive'''
     # pylint: disable=too-many-instance-attributes
@@ -362,6 +386,7 @@ class ExtractWorker3(Process):
 
     def run(self):
         try:
+            _fix_logging_lock_after_fork()
             self.__run__()
         except Exception:
             # Cleanup children
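
The "fixed upstream" the commit message anticipates arrived as
os.register_at_fork() in Python 3.7, which newer CPython releases also use
inside the logging module itself to keep handler locks usable across
fork(). On such interpreters the workaround could be reduced to (and
eventually reverted in favor of) a one-time registration. A hedged sketch,
reusing the same private _handlerList registry and the public
Handler.createLock() method:

    import logging
    import os

    def _reinit_handler_locks():
        # runs automatically in every child created by os.fork():
        # walk the private registry of live handlers and rebuild each
        # handler's lock via the public createLock() method
        for handler_ref in getattr(logging, '_handlerList', []):
            handler = handler_ref()
            if handler is not None and handler.lock:
                handler.createLock()

    os.register_at_fork(after_in_child=_reinit_handler_locks)

Because the hook fires in every forked child, per-worker calls like the
one added to ExtractWorker3.run() would then no longer be needed.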