Add some pieces of an IPC framework that will eventually allow ebuild
processes to communicate with portage's main python process.

author    Zac Medico <zmedico@gentoo.org>    Thu, 12 Aug 2010 17:01:02 +0000 (10:01 -0700)
committer Zac Medico <zmedico@gentoo.org>    Thu, 12 Aug 2010 17:01:02 +0000 (10:01 -0700)

Here are a few possible uses:

1) Robust subshell/subprocess die support. This allows the ebuild
   environment to reliably die without having to rely on signal IPC.

2) Delegation of portageq calls to the main python process, eliminating
   performance and userpriv permission issues.

3) Reliable ebuild termination in cases where the ebuild has accidentally
   left orphan processes running in the background (as in bug 278895).

bin/ebuild-ipc [new file with mode: 0755]
bin/ebuild-ipc.py [new file with mode: 0755]
pym/_emerge/FifoIpcDaemon.py [new file with mode: 0644]
pym/portage/tests/ebuild/test_ipc_daemon.py [new file with mode: 0644]
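
For orientation, here is a minimal standalone sketch of the wire protocol that
the files below implement: the build process pickles its argument list into
the .ipc_in fifo and then waits for a pickled 'OK' reply on .ipc_out. The
temporary directory and the fork()-based stand-in for the daemon are
illustrative assumptions only; in portage itself the daemon side is the
FifoIpcDaemon added below, driven by the scheduler's poll loop.

    import os
    import pickle
    import select
    import shutil
    import tempfile

    build_dir = tempfile.mkdtemp()
    ipc_in = os.path.join(build_dir, '.ipc_in')    # ebuild -> daemon
    ipc_out = os.path.join(build_dir, '.ipc_out')  # daemon -> ebuild
    os.mkfifo(ipc_in)
    os.mkfifo(ipc_out)

    pid = os.fork()
    if pid == 0:
        # Stand-in for the daemon side: read one pickled request,
        # then send back a pickled 'OK' reply.
        request_file = open(ipc_in, 'rb')
        request = pickle.load(request_file)
        request_file.close()
        reply_file = open(ipc_out, 'wb')
        pickle.dump('OK', reply_file)
        reply_file.close()
        os._exit(0)

    # Client side, mirroring EbuildIpc._communicate(): open the reply
    # fifo first (non-blocking), then send the pickled argument list.
    input_fd = os.open(ipc_out, os.O_RDONLY | os.O_NONBLOCK)
    input_file = os.fdopen(input_fd, 'rb')
    output_file = open(ipc_in, 'wb')
    pickle.dump(['exit', '0'], output_file)
    output_file.close()
    select.select([input_file], [], [])
    print(pickle.load(input_file))   # prints: OK
    input_file.close()
    os.waitpid(pid, 0)
    shutil.rmtree(build_dir)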

diff --git a/bin/ebuild-ipc b/bin/ebuild-ipc
new file mode 100755 (executable)
index 0000000..b30b89a
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+# Set the EPYTHON variable to the empty string so that ebuild-ipc.py
+# doesn't try to use a potentially unsupported version of Python.
+EPYTHON= PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+       exec "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"
diff --git a/bin/ebuild-ipc.py b/bin/ebuild-ipc.py
new file mode 100755 (executable)
index 0000000..43a5265
--- /dev/null
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This is a helper which ebuild processes can use
+# to communicate with portage's main python process.
+
+import os
+import pickle
+import select
+import sys
+
+# Avoid sandbox violations from bytecode regeneration after a python upgrade.
+pym_path = os.path.join(os.path.dirname(
+       os.path.dirname(os.path.realpath(__file__))), "pym")
+if os.environ.get("SANDBOX_ON") == "1":
+       sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+       if pym_path not in sandbox_write:
+               sandbox_write.append(pym_path)
+               os.environ["SANDBOX_WRITE"] = \
+                       ":".join(filter(None, sandbox_write))
+
+import portage
+
+class EbuildIpc(object):
+
+       def __init__(self):
+               self.fifo_dir = os.environ['PORTAGE_BUILDDIR']
+               self.ipc_in_fifo = os.path.join(self.fifo_dir, '.ipc_in')
+               self.ipc_out_fifo = os.path.join(self.fifo_dir, '.ipc_out')
+               self.ipc_lock_file = os.path.join(self.fifo_dir, '.ipc_lock')
+
+       def communicate(self, args):
+               lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)
+               try:
+                       return self._communicate(args)
+               finally:
+                       portage.locks.unlockfile(lock_obj)
+
+       def _communicate(self, args):
+               # Use the fifo paths computed in __init__ from
+               # PORTAGE_BUILDDIR; the daemon holds the opposite
+               # ends of the same fifos.
+               input_fd = os.open(self.ipc_out_fifo, os.O_RDONLY|os.O_NONBLOCK)
+               input_file = os.fdopen(input_fd, 'rb')
+               output_file = open(self.ipc_in_fifo, 'wb')
+               pickle.dump(args, output_file)
+               output_file.flush()
+
+               events = select.select([input_file], [], [])
+               if input_file in events[0]:
+                       reply = pickle.load(input_file)
+               else:
+                       reply = None
+
+               output_file.close()
+               input_file.close()
+
+               if reply == 'OK':
+                       rval = os.EX_OK
+               else:
+                       rval = 1
+
+               return rval
+
+def ebuild_ipc_main(args):
+       ebuild_ipc = EbuildIpc()
+       return ebuild_ipc.communicate(args)
+
+if __name__ == '__main__':
+       sys.exit(ebuild_ipc_main(sys.argv[1:]))
diff --git a/pym/_emerge/FifoIpcDaemon.py b/pym/_emerge/FifoIpcDaemon.py
new file mode 100644 (file)
index 0000000..16bc786
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import pickle
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.PollConstants import PollConstants
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+       """
+    This class serves as an IPC daemon, which ebuild processes can use
+    to communicate with portage's main python process.
+
+    Here are a few possible uses:
+
+    1) Robust subshell/subprocess die support. This allows the ebuild
+       environment to reliably die without having to rely on signal IPC.
+
+    2) Delegation of portageq calls to the main python process, eliminating
+       performance and userpriv permission issues.
+
+    3) Reliable ebuild termination in cases where the ebuild has accidentally
+       left orphan processes running in the background (as in bug 278895).
+       """
+
+       __slots__ = ("input_fifo", "output_fifo",) + \
+               ("_files", "_reg_id",)
+
+       _file_names = ("pipe_in",)
+       _files_dict = slot_dict_class(_file_names, prefix="")
+
+       def _start(self):
+               self._files = self._files_dict()
+               input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+               self._files.pipe_in = os.fdopen(input_fd, 'rb')
+
+               self._reg_id = self.scheduler.register(
+                       self._files.pipe_in.fileno(),
+                       self._registered_events, self._input_handler)
+
+               self._registered = True
+
+       def isAlive(self):
+               return self._registered
+
+       def cancel(self):
+               if self.returncode is None:
+                       self.returncode = 1
+                       self.cancelled = True
+               self._unregister()
+               self.wait()
+
+       def _wait(self):
+               if self.returncode is not None:
+                       return self.returncode
+
+               if self._registered:
+                       self.scheduler.schedule(self._reg_id)
+                       self._unregister()
+
+               if self.returncode is None:
+                       self.returncode = os.EX_OK
+
+               return self.returncode
+
+       def _input_handler(self, fd, event):
+
+               if event & PollConstants.POLLIN:
+
+                       buf = array.array('B')
+                       try:
+                               buf.fromfile(self._files.pipe_in, self._bufsize)
+                       except (EOFError, IOError):
+                               pass
+
+                       if buf:
+                               obj = pickle.loads(buf.tostring())
+                               if isinstance(obj, list) and \
+                                       obj and \
+                                       obj[0] == 'exit':
+                                       output_fd = os.open(self.output_fifo, os.O_WRONLY|os.O_NONBLOCK)
+                                       output_file = os.fdopen(output_fd, 'wb')
+                                       pickle.dump('OK', output_file)
+                                       output_file.close()
+                                       self._unregister()
+                                       self.wait()
+
+               self._unregister_if_appropriate(event)
+               return self._registered
+
+       def _unregister(self):
+               """
+               Unregister from the scheduler and close open files.
+               """
+
+               self._registered = False
+
+               if self._reg_id is not None:
+                       self.scheduler.unregister(self._reg_id)
+                       self._reg_id = None
+
+               if self._files is not None:
+                       for f in self._files.values():
+                               f.close()
+                       self._files = None
diff --git a/pym/portage/tests/ebuild/test_ipc_daemon.py b/pym/portage/tests/ebuild/test_ipc_daemon.py
new file mode 100644 (file)
index 0000000..c3d0ae2
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+from portage import os
+from portage.tests import TestCase
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.const import BASH_BINARY
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+from _emerge.TaskScheduler import TaskScheduler
+
+class IpcDaemonTestCase(TestCase):
+
+       def testIpcDaemon(self):
+               tmpdir = tempfile.mkdtemp()
+               try:
+                       env = {}
+                       env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
+                       env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+                       env['PORTAGE_BUILDDIR'] = tmpdir
+                       input_fifo = os.path.join(tmpdir, '.ipc_in')
+                       output_fifo = os.path.join(tmpdir, '.ipc_out')
+                       os.mkfifo(input_fifo)
+                       os.mkfifo(output_fifo)
+                       task_scheduler = TaskScheduler(max_jobs=2)
+                       daemon = FifoIpcDaemon(input_fifo=input_fifo,
+                               output_fifo=output_fifo,
+                               scheduler=task_scheduler.sched_iface)
+                       proc = SpawnProcess(
+                               args=[BASH_BINARY, "-c", '"$PORTAGE_BIN_PATH"/ebuild-ipc exit 0'],
+                               env=env, scheduler=task_scheduler.sched_iface)
+                       task_scheduler.add(daemon)
+                       task_scheduler.add(proc)
+                       task_scheduler.run()
+                       self.assertEqual(proc.returncode, os.EX_OK)
+               finally:
+                       shutil.rmtree(tmpdir)