[autotest] Add job_aborter

Add the skeleton of job_aborter.  All of the cases that job_aborter
has to handle are laid out, though most are still stubs.

Considerations that came up while designing job_aborter prompted a
redesign of some existing code:

1. The abort implementation has been moved out of the event handling
code (eventlib).  The existing design was unnecessarily complex: the
abort signaling mechanism does not need to go through job_reporter and
can be handled directly by job_shepherd.  Since job_shepherd is written
in Go, the asynchronous handling is much easier to write there than the
Python async code job_reporter would need in order to play middleman.
The new logic will be added to monitor_db, job_aborter, and
job_shepherd; job_reporter need not be involved.

2. Job lease liveness is determined by fcntl locks.  This is more
reliable than working with pids and/or sending signals.  (As a bonus, this
prevents a job from being owned by two processes at the same time in
certain cases.)
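
The liveness check on the aborter side amounts to trying to grab the
lock non-blockingly.  A minimal Python sketch (the helper name here is
illustrative; it mirrors the _fcntl_locked() helper added in leasing.py
below, with job_shepherd holding the lock on the Go side):

    import fcntl
    import os

    def lease_expired(path):
        """Return True if no process holds an fcntl lock on path."""
        fd = os.open(path, os.O_WRONLY)
        try:
            # Succeeds only if no other process holds the lock.
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            return False  # Lock is held; the owner is still alive.
        else:
            return True   # Nobody holds the lock; the lease expired.
        finally:
            os.close(fd)  # Closing the fd releases our probe lock.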

BUG=chromium:748234
TEST=Unittests

Change-Id: I51cad974722e2e549e94819b6a3411d569919383
Reviewed-on: https://chromium-review.googlesource.com/676085
Commit-Ready: Allen Li <ayatane@chromium.org>
Tested-by: Allen Li <ayatane@chromium.org>
Reviewed-by: Prathmesh Prabhu <pprabhu@chromium.org>
diff --git a/bin/job_aborter b/bin/job_aborter
new file mode 100755
index 0000000..47bd29b
--- /dev/null
+++ b/bin/job_aborter
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Job aborter daemon
+#
+# See http://goto.google.com/monitor_db_per_job_refactor
+
+set -eu
+readonly bin_dir="$(readlink -e -- "$(dirname -- "$0")")"
+exec "${bin_dir}/python_venv" -m lucifer.scripts.job_aborter "$@"
diff --git a/bin/test_lucifer b/bin/test_lucifer
index 3e422e5..e8628ef 100755
--- a/bin/test_lucifer
+++ b/bin/test_lucifer
@@ -10,4 +10,4 @@
 # cd for pytest configuration files
 cd "$bin_dir/../venv"
 
-"${bin_dir}/python_venv" -m pytest lucifer
+"${bin_dir}/python_venv" -m pytest lucifer "$@"
diff --git a/venv/lucifer/autotest.py b/venv/lucifer/autotest.py
index 7d89731..25095ff 100644
--- a/venv/lucifer/autotest.py
+++ b/venv/lucifer/autotest.py
@@ -92,6 +92,8 @@
     This enforces that monkeypatch() is called first.  Otherwise,
     autotest imports may or may not work.  When they do work, they may
     screw up global state.
+
+    @param name: name of module as string, e.g., 'frontend.afe.models'
     """
     if not _setup_done:
         raise ImportError('cannot load Autotest modules before monkeypatching')
diff --git a/venv/lucifer/autotest_unittest.py b/venv/lucifer/autotest_unittest.py
index 4571e44..8cf0947 100644
--- a/venv/lucifer/autotest_unittest.py
+++ b/venv/lucifer/autotest_unittest.py
@@ -30,10 +30,11 @@
         ('autotest_lib.server', False),
         ('some_lib.common', False),
 ])
-def test__CommonRemovingFinder__is_autotest_common(fullname, expected):
-    """Test _CommonRemovingFinder._is_common()."""
+def test__CommonRemovingFinder_find_module(fullname, expected):
+    """Test _CommonRemovingFinder.find_module()."""
     finder = autotest._CommonRemovingFinder()
-    assert finder._is_autotest_common(fullname) == expected
+    got = finder.find_module(fullname)
+    assert got == (finder if expected else None)
 
 
 @pytest.mark.parametrize('name,expected', [
diff --git a/venv/lucifer/eventlib.py b/venv/lucifer/eventlib.py
index d2934e3..bc65463 100644
--- a/venv/lucifer/eventlib.py
+++ b/venv/lucifer/eventlib.py
@@ -4,16 +4,11 @@
 
 """Event subprocess module.
 
-Event subprocesses are subprocesses that print event changes to stdout
-and reads command from stdin.
+Event subprocesses are subprocesses that print event changes to stdout.
 
-Each event and command is a UNIX line, with a terminating newline
-character.
+Each event is a UNIX line, with a terminating newline character.
 
-Only the abort command is supported.  The main process aborts the event
-subprocess when SIGUSR1 is received.
-
-run_event_command() starts such a process.
+run_event_command() starts such a process with a synchronous event handler.
 """
 
 from __future__ import absolute_import
@@ -21,15 +16,11 @@
 from __future__ import print_function
 
 import logging
-from signal import SIGUSR1
-from signal import SIG_IGN
 
 import enum
 import subprocess32
 from subprocess32 import PIPE
 
-from lucifer import sigtrap
-
 logger = logging.getLogger(__name__)
 
 
@@ -51,59 +42,27 @@
     COMPLETED = 'completed'
 
 
-class Command(enum.Enum):
-    """Command enum
-
-    Members of this enum represent all possible command
-    that can be sent to an event command.
-
-    The value of enum members must be a string, which is printed by
-    itself on a line to signal the event.
-
-    This should be backward compatible with all versions of
-    job_shepherd, which lives in the infra/lucifer repository.
-
-    This should only contain one command, ABORT.
-    """
-    ABORT = 'abort'
-
-
 def run_event_command(event_handler, args):
     """Run a command that emits events.
 
-    Events printed by the command will be handled by event_handler.  All
-    exceptions raised by event_handler will be caught and logged;
-    however, event_handler should not let any exceptions escape.
-
-    While the event command is running, SIGUSR1 is interpreted as an
-    abort command and sent to the subprocess via stdin.
+    Events printed by the command will be handled by event_handler
+    synchronously.  Exceptions raised by event_handler will not be
+    caught.  If an exception escapes, the child process's standard file
+    descriptors are closed and the process is waited for.  The
+    event command should terminate if this happens.
 
     @param event_handler: callable that takes an Event instance.
     @param args: passed to subprocess.Popen.
     """
     logger.debug('Starting event command with %r', args)
-
-    def abort_handler(_signum, _frame):
-        """Handle SIGUSR1 by sending abort to subprocess."""
-        _send_command(proc.stdin, Command.ABORT)
-
-    with sigtrap.handle_signal(SIGUSR1, SIG_IGN), \
-         subprocess32.Popen(args, stdin=PIPE, stdout=PIPE) as proc, \
-         sigtrap.handle_signal(SIGUSR1, abort_handler):
+    with subprocess32.Popen(args, stdout=PIPE) as proc:
+        logger.debug('Event command child pid is %d', proc.pid)
         _handle_subprocess_events(event_handler, proc)
-    logger.debug('Subprocess exited with %d', proc.returncode)
+    logger.debug('Event command child with pid %d exited with %d',
+                 proc.pid, proc.returncode)
     return proc.returncode
 
 
-def _send_command(f, command):
-    """Send a command.
-
-    f is a pipe file object.  command is a Command instance.
-    """
-    f.write('%s\n' % command.value)
-    f.flush()
-
-
 def _handle_subprocess_events(event_handler, proc):
     """Handle a subprocess that emits events.
 
diff --git a/venv/lucifer/eventlib_unittest.py b/venv/lucifer/eventlib_unittest.py
index 194b785..ad60e44 100644
--- a/venv/lucifer/eventlib_unittest.py
+++ b/venv/lucifer/eventlib_unittest.py
@@ -6,30 +6,13 @@
 from __future__ import division
 from __future__ import print_function
 
-import collections
-import os
-import unittest
-import signal
-import sys
-import time
-
-import mock
 import pytest
-import subprocess32
 
 from lucifer import eventlib
 from lucifer.eventlib import Event
 
 
-@pytest.fixture
-def signal_mock():
-    """Pytest fixture for mocking out signal handler setting."""
-    fake_signal = _FakeSignal(mock.sentinel.default_handler)
-    with mock.patch('signal.signal', fake_signal):
-        yield fake_signal
-
-
-def test_happy_path(signal_mock, capfd):
+def test_run_event_command_normal(capfd):
     """Test happy path."""
     handler = _FakeHandler()
 
@@ -44,82 +27,38 @@
     assert handler.events == [Event('starting'), Event('completed')]
     # Handler should return the exit status of the command.
     assert ret == 0
-    # Signal handler should be restored.
-    assert signal_mock.handlers[signal.SIGUSR1] == signal_mock.default_handler
-    # stderr should go to stderr.
+    # Child stderr should go to stderr.
     out, err = capfd.readouterr()
     assert out == ''
     assert err == 'log message\n'
 
 
-@pytest.mark.xfail(reason='Flaky due to sleep')
-def test_SIGUSR1_aborts():
-    """Test sending SIGUSR1 aborts."""
-    with subprocess32.Popen(
-            [sys.executable, '-m', 'lucifer.scripts.run_event_command',
-             sys.executable, '-m', 'lucifer.scripts.wait_for_abort']) as proc:
-        time.sleep(0.2)  # Wait for process to come up.
-        os.kill(proc.pid, signal.SIGUSR1)
-        time.sleep(0.1)
-        proc.poll()
-        # If this is None, the process failed to abort.  If this is
-        # -SIGUSR1 (-10), then the processes did not finish setting up
-        # yet.
-        assert proc.returncode == 0
+def test_run_event_command_with_invalid_events():
+    """Test passing invalid events."""
+    handler = _FakeHandler()
+    eventlib.run_event_command(
+            event_handler=handler,
+            args=['bash', '-c', 'echo foo; echo bar'])
+    # Handler should not be called with invalid events.
+    assert handler.events == []
 
 
-class RunEventCommandTestCase(unittest.TestCase):
-    """run_event_command() unit tests."""
+def test_run_event_command_with_failed_command():
+    """Test passing invalid events."""
+    handler = _FakeHandler()
+    ret = eventlib.run_event_command(
+            event_handler=handler,
+            args=['bash', '-c', 'exit 1'])
+    assert ret == 1
 
-    def setUp(self):
-        super(RunEventCommandTestCase, self).setUp()
-        self.signal = _FakeSignal(mock.sentinel.default_handler)
-        patch = mock.patch('signal.signal', self.signal)
-        patch.start()
-        self.addCleanup(patch.stop)
 
-    def test_failed_command(self):
-        """Test failed command."""
-        handler = _FakeHandler()
-
-        ret = eventlib.run_event_command(
-                event_handler=handler,
-                args=['bash', '-c', 'exit 1'])
-
-        # Handler should return the exit status of the command.
-        self.assertEqual(ret, 1)
-
-    def test_with_invalid_events(self):
-        """Test passing invalid events."""
-        handler = _FakeHandler()
-
+def test_run_event_command_should_not_hide_handler_exception():
+    """Test handler exceptions."""
+    handler = _RaisingHandler(_TestError)
+    with pytest.raises(_TestError):
         eventlib.run_event_command(
                 event_handler=handler,
-                args=['bash', '-c', 'echo foo; echo bar'])
-
-        # Handler should not be called with invalid events.
-        self.assertEqual(handler.events, [])
-
-    def test_should_not_hide_handler_exception(self):
-        """Test handler exceptions."""
-        handler = _RaisingHandler(_TestError)
-        with self.assertRaises(_TestError):
-            eventlib.run_event_command(
-                    event_handler=handler,
-                    args=['bash', '-c', 'echo starting; echo completed'])
-
-
-class _FakeSignal(object):
-    """Fake for signal.signal()"""
-
-    def __init__(self, default_handler):
-        self.default_handler = default_handler
-        self.handlers = collections.defaultdict(lambda: default_handler)
-
-    def __call__(self, signum, handler):
-        old = self.handlers[signum]
-        self.handlers[signum] = handler
-        return old
+                args=['bash', '-c', 'echo starting; echo completed'])
 
 
 class _FakeHandler(object):
diff --git a/venv/lucifer/leasing.py b/venv/lucifer/leasing.py
new file mode 100644
index 0000000..b0ab9a0
--- /dev/null
+++ b/venv/lucifer/leasing.py
@@ -0,0 +1,149 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Job leasing.
+
+Jobs are leased to processes to own and run.  A process owning a job
+grabs an fcntl lock on the corresponding job lease file.  If the lock on
+the job is released, the owning process is considered dead and the job
+lease is considered expired.  Some other process (job_aborter) will need
+to make the necessary updates to reflect the job's failure.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import fcntl
+import logging
+import os
+
+from scandir import scandir
+
+_HEARTBEAT_DEADLINE_SECS = 10 * 60
+_HEARTBEAT_SECS = 3 * 60
+
+logger = logging.getLogger(__name__)
+
+
+def get_expired_leases(jobdir):
+    """Yield expired JobLeases in jobdir.
+
+    Expired jobs are jobs whose lease files are no longer locked.
+
+    @param jobdir: job lease file directory
+    """
+    for lease in _job_leases_iter(jobdir):
+        if lease.expired():
+            yield lease
+
+
+def get_timed_out_leases(dbjob_model, jobdir):
+    """Yield timed out Jobs that are leased.
+
+    @param dbjob_model: Django model for Job
+    @param jobdir: job lease file directory
+    """
+    all_timed_out_dbjobs = (
+            dbjob_model.objects
+            .filter(hostqueueentry__complete=False)
+            .extra(where=['created_on + INTERVAL timeout_mins MINUTE < NOW()'])
+            .distinct()
+    )
+    for _, lease in _filter_leased(jobdir, all_timed_out_dbjobs):
+        yield lease
+
+
+def get_marked_aborting_leases(dbjob_model, jobdir):
+    """Yield Jobs marked for aborting that are leased.
+
+    @param dbjob_model: Django model for Job
+    @param jobdir: job lease file directory
+    """
+    all_aborting_dbjobs = (
+            dbjob_model.objects
+            .filter(hostqueueentry__aborted=True)
+            .filter(hostqueueentry__complete=False)
+            .distinct()
+    )
+    for _, lease in _filter_leased(jobdir, all_aborting_dbjobs):
+        yield lease
+
+
+def make_lease_file(jobdir, job_id):
+    """Make lease file corresponding to a job.
+
+    In production, the lease file is created by job_shepherd (which is
+    written in Go); this function pins the public API and is used by tests.
+
+    @param jobdir: job lease file directory
+    @param job_id: Job ID
+    """
+    path = os.path.join(jobdir, str(job_id))
+    with open(path, 'w'):
+        pass
+    return path
+
+
+class JobLease(object):
+    "Represents a job lease."
+
+    def __init__(self, entry):
+        """Initialize instance.
+
+        @param entry: scandir.DirEntry instance
+        """
+        self._entry = entry
+
+    @property
+    def id(self):
+        """Return id of leased job."""
+        return int(self._entry.name)
+
+    def expired(self):
+        """Return True if the lease is expired."""
+        return not _fcntl_locked(self._entry.path)
+
+    def cleanup(self):
+        """Remove the lease file."""
+        os.unlink(self._entry.path)
+
+
+def _filter_leased(jobdir, dbjobs):
+    """Filter Job models for leased jobs.
+
+    Yields pairs of Job model and JobLease instances.
+
+    @param jobdir: job lease file directory
+    @param dbjobs: iterable of Django model Job instances
+    """
+    our_jobs = {job.id: job for job in _job_leases_iter(jobdir)}
+    for dbjob in dbjobs:
+        if dbjob.id in our_jobs:
+            yield dbjob, our_jobs[dbjob.id]
+
+
+def _job_leases_iter(jobdir):
+    """Yield JobLease instances from jobdir.
+
+    @param jobdir: job lease file directory
+    """
+    for entry in scandir(jobdir):
+        yield JobLease(entry)
+
+
+def _fcntl_locked(path):
+    """Return True if a file is fcntl locked.
+
+    @param path: path to file
+    """
+    fd = os.open(path, os.O_WRONLY)
+    try:
+        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+    except IOError:
+        return True
+    else:
+        return False
+    finally:
+        os.close(fd)
diff --git a/venv/lucifer/leasing_unittest.py b/venv/lucifer/leasing_unittest.py
new file mode 100644
index 0000000..f9cc9f4
--- /dev/null
+++ b/venv/lucifer/leasing_unittest.py
@@ -0,0 +1,100 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import contextlib
+import os
+import sys
+
+import mock
+import subprocess32
+
+from lucifer import leasing
+
+
+def test_get_expired_leases(tmpdir):
+    """Test get_expired_leases()."""
+    _make_lease(tmpdir, 123)
+    with _make_locked_lease(tmpdir, 124):
+        got = list(leasing.get_expired_leases(str(tmpdir)))
+    assert all(isinstance(job, leasing.JobLease) for job in got)
+    assert [job.id for job in got] == [123]
+
+
+def test_get_timed_out_leases(tmpdir):
+    """Test get_timed_out_leases()."""
+    mock_model = mock.Mock()
+    (
+            mock_model.objects
+            .filter()
+            .extra()
+            .distinct
+    ).return_value = [_StubJob(122), _StubJob(123)]
+    _make_lease(tmpdir, 123)
+    _make_lease(tmpdir, 124)
+    got = list(leasing.get_timed_out_leases(mock_model, str(tmpdir)))
+
+    assert all(isinstance(job, leasing.JobLease) for job in got)
+    assert 123 in [job.id for job in got]
+    assert 124 not in [job.id for job in got]
+
+
+def test_get_marked_aborting_leases(tmpdir):
+    """Test get_marked_aborting_leases()."""
+    mock_model = mock.Mock()
+    (
+            mock_model.objects
+            .filter()
+            .filter()
+            .distinct
+    ).return_value = [_StubJob(122), _StubJob(123)]
+    _make_lease(tmpdir, 123)
+    _make_lease(tmpdir, 124)
+    got = list(leasing.get_marked_aborting_leases(mock_model, str(tmpdir)))
+
+    assert all(isinstance(job, leasing.JobLease) for job in got)
+    assert 123 in [job.id for job in got]
+    assert 124 not in [job.id for job in got]
+
+
+def test_JobLease_cleanup(tmpdir):
+    """Test JobLease.cleanup()."""
+    path = _make_lease(tmpdir, 123)
+    for job in leasing.get_expired_leases(str(tmpdir)):
+        job.cleanup()
+    assert not os.path.exists(path)
+
+
+@contextlib.contextmanager
+def _make_locked_lease(tmpdir, job_id):
+    path = _make_lease(tmpdir, job_id)
+    with _lock_lease(path):
+        yield path
+
+
+@contextlib.contextmanager
+def _lock_lease(path):
+    with subprocess32.Popen(
+            [sys.executable, '-um',
+             'lucifer.scripts.fcntl_lock', path],
+            stdout=subprocess32.PIPE) as proc:
+        # Wait for lock grab.
+        proc.stdout.readline()
+        try:
+            yield
+        finally:
+            proc.terminate()
+
+
+def _make_lease(tmpdir, job_id):
+    return leasing.make_lease_file(str(tmpdir), job_id)
+
+
+class _StubJob(object):
+
+    def __init__(self, job_id):
+        self.id = job_id
diff --git a/venv/lucifer/scripts/fcntl_lock.py b/venv/lucifer/scripts/fcntl_lock.py
new file mode 100644
index 0000000..0e60539
--- /dev/null
+++ b/venv/lucifer/scripts/fcntl_lock.py
@@ -0,0 +1,41 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Grab fcntl lock on file.
+
+This is used for testing job leasing.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import fcntl
+import logging
+import os
+import sys
+import time
+
+from lucifer import loglib
+
+logger = logging.getLogger(__name__)
+
+
+def main(args):
+    """Main function
+
+    @param args: list of command line args
+    """
+    loglib.configure_logging(name='fcntl_lock')
+    fd = os.open(args[0], os.O_WRONLY)
+    logger.debug('Opened %s', args[0])
+    fcntl.lockf(fd, fcntl.LOCK_EX)
+    logger.debug('Grabbed lock')
+    print('done')
+    while True:
+        time.sleep(10)
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/venv/lucifer/scripts/job_aborter.py b/venv/lucifer/scripts/job_aborter.py
new file mode 100644
index 0000000..3a30232
--- /dev/null
+++ b/venv/lucifer/scripts/job_aborter.py
@@ -0,0 +1,97 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""job_aborter
+
+This monitors job leases via fcntl locks on the lease files.  If the
+lock on a job's lease file is no longer held, the owning process is
+considered dead and the job will be aborted.
+
+See http://goto.google.com/monitor_db_per_job_refactor
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import logging
+import sys
+import time
+
+from lucifer import autotest
+from lucifer import leasing
+from lucifer import loglib
+
+logger = logging.getLogger(__name__)
+
+
+def main(args):
+    """Main function
+
+    @param args: list of command line args
+    """
+
+    parser = argparse.ArgumentParser(prog='job_aborter', description=__doc__)
+    parser.add_argument('--jobdir', required=True)
+    loglib.add_logging_options(parser)
+    args = parser.parse_args(args)
+    loglib.configure_logging_with_args(parser, args)
+
+    autotest.monkeypatch()
+    autotest.load('frontend.setup_django_environment')
+    _main_loop(jobdir=args.jobdir)
+    return 0
+
+
+def _main_loop(jobdir):
+    while True:
+        _main_loop_body(jobdir)
+        time.sleep(60)
+
+
+def _main_loop_body(jobdir):
+    _mark_expired_jobs_aborted(jobdir)
+    _abort_timed_out_jobs(jobdir)
+    _abort_jobs_marked_aborting(jobdir)
+    _abort_special_tasks_marked_aborted()
+    # TODO(crbug.com/748234): abort_jobs_past_max_runtime goes into
+    # job_shepherd
+
+
+def _mark_expired_jobs_aborted(jobdir):
+    job_ids = {job.id for job in leasing.get_expired_leases(jobdir)}
+    _mark_aborted(job_ids)
+
+
+def _abort_timed_out_jobs(jobdir):
+    models = autotest.load('frontend.afe.models')
+    for lease in leasing.get_timed_out_leases(models, jobdir):
+        # TODO(crbug.com/748234): Abort job not implemented yet (in
+        # job_shepherd)
+        pass
+
+
+def _abort_jobs_marked_aborting(jobdir):
+    models = autotest.load('frontend.afe.models')
+    for lease in leasing.get_marked_aborting_leases(models, jobdir):
+        # TODO(crbug.com/748234): Abort job not implemented yet (in
+        # job_shepherd)
+        pass
+
+
+def _abort_special_tasks_marked_aborted():
+    # TODO(crbug.com/748234): Special tasks not implemented yet
+    pass
+
+
+def _mark_aborted(job_ids):
+    """Mark jobs aborted in database."""
+    models = autotest.load('frontend.afe.models')
+    for dbjob in models.Job.objects.filter(id__in=job_ids):
+        dbjob.abort()
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/venv/lucifer/scripts/run_event_command.py b/venv/lucifer/scripts/run_event_command.py
deleted file mode 100644
index 362ea54..0000000
--- a/venv/lucifer/scripts/run_event_command.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Run an event command.
-
-This is used for testing run_event_command() outside of Python.
-
-See eventlib for information about event commands.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import sys
-
-from lucifer import eventlib
-from lucifer import loglib
-
-
-def main(args):
-    """Main function
-
-    @param args: list of command line args
-    """
-    loglib.configure_logging(name='run_event_command')
-    return eventlib.run_event_command(
-            event_handler=_handle_event,
-            args=args)
-
-
-def _handle_event(event):
-    print(event.name)
-
-
-if __name__ == '__main__':
-    sys.exit(main(sys.argv[1:]))
diff --git a/venv/lucifer/scripts/wait_for_abort.py b/venv/lucifer/scripts/wait_for_abort.py
deleted file mode 100644
index a7ff222..0000000
--- a/venv/lucifer/scripts/wait_for_abort.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Wait for an abort command.
-
-This is used for testing run_event_command().
-
-See eventlib for information about event commands.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import sys
-
-from lucifer import loglib
-
-
-def main(_args):
-    """Main function
-
-    @param args: list of command line args
-    """
-    loglib.configure_logging(name='wait_for_abort')
-    while True:
-        line = sys.stdin.readline()
-        if line == 'abort\n':
-            sys.exit(0)
-
-
-if __name__ == '__main__':
-    sys.exit(main(sys.argv[1:]))
diff --git a/venv/lucifer/sigtrap.py b/venv/lucifer/sigtrap.py
deleted file mode 100644
index a604917..0000000
--- a/venv/lucifer/sigtrap.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Safe signal handling library.
-
-Safe signal handling is hard.  This module provides some tools to make
-it a little easier.
-
-Nothing in this module is thread safe.
-
-For reference, see signal(7) and
-https://docs.python.org/2/library/signal.html
-
-Python and signals:
-
-Python does not provide default signal handlers for SIGTERM or SIGHUP.
-This means that if you send SIGTERM to a Python process, it won't run
-any finally suites, __exit__() methods, or atexit functions!
-
-In general, anything process specific does not need to be cleaned up if
-the process is exiting.  This includes file descriptors (open files) and
-allocated memory.
-
-Anything external to the process needs to be cleaned up.  This includes
-lock files and IO transactions (storage or network).
-
-Subprocesses may or may not need to be cleaned up.  The orphaned
-subprocesses will be adopted and reaped by PID 1 or a subreaper.
-However, the signal, e.g. SIGTERM, will not be sent to subprocesses
-unless explicitly set to do so.
-
-It is possible to receive another signal while handling a signal.  After
-a signal handler returns, control returns to where the signal was
-received.  In other words, signal handler calls go onto a "stack",
-although signal handlers cannot return values.
-
-A exception raised by a signal handler that escapes the signal handler
-call will be raised where the signal was received.
-
-It is possible to receive a signal while handling an exception,
-including an exception raised while handling a signal.
-
-If multiple signals are received at once and their handlers all raise
-exceptions, you can probably expect Python to exit without running any
-finally suites, __exit__() methods, or atexit functions.
-
-It is possible to set signal handlers inside a signal handler.  Please
-do not do that.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import signal
-
-import contextlib2
-
-logger = logging.getLogger(__name__)
-
-
-class handle_signals(object):
-    """Context manager chaining multiple SignalHandlerContext.
-
-    This is single use.
-    """
-
-    def __init__(self, signums, handler):
-        self._handler = handler
-        self._signums = signums
-        self._stack = contextlib2.ExitStack()
-
-    def __enter__(self):
-        stack = self._stack.__enter__()
-        for signum in self._signums:
-            stack.enter_context(handle_signal(signum, self._handler))
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        return self._stack.__exit__(exc_type, exc_val, exc_tb)
-
-
-class handle_signal(object):
-    """Signal handler context.
-
-    This context manager sets a signal handler during the execution of
-    the suite and restores the original signal handler when exiting.
-
-    See signal.signal() for values of signum and handler.
-
-    This is reusable and reentrant.
-    """
-
-    def __init__(self, signum, handler):
-        self._handler = handler
-        self._signum = signum
-        self._old_handlers = []
-
-    def __enter__(self):
-        old = signal.signal(self._signum, self._handler)
-        self._old_handlers.append(old)
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        old = self._old_handlers.pop()
-        signal.signal(self._signum, old)
-        return False
diff --git a/venv/lucifer/sigtrap_unittest.py b/venv/lucifer/sigtrap_unittest.py
deleted file mode 100644
index e21e514..0000000
--- a/venv/lucifer/sigtrap_unittest.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-from signal import SIGHUP
-from signal import SIGTERM
-import unittest
-
-import mock
-from mock import sentinel
-
-from lucifer import sigtrap
-
-
-class SigtrapTestCase(unittest.TestCase):
-    """run_event_command() unit tests."""
-
-    def setUp(self):
-        super(SigtrapTestCase, self).setUp()
-        self.signal = _FakeSignal(sentinel.default_handler)
-        patch = mock.patch('signal.signal', self.signal)
-        patch.start()
-        self.addCleanup(patch.stop)
-
-    def test_handle_signal(self):
-        """Test handle_signal."""
-        handlers = self.signal.handlers
-        with sigtrap.handle_signal(SIGTERM, sentinel.new):
-            self.assertEqual(handlers[SIGTERM], sentinel.new)
-        self.assertEqual(handlers[SIGTERM], sentinel.default_handler)
-
-    def test_handle_signals(self):
-        """Test handle_signals."""
-        handlers = self.signal.handlers
-        with sigtrap.handle_signals([SIGTERM, SIGHUP],
-                                    sentinel.new):
-            self.assertEqual(handlers[SIGTERM], sentinel.new)
-            self.assertEqual(handlers[SIGHUP], sentinel.new)
-        self.assertEqual(handlers[SIGTERM], sentinel.default_handler)
-        self.assertEqual(handlers[SIGHUP], sentinel.default_handler)
-
-
-class _FakeSignal(object):
-    """Fake for signal.signal()"""
-
-    def __init__(self, handler):
-        self.handlers = collections.defaultdict(lambda: handler)
-
-    def __call__(self, signum, handler):
-        old = self.handlers[signum]
-        self.handlers[signum] = handler
-        return old
diff --git a/venv/requirements.txt b/venv/requirements.txt
index ef16180..5677908 100644
--- a/venv/requirements.txt
+++ b/venv/requirements.txt
@@ -10,6 +10,7 @@
 py==1.4.34
 pytest==3.1.3
 pytest-cov==2.5.1
+scandir==1.5
 setuptools==28.2.0
 six==1.10.0
 subprocess32==3.2.7