asyncio: sync with Tulip, add a new asyncio.coroutines module
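
Move the @coroutine decorator, the CoroWrapper debug wrapper, iscoroutine(),
iscoroutinefunction() and _format_coroutine() out of asyncio/tasks.py into a
new asyncio/coroutines.py module.  base_events, base_subprocess, locks,
streams, subprocess, test_utils, unix_events and windows_events now import
the decorator from asyncio.coroutines instead of asyncio.tasks, and
asyncio/__init__.py re-exports coroutines.__all__, so the public names
asyncio.coroutine, asyncio.iscoroutine and asyncio.iscoroutinefunction are
unchanged.

A minimal usage sketch (not part of the patch) showing that package-level
callers are unaffected by the move:

    import asyncio

    @asyncio.coroutine
    def compute(x):
        # Still a generator-based coroutine; the decorator now lives in
        # asyncio.coroutines but is re-exported as asyncio.coroutine.
        yield from asyncio.sleep(0.01)
        return x * 2

    assert asyncio.iscoroutinefunction(compute)
    loop = asyncio.get_event_loop()
    assert loop.run_until_complete(compute(21)) == 42
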
diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py
index 3df2f80..789424e 100644
--- a/Lib/asyncio/__init__.py
+++ b/Lib/asyncio/__init__.py
@@ -18,6 +18,7 @@
         import _overlapped  # Will also be exported.
 
 # This relies on each of the submodules having an __all__ variable.
+from .coroutines import *
 from .events import *
 from .futures import *
 from .locks import *
@@ -34,7 +35,8 @@
     from .unix_events import *  # pragma: no cover
 
 
-__all__ = (events.__all__ +
+__all__ = (coroutines.__all__ +
+           events.__all__ +
            futures.__all__ +
            locks.__all__ +
            protocols.__all__ +
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py
index 90115e5..c42e7f9 100644
--- a/Lib/asyncio/base_events.py
+++ b/Lib/asyncio/base_events.py
@@ -26,9 +26,11 @@
 import os
 import sys
 
+from . import coroutines
 from . import events
 from . import futures
 from . import tasks
+from .coroutines import coroutine
 from .log import logger
 
 
@@ -118,7 +120,7 @@
             if not waiter.done():
                 waiter.set_result(waiter)
 
-    @tasks.coroutine
+    @coroutine
     def wait_closed(self):
         if self.sockets is None or self.waiters is None:
             return
@@ -175,7 +177,7 @@
         """Create write pipe transport."""
         raise NotImplementedError
 
-    @tasks.coroutine
+    @coroutine
     def _make_subprocess_transport(self, protocol, args, shell,
                                    stdin, stdout, stderr, bufsize,
                                    extra=None, **kwargs):
@@ -298,7 +300,7 @@
 
     def call_at(self, when, callback, *args):
         """Like call_later(), but uses an absolute time."""
-        if tasks.iscoroutinefunction(callback):
+        if coroutines.iscoroutinefunction(callback):
             raise TypeError("coroutines cannot be used with call_at()")
         if self._debug:
             self._assert_is_current_event_loop()
@@ -324,7 +326,7 @@
         return handle
 
     def _call_soon(self, callback, args, check_loop):
-        if tasks.iscoroutinefunction(callback):
+        if coroutines.iscoroutinefunction(callback):
             raise TypeError("coroutines cannot be used with call_soon()")
         if self._debug and check_loop:
             self._assert_is_current_event_loop()
@@ -361,7 +363,7 @@
         return handle
 
     def run_in_executor(self, executor, callback, *args):
-        if tasks.iscoroutinefunction(callback):
+        if coroutines.iscoroutinefunction(callback):
             raise TypeError("coroutines cannot be used with run_in_executor()")
         if isinstance(callback, events.Handle):
             assert not args
@@ -389,7 +391,7 @@
     def getnameinfo(self, sockaddr, flags=0):
         return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
 
-    @tasks.coroutine
+    @coroutine
     def create_connection(self, protocol_factory, host=None, port=None, *,
                           ssl=None, family=0, proto=0, flags=0, sock=None,
                           local_addr=None, server_hostname=None):
@@ -505,7 +507,7 @@
             sock, protocol_factory, ssl, server_hostname)
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def _create_connection_transport(self, sock, protocol_factory, ssl,
                                      server_hostname):
         protocol = protocol_factory()
@@ -521,7 +523,7 @@
         yield from waiter
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def create_datagram_endpoint(self, protocol_factory,
                                  local_addr=None, remote_addr=None, *,
                                  family=0, proto=0, flags=0):
@@ -593,7 +595,7 @@
         transport = self._make_datagram_transport(sock, protocol, r_addr)
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def create_server(self, protocol_factory, host=None, port=None,
                       *,
                       family=socket.AF_UNSPEC,
@@ -672,7 +674,7 @@
             self._start_serving(protocol_factory, sock, ssl, server)
         return server
 
-    @tasks.coroutine
+    @coroutine
     def connect_read_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = futures.Future(loop=self)
@@ -680,7 +682,7 @@
         yield from waiter
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def connect_write_pipe(self, protocol_factory, pipe):
         protocol = protocol_factory()
         waiter = futures.Future(loop=self)
@@ -688,7 +690,7 @@
         yield from waiter
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=False, shell=True, bufsize=0,
@@ -706,7 +708,7 @@
             protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def subprocess_exec(self, protocol_factory, program, *args,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, universal_newlines=False,
diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py
index b78f816..2f933c5 100644
--- a/Lib/asyncio/base_subprocess.py
+++ b/Lib/asyncio/base_subprocess.py
@@ -2,8 +2,8 @@
 import subprocess
 
 from . import protocols
-from . import tasks
 from . import transports
+from .coroutines import coroutine
 
 
 class BaseSubprocessTransport(transports.SubprocessTransport):
@@ -65,7 +65,7 @@
     def kill(self):
         self._proc.kill()
 
-    @tasks.coroutine
+    @coroutine
     def _post_init(self):
         proc = self._proc
         loop = self._loop
diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py
new file mode 100644
index 0000000..5b6d93f
--- /dev/null
+++ b/Lib/asyncio/coroutines.py
@@ -0,0 +1,140 @@
+__all__ = ['coroutine',
+           'iscoroutinefunction', 'iscoroutine']
+
+import functools
+import inspect
+import os
+import sys
+import traceback
+
+from . import events
+from . import futures
+from .log import logger
+
+# If you set _DEBUG to true, @coroutine will wrap the resulting
+# generator objects in a CoroWrapper instance (defined below).  That
+# instance will log a message when the generator is never iterated
+# over, which may happen when you forget to use "yield from" with a
+# coroutine call.  Note that the value of the _DEBUG flag is taken
+# when the decorator is used, so to be of any use it must be set
+# before you define your coroutines.  A downside of using this feature
+# is that tracebacks show entries for the CoroWrapper.__next__ method
+# when _DEBUG is true.
+_DEBUG = (not sys.flags.ignore_environment
+          and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+
+_PY35 = (sys.version_info >= (3, 5))
+
+class CoroWrapper:
+    # Wrapper for coroutine in _DEBUG mode.
+
+    def __init__(self, gen, func):
+        assert inspect.isgenerator(gen), gen
+        self.gen = gen
+        self.func = func
+        self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self.gen)
+
+    def send(self, *value):
+        # We use `*value` because of a bug in CPythons prior
+        # to 3.4.1. See issue #21209 and test_yield_from_corowrapper
+        # for details.  This workaround should be removed in 3.5.0.
+        if len(value) == 1:
+            value = value[0]
+        return self.gen.send(value)
+
+    def throw(self, exc):
+        return self.gen.throw(exc)
+
+    def close(self):
+        return self.gen.close()
+
+    @property
+    def gi_frame(self):
+        return self.gen.gi_frame
+
+    @property
+    def gi_running(self):
+        return self.gen.gi_running
+
+    @property
+    def gi_code(self):
+        return self.gen.gi_code
+
+    def __del__(self):
+        # Be careful accessing self.gen.gi_frame -- self.gen might not exist.
+        gen = getattr(self, 'gen', None)
+        frame = getattr(gen, 'gi_frame', None)
+        if frame is not None and frame.f_lasti == -1:
+            func = events._format_callback(self.func, ())
+            tb = ''.join(traceback.format_list(self._source_traceback))
+            message = ('Coroutine %s was never yielded from\n'
+                       'Coroutine object created at (most recent call last):\n'
+                       '%s'
+                       % (func, tb.rstrip()))
+            logger.error(message)
+
+
+def coroutine(func):
+    """Decorator to mark coroutines.
+
+    If the coroutine is not yielded from before it is destroyed,
+    an error message is logged.
+    """
+    if inspect.isgeneratorfunction(func):
+        coro = func
+    else:
+        @functools.wraps(func)
+        def coro(*args, **kw):
+            res = func(*args, **kw)
+            if isinstance(res, futures.Future) or inspect.isgenerator(res):
+                res = yield from res
+            return res
+
+    if not _DEBUG:
+        wrapper = coro
+    else:
+        @functools.wraps(func)
+        def wrapper(*args, **kwds):
+            w = CoroWrapper(coro(*args, **kwds), func)
+            if w._source_traceback:
+                del w._source_traceback[-1]
+            w.__name__ = func.__name__
+            if _PY35:
+                w.__qualname__ = func.__qualname__
+            w.__doc__ = func.__doc__
+            return w
+
+    wrapper._is_coroutine = True  # For iscoroutinefunction().
+    return wrapper
+
+
+def iscoroutinefunction(func):
+    """Return True if func is a decorated coroutine function."""
+    return getattr(func, '_is_coroutine', False)
+
+
+def iscoroutine(obj):
+    """Return True if obj is a coroutine object."""
+    return isinstance(obj, CoroWrapper) or inspect.isgenerator(obj)
+
+
+def _format_coroutine(coro):
+    assert iscoroutine(coro)
+    if _PY35:
+        coro_name = coro.__qualname__
+    else:
+        coro_name = coro.__name__
+
+    filename = coro.gi_code.co_filename
+    if coro.gi_frame is not None:
+        lineno = coro.gi_frame.f_lineno
+        return '%s() at %s:%s' % (coro_name, filename, lineno)
+    else:
+        lineno = coro.gi_code.co_firstlineno
+        return '%s() done at %s:%s' % (coro_name, filename, lineno)
diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py
index 29c4434..8d9e3b4 100644
--- a/Lib/asyncio/locks.py
+++ b/Lib/asyncio/locks.py
@@ -6,7 +6,7 @@
 
 from . import events
 from . import futures
-from . import tasks
+from .coroutines import coroutine
 
 
 class _ContextManager:
@@ -112,7 +112,7 @@
         """Return True if lock is acquired."""
         return self._locked
 
-    @tasks.coroutine
+    @coroutine
     def acquire(self):
         """Acquire a lock.
 
@@ -225,7 +225,7 @@
         to true again."""
         self._value = False
 
-    @tasks.coroutine
+    @coroutine
     def wait(self):
         """Block until the internal flag is true.
 
@@ -278,7 +278,7 @@
             extra = '{},waiters:{}'.format(extra, len(self._waiters))
         return '<{} [{}]>'.format(res[1:-1], extra)
 
-    @tasks.coroutine
+    @coroutine
     def wait(self):
         """Wait until notified.
 
@@ -306,7 +306,7 @@
         finally:
             yield from self.acquire()
 
-    @tasks.coroutine
+    @coroutine
     def wait_for(self, predicate):
         """Wait until a predicate becomes true.
 
@@ -402,7 +402,7 @@
         """Returns True if semaphore can not be acquired immediately."""
         return self._value == 0
 
-    @tasks.coroutine
+    @coroutine
     def acquire(self):
         """Acquire a semaphore.
 
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index e239248..a10b969 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -10,10 +10,12 @@
 if hasattr(socket, 'AF_UNIX'):
     __all__.extend(['open_unix_connection', 'start_unix_server'])
 
+from . import coroutines
 from . import events
 from . import futures
 from . import protocols
 from . import tasks
+from .coroutines import coroutine
 
 
 _DEFAULT_LIMIT = 2**16
@@ -33,7 +35,7 @@
         self.expected = expected
 
 
-@tasks.coroutine
+@coroutine
 def open_connection(host=None, port=None, *,
                     loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """A wrapper for create_connection() returning a (reader, writer) pair.
@@ -63,7 +65,7 @@
     return reader, writer
 
 
-@tasks.coroutine
+@coroutine
 def start_server(client_connected_cb, host=None, port=None, *,
                  loop=None, limit=_DEFAULT_LIMIT, **kwds):
     """Start a socket server, call back for each client connected.
@@ -102,7 +104,7 @@
 if hasattr(socket, 'AF_UNIX'):
     # UNIX Domain Sockets are supported on this platform
 
-    @tasks.coroutine
+    @coroutine
     def open_unix_connection(path=None, *,
                              loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `open_connection` but works with UNIX Domain Sockets."""
@@ -116,7 +118,7 @@
         return reader, writer
 
 
-    @tasks.coroutine
+    @coroutine
     def start_unix_server(client_connected_cb, path=None, *,
                           loop=None, limit=_DEFAULT_LIMIT, **kwds):
         """Similar to `start_server` but works with UNIX Domain Sockets."""
@@ -210,7 +212,7 @@
                                                self._loop)
             res = self._client_connected_cb(self._stream_reader,
                                             self._stream_writer)
-            if tasks.iscoroutine(res):
+            if coroutines.iscoroutine(res):
                 tasks.Task(res, loop=self._loop)
 
     def connection_lost(self, exc):
@@ -373,7 +375,7 @@
                                'already waiting for incoming data' % func_name)
         return futures.Future(loop=self._loop)
 
-    @tasks.coroutine
+    @coroutine
     def readline(self):
         if self._exception is not None:
             raise self._exception
@@ -410,7 +412,7 @@
         self._maybe_resume_transport()
         return bytes(line)
 
-    @tasks.coroutine
+    @coroutine
     def read(self, n=-1):
         if self._exception is not None:
             raise self._exception
@@ -449,7 +451,7 @@
         self._maybe_resume_transport()
         return data
 
-    @tasks.coroutine
+    @coroutine
     def readexactly(self, n):
         if self._exception is not None:
             raise self._exception
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index 414e023..2cd6de6 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -8,6 +8,7 @@
 from . import protocols
 from . import streams
 from . import tasks
+from .coroutines import coroutine
 
 
 PIPE = subprocess.PIPE
@@ -94,7 +95,7 @@
     def returncode(self):
         return self._transport.get_returncode()
 
-    @tasks.coroutine
+    @coroutine
     def wait(self):
         """Wait until the process exit and return the process return code."""
         returncode = self._transport.get_returncode()
@@ -122,17 +123,17 @@
         self._check_alive()
         self._transport.kill()
 
-    @tasks.coroutine
+    @coroutine
     def _feed_stdin(self, input):
         self.stdin.write(input)
         yield from self.stdin.drain()
         self.stdin.close()
 
-    @tasks.coroutine
+    @coroutine
     def _noop(self):
         return None
 
-    @tasks.coroutine
+    @coroutine
     def _read_stream(self, fd):
         transport = self._transport.get_pipe_transport(fd)
         if fd == 2:
@@ -144,7 +145,7 @@
         transport.close()
         return output
 
-    @tasks.coroutine
+    @coroutine
     def communicate(self, input=None):
         if input:
             stdin = self._feed_stdin(input)
@@ -164,7 +165,7 @@
         return (stdout, stderr)
 
 
-@tasks.coroutine
+@coroutine
 def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                             loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
     if loop is None:
@@ -178,7 +179,7 @@
     yield from protocol.waiter
     return Process(transport, protocol, loop)
 
-@tasks.coroutine
+@coroutine
 def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                            stderr=None, loop=None,
                            limit=streams._DEFAULT_LIMIT, **kwds):
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index db0bbf3..5b8f3eb 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -1,7 +1,6 @@
 """Support for tasks, coroutines and the scheduler."""
 
-__all__ = ['coroutine', 'Task',
-           'iscoroutinefunction', 'iscoroutine',
+__all__ = ['Task',
            'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
            'wait', 'wait_for', 'as_completed', 'sleep', 'async',
            'gather', 'shield',
@@ -11,146 +10,20 @@
 import functools
 import inspect
 import linecache
-import os
 import sys
 import traceback
 import weakref
 
+from . import coroutines
 from . import events
 from . import futures
+from .coroutines import coroutine
 from .log import logger
 
-# If you set _DEBUG to true, @coroutine will wrap the resulting
-# generator objects in a CoroWrapper instance (defined below).  That
-# instance will log a message when the generator is never iterated
-# over, which may happen when you forget to use "yield from" with a
-# coroutine call.  Note that the value of the _DEBUG flag is taken
-# when the decorator is used, so to be of any use it must be set
-# before you define your coroutines.  A downside of using this feature
-# is that tracebacks show entries for the CoroWrapper.__next__ method
-# when _DEBUG is true.
-_DEBUG = (not sys.flags.ignore_environment
-          and bool(os.environ.get('PYTHONASYNCIODEBUG')))
-
 _PY34 = (sys.version_info >= (3, 4))
 _PY35 = (sys.version_info >= (3, 5))
 
 
-class CoroWrapper:
-    # Wrapper for coroutine in _DEBUG mode.
-
-    def __init__(self, gen, func):
-        assert inspect.isgenerator(gen), gen
-        self.gen = gen
-        self.func = func
-        self._source_traceback = traceback.extract_stack(sys._getframe(1))
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        return next(self.gen)
-
-    def send(self, *value):
-        # We use `*value` because of a bug in CPythons prior
-        # to 3.4.1. See issue #21209 and test_yield_from_corowrapper
-        # for details.  This workaround should be removed in 3.5.0.
-        if len(value) == 1:
-            value = value[0]
-        return self.gen.send(value)
-
-    def throw(self, exc):
-        return self.gen.throw(exc)
-
-    def close(self):
-        return self.gen.close()
-
-    @property
-    def gi_frame(self):
-        return self.gen.gi_frame
-
-    @property
-    def gi_running(self):
-        return self.gen.gi_running
-
-    @property
-    def gi_code(self):
-        return self.gen.gi_code
-
-    def __del__(self):
-        # Be careful accessing self.gen.frame -- self.gen might not exist.
-        gen = getattr(self, 'gen', None)
-        frame = getattr(gen, 'gi_frame', None)
-        if frame is not None and frame.f_lasti == -1:
-            func = events._format_callback(self.func, ())
-            tb = ''.join(traceback.format_list(self._source_traceback))
-            message = ('Coroutine %s was never yielded from\n'
-                       'Coroutine object created at (most recent call last):\n'
-                       '%s'
-                       % (func, tb.rstrip()))
-            logger.error(message)
-
-
-def coroutine(func):
-    """Decorator to mark coroutines.
-
-    If the coroutine is not yielded from before it is destroyed,
-    an error message is logged.
-    """
-    if inspect.isgeneratorfunction(func):
-        coro = func
-    else:
-        @functools.wraps(func)
-        def coro(*args, **kw):
-            res = func(*args, **kw)
-            if isinstance(res, futures.Future) or inspect.isgenerator(res):
-                res = yield from res
-            return res
-
-    if not _DEBUG:
-        wrapper = coro
-    else:
-        @functools.wraps(func)
-        def wrapper(*args, **kwds):
-            w = CoroWrapper(coro(*args, **kwds), func)
-            if w._source_traceback:
-                del w._source_traceback[-1]
-            w.__name__ = func.__name__
-            if _PY35:
-                w.__qualname__ = func.__qualname__
-            w.__doc__ = func.__doc__
-            return w
-
-    wrapper._is_coroutine = True  # For iscoroutinefunction().
-    return wrapper
-
-
-def iscoroutinefunction(func):
-    """Return True if func is a decorated coroutine function."""
-    return getattr(func, '_is_coroutine', False)
-
-
-def iscoroutine(obj):
-    """Return True if obj is a coroutine object."""
-    return isinstance(obj, CoroWrapper) or inspect.isgenerator(obj)
-
-
-def _format_coroutine(coro):
-    assert iscoroutine(coro)
-    if _PY35:
-        coro_name = coro.__qualname__
-    else:
-        coro_name = coro.__name__
-
-    filename = coro.gi_code.co_filename
-    if coro.gi_frame is not None:
-        lineno = coro.gi_frame.f_lineno
-        return '%s() at %s:%s' % (coro_name, filename, lineno)
-    else:
-        lineno = coro.gi_code.co_firstlineno
-        return '%s() done at %s:%s' % (coro_name, filename, lineno)
-
-
 class Task(futures.Future):
     """A coroutine wrapped in a Future."""
 
@@ -193,7 +66,7 @@
         return {t for t in cls._all_tasks if t._loop is loop}
 
     def __init__(self, coro, *, loop=None):
-        assert iscoroutine(coro), repr(coro)  # Not a coroutine function!
+        assert coroutines.iscoroutine(coro), repr(coro)  # Not a coroutine function!
         super().__init__(loop=loop)
         if self._source_traceback:
             del self._source_traceback[-1]
@@ -225,7 +98,7 @@
         else:
             info.append(self._state.lower())
 
-        info.append(_format_coroutine(self._coro))
+        info.append(coroutines._format_coroutine(self._coro))
 
         if self._state == futures._FINISHED:
             info.append(self._format_result())
@@ -444,7 +317,7 @@
     Note: This does not raise TimeoutError! Futures that aren't done
     when the timeout occurs are returned in the second set.
     """
-    if isinstance(fs, futures.Future) or iscoroutine(fs):
+    if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
         raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
     if not fs:
         raise ValueError('Set of coroutines/Futures is empty.')
@@ -566,7 +439,7 @@
 
     Note: The futures 'f' are not necessarily members of fs.
     """
-    if isinstance(fs, futures.Future) or iscoroutine(fs):
+    if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
         raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
     loop = loop if loop is not None else events.get_event_loop()
     todo = {async(f, loop=loop) for f in set(fs)}
@@ -624,7 +497,7 @@
         if loop is not None and loop is not coro_or_future._loop:
             raise ValueError('loop argument must agree with Future')
         return coro_or_future
-    elif iscoroutine(coro_or_future):
+    elif coroutines.iscoroutine(coro_or_future):
         task = Task(coro_or_future, loop=loop)
         if task._source_traceback:
             del task._source_traceback[-1]
diff --git a/Lib/asyncio/test_utils.py b/Lib/asyncio/test_utils.py
index d9c7ae2..94054e7 100644
--- a/Lib/asyncio/test_utils.py
+++ b/Lib/asyncio/test_utils.py
@@ -27,6 +27,7 @@
 from . import futures
 from . import selectors
 from . import tasks
+from .coroutines import coroutine
 
 
 if sys.platform == 'win32':  # pragma: no cover
@@ -43,7 +44,7 @@
 
 
 def run_briefly(loop):
-    @tasks.coroutine
+    @coroutine
     def once():
         pass
     gen = once()
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index ad4c229..1cb70ff 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -16,8 +16,8 @@
 from . import constants
 from . import events
 from . import selector_events
-from . import tasks
 from . import transports
+from .coroutines import coroutine
 from .log import logger
 
 
@@ -147,7 +147,7 @@
                                    extra=None):
         return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
 
-    @tasks.coroutine
+    @coroutine
     def _make_subprocess_transport(self, protocol, args, shell,
                                    stdin, stdout, stderr, bufsize,
                                    extra=None, **kwargs):
@@ -164,7 +164,7 @@
     def _child_watcher_callback(self, pid, returncode, transp):
         self.call_soon_threadsafe(transp._process_exited, returncode)
 
-    @tasks.coroutine
+    @coroutine
     def create_unix_connection(self, protocol_factory, path, *,
                                ssl=None, sock=None,
                                server_hostname=None):
@@ -199,7 +199,7 @@
             sock, protocol_factory, ssl, server_hostname)
         return transport, protocol
 
-    @tasks.coroutine
+    @coroutine
     def create_unix_server(self, protocol_factory, path=None, *,
                            sock=None, backlog=100, ssl=None):
         if isinstance(ssl, bool):
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index 19f2588..93b71b2 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -14,8 +14,9 @@
 from . import selector_events
 from . import tasks
 from . import windows_utils
-from .log import logger
 from . import _overlapped
+from .coroutines import coroutine
+from .log import logger
 
 
 __all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
@@ -129,7 +130,7 @@
     def _socketpair(self):
         return windows_utils.socketpair()
 
-    @tasks.coroutine
+    @coroutine
     def create_pipe_connection(self, protocol_factory, address):
         f = self._proactor.connect_pipe(address)
         pipe = yield from f
@@ -138,7 +139,7 @@
                                                  extra={'addr': address})
         return trans, protocol
 
-    @tasks.coroutine
+    @coroutine
     def start_serving_pipe(self, protocol_factory, address):
         server = PipeServer(address)
 
@@ -172,7 +173,7 @@
         self.call_soon(loop)
         return [server]
 
-    @tasks.coroutine
+    @coroutine
     def _make_subprocess_transport(self, protocol, args, shell,
                                    stdin, stdout, stderr, bufsize,
                                    extra=None, **kwargs):
@@ -258,7 +259,7 @@
             conn.settimeout(listener.gettimeout())
             return conn, conn.getpeername()
 
-        @tasks.coroutine
+        @coroutine
         def accept_coro(future, conn):
             # Coroutine closing the accept socket if the future is cancelled
             try: