Remove libevent.

Fixed the bugs across the stack that this change exposed.

Add a poll() based implementation. Heavily leverages pollset infrastructure to allow small polls to be the norm. Exposes a mechanism to plug in epoll/kqueue for platforms where we have them.

Simplify iomgr callbacks to return one bit of success or failure (instead of the multi valued result that was mostly unused previously). This will ease the burden on new implementations, and the previous system provided no real value anyway.

Removed timeouts on endpoint read/write routines. This simplifies porting burden by providing a more orthogonal interface, and the functionality can always be replicated when desired by using an alarm combined with endpoint_shutdown. I'm fairly certain we ended up with this interface because it was convenient to do from libevent.

Things that need attention still:
- adding an fd to a pollset is O(n^2) - but this is probably ok given that we'll not
use this for multipolling once platform specific implementations are added.
- we rely on the backup poller too often - especially for SSL handshakes and for client
connection establishment we should have a better mechanism (tracking issue links
omitted during export).
- Linux needs to use epoll for multiple fds, FreeBSD variants (including
Darwin) need to use kqueue (tracking issue links omitted during export).
- Linux needs to use eventfd for poll kicking (tracking issue link omitted during export).
	Change on 2015/01/07 by ctiller <ctiller@google.com>
-------------
Created by MOE: http://code.google.com/p/moe-java
MOE_MIGRATED_REVID=83461069
diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c
new file mode 100644
index 0000000..03f56a5
--- /dev/null
+++ b/src/core/iomgr/iomgr.c
@@ -0,0 +1,204 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/iomgr.h"
+
+#include <stdlib.h>
+
+#include "src/core/iomgr/iomgr_internal.h"
+#include "src/core/iomgr/alarm_internal.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/sync.h>
+
+/* A callback (function + argument) queued for deferred execution, together
+   with the success bit that will be passed to it when it runs.  Nodes form
+   a singly linked FIFO (g_cbs_head/g_cbs_tail) protected by g_mu. */
+typedef struct delayed_callback {
+  grpc_iomgr_cb_func cb;         /* function to invoke */
+  void *cb_arg;                  /* opaque argument handed to cb */
+  int success;                   /* 1 = success, 0 = failure/shutdown */
+  struct delayed_callback *next; /* next node in the queue, NULL at tail */
+} delayed_callback;
+
+/* Global iomgr state; all of the following are protected by g_mu unless
+   otherwise noted. */
+static gpr_mu g_mu;
+static gpr_cv g_cv; /* signalled when work is queued or g_refs reaches 0 */
+static delayed_callback *g_cbs_head = NULL; /* FIFO of pending callbacks */
+static delayed_callback *g_cbs_tail = NULL; /* tail of the same FIFO */
+static int g_shutdown; /* set (once) by grpc_iomgr_shutdown */
+static int g_refs;     /* number of live iomgr-owned objects */
+/* set by the background thread when it exits; waited on during shutdown */
+static gpr_event g_background_callback_executor_done;
+
+/* Execute followup callbacks continuously.
+   Other threads may check in and help during pollset_work() */
+static void background_callback_executor(void *ignored) {
+  gpr_mu_lock(&g_mu);
+  while (!g_shutdown) {
+    gpr_timespec deadline = gpr_inf_future;
+    if (g_cbs_head) {
+      /* Pop one callback from the FIFO and run it OUTSIDE the lock, since
+         the callback may itself queue more work or acquire g_mu. */
+      delayed_callback *cb = g_cbs_head;
+      g_cbs_head = cb->next;
+      if (!g_cbs_head) g_cbs_tail = NULL;
+      gpr_mu_unlock(&g_mu);
+      cb->cb(cb->cb_arg, cb->success);
+      gpr_free(cb);
+      gpr_mu_lock(&g_mu);
+    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
+      /* An alarm fired and was handled; loop to look for more work.
+         (grpc_alarm_check also updates deadline for the wait below.) */
+    } else {
+      /* Nothing to do: sleep until kicked or until the next alarm. */
+      gpr_cv_wait(&g_cv, &g_mu, deadline);
+    }
+  }
+  gpr_mu_unlock(&g_mu);
+  /* Tell grpc_iomgr_shutdown this thread has finished. */
+  gpr_event_set(&g_background_callback_executor_done, (void *)1);
+}
+
+/* Wake every thread blocked in gpr_cv_wait on g_cv (including the
+   background executor) so it re-examines its state. */
+void grpc_kick_poller() { gpr_cv_broadcast(&g_cv); }
+
+/* Initialize the iomgr subsystem: global sync primitives, the alarm list,
+   platform-specific state, and the background callback executor thread.
+   Must be called before any other iomgr function. */
+void grpc_iomgr_init() {
+  gpr_thd_id id;
+  gpr_mu_init(&g_mu);
+  gpr_cv_init(&g_cv);
+  grpc_alarm_list_init(gpr_now());
+  g_refs = 0;
+  grpc_iomgr_platform_init();
+  gpr_event_init(&g_background_callback_executor_done);
+  /* Thread id is not retained; completion is observed via the event above. */
+  gpr_thd_new(&id, background_callback_executor, NULL, NULL);
+}
+
+/* Shut down the iomgr subsystem: drain all pending callbacks (invoked with
+   success == 0), wait up to 10 seconds for outstanding object refs to be
+   released, stop the background executor thread, and tear down global
+   state.  Logs and proceeds (likely leaking) if refs remain at the
+   deadline. */
+void grpc_iomgr_shutdown() {
+  delayed_callback *cb;
+  gpr_timespec shutdown_deadline =
+      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));
+
+  grpc_iomgr_platform_shutdown();
+
+  gpr_mu_lock(&g_mu);
+  g_shutdown = 1;
+  while (g_cbs_head || g_refs) {
+    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", g_refs,
+            g_cbs_head ? " and executing final callbacks" : "");
+    /* Run remaining queued callbacks with success == 0 to signal
+       shutdown; done outside the lock, as the callbacks may queue more. */
+    while (g_cbs_head) {
+      cb = g_cbs_head;
+      g_cbs_head = cb->next;
+      if (!g_cbs_head) g_cbs_tail = NULL;
+      gpr_mu_unlock(&g_mu);
+
+      cb->cb(cb->cb_arg, 0);
+      gpr_free(cb);
+      gpr_mu_lock(&g_mu);
+    }
+    if (g_refs) {
+      /* NOTE(review): this appears to treat a non-zero gpr_cv_wait result
+         as a timed-out wait and gives up only when no callbacks were
+         queued meanwhile -- confirm against the gpr sync API contract. */
+      if (gpr_cv_wait(&g_cv, &g_mu, shutdown_deadline) && g_cbs_head == NULL) {
+        gpr_log(GPR_DEBUG,
+                "Failed to free %d iomgr objects before shutdown deadline: "
+                "memory leaks are likely",
+                g_refs);
+        break;
+      }
+    }
+  }
+  gpr_mu_unlock(&g_mu);
+
+  /* Wait (unbounded) for the background executor thread to exit. */
+  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
+
+  grpc_alarm_list_shutdown();
+  gpr_mu_destroy(&g_mu);
+  gpr_cv_destroy(&g_cv);
+}
+
+/* Record that one more iomgr-owned object exists; shutdown blocks until
+   every ref is released via grpc_iomgr_unref. */
+void grpc_iomgr_ref() {
+  gpr_mu_lock(&g_mu);
+  ++g_refs;
+  gpr_mu_unlock(&g_mu);
+}
+
+/* Release one iomgr object ref; when the count hits zero, wake a waiter
+   (notably grpc_iomgr_shutdown) so it can finish tearing down. */
+void grpc_iomgr_unref() {
+  gpr_mu_lock(&g_mu);
+  if (0 == --g_refs) {
+    gpr_cv_signal(&g_cv);
+  }
+  gpr_mu_unlock(&g_mu);
+}
+
+/* Queue (cb, cb_arg) for deferred execution with the given success bit.
+   The node is appended to the global FIFO and a waiter is signalled so
+   the background executor (or a helping poller) picks it up. */
+void grpc_iomgr_add_delayed_callback(grpc_iomgr_cb_func cb, void *cb_arg,
+                                     int success) {
+  delayed_callback *dcb = gpr_malloc(sizeof(delayed_callback));
+  dcb->cb = cb;
+  dcb->cb_arg = cb_arg;
+  dcb->success = success;
+  gpr_mu_lock(&g_mu);
+  dcb->next = NULL;
+  /* Append to the tail to preserve FIFO callback ordering. */
+  if (!g_cbs_tail) {
+    g_cbs_head = g_cbs_tail = dcb;
+  } else {
+    g_cbs_tail->next = dcb;
+    g_cbs_tail = dcb;
+  }
+  gpr_cv_signal(&g_cv);
+  gpr_mu_unlock(&g_mu);
+}
+
+/* Convenience wrapper: queue a callback that will run with success == 1. */
+void grpc_iomgr_add_callback(grpc_iomgr_cb_func cb, void *cb_arg) {
+  grpc_iomgr_add_delayed_callback(cb, cb_arg, 1);
+}
+
+/* Opportunistically drain the delayed-callback queue on the calling
+   thread.  Each callback receives (success && cb->success).  If drop_mu is
+   non-NULL it is released before the first callback runs and re-acquired
+   before returning, so callers may hold a lock the callbacks might need.
+   Returns the number of callbacks executed. */
+int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
+  int n = 0;
+  gpr_mu *retake_mu = NULL;
+  delayed_callback *cb;
+  for (;;) {
+    /* check for new work */
+    /* trylock: if another thread holds g_mu we simply decline to help
+       rather than risk blocking (or deadlocking) here. */
+    if (!gpr_mu_trylock(&g_mu)) {
+      break;
+    }
+    cb = g_cbs_head;
+    if (!cb) {
+      gpr_mu_unlock(&g_mu);
+      break;
+    }
+    g_cbs_head = cb->next;
+    if (!g_cbs_head) g_cbs_tail = NULL;
+    gpr_mu_unlock(&g_mu);
+    /* if we have a mutex to drop, do so before executing work */
+    if (drop_mu) {
+      gpr_mu_unlock(drop_mu);
+      retake_mu = drop_mu;
+      drop_mu = NULL; /* only drop once, even across multiple callbacks */
+    }
+    cb->cb(cb->cb_arg, success && cb->success);
+    gpr_free(cb);
+    n++;
+  }
+  /* Restore the caller's lock if we dropped it above. */
+  if (retake_mu) {
+    gpr_mu_lock(retake_mu);
+  }
+  return n;
+}