/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/iomgr/iomgr.h"

#include <assert.h>
#include <stdlib.h>

#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/alarm_internal.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/sync.h>

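/* Global iomgr state: g_mu guards the pending-callback FIFO (g_cbs_head /
   g_cbs_tail) and the object refcount g_refs; g_rcv is signalled when the
   refcount drops to zero. */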
static gpr_mu g_mu;
static gpr_cv g_rcv;
static grpc_iomgr_closure *g_cbs_head = NULL;
static grpc_iomgr_closure *g_cbs_tail = NULL;
static int g_shutdown;
static int g_refs;
static gpr_event g_background_callback_executor_done;

/* Execute followup callbacks continuously.
   Other threads may check in and help during pollset_work() */
static void background_callback_executor(void *ignored) {
  gpr_mu_lock(&g_mu);
  while (!g_shutdown) {
    gpr_timespec deadline = gpr_inf_future;
    gpr_timespec short_deadline =
        gpr_time_add(gpr_now(), gpr_time_from_millis(100));
    if (g_cbs_head) {
      grpc_iomgr_closure *iocb = g_cbs_head;
      int is_cb_ext_managed;
      g_cbs_head = iocb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      /* capture the managed state, as the callback may deallocate itself */
      is_cb_ext_managed = iocb->is_ext_managed;
      assert(iocb->success >= 0);
      iocb->cb(iocb->cb_arg, iocb->success);
      if (!is_cb_ext_managed) {
        gpr_free(iocb);
      }
      gpr_mu_lock(&g_mu);
    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
    } else {
      gpr_mu_unlock(&g_mu);
      gpr_sleep_until(gpr_time_min(short_deadline, deadline));
      gpr_mu_lock(&g_mu);
    }
  }
  gpr_mu_unlock(&g_mu);
  gpr_event_set(&g_background_callback_executor_done, (void *)1);
}

void grpc_kick_poller(void) {
  /* Empty. The background callback executor polls periodically. The activity
   * the kicker is trying to draw the executor's attention to will be picked up
   * either by one of the periodic wakeups or by one of the polling application
   * threads. */
}

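/* Initialize the global state, the alarm list and the platform iomgr, then
   spawn the background callback executor thread. */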
void grpc_iomgr_init(void) {
  gpr_thd_id id;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_rcv);
  grpc_alarm_list_init(gpr_now());
  g_refs = 0;
  grpc_iomgr_platform_init();
  gpr_event_init(&g_background_callback_executor_done);
  gpr_thd_new(&id, background_callback_executor, NULL, NULL);
}

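/* Run any remaining pending callbacks (with success=0), wait up to 10 seconds
   for all iomgr object refs to be released, then tear down the platform
   iomgr, the alarm list and the global synchronization primitives. */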
void grpc_iomgr_shutdown(void) {
  grpc_iomgr_closure *iocb;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_refs) {
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", g_refs,
            g_cbs_head ? " and executing final callbacks" : "");
    while (g_cbs_head) {
      int is_cb_ext_managed;
      iocb = g_cbs_head;
      g_cbs_head = iocb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);

      /* capture the managed state, as the callback may deallocate itself */
      is_cb_ext_managed = iocb->is_ext_managed;
      iocb->cb(iocb->cb_arg, 0);
      if (!is_cb_ext_managed) {
        gpr_free(iocb);
      }
      gpr_mu_lock(&g_mu);
    }
    if (g_refs) {
      int timeout = 0;
      gpr_timespec short_deadline = gpr_time_add(gpr_now(),
                                                 gpr_time_from_millis(100));
      while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
        if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
          timeout = 1;
          break;
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                g_refs);
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}

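/* Register a live iomgr object; grpc_iomgr_shutdown waits until every
   reference is released via grpc_iomgr_unref. */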
void grpc_iomgr_ref(void) {
  gpr_mu_lock(&g_mu);
  ++g_refs;
  gpr_mu_unlock(&g_mu);
}

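/* Release a reference taken with grpc_iomgr_ref; signals the shutdown waiter
   when the last reference is dropped. */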
void grpc_iomgr_unref(void) {
  gpr_mu_lock(&g_mu);
  if (0 == --g_refs) {
    gpr_cv_signal(&g_rcv);
  }
  gpr_mu_unlock(&g_mu);
}

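/* Allocate a closure wrapping cb(cb_arg, success). If is_ext_managed is zero,
   the iomgr frees the closure after the callback runs; otherwise the caller
   manages the memory. success starts at -1 (not yet set). */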
grpc_iomgr_closure *grpc_iomgr_cb_create(grpc_iomgr_cb_func cb, void *cb_arg,
                                         int is_ext_managed) {
  grpc_iomgr_closure *iocb = gpr_malloc(sizeof(grpc_iomgr_closure));
  iocb->cb = cb;
  iocb->cb_arg = cb_arg;
  iocb->is_ext_managed = is_ext_managed;
  iocb->success = -1; /* uninitialized */
  iocb->next = NULL;
  return iocb;
}

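/* Append a closure to the global callback FIFO with the given success value;
   it will be run later by the background executor or by a polling thread. */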
void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *iocb, int success) {
  iocb->success = success;
  gpr_mu_lock(&g_mu);
  iocb->next = NULL;
  if (!g_cbs_tail) {
    g_cbs_head = g_cbs_tail = iocb;
  } else {
    g_cbs_tail->next = iocb;
    g_cbs_tail = iocb;
  }
  gpr_mu_unlock(&g_mu);
}

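/* Schedule a closure with success=1. */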
void grpc_iomgr_add_callback(grpc_iomgr_closure *iocb) {
  grpc_iomgr_add_delayed_callback(iocb, 1);
}

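/* Run as many pending callbacks as can be claimed without blocking on g_mu.
   If drop_mu is non-NULL it is released before the first callback runs and
   re-acquired before returning. Each callback receives (success && its own
   success flag). Returns the number of callbacks executed. */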
int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
  int n = 0;
  gpr_mu *retake_mu = NULL;
  grpc_iomgr_closure *iocb;
  for (;;) {
    int is_cb_ext_managed;
    /* check for new work */
    if (!gpr_mu_trylock(&g_mu)) {
      break;
    }
    iocb = g_cbs_head;
    if (!iocb) {
      gpr_mu_unlock(&g_mu);
      break;
    }
    g_cbs_head = iocb->next;
    if (!g_cbs_head) g_cbs_tail = NULL;
    gpr_mu_unlock(&g_mu);
    /* if we have a mutex to drop, do so before executing work */
    if (drop_mu) {
      gpr_mu_unlock(drop_mu);
      retake_mu = drop_mu;
      drop_mu = NULL;
    }
    /* capture the managed state, as the callback may deallocate itself */
    is_cb_ext_managed = iocb->is_ext_managed;
    assert(iocb->success >= 0);
    iocb->cb(iocb->cb_arg, success && iocb->success);
    if (!is_cb_ext_managed) {
      gpr_free(iocb);
    }
    n++;
  }
  if (retake_mu) {
    gpr_mu_lock(retake_mu);
  }
  return n;
}