/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/iomgr/iomgr.h"

#include <stdlib.h>

#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/alarm_internal.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/sync.h>

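/* A single entry in the FIFO list of callbacks waiting to be executed.
   The 'success' flag is passed through to the callback when it runs. */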
typedef struct delayed_callback {
  grpc_iomgr_cb_func cb;
  void *cb_arg;
  int success;
  struct delayed_callback *next;
} delayed_callback;

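/* g_mu guards the callback list, the reference count and the shutdown flag.
   g_cv is signalled when callbacks are queued; g_rcv is signalled when
   g_refs drops to zero so that shutdown can make progress. */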
static gpr_mu g_mu;
static gpr_cv g_cv;
static gpr_cv g_rcv;
static delayed_callback *g_cbs_head = NULL;
static delayed_callback *g_cbs_tail = NULL;
static int g_shutdown;
static int g_refs;
static gpr_event g_background_callback_executor_done;

/* Execute followup callbacks continuously.
   Other threads may check in and help during pollset_work() */
static void background_callback_executor(void *ignored) {
  gpr_mu_lock(&g_mu);
  while (!g_shutdown) {
    gpr_timespec deadline = gpr_inf_future;
    if (g_cbs_head) {
      delayed_callback *cb = g_cbs_head;
      g_cbs_head = cb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      cb->cb(cb->cb_arg, cb->success);
      gpr_free(cb);
      gpr_mu_lock(&g_mu);
    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
    } else {
      gpr_cv_wait(&g_cv, &g_mu, deadline);
    }
  }
  gpr_mu_unlock(&g_mu);
  gpr_event_set(&g_background_callback_executor_done, (void *)1);
}

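/* Wake every thread waiting on g_cv (including the background executor)
   so it re-checks for queued callbacks, expired alarms, or shutdown. */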
void grpc_kick_poller(void) { gpr_cv_broadcast(&g_cv); }

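/* Initialize the synchronization primitives, the alarm list and the
   platform-specific pollers, then start the background executor thread. */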
void grpc_iomgr_init(void) {
  gpr_thd_id id;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_cv);
  gpr_cv_init(&g_rcv);
  grpc_alarm_list_init(gpr_now());
  g_refs = 0;
  grpc_iomgr_platform_init();
  gpr_event_init(&g_background_callback_executor_done);
  gpr_thd_new(&id, background_callback_executor, NULL, NULL);
}

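/* Flush any remaining callbacks (with success == 0), wait up to ten seconds
   for outstanding iomgr object references to be released, then stop the
   background executor and tear down all global state. */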
void grpc_iomgr_shutdown(void) {
  delayed_callback *cb;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_refs) {
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", g_refs,
            g_cbs_head ? " and executing final callbacks" : "");
    while (g_cbs_head) {
      cb = g_cbs_head;
      g_cbs_head = cb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);

      cb->cb(cb->cb_arg, 0);
      gpr_free(cb);
      gpr_mu_lock(&g_mu);
    }
    if (g_refs) {
      if (gpr_cv_wait(&g_rcv, &g_mu, shutdown_deadline) && g_cbs_head == NULL) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                g_refs);
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_cv);
  gpr_cv_destroy(&g_rcv);
}

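/* Take a reference for a live iomgr object; shutdown blocks until all such
   references have been released. */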
void grpc_iomgr_ref(void) {
  gpr_mu_lock(&g_mu);
  ++g_refs;
  gpr_mu_unlock(&g_mu);
}

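/* Drop a reference; signalling g_rcv when the count reaches zero lets a
   pending grpc_iomgr_shutdown() proceed. */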
void grpc_iomgr_unref(void) {
  gpr_mu_lock(&g_mu);
  if (0 == --g_refs) {
    gpr_cv_signal(&g_rcv);
  }
  gpr_mu_unlock(&g_mu);
}

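/* Append a callback (with an explicit success flag) to the tail of the
   queue and wake one thread waiting for work. */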
void grpc_iomgr_add_delayed_callback(grpc_iomgr_cb_func cb, void *cb_arg,
                                     int success) {
  delayed_callback *dcb = gpr_malloc(sizeof(delayed_callback));
  dcb->cb = cb;
  dcb->cb_arg = cb_arg;
  dcb->success = success;
  gpr_mu_lock(&g_mu);
  dcb->next = NULL;
  if (!g_cbs_tail) {
    g_cbs_head = g_cbs_tail = dcb;
  } else {
    g_cbs_tail->next = dcb;
    g_cbs_tail = dcb;
  }
  gpr_cv_signal(&g_cv);
  gpr_mu_unlock(&g_mu);
}

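/* Schedule a callback that will be invoked with success == 1. */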
void grpc_iomgr_add_callback(grpc_iomgr_cb_func cb, void *cb_arg) {
  grpc_iomgr_add_delayed_callback(cb, cb_arg, 1);
}

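/* Opportunistically drain queued callbacks on the calling thread. If
   drop_mu is supplied it is released before any callback runs and
   re-acquired before returning. Returns the number of callbacks executed;
   bails out immediately if g_mu is contended. */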
int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) {
  int n = 0;
  gpr_mu *retake_mu = NULL;
  delayed_callback *cb;
  for (;;) {
    /* check for new work */
    if (!gpr_mu_trylock(&g_mu)) {
      break;
    }
    cb = g_cbs_head;
    if (!cb) {
      gpr_mu_unlock(&g_mu);
      break;
    }
    g_cbs_head = cb->next;
    if (!g_cbs_head) g_cbs_tail = NULL;
    gpr_mu_unlock(&g_mu);
    /* if we have a mutex to drop, do so before executing work */
    if (drop_mu) {
      gpr_mu_unlock(drop_mu);
      retake_mu = drop_mu;
      drop_mu = NULL;
    }
    cb->cb(cb->cb_arg, success && cb->success);
    gpr_free(cb);
    n++;
  }
  if (retake_mu) {
    gpr_mu_lock(retake_mu);
  }
  return n;
}