/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/timer.h"

#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"

#define INVALID_HEAP_INDEX 0xffffffffu

#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1

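/* Design overview (descriptive note, derived from the code below): timers are
   hashed by pointer into one of NUM_SHARDS shards.  Each shard keeps timers
   whose deadline falls before its queue_deadline_cap in a heap, and all later
   timers in an unordered doubly-linked list; refill_queue advances the cap
   lazily, sizing the window from a time-averaged estimate of how far out new
   deadlines tend to be (scaled by ADD_DEADLINE_SCALE and clamped between
   MIN_QUEUE_WINDOW_DURATION and MAX_QUEUE_WINDOW_DURATION).  g_shard_queue
   keeps the shards ordered by min_deadline, so grpc_timer_check only has to
   inspect g_shard_queue[0]. */
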
typedef struct {
  gpr_mu mu;
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines <= this will be in the heap. */
  gpr_timespec queue_deadline_cap;
  gpr_timespec min_deadline;
  /* Index in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
     list have the top bit of their deadline set to 0. */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap. */
  grpc_timer list;
} shard_type;

/* Protects g_shard_queue */
static gpr_mu g_mu;
/* Allow only one run_some_expired_timers at once */
static gpr_mu g_checker_mu;
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];
static bool g_initialized = false;

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, int success);

static gpr_timespec compute_min_deadline(shard_type *shard) {
  return grpc_timer_heap_is_empty(&shard->heap)
             ? shard->queue_deadline_cap
             : grpc_timer_heap_top(&shard->heap)->deadline;
}

void grpc_timer_list_init(gpr_timespec now) {
  uint32_t i;

  g_initialized = true;
  gpr_mu_init(&g_mu);
  gpr_mu_init(&g_checker_mu);
  g_clock_type = now.clock_type;

  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_init(&shard->mu);
    grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
                                  0.5);
    shard->queue_deadline_cap = now;
    shard->shard_queue_index = i;
    grpc_timer_heap_init(&shard->heap);
    shard->list.next = shard->list.prev = &shard->list;
    shard->min_deadline = compute_min_deadline(shard);
    g_shard_queue[i] = shard;
  }
}

void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
  int i;
  run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL, 0);
  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_destroy(&shard->mu);
    grpc_timer_heap_destroy(&shard->heap);
  }
  gpr_mu_destroy(&g_mu);
  gpr_mu_destroy(&g_checker_mu);
  g_initialized = false;
}

/* This is a cheap, but good enough, pointer hash for sharding the timers: */
static size_t shard_idx(const grpc_timer *info) {
  size_t x = (size_t)info;
  return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
}
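
/* Worked example (illustrative): for a pointer value x = 0x2460,
   x >> 4 = 0x246, x >> 9 = 0x12, x >> 14 = 0x0; xoring these gives 0x254, and
   0x254 & (NUM_SHARDS - 1) = 0x254 & 0x1f = 20, so that timer lands in
   shard 20.  Shifting before mixing discards low-order bits that are
   typically constant due to allocation alignment. */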

static double ts_to_dbl(gpr_timespec ts) {
  return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}

static gpr_timespec dbl_to_ts(double d) {
  gpr_timespec ts;
  ts.tv_sec = (int64_t)d;
  ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
  ts.clock_type = GPR_TIMESPAN;
  return ts;
}

static void list_join(grpc_timer *head, grpc_timer *timer) {
  timer->next = head;
  timer->prev = head->prev;
  timer->next->prev = timer->prev->next = timer;
}

static void list_remove(grpc_timer *timer) {
  timer->next->prev = timer->prev;
  timer->prev->next = timer->next;
}

static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
  shard_type *temp;
  temp = g_shard_queue[first_shard_queue_index];
  g_shard_queue[first_shard_queue_index] =
      g_shard_queue[first_shard_queue_index + 1];
  g_shard_queue[first_shard_queue_index + 1] = temp;
  g_shard_queue[first_shard_queue_index]->shard_queue_index =
      first_shard_queue_index;
  g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
      first_shard_queue_index + 1;
}

static void note_deadline_change(shard_type *shard) {
  while (shard->shard_queue_index > 0 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) < 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) > 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}

void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
                     void *timer_cb_arg, gpr_timespec now) {
  int is_first_timer = 0;
  shard_type *shard = &g_shards[shard_idx(timer)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
  timer->deadline = deadline;
  timer->triggered = 0;

  if (!g_initialized) {
    timer->triggered = 1;
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
    return;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    timer->triggered = 1;
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, true, NULL);
    return;
  }

  /* TODO(ctiller): check deadline expired */

  gpr_mu_lock(&shard->mu);
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased; we need to adjust the master queue.  Note
     that there is a potential racy unlocked region here.  There could be a
     reordering of multiple grpc_timer_init calls at this point, but the < test
     below should ensure that we err on the side of caution.  There could
     also be a race with grpc_timer_check, which might beat us to the lock.  In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_mu);
    if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
      gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 &&
          gpr_time_cmp(deadline, old_min_deadline) < 0) {
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_mu);
  }
}

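/* Illustrative usage sketch (not part of the original file): arming a 100ms
   timer against this API.  The callback signature and the use of a monotonic
   clock for the deadline are assumptions based on how grpc_iomgr_cb_func and
   grpc_timer_list_init are used elsewhere in iomgr; names such as my_timer_cb
   and arm_timer_example are hypothetical. */
#if 0
static grpc_timer g_example_timer;

static void my_timer_cb(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  /* success is true when the deadline was reached, false when the timer was
     cancelled or the timer list was shut down. */
}

static void arm_timer_example(grpc_exec_ctx *exec_ctx) {
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  grpc_timer_init(exec_ctx, &g_example_timer,
                  gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN)),
                  my_timer_cb, NULL, now);
}
#endif
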
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  shard_type *shard = &g_shards[shard_idx(timer)];
  gpr_mu_lock(&shard->mu);
  if (!timer->triggered) {
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
    timer->triggered = 1;
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}

/* This is called when the queue is empty and "now" has reached the
   queue_deadline_cap.  We compute a new queue deadline and then scan the
   shard's timer list for timers that fall under it.  Returns true if the
   queue is no longer empty.
   REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_timespec now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue: */
  shard->queue_deadline_cap = gpr_time_add(
      gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
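
/* Worked example (illustrative): if the averaged sample of "deadline - now"
   for recently added timers is 0.3s, the new window is
   GPR_CLAMP(0.3 * ADD_DEADLINE_SCALE, 0.01, 1) = 0.099s, so the cap moves
   roughly 99ms past max(now, old cap) and any listed timers that fall under
   the new cap are promoted into the heap. */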

/* This pops the next non-cancelled timer with deadline <= now from the queue,
   or returns NULL if there isn't one.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
  grpc_timer *timer;
  for (;;) {
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
      if (!refill_queue(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
    timer->triggered = 1;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}

/* REQUIRES: shard->mu unlocked */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
                         gpr_timespec now, gpr_timespec *new_min_deadline,
                         int success) {
  size_t n = 0;
  grpc_timer *timer;
  gpr_mu_lock(&shard->mu);
  while ((timer = pop_one(shard, now))) {
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, success, NULL);
    n++;
  }
  *new_min_deadline = compute_min_deadline(shard);
  gpr_mu_unlock(&shard->mu);
  return n;
}

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, int success) {
  size_t n = 0;

  /* TODO(ctiller): verify that there are any timers (atomically) here */

  if (gpr_mu_trylock(&g_checker_mu)) {
    gpr_mu_lock(&g_mu);

    while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
      gpr_timespec new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard.  This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      n += pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
                      success);

      /* A grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline.  However,
         grpc_timer_init() will block on g_mu before it can update the shard's
         min_deadline, so this run will complete first and then that
         grpc_timer_init() will reduce the min_deadline (perhaps
         unnecessarily). */
338 g_shard_queue[0]->min_deadline = new_min_deadline;
339 note_deadline_change(g_shard_queue[0]);
ctiller3bf466f2014-12-19 16:21:57 -0800340 }
341
Craig Tillera82950e2015-09-22 12:33:20 -0700342 if (next) {
343 *next = gpr_time_min(*next, g_shard_queue[0]->min_deadline);
344 }
345
346 gpr_mu_unlock(&g_mu);
347 gpr_mu_unlock(&g_checker_mu);
Craig Tiller9fb89ea2016-03-11 20:12:45 -0800348 } else if (next != NULL) {
    /* TODO(ctiller): this forces calling code to do a short poll, and
       then retry the timer check (because this time through the timer list was
       contended).

       We could reduce the cost here dramatically by keeping a count of how many
       currently active pollers got through the uncontended case above
       successfully, and waking up other pollers IFF that count drops to zero.

       Once that count is in place, this entire else branch could disappear. */
    *next = gpr_time_min(
        *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
  }

  return (int)n;
}

bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                      gpr_timespec *next) {
  GPR_ASSERT(now.clock_type == g_clock_type);
  return run_some_expired_timers(
      exec_ctx, now, next,
      gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
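
/* Illustrative caller sketch (not part of the original file): a poller loop
   would typically pass its intended wakeup time as *next; expired timer
   closures are enqueued on exec_ctx, and *next is pulled in so the following
   poll wakes no later than the earliest pending timer.  The helper name and
   surrounding flow are hypothetical. */
#if 0
static void poll_iteration_example(grpc_exec_ctx *exec_ctx,
                                   gpr_timespec poll_deadline) {
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  gpr_timespec next = poll_deadline;
  if (grpc_timer_check(exec_ctx, now, &next)) {
    /* Some timers fired; their closures run when the exec_ctx is flushed. */
    grpc_exec_ctx_flush(exec_ctx);
  }
  /* ... poll for I/O with a timeout derived from `next` ... */
}
#endif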