/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/timer.h"

#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"

#define INVALID_HEAP_INDEX 0xffffffffu

#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1

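/* Design note (descriptive): timers are spread across NUM_SHARDS shards keyed
   by a hash of the timer pointer, so concurrent grpc_timer_init /
   grpc_timer_cancel calls mostly contend on per-shard locks rather than a
   global one. Each shard keeps soon-to-expire timers in a heap and later ones
   in an overflow list; g_shard_queue keeps the shards ordered by their minimum
   deadline so expiry checks only need to look at the front. */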
typedef struct {
  gpr_mu mu;
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines < this will be in the heap. */
  gpr_timespec queue_deadline_cap;
  gpr_timespec min_deadline;
  /* Index in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
     list have the top bit of their deadline set to 0. */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap. */
  grpc_timer list;
} shard_type;

/* Protects g_shard_queue */
static gpr_mu g_mu;
/* Allow only one run_some_expired_timers at once */
static gpr_mu g_checker_mu;
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, int success);

static gpr_timespec compute_min_deadline(shard_type *shard) {
  return grpc_timer_heap_is_empty(&shard->heap)
             ? shard->queue_deadline_cap
             : grpc_timer_heap_top(&shard->heap)->deadline;
}

void grpc_timer_list_init(gpr_timespec now) {
  uint32_t i;

  gpr_mu_init(&g_mu);
  gpr_mu_init(&g_checker_mu);
  g_clock_type = now.clock_type;

  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_init(&shard->mu);
    grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
                                  0.5);
    shard->queue_deadline_cap = now;
    shard->shard_queue_index = i;
    grpc_timer_heap_init(&shard->heap);
    shard->list.next = shard->list.prev = &shard->list;
    shard->min_deadline = compute_min_deadline(shard);
    g_shard_queue[i] = shard;
  }
}

void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
  int i;
  run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL, 0);
  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_destroy(&shard->mu);
    grpc_timer_heap_destroy(&shard->heap);
  }
  gpr_mu_destroy(&g_mu);
  gpr_mu_destroy(&g_checker_mu);
}

/* This is a cheap, but good enough, pointer hash for sharding timers: */
static size_t shard_idx(const grpc_timer *info) {
  size_t x = (size_t)info;
  return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
}

static double ts_to_dbl(gpr_timespec ts) {
  return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}

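/* Descriptive note: in this file dbl_to_ts is only used to build queue window
   widths, which are clamped to [MIN_QUEUE_WINDOW_DURATION,
   MAX_QUEUE_WINDOW_DURATION], so d is always small and non-negative here. */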
static gpr_timespec dbl_to_ts(double d) {
  gpr_timespec ts;
  ts.tv_sec = (int64_t)d;
  ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
  ts.clock_type = GPR_TIMESPAN;
  return ts;
}

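/* Descriptive note: the overflow list is an intrusive, circular
   doubly-linked list with shard->list acting as the sentinel; list_join
   inserts timer immediately before head, i.e. at the tail. */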
static void list_join(grpc_timer *head, grpc_timer *timer) {
  timer->next = head;
  timer->prev = head->prev;
  timer->next->prev = timer->prev->next = timer;
}

static void list_remove(grpc_timer *timer) {
  timer->next->prev = timer->prev;
  timer->prev->next = timer->next;
}

static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
  shard_type *temp;
  temp = g_shard_queue[first_shard_queue_index];
  g_shard_queue[first_shard_queue_index] =
      g_shard_queue[first_shard_queue_index + 1];
  g_shard_queue[first_shard_queue_index + 1] = temp;
  g_shard_queue[first_shard_queue_index]->shard_queue_index =
      first_shard_queue_index;
  g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
      first_shard_queue_index + 1;
}

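/* Descriptive note: restores the min_deadline ordering of g_shard_queue after
   a shard's min_deadline has changed, bubbling the shard toward the front or
   back with adjacent swaps; cost is proportional to how far the shard moves. */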
static void note_deadline_change(shard_type *shard) {
  while (shard->shard_queue_index > 0 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) < 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) > 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}

void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_iomgr_cb_func timer_cb,
                     void *timer_cb_arg, gpr_timespec now) {
  int is_first_timer = 0;
  shard_type *shard = &g_shards[shard_idx(timer)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
  timer->deadline = deadline;
  timer->triggered = 0;

  /* TODO(ctiller): check deadline expired */

  gpr_mu_lock(&shard->mu);
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased; we need to adjust the master queue. Note
     that there is a potential racy unlocked region here. There could be a
     reordering of multiple grpc_timer_init calls at this point, but the < test
     below should ensure that we err on the side of caution. There could
     also be a race with grpc_timer_check, which might beat us to the lock. In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_mu);
    if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
      gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 &&
          gpr_time_cmp(deadline, old_min_deadline) < 0) {
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_mu);
  }
}

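/* Hypothetical usage sketch (illustrative only, not part of this file; the
   callback signature is assumed from grpc_iomgr_cb_func, and the clock type
   is assumed to match the one given to grpc_timer_list_init):

     static void on_timer(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
       // success is false if the timer was cancelled or the list shut down.
     }

     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     grpc_timer t;
     grpc_timer_init(exec_ctx, &t,
                     gpr_time_add(now, gpr_time_from_seconds(5, GPR_TIMESPAN)),
                     on_timer, NULL, now);
*/
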
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  shard_type *shard = &g_shards[shard_idx(timer)];
  gpr_mu_lock(&shard->mu);
  if (!timer->triggered) {
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, false, NULL);
    timer->triggered = 1;
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}

/* This is called when the queue is empty and "now" has reached the
   queue_deadline_cap. We compute a new queue deadline and then scan the
   overflow list for timers that fall at or under it. Returns true if the
   queue is no longer empty.
   REQUIRES: shard->mu locked */
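/* Worked example (illustrative numbers): if timers are typically added ~0.3s
   before their deadline, the running average is ~0.3 and the computed window
   is 0.3 * ADD_DEADLINE_SCALE ~= 0.1s; the GPR_CLAMP below bounds
   pathological averages to [0.01s, 1s]. */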
static int refill_queue(shard_type *shard, gpr_timespec now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue: */
  shard->queue_deadline_cap = gpr_time_add(
      gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}

/* This pops the next non-cancelled timer with deadline <= now from the queue,
   or returns NULL if there isn't one.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
  grpc_timer *timer;
  for (;;) {
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
      if (!refill_queue(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
    timer->triggered = 1;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}

/* REQUIRES: shard->mu unlocked */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
                         gpr_timespec now, gpr_timespec *new_min_deadline,
                         int success) {
  size_t n = 0;
  grpc_timer *timer;
  gpr_mu_lock(&shard->mu);
  while ((timer = pop_one(shard, now))) {
    grpc_exec_ctx_enqueue(exec_ctx, &timer->closure, success, NULL);
    n++;
  }
  *new_min_deadline = compute_min_deadline(shard);
  gpr_mu_unlock(&shard->mu);
  return n;
}

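/* Descriptive note: only one checker drains expired timers at a time. The
   trylock on g_checker_mu keeps concurrent pollers from piling up on the
   shard locks; a loser simply reports a ~1ms retry hint through *next instead
   of blocking. Shards are drained from the front of g_shard_queue for as long
   as the smallest min_deadline is in the past. */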
static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, int success) {
  size_t n = 0;

  /* TODO(ctiller): verify that there are any timers (atomically) here */

  if (gpr_mu_trylock(&g_checker_mu)) {
    gpr_mu_lock(&g_mu);

    while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
      gpr_timespec new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard. This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      n += pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
                      success);

      /* A grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline. However,
         grpc_timer_init() will block on g_mu before it can update
         min_deadline, so this one will complete first, and then the timer add
         will reduce the min_deadline (perhaps unnecessarily). */
      g_shard_queue[0]->min_deadline = new_min_deadline;
      note_deadline_change(g_shard_queue[0]);
    }

    if (next) {
      *next = gpr_time_min(*next, g_shard_queue[0]->min_deadline);
    }

    gpr_mu_unlock(&g_mu);
    gpr_mu_unlock(&g_checker_mu);
  } else if (next != NULL) {
    /* TODO(ctiller): this forces calling code to do a short poll, and
       then retry the timer check (because this time through the timer list
       was contended).

       We could reduce the cost here dramatically by keeping a count of how
       many currently active pollers got through the uncontended case above
       successfully, and waking up other pollers IFF that count drops to zero.

       Once that count is in place, this entire else branch could disappear. */
    *next = gpr_time_min(
        *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
  }

  return (int)n;
}

bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                      gpr_timespec *next) {
  GPR_ASSERT(now.clock_type == g_clock_type);
  return run_some_expired_timers(
      exec_ctx, now, next,
      gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
}
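
/* Hypothetical poller-loop sketch (illustrative only; real callers live in
   the pollset implementations, and this assumes the timer list was
   initialized with a GPR_CLOCK_MONOTONIC timestamp):

     gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC);
     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     if (!grpc_timer_check(exec_ctx, now, &next)) {
       // Nothing fired; 'next' now bounds how long the caller may block
       // in poll() before a timer could become due.
     }

   Passing gpr_inf_future as 'now' (as grpc_timer_list_shutdown does via
   run_some_expired_timers) fires every remaining timer with success ==
   false. */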