/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/port.h"

#ifdef GRPC_TIMER_USE_GENERIC

#include "src/core/lib/iomgr/timer.h"

#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"

#define INVALID_HEAP_INDEX 0xffffffffu

#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
#define ADD_DEADLINE_SCALE 0.33
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1
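
/* Each shard's heap only admits timers that fall within a sliding window
   beyond queue_deadline_cap; refill_queue() adapts the window width from the
   shard's time-averaged deadline delta, scaled by ADD_DEADLINE_SCALE and
   clamped to [MIN_QUEUE_WINDOW_DURATION, MAX_QUEUE_WINDOW_DURATION]
   seconds. */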

typedef struct {
  gpr_mu mu;
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines < this will be in the heap. */
  gpr_timespec queue_deadline_cap;
  gpr_timespec min_deadline;
  /* Index in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
     list have the top bit of their deadline set to 0. */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap. */
  grpc_timer list;
} shard_type;
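
/* Each shard is a two-level structure: timers due soon (deadline <
   queue_deadline_cap) live in a deadline-ordered heap, while all later timers
   sit in the unordered `list` and are migrated into the heap by
   refill_queue() as the cap advances. This keeps heap operations cheap even
   when many far-future timers exist. */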

/* Protects g_shard_queue */
static gpr_mu g_mu;
/* Allow only one run_some_expired_timers at once */
static gpr_mu g_checker_mu;
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS];
static bool g_initialized = false;

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, grpc_error *error);

static gpr_timespec compute_min_deadline(shard_type *shard) {
  return grpc_timer_heap_is_empty(&shard->heap)
             ? shard->queue_deadline_cap
             : grpc_timer_heap_top(&shard->heap)->deadline;
}

void grpc_timer_list_init(gpr_timespec now) {
  uint32_t i;

  g_initialized = true;
  gpr_mu_init(&g_mu);
  gpr_mu_init(&g_checker_mu);
  g_clock_type = now.clock_type;

  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_init(&shard->mu);
    grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
                                  0.5);
    shard->queue_deadline_cap = now;
    shard->shard_queue_index = i;
    grpc_timer_heap_init(&shard->heap);
    shard->list.next = shard->list.prev = &shard->list;
    shard->min_deadline = compute_min_deadline(shard);
    g_shard_queue[i] = shard;
  }
}
109
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700110void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
ctiller3bf466f2014-12-19 16:21:57 -0800111 int i;
Craig Tiller25dc5392016-05-10 10:53:55 -0700112 run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL,
113 GRPC_ERROR_CREATE("Timer list shutdown"));
Craig Tillera82950e2015-09-22 12:33:20 -0700114 for (i = 0; i < NUM_SHARDS; i++) {
115 shard_type *shard = &g_shards[i];
116 gpr_mu_destroy(&shard->mu);
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700117 grpc_timer_heap_destroy(&shard->heap);
Craig Tillera82950e2015-09-22 12:33:20 -0700118 }
119 gpr_mu_destroy(&g_mu);
120 gpr_mu_destroy(&g_checker_mu);
Craig Tiller3f72df92016-04-13 20:26:07 -0700121 g_initialized = false;
ctiller3bf466f2014-12-19 16:21:57 -0800122}

/* This is a cheap, but good enough, pointer hash for sharding the timers: */
static size_t shard_idx(const grpc_timer *info) {
  size_t x = (size_t)info;
  return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1);
}
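
/* The XOR of three shifted copies of the address mixes the pointer's middle
   bits (the low bits are mostly constant due to allocation alignment), and
   the final mask relies on NUM_SHARDS being a power of two. */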

static double ts_to_dbl(gpr_timespec ts) {
  return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
}

static gpr_timespec dbl_to_ts(double d) {
  gpr_timespec ts;
  ts.tv_sec = (int64_t)d;
  ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
  ts.clock_type = GPR_TIMESPAN;
  return ts;
}

static void list_join(grpc_timer *head, grpc_timer *timer) {
  timer->next = head;
  timer->prev = head->prev;
  timer->next->prev = timer->prev->next = timer;
}

static void list_remove(grpc_timer *timer) {
  timer->next->prev = timer->prev;
  timer->prev->next = timer->next;
}
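
/* shard->list is a circular doubly-linked list with a sentinel head:
   list_join() splices a timer in just before the head (i.e. at the tail),
   and list_remove() unlinks a timer without needing a reference to the
   head. */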

static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
  shard_type *temp;
  temp = g_shard_queue[first_shard_queue_index];
  g_shard_queue[first_shard_queue_index] =
      g_shard_queue[first_shard_queue_index + 1];
  g_shard_queue[first_shard_queue_index + 1] = temp;
  g_shard_queue[first_shard_queue_index]->shard_queue_index =
      first_shard_queue_index;
  g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
      first_shard_queue_index + 1;
}

static void note_deadline_change(shard_type *shard) {
  while (shard->shard_queue_index > 0 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) < 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) > 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}
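
/* This restores the invariant that g_shard_queue[] is sorted by min_deadline:
   a shard whose deadline shrank bubbles toward the front and one whose
   deadline grew bubbles toward the back, one adjacent swap at a time. Only
   one of the two loops can execute on any given call. */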

void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_closure *closure,
                     gpr_timespec now) {
  int is_first_timer = 0;
  shard_type *shard = &g_shards[shard_idx(timer)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  timer->closure = closure;
  timer->deadline = deadline;
  timer->triggered = 0;

  if (!g_initialized) {
    timer->triggered = 1;
    grpc_closure_sched(
        exec_ctx, timer->closure,
        GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
    return;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    timer->triggered = 1;
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
    return;
  }

  /* TODO(ctiller): check deadline expired */

  gpr_mu_lock(&shard->mu);
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased; we need to adjust the master queue. Note
     that there is a potential racy unlocked region here. There could be a
     reordering of multiple grpc_timer_init calls at this point, but the <
     test below should ensure that we err on the side of caution. There could
     also be a race with grpc_timer_check, which might beat us to the lock. In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed
     to trigger the new timer because the min_deadline hadn't yet been
     reduced. In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_mu);
    if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
      gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 &&
          gpr_time_cmp(deadline, old_min_deadline) < 0) {
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_mu);
  }
}
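
/* Caller-side sketch (hypothetical names; assumes the timer list was
   initialized with a GPR_CLOCK_MONOTONIC timestamp):

     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     grpc_timer_init(exec_ctx, &my_timer,
                     gpr_time_add(now, gpr_time_from_seconds(5, GPR_TIMESPAN)),
                     grpc_closure_create(on_timer, my_arg,
                                         grpc_schedule_on_exec_ctx),
                     now);

   The closure runs with GRPC_ERROR_NONE once the deadline passes, or with
   GRPC_ERROR_CANCELLED if grpc_timer_cancel() gets there first. */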

void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  if (!g_initialized) {
    /* must have already been cancelled; also, the shard mutex is invalid */
    return;
  }

  shard_type *shard = &g_shards[shard_idx(timer)];
  gpr_mu_lock(&shard->mu);
  if (!timer->triggered) {
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
    timer->triggered = 1;
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}

/* This is called when the queue is empty and "now" has reached the
   queue_deadline_cap. We compute a new queue deadline and then scan the
   overflow list for timers that fall at or under it. Returns true if the
   queue is no longer empty.
   REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_timespec now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue: */
  shard->queue_deadline_cap = gpr_time_add(
      gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
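
/* Worked example: if recent timers on this shard were set ~3 seconds out on
   average, the window is 3.0 * ADD_DEADLINE_SCALE ~= 0.99s, which sits
   inside the [0.01, 1] second clamp, so queue_deadline_cap advances by about
   a second and any listed timers due within it move into the heap. */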

/* This pops the next non-cancelled timer with deadline <= now from the
   queue, or returns NULL if there isn't one.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
  grpc_timer *timer;
  for (;;) {
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
      if (!refill_queue(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
    timer->triggered = 1;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}
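
/* As written, every path through the body returns, so the for (;;) makes at
   most one pass; repeated popping is driven by the while loop in
   pop_timers() below. */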

/* REQUIRES: shard->mu unlocked */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
                         gpr_timespec now, gpr_timespec *new_min_deadline,
                         grpc_error *error) {
  size_t n = 0;
  grpc_timer *timer;
  gpr_mu_lock(&shard->mu);
  while ((timer = pop_one(shard, now))) {
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
    n++;
  }
  *new_min_deadline = compute_min_deadline(shard);
  gpr_mu_unlock(&shard->mu);
  return n;
}

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, grpc_error *error) {
  size_t n = 0;

  /* TODO(ctiller): verify that there are any timers (atomically) here */

  if (gpr_mu_trylock(&g_checker_mu)) {
    gpr_mu_lock(&g_mu);

    while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
      gpr_timespec new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard. This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      n +=
          pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, error);

      /* A grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline. However,
         grpc_timer_init() will block on g_mu before it can update
         min_deadline, so this one will complete first, and then the timer
         add will reduce the min_deadline (perhaps unnecessarily). */
      g_shard_queue[0]->min_deadline = new_min_deadline;
      note_deadline_change(g_shard_queue[0]);
    }

    if (next) {
      *next = gpr_time_min(*next, g_shard_queue[0]->min_deadline);
    }

    gpr_mu_unlock(&g_mu);
    gpr_mu_unlock(&g_checker_mu);
  } else if (next != NULL) {
    /* TODO(ctiller): this forces calling code to do a short poll, and
       then retry the timer check (because this time through the timer list
       was contended).

       We could reduce the cost here dramatically by keeping a count of how
       many currently active pollers got through the uncontended case above
       successfully, and waking up other pollers IFF that count drops to
       zero.

       Once that count is in place, this entire else branch could disappear. */
    *next = gpr_time_min(
        *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
  }

  GRPC_ERROR_UNREF(error);

  return (int)n;
}
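
/* g_checker_mu makes this a single-checker fast path: one thread at a time
   drains expired shards, and a thread that loses the trylock is asked (via
   *next) to poll again in 1ms instead of blocking on the contended check. */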

bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                      gpr_timespec *next) {
  GPR_ASSERT(now.clock_type == g_clock_type);
  return run_some_expired_timers(
      exec_ctx, now, next,
      gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
          ? GRPC_ERROR_NONE
          : GRPC_ERROR_CREATE("Shutting down timer system"));
}
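
/* Poller-side sketch (hypothetical; the real integration lives elsewhere in
   iomgr): a polling thread checks timers before sleeping and lets *next
   bound its sleep, assuming the list was initialized with a
   GPR_CLOCK_MONOTONIC timestamp:

     gpr_timespec next = poll_deadline;  // hypothetical local
     grpc_timer_check(exec_ctx, gpr_now(GPR_CLOCK_MONOTONIC), &next);
     ... then poll no later than `next` ...
*/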

#endif /* GRPC_TIMER_USE_GENERIC */