blob: c7ce76ad93699a39fe9a091277fe307893a86d34 [file] [log] [blame]
ctiller3bf466f2014-12-19 16:21:57 -08001/*
2 *
Craig Tiller6169d5f2016-03-31 07:46:18 -07003 * Copyright 2015, Google Inc.
ctiller3bf466f2014-12-19 16:21:57 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
murgatroid999030c812016-09-16 13:25:08 -070034#include "src/core/lib/iomgr/port.h"
35
36#ifdef GRPC_TIMER_USE_GENERIC
37
Craig Tiller9533d042016-03-25 17:11:06 -070038#include "src/core/lib/iomgr/timer.h"
ctiller3bf466f2014-12-19 16:21:57 -080039
Craig Tiller2a1949e2017-03-21 09:29:12 -070040#include <grpc/support/alloc.h>
Craig Tiller6a7626c2015-07-19 22:21:41 -070041#include <grpc/support/log.h>
Craig Tiller2a1949e2017-03-21 09:29:12 -070042#include <grpc/support/string_util.h>
ctiller3bf466f2014-12-19 16:21:57 -080043#include <grpc/support/sync.h>
Craig Tiller185f6c92017-03-17 08:33:19 -070044#include <grpc/support/tls.h>
ctiller3bf466f2014-12-19 16:21:57 -080045#include <grpc/support/useful.h>
Craig Tiller2a1949e2017-03-21 09:29:12 -070046#include "src/core/lib/debug/trace.h"
Craig Tiller9533d042016-03-25 17:11:06 -070047#include "src/core/lib/iomgr/time_averaged_stats.h"
48#include "src/core/lib/iomgr/timer_heap.h"
Craig Tilleredbf2b92017-02-27 07:24:00 -080049#include "src/core/lib/support/spinlock.h"
ctiller3bf466f2014-12-19 16:21:57 -080050
/* Sentinel heap_index meaning "not in the shard's heap" (timer is on the
   overflow list instead). */
#define INVALID_HEAP_INDEX 0xffffffffu

/* Timers are hashed by pointer into 2^5 = 32 shards to reduce lock
   contention. */
#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
/* Fraction of the average timer lead-time used to grow a shard's
   queue_deadline_cap on each refill. */
#define ADD_DEADLINE_SCALE 0.33
/* Bounds (in seconds) for the computed queue refill window. */
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1

/* Trace flags, toggled via grpc_register_tracer in grpc_timer_list_init. */
int grpc_timer_trace = 0;
int grpc_timer_check_trace = 0;
Craig Tiller2a1949e2017-03-21 09:29:12 -070061
/* One shard of the timer system; a timer is assigned to a shard by hashing
   its pointer. All fields are protected by mu unless noted. */
typedef struct {
  gpr_mu mu;
  /* Running average of (deadline - now) for timers added to this shard;
     used to size the queue refill window in refill_queue(). */
  grpc_time_averaged_stats stats;
  /* Timers with deadline < queue_deadline_cap go into the heap; later
     deadlines go onto the overflow list until a refill raises the cap. */
  gpr_atm queue_deadline_cap;
  /* Cached minimum deadline of this shard (see compute_min_deadline);
     read under g_shared_mutables.mu when ordering g_shard_queue. */
  gpr_atm min_deadline;
  /* Index in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
     list have the top bit of their deadline set to 0. */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap; circular
     doubly-linked list with `list` itself as the sentinel head. */
  grpc_timer list;
} shard_type;
76
/* Globally shared mutable state, cacheline-aligned to avoid false sharing. */
struct shared_mutables {
  /* Smallest deadline across all shards; loaded/stored with no-barrier
     atomics so grpc_timer_check can early-out without taking mu. */
  gpr_atm min_timer;
  /* Allow only one run_some_expired_timers at once */
  gpr_spinlock checker_mu;
  bool initialized;
  /* Protects g_shard_queue */
  gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);

static struct shared_mutables g_shared_mutables = {
    .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false,
};
/* Clock type fixed at init; all deadlines/now values must use it. */
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_shared_mutables.mu; kept ordered by min_deadline so
   g_shard_queue[0] is always the most urgent shard. */
static shard_type *g_shard_queue[NUM_SHARDS];
/* Epoch for the gpr_atm millisecond timeline (see timespec_to_atm_*). */
static gpr_timespec g_start_time;

/* Per-thread cache of g_shared_mutables.min_timer, to avoid touching the
   shared cacheline on every grpc_timer_check. */
GPR_TLS_DECL(g_last_seen_min_timer);
ctiller3bf466f2014-12-19 16:21:57 -080096
/* Add two gpr_atm values, clamping to GPR_ATM_MAX instead of overflowing.
   NOTE(review): assumes b >= 0 (all callers pass a non-negative delta) —
   a negative b would make the guard misfire. */
static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
  if (a > GPR_ATM_MAX - b) {
    return GPR_ATM_MAX;
  }
  return a + b;
}

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
                                   gpr_atm *next, grpc_error *error);
ctiller3bf466f2014-12-19 16:21:57 -0800106
Craig Tiller7b2dd932017-03-16 16:25:12 -0700107static gpr_timespec dbl_to_ts(double d) {
108 gpr_timespec ts;
109 ts.tv_sec = (int64_t)d;
110 ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
111 ts.clock_type = GPR_TIMESPAN;
112 return ts;
113}
114
/* Convert an absolute gpr_timespec to milliseconds since g_start_time,
   rounding UP: a bias of just under one millisecond is added before the
   truncating cast so any fractional millisecond bumps the result. Clamped
   to [0, GPR_ATM_MAX]. */
static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
  ts = gpr_time_sub(ts, g_start_time);
  double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
             (double)ts.tv_nsec / GPR_NS_PER_MS +
             (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
  if (x < 0) return 0;
  if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
  return (gpr_atm)x;
}
124
/* Convert an absolute gpr_timespec to milliseconds since g_start_time,
   rounding DOWN (plain truncating cast). Clamped to [0, GPR_ATM_MAX]. */
static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
  ts = gpr_time_sub(ts, g_start_time);
  double x =
      GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
  if (x < 0) return 0;
  if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
  return (gpr_atm)x;
}
133
134static gpr_timespec atm_to_timespec(gpr_atm x) {
135 return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
136}
137
138static gpr_atm compute_min_deadline(shard_type *shard) {
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700139 return grpc_timer_heap_is_empty(&shard->heap)
Craig Tiller0b4c5312017-03-24 15:23:57 -0700140 ? saturating_add(shard->queue_deadline_cap, 1)
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700141 : grpc_timer_heap_top(&shard->heap)->deadline;
ctiller3bf466f2014-12-19 16:21:57 -0800142}
143
/* Initialize the timer system. `now` fixes both the clock type every later
   deadline must use and the epoch (g_start_time) for the millisecond
   timeline. Must be called before any grpc_timer_init/check. */
void grpc_timer_list_init(gpr_timespec now) {
  uint32_t i;

  g_shared_mutables.initialized = true;
  gpr_mu_init(&g_shared_mutables.mu);
  g_clock_type = now.clock_type;
  g_start_time = now;
  g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
  gpr_tls_init(&g_last_seen_min_timer);
  /* 0 forces the first grpc_timer_check on each thread to consult the
     shared min_timer. */
  gpr_tls_set(&g_last_seen_min_timer, 0);
  grpc_register_tracer("timer", &grpc_timer_trace);
  grpc_register_tracer("timer_check", &grpc_timer_check_trace);

  for (i = 0; i < NUM_SHARDS; i++) {
    shard_type *shard = &g_shards[i];
    gpr_mu_init(&shard->mu);
    grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
                                  0.5);
    shard->queue_deadline_cap = g_shared_mutables.min_timer;
    shard->shard_queue_index = i;
    grpc_timer_heap_init(&shard->heap);
    /* empty circular list: sentinel points at itself */
    shard->list.next = shard->list.prev = &shard->list;
    shard->min_deadline = compute_min_deadline(shard);
    g_shard_queue[i] = shard;
  }
}
170
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700171void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
ctiller3bf466f2014-12-19 16:21:57 -0800172 int i;
ncteisen4b36a3d2017-03-13 19:08:06 -0700173 run_some_expired_timers(
Craig Tillerc9000632017-03-24 14:28:48 -0700174 exec_ctx, GPR_ATM_MAX, NULL,
ncteisen4b36a3d2017-03-13 19:08:06 -0700175 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
Craig Tillera82950e2015-09-22 12:33:20 -0700176 for (i = 0; i < NUM_SHARDS; i++) {
177 shard_type *shard = &g_shards[i];
178 gpr_mu_destroy(&shard->mu);
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700179 grpc_timer_heap_destroy(&shard->heap);
Craig Tillera82950e2015-09-22 12:33:20 -0700180 }
Craig Tiller185f6c92017-03-17 08:33:19 -0700181 gpr_mu_destroy(&g_shared_mutables.mu);
182 gpr_tls_destroy(&g_last_seen_min_timer);
183 g_shared_mutables.initialized = false;
ctiller3bf466f2014-12-19 16:21:57 -0800184}
185
Craig Tillera82950e2015-09-22 12:33:20 -0700186static double ts_to_dbl(gpr_timespec ts) {
187 return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
ctiller3bf466f2014-12-19 16:21:57 -0800188}
189
/* Insert `timer` at the tail of the circular doubly-linked list whose
   sentinel head is `head` (i.e. just before head). Note: previous comment
   claimed a boolean return; this function returns void. */
static void list_join(grpc_timer *head, grpc_timer *timer) {
  timer->next = head;
  timer->prev = head->prev;
  timer->next->prev = timer->prev->next = timer;
}
196
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700197static void list_remove(grpc_timer *timer) {
198 timer->next->prev = timer->prev;
199 timer->prev->next = timer->next;
ctiller3bf466f2014-12-19 16:21:57 -0800200}
201
Craig Tiller7536af02015-12-22 13:49:30 -0800202static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
ctiller3bf466f2014-12-19 16:21:57 -0800203 shard_type *temp;
204 temp = g_shard_queue[first_shard_queue_index];
Craig Tillera82950e2015-09-22 12:33:20 -0700205 g_shard_queue[first_shard_queue_index] =
206 g_shard_queue[first_shard_queue_index + 1];
ctiller3bf466f2014-12-19 16:21:57 -0800207 g_shard_queue[first_shard_queue_index + 1] = temp;
Craig Tillera82950e2015-09-22 12:33:20 -0700208 g_shard_queue[first_shard_queue_index]->shard_queue_index =
209 first_shard_queue_index;
210 g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
211 first_shard_queue_index + 1;
ctiller3bf466f2014-12-19 16:21:57 -0800212}
213
/* After shard->min_deadline changed, bubble the shard up or down in
   g_shard_queue so the queue stays sorted by min_deadline.
   REQUIRES: g_shared_mutables.mu held. */
static void note_deadline_change(shard_type *shard) {
  /* moved earlier: bubble toward the front */
  while (shard->shard_queue_index > 0 &&
         shard->min_deadline <
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  /* moved later: bubble toward the back */
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         shard->min_deadline >
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}
226
/* Arm `timer` to run `closure` at `deadline`. Both timespecs must use the
   clock type fixed at grpc_timer_list_init. Already-expired deadlines (and
   calls before initialization) schedule the closure immediately. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_closure *closure,
                     gpr_timespec now) {
  int is_first_timer = 0;
  /* shard chosen by hashing the timer's address */
  shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  timer->closure = closure;
  /* round up so the timer never fires before its requested deadline */
  timer->deadline = timespec_to_atm_round_up(deadline);

  if (grpc_timer_trace) {
    gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
                       "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
            timer, deadline.tv_sec, deadline.tv_nsec, timer->deadline,
            now.tv_sec, now.tv_nsec, timespec_to_atm_round_down(now), closure,
            closure->cb);
  }

  if (!g_shared_mutables.initialized) {
    timer->pending = false;
    grpc_closure_sched(exec_ctx, timer->closure,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "Attempt to create timer before initialization"));
    return;
  }

  gpr_mu_lock(&shard->mu);
  timer->pending = true;
  if (gpr_time_cmp(deadline, now) <= 0) {
    /* already expired: run it now rather than queueing */
    timer->pending = false;
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
    gpr_mu_unlock(&shard->mu);
    /* early out */
    return;
  }

  /* feed the lead-time average that sizes future refill windows */
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (timer->deadline < shard->queue_deadline_cap) {
    /* near-term: goes into the heap; is_first_timer set when it becomes
       the new heap minimum */
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    /* far-term: park on the overflow list until a refill raises the cap */
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  if (grpc_timer_trace) {
    gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
                       " => is_first_timer=%s",
            (int)(shard - g_shards), shard->queue_deadline_cap,
            is_first_timer ? "true" : "false");
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased, we need to adjust the master queue. Note
     that there is a potential racy unlocked region here. There could be a
     reordering of multiple grpc_timer_init calls, at this point, but the < test
     below should ensure that we err on the side of caution. There could
     also be a race with grpc_timer_check, which might beat us to the lock. In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_shared_mutables.mu);
    if (grpc_timer_trace) {
      gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
              shard->min_deadline);
    }
    if (timer->deadline < shard->min_deadline) {
      gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = timer->deadline;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 && timer->deadline < old_min_deadline) {
        /* new global minimum: publish it and wake a poller so the shorter
           deadline is noticed promptly */
        gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, timer->deadline);
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_shared_mutables.mu);
  }
}
308
/* Called after this thread was kicked: clear the thread-local min-timer
   cache so the next grpc_timer_check re-reads the shared minimum. */
void grpc_timer_consume_kick(void) {
  /* force re-evaluation of last seen min */
  gpr_tls_set(&g_last_seen_min_timer, 0);
}
313
/* Cancel a previously armed timer. If it has not fired yet, its closure is
   scheduled with GRPC_ERROR_CANCELLED and the timer is removed from either
   the shard heap or the overflow list (heap_index tells us which). Safe to
   call on an already-fired timer: pending is false, so nothing happens. */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  if (!g_shared_mutables.initialized) {
    /* must have already been cancelled, also the shard mutex is invalid */
    return;
  }

  shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  gpr_mu_lock(&shard->mu);
  if (grpc_timer_trace) {
    gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
            timer->pending ? "true" : "false");
  }
  if (timer->pending) {
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
    timer->pending = false;
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      /* timer is on the overflow list, not the heap */
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}
337
/* This is called when the queue is empty and "now" has reached the
   queue_deadline_cap. We compute a new queue deadline and then scan the map
   for timers that fall at or under it. Returns true if the queue is no
   longer empty.
   REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_atm now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue: */
  /* deadline_delta is in seconds; the atm timeline is milliseconds, hence
     the * 1000.0. saturating_add guards against overflow near GPR_ATM_MAX. */
  shard->queue_deadline_cap =
      saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
                     (gpr_atm)(deadline_delta * 1000.0));

  if (grpc_timer_check_trace) {
    gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
            (int)(shard - g_shards), shard->queue_deadline_cap);
  }
  /* walk the overflow list, moving newly-eligible timers into the heap;
     `next` is captured first because list_remove unlinks `timer` */
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (timer->deadline < shard->queue_deadline_cap) {
      if (grpc_timer_check_trace) {
        gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
                timer->deadline);
      }
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
376
/* This pops the next non-cancelled timer with deadline <= now from the
   queue, or returns NULL if there isn't one.
   Marks the returned timer non-pending before handing it back.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
  grpc_timer *timer;
  for (;;) {
    if (grpc_timer_check_trace) {
      gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
              (int)(shard - g_shards),
              grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
    }
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      /* nothing due until the cap is reached; otherwise try to refill the
         heap from the overflow list and loop to re-check */
      if (now < shard->queue_deadline_cap) return NULL;
      if (!refill_queue(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (grpc_timer_check_trace) {
      gpr_log(GPR_DEBUG,
              " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
              timer->deadline, now);
    }
    if (timer->deadline > now) return NULL;
    if (grpc_timer_trace) {
      gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
              now - timer->deadline);
    }
    timer->pending = false;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}
408
409/* REQUIRES: shard->mu unlocked */
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700410static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
Craig Tiller7b2dd932017-03-16 16:25:12 -0700411 gpr_atm now, gpr_atm *new_min_deadline,
Craig Tillerc027e772016-05-03 16:27:00 -0700412 grpc_error *error) {
ctiller3bf466f2014-12-19 16:21:57 -0800413 size_t n = 0;
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700414 grpc_timer *timer;
Craig Tillera82950e2015-09-22 12:33:20 -0700415 gpr_mu_lock(&shard->mu);
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700416 while ((timer = pop_one(shard, now))) {
Masood Malekghassemib5b43722017-01-05 15:07:26 -0800417 grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
Craig Tillera82950e2015-09-22 12:33:20 -0700418 n++;
419 }
420 *new_min_deadline = compute_min_deadline(shard);
421 gpr_mu_unlock(&shard->mu);
ctiller3bf466f2014-12-19 16:21:57 -0800422 return n;
423}
424
/* Run every timer due at or before `now`, draining shards in min_deadline
   order. Returns the number of timers scheduled; consumes one ref on
   `error`. If `next` is non-NULL it is lowered to the earliest known
   upcoming deadline. Only one caller at a time does real work (checker_mu
   trylock); others return quickly. */
static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
                                   gpr_atm *next, grpc_error *error) {
  size_t n = 0;

  /* refresh this thread's cached minimum while we're here */
  gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
  gpr_tls_set(&g_last_seen_min_timer, min_timer);
  if (now < min_timer) {
    if (next != NULL) *next = GPR_MIN(*next, min_timer);
    return 0;
  }

  if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) {
    gpr_mu_lock(&g_shared_mutables.mu);

    if (grpc_timer_check_trace) {
      gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
              (int)(g_shard_queue[0] - g_shards),
              g_shard_queue[0]->min_deadline);
    }

    /* drain the most-urgent shard while it has anything due; the == case is
       excluded at now == GPR_ATM_MAX (shutdown sentinel) so empty shards
       whose cap saturated at GPR_ATM_MAX don't loop forever */
    while (g_shard_queue[0]->min_deadline < now ||
           (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) {
      gpr_atm new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard. This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      n +=
          pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, error);

      if (grpc_timer_check_trace) {
        gpr_log(GPR_DEBUG, " .. popped --> %" PRIdPTR
                           ", shard[%d]->min_deadline %" PRIdPTR
                           " --> %" PRIdPTR ", now=%" PRIdPTR,
                n, (int)(g_shard_queue[0] - g_shards),
                g_shard_queue[0]->min_deadline, new_min_deadline, now);
      }

      /* An grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline. However,
         grpc_timer_init() will block on the master_lock before it can call
         set_min_deadline, so this one will complete first and then the Addtimer
         will reduce the min_deadline (perhaps unnecessarily). */
      g_shard_queue[0]->min_deadline = new_min_deadline;
      note_deadline_change(g_shard_queue[0]);
    }

    if (next) {
      *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
    }

    /* publish the new global minimum for the fast-path early-out */
    gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
                             g_shard_queue[0]->min_deadline);
    gpr_mu_unlock(&g_shared_mutables.mu);
    gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
  }

  GRPC_ERROR_UNREF(error);

  return (int)n;
}
486
Craig Tiller311445f2016-02-18 07:31:39 -0800487bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
488 gpr_timespec *next) {
Craig Tiller0b4c5312017-03-24 15:23:57 -0700489 // prelude
Craig Tillera82950e2015-09-22 12:33:20 -0700490 GPR_ASSERT(now.clock_type == g_clock_type);
Craig Tiller185f6c92017-03-17 08:33:19 -0700491 gpr_atm now_atm = timespec_to_atm_round_down(now);
Craig Tiller1a769262017-03-27 12:52:59 -0700492
493 /* fetch from a thread-local first: this avoids contention on a globally
494 mutable cacheline in the common case */
495 gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer);
496 if (now_atm < min_timer) {
497 if (next != NULL) {
498 *next =
499 atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
500 }
501 if (grpc_timer_check_trace) {
502 gpr_log(GPR_DEBUG,
Craig Tiller97d40112017-03-29 16:46:13 -0700503 "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
Craig Tiller1a769262017-03-27 12:52:59 -0700504 now_atm, min_timer);
505 }
506 return 0;
507 }
508
Craig Tiller18e15062017-03-20 09:35:39 -0700509 grpc_error *shutdown_error =
Craig Tillerc027e772016-05-03 16:27:00 -0700510 gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
511 ? GRPC_ERROR_NONE
Craig Tiller0b4c5312017-03-24 15:23:57 -0700512 : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
Craig Tiller1a769262017-03-27 12:52:59 -0700513
Craig Tiller0b4c5312017-03-24 15:23:57 -0700514 // tracing
Craig Tiller2a1949e2017-03-21 09:29:12 -0700515 if (grpc_timer_check_trace) {
516 char *next_str;
517 if (next == NULL) {
518 next_str = gpr_strdup("NULL");
519 } else {
Craig Tiller0b4c5312017-03-24 15:23:57 -0700520 gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
Craig Tiller2a1949e2017-03-21 09:29:12 -0700521 next->tv_nsec, timespec_to_atm_round_down(*next));
522 }
Craig Tiller0b4c5312017-03-24 15:23:57 -0700523 gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR
Craig Tillerafb168b2017-03-21 12:48:57 -0700524 "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
525 now.tv_sec, now.tv_nsec, now_atm, next_str,
526 gpr_tls_get(&g_last_seen_min_timer),
527 gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
Craig Tiller2a1949e2017-03-21 09:29:12 -0700528 gpr_free(next_str);
529 }
Craig Tiller0b4c5312017-03-24 15:23:57 -0700530 // actual code
Craig Tiller99c718b2017-03-20 20:03:52 -0700531 bool r;
Craig Tiller2a1949e2017-03-21 09:29:12 -0700532 gpr_atm next_atm;
Craig Tiller18e15062017-03-20 09:35:39 -0700533 if (next == NULL) {
534 r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error);
535 } else {
Craig Tiller2a1949e2017-03-21 09:29:12 -0700536 next_atm = timespec_to_atm_round_down(*next);
Craig Tiller18e15062017-03-20 09:35:39 -0700537 r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error);
538 *next = atm_to_timespec(next_atm);
539 }
Craig Tiller0b4c5312017-03-24 15:23:57 -0700540 // tracing
Craig Tiller2a1949e2017-03-21 09:29:12 -0700541 if (grpc_timer_check_trace) {
542 char *next_str;
543 if (next == NULL) {
544 next_str = gpr_strdup("NULL");
545 } else {
Craig Tiller0b4c5312017-03-24 15:23:57 -0700546 gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
Craig Tiller2a1949e2017-03-21 09:29:12 -0700547 next->tv_nsec, next_atm);
548 }
549 gpr_log(GPR_DEBUG, "TIMER CHECK END: %d timers triggered; next=%s", r,
550 next_str);
551 gpr_free(next_str);
552 }
553 return r > 0;
ctiller3bf466f2014-12-19 16:21:57 -0800554}
murgatroid999030c812016-09-16 13:25:08 -0700555
556#endif /* GRPC_TIMER_USE_GENERIC */