blob: c5955c869c2226ac56262291d629c88c20d3ebf1 [file] [log] [blame]
ctiller3bf466f2014-12-19 16:21:57 -08001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2015 gRPC authors.
ctiller3bf466f2014-12-19 16:21:57 -08004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
ctiller3bf466f2014-12-19 16:21:57 -08008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
ctiller3bf466f2014-12-19 16:21:57 -080010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
ctiller3bf466f2014-12-19 16:21:57 -080016 *
17 */
18
murgatroid999030c812016-09-16 13:25:08 -070019#include "src/core/lib/iomgr/port.h"
20
21#ifdef GRPC_TIMER_USE_GENERIC
22
Craig Tiller9533d042016-03-25 17:11:06 -070023#include "src/core/lib/iomgr/timer.h"
ctiller3bf466f2014-12-19 16:21:57 -080024
Craig Tiller2a1949e2017-03-21 09:29:12 -070025#include <grpc/support/alloc.h>
Craig Tiller6a7626c2015-07-19 22:21:41 -070026#include <grpc/support/log.h>
Craig Tiller2a1949e2017-03-21 09:29:12 -070027#include <grpc/support/string_util.h>
ctiller3bf466f2014-12-19 16:21:57 -080028#include <grpc/support/sync.h>
Craig Tiller185f6c92017-03-17 08:33:19 -070029#include <grpc/support/tls.h>
ctiller3bf466f2014-12-19 16:21:57 -080030#include <grpc/support/useful.h>
Craig Tiller2a1949e2017-03-21 09:29:12 -070031#include "src/core/lib/debug/trace.h"
Craig Tiller9533d042016-03-25 17:11:06 -070032#include "src/core/lib/iomgr/time_averaged_stats.h"
33#include "src/core/lib/iomgr/timer_heap.h"
Craig Tilleredbf2b92017-02-27 07:24:00 -080034#include "src/core/lib/support/spinlock.h"
ctiller3bf466f2014-12-19 16:21:57 -080035
36#define INVALID_HEAP_INDEX 0xffffffffu
37
38#define LOG2_NUM_SHARDS 5
39#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
ctiller3bf466f2014-12-19 16:21:57 -080040#define ADD_DEADLINE_SCALE 0.33
41#define MIN_QUEUE_WINDOW_DURATION 0.01
42#define MAX_QUEUE_WINDOW_DURATION 1
43
/* Tracer flags for this module; registered in grpc_timer_list_init and
   toggled externally (e.g. via environment configuration handled by the
   tracer registry). "timer" logs timer set/cancel/fire events; "timer_check"
   logs the periodic expiry-scan machinery. */
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
grpc_tracer_flag grpc_timer_check_trace =
    GRPC_TRACER_INITIALIZER(false, "timer_check");
Craig Tiller2a1949e2017-03-21 09:29:12 -070047
/* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
 * deadlines earlier than 'queue_deadline_cap' are maintained in the heap and
 * others are maintained in the list (unordered). This helps to keep the number
 * of elements in the heap low.
 *
 * The 'queue_deadline_cap' gets recomputed periodically based on the timer
 * stats maintained in 'stats' and the relevant timers are then moved from the
 * 'list' to 'heap'
 */
typedef struct {
  /* Protects all fields of this shard. */
  gpr_mu mu;
  /* Time-averaged statistics of timer deadlines, used to size the heap
     window (see refill_heap). */
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines <= this will be in the heap. */
  gpr_atm queue_deadline_cap;
  /* The deadline of the next timer due in this shard */
  gpr_atm min_deadline;
  /* Index of this timer_shard in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
     list have the top bit of their deadline set to 0. */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap.
     The 'list' field itself is the dummy head of an intrusive doubly-linked
     circular list. */
  grpc_timer list;
} timer_shard;
72
/* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
 * is hashed to select the timer shard to add the timer to */
static timer_shard g_shards[NUM_SHARDS];

/* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
 * the deadline of the next timer in each shard).
 * Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];

/* Thread local variable that stores the deadline of the next timer the thread
 * has last-seen. This is an optimization to prevent the thread from checking
 * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
 * an expensive operation) */
GPR_TLS_DECL(g_last_seen_min_timer);
ctiller3bf466f2014-12-19 16:21:57 -080087
/* State shared by all shards; cacheline-aligned to avoid false sharing with
   neighboring globals. */
struct shared_mutables {
  /* The deadline of the next timer due across all timer shards */
  gpr_atm min_timer;
  /* Allow only one run_some_expired_timers at once */
  gpr_spinlock checker_mu;
  bool initialized;
  /* Protects g_shard_queue (and the shared_mutables struct itself) */
  gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);

static struct shared_mutables g_shared_mutables = {
    0,                               /* min_timer */
    GPR_SPINLOCK_STATIC_INITIALIZER, /* checker_mu */
    false,                           /* initialized */
    {{0}}                            /* mu; fully initialized in list_init */
};

/* Clock type all deadlines must use; fixed at grpc_timer_list_init time. */
static gpr_clock_type g_clock_type;
/* Epoch for the gpr_atm millisecond timeline (atm 0 == g_start_time). */
static gpr_timespec g_start_time;
ctiller3bf466f2014-12-19 16:21:57 -0800107
Craig Tillerbd0af4f2017-03-20 08:33:02 -0700108static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
109 if (a > GPR_ATM_MAX - b) {
110 return GPR_ATM_MAX;
111 }
112 return a + b;
113}
114
/* Forward declaration: scans shards and schedules closures for every timer
   whose deadline is <= now; defined below. */
static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
                                                       gpr_atm now,
                                                       gpr_atm *next,
                                                       grpc_error *error);
ctiller3bf466f2014-12-19 16:21:57 -0800119
Craig Tiller7b2dd932017-03-16 16:25:12 -0700120static gpr_timespec dbl_to_ts(double d) {
121 gpr_timespec ts;
122 ts.tv_sec = (int64_t)d;
123 ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec));
124 ts.clock_type = GPR_TIMESPAN;
125 return ts;
126}
127
Craig Tiller185f6c92017-03-17 08:33:19 -0700128static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
Craig Tiller2a1949e2017-03-21 09:29:12 -0700129 ts = gpr_time_sub(ts, g_start_time);
Craig Tiller185f6c92017-03-17 08:33:19 -0700130 double x = GPR_MS_PER_SEC * (double)ts.tv_sec +
Craig Tiller883243a2017-03-24 16:08:08 -0700131 (double)ts.tv_nsec / GPR_NS_PER_MS +
132 (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC;
Craig Tiller185f6c92017-03-17 08:33:19 -0700133 if (x < 0) return 0;
134 if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
135 return (gpr_atm)x;
136}
137
138static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
Craig Tiller2a1949e2017-03-21 09:29:12 -0700139 ts = gpr_time_sub(ts, g_start_time);
Craig Tiller185f6c92017-03-17 08:33:19 -0700140 double x =
141 GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS;
Craig Tiller7b2dd932017-03-16 16:25:12 -0700142 if (x < 0) return 0;
143 if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
144 return (gpr_atm)x;
145}
146
147static gpr_timespec atm_to_timespec(gpr_atm x) {
148 return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0));
149}
150
Sree Kuchibhotlabd3ea982017-07-07 00:31:11 -0700151static gpr_atm compute_min_deadline(timer_shard *shard) {
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700152 return grpc_timer_heap_is_empty(&shard->heap)
Craig Tiller0b4c5312017-03-24 15:23:57 -0700153 ? saturating_add(shard->queue_deadline_cap, 1)
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700154 : grpc_timer_heap_top(&shard->heap)->deadline;
ctiller3bf466f2014-12-19 16:21:57 -0800155}
156
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700157void grpc_timer_list_init(gpr_timespec now) {
Craig Tiller7536af02015-12-22 13:49:30 -0800158 uint32_t i;
ctiller3bf466f2014-12-19 16:21:57 -0800159
Craig Tiller185f6c92017-03-17 08:33:19 -0700160 g_shared_mutables.initialized = true;
161 gpr_mu_init(&g_shared_mutables.mu);
Craig Tiller6a7626c2015-07-19 22:21:41 -0700162 g_clock_type = now.clock_type;
Craig Tiller7b2dd932017-03-16 16:25:12 -0700163 g_start_time = now;
Craig Tiller185f6c92017-03-17 08:33:19 -0700164 g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
165 gpr_tls_init(&g_last_seen_min_timer);
Craig Tiller4e4647a2017-03-21 08:44:43 -0700166 gpr_tls_set(&g_last_seen_min_timer, 0);
ncteisen06bce6e2017-07-10 07:58:49 -0700167 grpc_register_tracer(&grpc_timer_trace);
168 grpc_register_tracer(&grpc_timer_check_trace);
ctiller3bf466f2014-12-19 16:21:57 -0800169
Craig Tillera82950e2015-09-22 12:33:20 -0700170 for (i = 0; i < NUM_SHARDS; i++) {
Sree Kuchibhotlabd3ea982017-07-07 00:31:11 -0700171 timer_shard *shard = &g_shards[i];
Craig Tillera82950e2015-09-22 12:33:20 -0700172 gpr_mu_init(&shard->mu);
173 grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
174 0.5);
Craig Tiller185f6c92017-03-17 08:33:19 -0700175 shard->queue_deadline_cap = g_shared_mutables.min_timer;
Craig Tillera82950e2015-09-22 12:33:20 -0700176 shard->shard_queue_index = i;
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700177 grpc_timer_heap_init(&shard->heap);
Craig Tillera82950e2015-09-22 12:33:20 -0700178 shard->list.next = shard->list.prev = &shard->list;
179 shard->min_deadline = compute_min_deadline(shard);
180 g_shard_queue[i] = shard;
181 }
ctiller3bf466f2014-12-19 16:21:57 -0800182}
183
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700184void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
ctiller3bf466f2014-12-19 16:21:57 -0800185 int i;
ncteisen4b36a3d2017-03-13 19:08:06 -0700186 run_some_expired_timers(
Craig Tillerc9000632017-03-24 14:28:48 -0700187 exec_ctx, GPR_ATM_MAX, NULL,
ncteisen4b36a3d2017-03-13 19:08:06 -0700188 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
Craig Tillera82950e2015-09-22 12:33:20 -0700189 for (i = 0; i < NUM_SHARDS; i++) {
Sree Kuchibhotlabd3ea982017-07-07 00:31:11 -0700190 timer_shard *shard = &g_shards[i];
Craig Tillera82950e2015-09-22 12:33:20 -0700191 gpr_mu_destroy(&shard->mu);
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700192 grpc_timer_heap_destroy(&shard->heap);
Craig Tillera82950e2015-09-22 12:33:20 -0700193 }
Craig Tiller185f6c92017-03-17 08:33:19 -0700194 gpr_mu_destroy(&g_shared_mutables.mu);
195 gpr_tls_destroy(&g_last_seen_min_timer);
196 g_shared_mutables.initialized = false;
ctiller3bf466f2014-12-19 16:21:57 -0800197}
198
Craig Tillera82950e2015-09-22 12:33:20 -0700199static double ts_to_dbl(gpr_timespec ts) {
200 return (double)ts.tv_sec + 1e-9 * ts.tv_nsec;
ctiller3bf466f2014-12-19 16:21:57 -0800201}
202
Craig Tiller0b4c5312017-03-24 15:23:57 -0700203/* returns true if the first element in the list */
Craig Tiller1a769262017-03-27 12:52:59 -0700204static void list_join(grpc_timer *head, grpc_timer *timer) {
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700205 timer->next = head;
206 timer->prev = head->prev;
207 timer->next->prev = timer->prev->next = timer;
ctiller3bf466f2014-12-19 16:21:57 -0800208}
209
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700210static void list_remove(grpc_timer *timer) {
211 timer->next->prev = timer->prev;
212 timer->prev->next = timer->next;
ctiller3bf466f2014-12-19 16:21:57 -0800213}
214
Craig Tiller7536af02015-12-22 13:49:30 -0800215static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
Sree Kuchibhotlabd3ea982017-07-07 00:31:11 -0700216 timer_shard *temp;
ctiller3bf466f2014-12-19 16:21:57 -0800217 temp = g_shard_queue[first_shard_queue_index];
Craig Tillera82950e2015-09-22 12:33:20 -0700218 g_shard_queue[first_shard_queue_index] =
219 g_shard_queue[first_shard_queue_index + 1];
ctiller3bf466f2014-12-19 16:21:57 -0800220 g_shard_queue[first_shard_queue_index + 1] = temp;
Craig Tillera82950e2015-09-22 12:33:20 -0700221 g_shard_queue[first_shard_queue_index]->shard_queue_index =
222 first_shard_queue_index;
223 g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
224 first_shard_queue_index + 1;
ctiller3bf466f2014-12-19 16:21:57 -0800225}
226
/* Re-sort g_shard_queue after 'shard''s min_deadline changed: bubble the
   shard left while it is earlier than its left neighbor, then right while it
   is later than its right neighbor. At most one of the loops runs.
   REQUIRES: g_shared_mutables.mu held. */
static void note_deadline_change(timer_shard *shard) {
  while (shard->shard_queue_index > 0 &&
         shard->min_deadline <
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         shard->min_deadline >
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}
239
Vijay Pai58c33ba2017-09-01 14:08:42 -0700240void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; }
241
/* Schedule 'closure' to run when 'deadline' is reached (or immediately if it
   is already past). The timer is hashed by address onto one of NUM_SHARDS
   shards; within the shard it lands in the heap when its deadline is inside
   the current queue window, otherwise in the overflow list.
   'deadline' and 'now' must use the clock type fixed at list_init time. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_closure *closure,
                     gpr_timespec now) {
  int is_first_timer = 0;
  timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  timer->closure = closure;
  /* round up: a timer must never fire before its requested deadline */
  gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);

  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
                       "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
            timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec,
            now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb);
  }

  if (!g_shared_mutables.initialized) {
    /* subsystem not up: fail the closure immediately rather than losing it */
    timer->pending = false;
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "Attempt to create timer before initialization"));
    return;
  }

  gpr_mu_lock(&shard->mu);
  timer->pending = true;
  if (gpr_time_cmp(deadline, now) <= 0) {
    /* already due: schedule now, never enqueue */
    timer->pending = false;
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
    gpr_mu_unlock(&shard->mu);
    /* early out */
    return;
  }

  /* feed the deadline distance into the shard's stats; this drives the
     queue window sizing in refill_heap */
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (deadline_atm < shard->queue_deadline_cap) {
    /* inside the window: heap membership; 'is_first_timer' is set when this
       becomes the new heap minimum */
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    /* outside the window: park on the unordered overflow list */
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
                       " => is_first_timer=%s",
            (int)(shard - g_shards), shard->queue_deadline_cap,
            is_first_timer ? "true" : "false");
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased, we need to adjust the master queue. Note
     that there is a potential racy unlocked region here. There could be a
     reordering of multiple grpc_timer_init calls, at this point, but the < test
     below should ensure that we err on the side of caution. There could
     also be a race with grpc_timer_check, which might beat us to the lock. In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_shared_mutables.mu);
    if (GRPC_TRACER_ON(grpc_timer_trace)) {
      gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
              shard->min_deadline);
    }
    if (deadline_atm < shard->min_deadline) {
      gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline_atm;
      note_deadline_change(shard);
      if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) {
        /* new global minimum: publish it and wake a poller so the earlier
           deadline is honored */
        gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm);
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_shared_mutables.mu);
  }
}
322
Craig Tiller185f6c92017-03-17 08:33:19 -0700323void grpc_timer_consume_kick(void) {
324 /* force re-evaluation of last seeen min */
325 gpr_tls_set(&g_last_seen_min_timer, 0);
326}
327
/* Cancel 'timer' if it has not fired yet; its closure is scheduled with
   GRPC_ERROR_CANCELLED. A no-op when the timer already fired or the
   subsystem has been shut down. */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  if (!g_shared_mutables.initialized) {
    /* must have already been cancelled, also the shard mutex is invalid */
    return;
  }

  /* same address-hash as grpc_timer_init, so we find the same shard */
  timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  gpr_mu_lock(&shard->mu);
  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
            timer->pending ? "true" : "false");
  }
  if (timer->pending) {
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
    timer->pending = false;
    /* INVALID_HEAP_INDEX marks list membership (see grpc_timer_init) */
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}
351
/* Rebalances the timer shard by computing a new 'queue_deadline_cap' and
   moving all relevant timers in shard->list (i.e timers with deadlines earlier
   than 'queue_deadline_cap') into shard->heap.
   Returns 'true' if shard->heap has at least ONE element.
   REQUIRES: shard->mu locked */
static int refill_heap(timer_shard *shard, gpr_atm now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue:
     (delta is in seconds; * 1000.0 converts to the ms timeline) */
  shard->queue_deadline_cap =
      saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
                     (gpr_atm)(deadline_delta * 1000.0));

  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
            (int)(shard - g_shards), shard->queue_deadline_cap);
  }
  /* 'next' is captured before list_remove may unlink 'timer' */
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (timer->deadline < shard->queue_deadline_cap) {
      if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
        gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
                timer->deadline);
      }
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
390
/* This pops the next non-cancelled timer with deadline <= now from the
   queue, or returns NULL if there isn't one. The popped timer is marked
   not-pending.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
  grpc_timer *timer;
  for (;;) {
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
              (int)(shard - g_shards),
              grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
    }
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      /* nothing can be due before the window cap */
      if (now < shard->queue_deadline_cap) return NULL;
      /* pull overflow-list timers into the heap; loop to re-examine it */
      if (!refill_heap(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG,
              " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
              timer->deadline, now);
    }
    if (timer->deadline > now) return NULL;
    if (GRPC_TRACER_ON(grpc_timer_trace)) {
      gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
              now - timer->deadline);
    }
    timer->pending = false;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}
422
423/* REQUIRES: shard->mu unlocked */
Sree Kuchibhotlabd3ea982017-07-07 00:31:11 -0700424static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard,
Craig Tiller7b2dd932017-03-16 16:25:12 -0700425 gpr_atm now, gpr_atm *new_min_deadline,
Craig Tillerc027e772016-05-03 16:27:00 -0700426 grpc_error *error) {
ctiller3bf466f2014-12-19 16:21:57 -0800427 size_t n = 0;
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700428 grpc_timer *timer;
Craig Tillera82950e2015-09-22 12:33:20 -0700429 gpr_mu_lock(&shard->mu);
David Garcia Quintasf747bbc2015-10-04 23:09:47 -0700430 while ((timer = pop_one(shard, now))) {
ncteisen274bbbe2017-06-08 14:57:11 -0700431 GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
Craig Tillera82950e2015-09-22 12:33:20 -0700432 n++;
433 }
434 *new_min_deadline = compute_min_deadline(shard);
435 gpr_mu_unlock(&shard->mu);
ctiller3bf466f2014-12-19 16:21:57 -0800436 return n;
437}
438
/* Fire all timers due at 'now' across all shards, consuming one ref on
   'error'. Returns NOT_CHECKED when another thread holds the checker
   spinlock, CHECKED_AND_EMPTY when nothing fired, FIRED otherwise. When
   'next' is non-NULL it is lowered to the earliest known upcoming deadline. */
static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx,
                                                       gpr_atm now,
                                                       gpr_atm *next,
                                                       grpc_error *error) {
  grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;

  /* refresh this thread's cached minimum while we have the fresh value */
  gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
  gpr_tls_set(&g_last_seen_min_timer, min_timer);
  if (now < min_timer) {
    if (next != NULL) *next = GPR_MIN(*next, min_timer);
    return GRPC_TIMERS_CHECKED_AND_EMPTY;
  }

  /* only one checker at a time; losers report NOT_CHECKED */
  if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) {
    gpr_mu_lock(&g_shared_mutables.mu);
    result = GRPC_TIMERS_CHECKED_AND_EMPTY;

    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
              (int)(g_shard_queue[0] - g_shards),
              g_shard_queue[0]->min_deadline);
    }

    /* drain shards in deadline order; the == clause is excluded at
       now == GPR_ATM_MAX so an empty shard's cap+1 sentinel can't loop */
    while (g_shard_queue[0]->min_deadline < now ||
           (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) {
      gpr_atm new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard. This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
                     error) > 0) {
        result = GRPC_TIMERS_FIRED;
      }

      if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
        gpr_log(GPR_DEBUG,
                " .. result --> %d"
                ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
                ", now=%" PRIdPTR,
                result, (int)(g_shard_queue[0] - g_shards),
                g_shard_queue[0]->min_deadline, new_min_deadline, now);
      }

      /* An grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline. However,
         grpc_timer_init() will block on the master_lock before it can call
         set_min_deadline, so this one will complete first and then the Addtimer
         will reduce the min_deadline (perhaps unnecessarily). */
      g_shard_queue[0]->min_deadline = new_min_deadline;
      note_deadline_change(g_shard_queue[0]);
    }

    if (next) {
      *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
    }

    /* publish the new global minimum for the fast path */
    gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
                             g_shard_queue[0]->min_deadline);
    gpr_mu_unlock(&g_shared_mutables.mu);
    gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
  }

  GRPC_ERROR_UNREF(error);

  return result;
}
506
/* Public entry point: check for and fire expired timers. 'now' must use the
   configured clock type. When 'next' is non-NULL it is updated (only ever
   lowered) to the next time this should be called. A 'now' of inf-future
   signals shutdown and fires everything with a shutdown error. */
grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
                                         gpr_timespec now, gpr_timespec *next) {
  // prelude
  GPR_ASSERT(now.clock_type == g_clock_type);
  gpr_atm now_atm = timespec_to_atm_round_down(now);

  /* fetch from a thread-local first: this avoids contention on a globally
     mutable cacheline in the common case */
  gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer);
  if (now_atm < min_timer) {
    if (next != NULL) {
      *next =
          atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
    }
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG,
              "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
              now_atm, min_timer);
    }
    return GRPC_TIMERS_CHECKED_AND_EMPTY;
  }

  /* inf-future 'now' means the timer system is shutting down */
  grpc_error *shutdown_error =
      gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
          ? GRPC_ERROR_NONE
          : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");

  // tracing
  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    char *next_str;
    if (next == NULL) {
      next_str = gpr_strdup("NULL");
    } else {
      gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
                   next->tv_nsec, timespec_to_atm_round_down(*next));
    }
    gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR
                       "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
            now.tv_sec, now.tv_nsec, now_atm, next_str,
            gpr_tls_get(&g_last_seen_min_timer),
            gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
    gpr_free(next_str);
  }
  // actual code
  grpc_timer_check_result r;
  gpr_atm next_atm;
  if (next == NULL) {
    r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error);
  } else {
    /* convert through the ms timeline and back so run_some_expired_timers
       can lower the bound */
    next_atm = timespec_to_atm_round_down(*next);
    r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error);
    *next = atm_to_timespec(next_atm);
  }
  // tracing
  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    char *next_str;
    if (next == NULL) {
      next_str = gpr_strdup("NULL");
    } else {
      gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec,
                   next->tv_nsec, next_atm);
    }
    gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
    gpr_free(next_str);
  }
  return r;
}
murgatroid999030c812016-09-16 13:25:08 -0700574
575#endif /* GRPC_TIMER_USE_GENERIC */