ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 6169d5f | 2016-03-31 07:46:18 -0700 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
murgatroid99 | 9030c81 | 2016-09-16 13:25:08 -0700 | [diff] [blame] | 34 | #include "src/core/lib/iomgr/port.h" |
| 35 | |
| 36 | #ifdef GRPC_TIMER_USE_GENERIC |
| 37 | |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 38 | #include "src/core/lib/iomgr/timer.h" |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 39 | |
Craig Tiller | 6a7626c | 2015-07-19 22:21:41 -0700 | [diff] [blame] | 40 | #include <grpc/support/log.h> |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 41 | #include <grpc/support/sync.h> |
| 42 | #include <grpc/support/useful.h> |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 43 | #include "src/core/lib/iomgr/time_averaged_stats.h" |
| 44 | #include "src/core/lib/iomgr/timer_heap.h" |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 45 | |
| 46 | #define INVALID_HEAP_INDEX 0xffffffffu |
| 47 | |
| 48 | #define LOG2_NUM_SHARDS 5 |
| 49 | #define NUM_SHARDS (1 << LOG2_NUM_SHARDS) |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 50 | #define ADD_DEADLINE_SCALE 0.33 |
| 51 | #define MIN_QUEUE_WINDOW_DURATION 0.01 |
| 52 | #define MAX_QUEUE_WINDOW_DURATION 1 |
| 53 | |
/* Per-shard timer state.  Timers are hashed onto one of NUM_SHARDS shards by
   pointer value (see shard_idx); each shard keeps soon-to-expire timers in a
   heap and the remainder on an overflow list. */
typedef struct {
  gpr_mu mu;
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines <= this will be in the heap.
     NOTE(review): the insertion paths (grpc_timer_init, refill_queue)
     actually compare with '<', not '<='; confirm which is intended. */
  gpr_timespec queue_deadline_cap;
  /* Earliest deadline of any timer in this shard; written while holding g_mu
     (see grpc_timer_init and run_some_expired_timers). */
  gpr_timespec min_deadline;
  /* Index in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap.  NOTE(review):
     the historical remark that timers here "have the top bit of their
     deadline set to 0" looks stale -- nothing in this file manipulates
     deadline bits; confirm before relying on it. */
  grpc_timer_heap heap;
  /* Circular doubly-linked overflow list (self-linked when empty) holding
     timers whose deadline is >= queue_deadline_cap. */
  grpc_timer list;
} shard_type;
| 68 | |
/* Protects g_shard_queue */
static gpr_mu g_mu;
/* Allow only one run_some_expired_timers at once */
static gpr_mu g_checker_mu;
/* Clock type all deadlines must use; fixed from 'now' at
   grpc_timer_list_init time. */
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu; kept sorted by min_deadline (earliest first) via
   note_deadline_change. */
static shard_type *g_shard_queue[NUM_SHARDS];
/* True between grpc_timer_list_init and grpc_timer_list_shutdown. */
static bool g_initialized = false;

static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, grpc_error *error);
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 81 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 82 | static gpr_timespec compute_min_deadline(shard_type *shard) { |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 83 | return grpc_timer_heap_is_empty(&shard->heap) |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 84 | ? shard->queue_deadline_cap |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 85 | : grpc_timer_heap_top(&shard->heap)->deadline; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 86 | } |
| 87 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 88 | void grpc_timer_list_init(gpr_timespec now) { |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 89 | uint32_t i; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 90 | |
Craig Tiller | 3f72df9 | 2016-04-13 20:26:07 -0700 | [diff] [blame] | 91 | g_initialized = true; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 92 | gpr_mu_init(&g_mu); |
| 93 | gpr_mu_init(&g_checker_mu); |
Craig Tiller | 6a7626c | 2015-07-19 22:21:41 -0700 | [diff] [blame] | 94 | g_clock_type = now.clock_type; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 95 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 96 | for (i = 0; i < NUM_SHARDS; i++) { |
| 97 | shard_type *shard = &g_shards[i]; |
| 98 | gpr_mu_init(&shard->mu); |
| 99 | grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1, |
| 100 | 0.5); |
| 101 | shard->queue_deadline_cap = now; |
| 102 | shard->shard_queue_index = i; |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 103 | grpc_timer_heap_init(&shard->heap); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 104 | shard->list.next = shard->list.prev = &shard->list; |
| 105 | shard->min_deadline = compute_min_deadline(shard); |
| 106 | g_shard_queue[i] = shard; |
| 107 | } |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 108 | } |
| 109 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 110 | void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 111 | int i; |
Craig Tiller | 25dc539 | 2016-05-10 10:53:55 -0700 | [diff] [blame] | 112 | run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL, |
| 113 | GRPC_ERROR_CREATE("Timer list shutdown")); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 114 | for (i = 0; i < NUM_SHARDS; i++) { |
| 115 | shard_type *shard = &g_shards[i]; |
| 116 | gpr_mu_destroy(&shard->mu); |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 117 | grpc_timer_heap_destroy(&shard->heap); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 118 | } |
| 119 | gpr_mu_destroy(&g_mu); |
| 120 | gpr_mu_destroy(&g_checker_mu); |
Craig Tiller | 3f72df9 | 2016-04-13 20:26:07 -0700 | [diff] [blame] | 121 | g_initialized = false; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 122 | } |
| 123 | |
| 124 | /* This is a cheap, but good enough, pointer hash for sharding the tasks: */ |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 125 | static size_t shard_idx(const grpc_timer *info) { |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 126 | size_t x = (size_t)info; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 127 | return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) & (NUM_SHARDS - 1); |
| 128 | } |
| 129 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 130 | static double ts_to_dbl(gpr_timespec ts) { |
| 131 | return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 132 | } |
| 133 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 134 | static gpr_timespec dbl_to_ts(double d) { |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 135 | gpr_timespec ts; |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 136 | ts.tv_sec = (int64_t)d; |
| 137 | ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec)); |
Craig Tiller | 143e7bf | 2015-07-13 08:41:49 -0700 | [diff] [blame] | 138 | ts.clock_type = GPR_TIMESPAN; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 139 | return ts; |
| 140 | } |
| 141 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 142 | static void list_join(grpc_timer *head, grpc_timer *timer) { |
| 143 | timer->next = head; |
| 144 | timer->prev = head->prev; |
| 145 | timer->next->prev = timer->prev->next = timer; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 146 | } |
| 147 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 148 | static void list_remove(grpc_timer *timer) { |
| 149 | timer->next->prev = timer->prev; |
| 150 | timer->prev->next = timer->next; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 151 | } |
| 152 | |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 153 | static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) { |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 154 | shard_type *temp; |
| 155 | temp = g_shard_queue[first_shard_queue_index]; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 156 | g_shard_queue[first_shard_queue_index] = |
| 157 | g_shard_queue[first_shard_queue_index + 1]; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 158 | g_shard_queue[first_shard_queue_index + 1] = temp; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 159 | g_shard_queue[first_shard_queue_index]->shard_queue_index = |
| 160 | first_shard_queue_index; |
| 161 | g_shard_queue[first_shard_queue_index + 1]->shard_queue_index = |
| 162 | first_shard_queue_index + 1; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 163 | } |
| 164 | |
/* Restore the sorted-by-min_deadline invariant of g_shard_queue after
   shard->min_deadline has changed, bubbling the shard toward the front or
   back with adjacent swaps.  Note that swap_adjacent_shards_in_queue updates
   shard->shard_queue_index, so the index is deliberately re-read on every
   iteration.
   REQUIRES: g_mu locked (callers grpc_timer_init and
   run_some_expired_timers hold it). */
static void note_deadline_change(shard_type *shard) {
  /* Bubble toward the front while earlier than the previous shard... */
  while (shard->shard_queue_index > 0 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) < 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  /* ...or toward the back while later than the next shard. */
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         gpr_time_cmp(
             shard->min_deadline,
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) > 0) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}
| 179 | |
/* Initialize 'timer' to run 'closure' when 'deadline' is reached.  Both
   'deadline' and 'now' must use the clock type fixed at
   grpc_timer_list_init time.  If the timer system is shut down, the closure
   is scheduled immediately with an error; if the deadline has already
   passed, it is scheduled immediately with GRPC_ERROR_NONE. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_closure *closure,
                     gpr_timespec now) {
  int is_first_timer = 0;
  shard_type *shard = &g_shards[shard_idx(timer)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  timer->closure = closure;
  timer->deadline = deadline;
  timer->triggered = 0;

  if (!g_initialized) {
    timer->triggered = 1;
    grpc_closure_sched(
        exec_ctx, timer->closure,
        GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
    return;
  }

  /* Already-expired deadlines fire immediately without touching any shard. */
  if (gpr_time_cmp(deadline, now) <= 0) {
    timer->triggered = 1;
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_NONE);
    return;
  }

  /* TODO(ctiller): check deadline expired */

  gpr_mu_lock(&shard->mu);
  /* Feed the deadline delta into the shard's moving average; refill_queue
     uses that average to size future deadline-cap windows. */
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  /* Near timers go in the heap; far timers go on the overflow list with a
     sentinel heap_index so grpc_timer_cancel knows where to look. */
  if (gpr_time_cmp(deadline, shard->queue_deadline_cap) < 0) {
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased, we need to adjust the master queue.  Note
     that there is a potential racy unlocked region here.  There could be a
     reordering of multiple grpc_timer_init calls, at this point, but the < test
     below should ensure that we err on the side of caution.  There could
     also be a race with grpc_timer_check, which might beat us to the lock.  In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_mu);
    if (gpr_time_cmp(deadline, shard->min_deadline) < 0) {
      gpr_timespec old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline;
      note_deadline_change(shard);
      /* If this shard is now at the front and moved the global minimum
         earlier, wake the poller so it can recompute its timeout. */
      if (shard->shard_queue_index == 0 &&
          gpr_time_cmp(deadline, old_min_deadline) < 0) {
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_mu);
  }
}
| 243 | |
/* Cancel 'timer'.  If it has not yet fired, its closure is scheduled with
   GRPC_ERROR_CANCELLED and the timer is removed from its shard's heap or
   overflow list.  Cancelling an already-triggered timer is a no-op. */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  if (!g_initialized) {
    /* must have already been cancelled, also the shard mutex is invalid */
    return;
  }

  shard_type *shard = &g_shards[shard_idx(timer)];
  gpr_mu_lock(&shard->mu);
  if (!timer->triggered) {
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
    timer->triggered = 1;
    /* INVALID_HEAP_INDEX marks timers parked on the overflow list rather
       than in the heap (see grpc_timer_init). */
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}
| 263 | |
/* This is called when the queue is empty and "now" has reached the
   queue_deadline_cap.  We compute a new queue deadline and then scan the
   overflow list for timers that fall under it (note the comparison used is
   '<', i.e. strictly earlier than the new cap).  Returns true if the queue
   is no longer empty.
   REQUIRES: shard->mu locked */
static int refill_queue(shard_type *shard, gpr_timespec now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue.  The cap
     only ever moves forward: it advances from max(now, old cap). */
  shard->queue_deadline_cap = gpr_time_add(
      gpr_time_max(now, shard->queue_deadline_cap), dbl_to_ts(deadline_delta));
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    /* 'next' is saved up front because list_remove unlinks 'timer'. */
    next = timer->next;

    if (gpr_time_cmp(timer->deadline, shard->queue_deadline_cap) < 0) {
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
| 292 | |
/* This pops the next non-cancelled timer with deadline <= now from the
   queue, or returns NULL if there isn't one.  The popped timer is marked
   triggered before being returned.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(shard_type *shard, gpr_timespec now) {
  grpc_timer *timer;
  for (;;) {
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      /* Nothing below the cap yet: either now hasn't reached the cap (so
         nothing can be due), or the overflow list needs to be refilled into
         the heap.  If the refill yields nothing either, give up. */
      if (gpr_time_cmp(now, shard->queue_deadline_cap) < 0) return NULL;
      if (!refill_queue(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (gpr_time_cmp(timer->deadline, now) > 0) return NULL;
    timer->triggered = 1;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}
| 310 | |
/* Schedule every timer in 'shard' whose deadline is at or before 'now',
   handing each closure a fresh reference to 'error' (the caller retains its
   own reference).  Recomputes the shard's min deadline into
   *new_min_deadline and returns the number of timers scheduled.
   REQUIRES: shard->mu unlocked (it is taken and released here). */
static size_t pop_timers(grpc_exec_ctx *exec_ctx, shard_type *shard,
                         gpr_timespec now, gpr_timespec *new_min_deadline,
                         grpc_error *error) {
  size_t n = 0;
  grpc_timer *timer;
  gpr_mu_lock(&shard->mu);
  while ((timer = pop_one(shard, now))) {
    grpc_closure_sched(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
    n++;
  }
  *new_min_deadline = compute_min_deadline(shard);
  gpr_mu_unlock(&shard->mu);
  return n;
}
| 326 | |
/* Drain expired timers from the front of the (deadline-sorted) shard queue,
   scheduling each with a reference to 'error'.  Takes ownership of the
   caller's 'error' reference (it is unreffed before returning).  If 'next'
   is non-NULL it is lowered toward the earliest pending deadline, so callers
   know when to poll again.  Returns the number of timers scheduled. */
static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_timespec now,
                                   gpr_timespec *next, grpc_error *error) {
  size_t n = 0;

  /* TODO(ctiller): verify that there are any timers (atomically) here */

  /* Only one thread may drain at a time; others fall through to the
     contended branch below. */
  if (gpr_mu_trylock(&g_checker_mu)) {
    gpr_mu_lock(&g_mu);

    /* g_shard_queue[0] always holds the shard with the earliest
       min_deadline, so we only ever need to look at the front. */
    while (gpr_time_cmp(g_shard_queue[0]->min_deadline, now) < 0) {
      gpr_timespec new_min_deadline;

      /* For efficiency, we pop as many available timers as we can from the
         shard.  This may violate perfect timer deadline ordering, but that
         shouldn't be a big deal because we don't make ordering guarantees. */
      n +=
          pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, error);

      /* A grpc_timer_init() on the shard could intervene here, adding a new
         timer that is earlier than new_min_deadline.  However,
         grpc_timer_init() will block on the master lock before it can call
         set_min_deadline, so this one will complete first, and then the
         addition of the timer will reduce the min_deadline (perhaps
         unnecessarily). */
      g_shard_queue[0]->min_deadline = new_min_deadline;
      note_deadline_change(g_shard_queue[0]);
    }

    if (next) {
      *next = gpr_time_min(*next, g_shard_queue[0]->min_deadline);
    }

    gpr_mu_unlock(&g_mu);
    gpr_mu_unlock(&g_checker_mu);
  } else if (next != NULL) {
    /* TODO(ctiller): this forces calling code to do a short poll, and
       then retry the timer check (because this time through the timer list was
       contended).

       We could reduce the cost here dramatically by keeping a count of how many
       currently active pollers got through the uncontended case above
       successfully, and waking up other pollers IFF that count drops to zero.

       Once that count is in place, this entire else branch could disappear. */
    *next = gpr_time_min(
        *next, gpr_time_add(now, gpr_time_from_millis(1, GPR_TIMESPAN)));
  }

  /* Release the caller's reference to 'error'; pop_timers took its own refs
     for each scheduled closure. */
  GRPC_ERROR_UNREF(error);

  return (int)n;
}
| 378 | |
Craig Tiller | 311445f | 2016-02-18 07:31:39 -0800 | [diff] [blame] | 379 | bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now, |
| 380 | gpr_timespec *next) { |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 381 | GPR_ASSERT(now.clock_type == g_clock_type); |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 382 | return run_some_expired_timers( |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 383 | exec_ctx, now, next, |
Craig Tiller | c027e77 | 2016-05-03 16:27:00 -0700 | [diff] [blame] | 384 | gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0 |
| 385 | ? GRPC_ERROR_NONE |
| 386 | : GRPC_ERROR_CREATE("Shutting down timer system")); |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 387 | } |
murgatroid99 | 9030c81 | 2016-09-16 13:25:08 -0700 | [diff] [blame] | 388 | |
| 389 | #endif /* GRPC_TIMER_USE_GENERIC */ |