ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 3 | * Copyright 2015 gRPC authors. |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 4 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 8 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 10 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 16 | * |
| 17 | */ |
| 18 | |
murgatroid99 | 9030c81 | 2016-09-16 13:25:08 -0700 | [diff] [blame] | 19 | #include "src/core/lib/iomgr/port.h" |
| 20 | |
| 21 | #ifdef GRPC_TIMER_USE_GENERIC |
| 22 | |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 23 | #include "src/core/lib/iomgr/timer.h" |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 24 | |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 25 | #include <grpc/support/alloc.h> |
Craig Tiller | 6a7626c | 2015-07-19 22:21:41 -0700 | [diff] [blame] | 26 | #include <grpc/support/log.h> |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 27 | #include <grpc/support/string_util.h> |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 28 | #include <grpc/support/sync.h> |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 29 | #include <grpc/support/tls.h> |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 30 | #include <grpc/support/useful.h> |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 31 | #include "src/core/lib/debug/trace.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 32 | #include "src/core/lib/iomgr/time_averaged_stats.h" |
| 33 | #include "src/core/lib/iomgr/timer_heap.h" |
Craig Tiller | edbf2b9 | 2017-02-27 07:24:00 -0800 | [diff] [blame] | 34 | #include "src/core/lib/support/spinlock.h" |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 35 | |
/* Sentinel stored in grpc_timer.heap_index for timers that live on a shard's
   overflow 'list' rather than in its 'heap'. */
#define INVALID_HEAP_INDEX 0xffffffffu

/* Timers are spread over NUM_SHARDS (= 32) shards to reduce lock contention;
   a timer's shard is chosen by hashing its address. */
#define LOG2_NUM_SHARDS 5
#define NUM_SHARDS (1 << LOG2_NUM_SHARDS)
/* Fraction of the time-averaged deadline delta used when advancing a shard's
   queue_deadline_cap (see refill_heap). */
#define ADD_DEADLINE_SCALE 0.33
/* Lower/upper clamp (in seconds) for the queue window computed in
   refill_heap; converted to milliseconds via the * 1000.0 there. */
#define MIN_QUEUE_WINDOW_DURATION 0.01
#define MAX_QUEUE_WINDOW_DURATION 1

/* Debug tracers: "timer" logs individual timer set/fire/cancel events,
   "timer_check" logs the periodic expiry-scan machinery. */
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer");
grpc_tracer_flag grpc_timer_check_trace =
    GRPC_TRACER_INITIALIZER(false, "timer_check");
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 47 | |
/* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
 * deadlines earlier than 'queue_deadline_cap' are maintained in the heap and
 * others are maintained in the list (unordered). This helps to keep the number
 * of elements in the heap low.
 *
 * The 'queue_deadline_cap' gets recomputed periodically based on the timer
 * stats maintained in 'stats' and the relevant timers are then moved from the
 * 'list' to 'heap'
 */
typedef struct {
  gpr_mu mu;
  grpc_time_averaged_stats stats;
  /* All and only timers with deadlines < this will be in the heap. */
  gpr_atm queue_deadline_cap;
  /* The deadline of the next timer due in this shard */
  gpr_atm min_deadline;
  /* Index of this timer_shard in the g_shard_queue */
  uint32_t shard_queue_index;
  /* This holds all timers with deadlines < queue_deadline_cap; they are
     ordered by deadline (earliest at the top). */
  grpc_timer_heap heap;
  /* This holds timers whose deadline is >= queue_deadline_cap. Sentinel head
     node of a circular doubly-linked list (see list_join/list_remove). */
  grpc_timer list;
} timer_shard;
| 72 | |
/* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
 * is hashed to select the timer shard to add the timer to */
static timer_shard g_shards[NUM_SHARDS];

/* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
 * the deadline of the next timer in each shard).
 * Access to this is protected by g_shared_mutables.mu */
static timer_shard *g_shard_queue[NUM_SHARDS];

/* Thread local variable that stores the deadline of the next timer the thread
 * has last-seen. This is an optimization to prevent the thread from checking
 * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
 * an expensive operation) */
GPR_TLS_DECL(g_last_seen_min_timer);
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 87 | |
/* Mutable state shared by all shards; cacheline-aligned to avoid false
   sharing with neighboring globals. */
struct shared_mutables {
  /* The deadline of the next timer due across all timer shards */
  gpr_atm min_timer;
  /* Allow only one run_some_expired_timers at once */
  gpr_spinlock checker_mu;
  bool initialized;
  /* Protects g_shard_queue (and the shared_mutables struct itself) */
  gpr_mu mu;
} GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);

/* Field order matches the struct: min_timer, checker_mu, initialized, mu. */
static struct shared_mutables g_shared_mutables = {
    0,
    GPR_SPINLOCK_STATIC_INITIALIZER,
    false,
    {{0}}
};

/* Clock type all deadlines must use (fixed at grpc_timer_list_init time). */
static gpr_clock_type g_clock_type;
/* Epoch for the gpr_timespec <-> gpr_atm (milliseconds) conversions below. */
static gpr_timespec g_start_time;
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 107 | |
Craig Tiller | bd0af4f | 2017-03-20 08:33:02 -0700 | [diff] [blame] | 108 | static gpr_atm saturating_add(gpr_atm a, gpr_atm b) { |
| 109 | if (a > GPR_ATM_MAX - b) { |
| 110 | return GPR_ATM_MAX; |
| 111 | } |
| 112 | return a + b; |
| 113 | } |
| 114 | |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 115 | static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, |
| 116 | gpr_atm now, |
| 117 | gpr_atm *next, |
| 118 | grpc_error *error); |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 119 | |
Craig Tiller | 7b2dd93 | 2017-03-16 16:25:12 -0700 | [diff] [blame] | 120 | static gpr_timespec dbl_to_ts(double d) { |
| 121 | gpr_timespec ts; |
| 122 | ts.tv_sec = (int64_t)d; |
| 123 | ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec)); |
| 124 | ts.clock_type = GPR_TIMESPAN; |
| 125 | return ts; |
| 126 | } |
| 127 | |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 128 | static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 129 | ts = gpr_time_sub(ts, g_start_time); |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 130 | double x = GPR_MS_PER_SEC * (double)ts.tv_sec + |
Craig Tiller | 883243a | 2017-03-24 16:08:08 -0700 | [diff] [blame] | 131 | (double)ts.tv_nsec / GPR_NS_PER_MS + |
| 132 | (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC; |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 133 | if (x < 0) return 0; |
| 134 | if (x > GPR_ATM_MAX) return GPR_ATM_MAX; |
| 135 | return (gpr_atm)x; |
| 136 | } |
| 137 | |
| 138 | static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) { |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 139 | ts = gpr_time_sub(ts, g_start_time); |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 140 | double x = |
| 141 | GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS; |
Craig Tiller | 7b2dd93 | 2017-03-16 16:25:12 -0700 | [diff] [blame] | 142 | if (x < 0) return 0; |
| 143 | if (x > GPR_ATM_MAX) return GPR_ATM_MAX; |
| 144 | return (gpr_atm)x; |
| 145 | } |
| 146 | |
| 147 | static gpr_timespec atm_to_timespec(gpr_atm x) { |
| 148 | return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0)); |
| 149 | } |
| 150 | |
Sree Kuchibhotla | bd3ea98 | 2017-07-07 00:31:11 -0700 | [diff] [blame] | 151 | static gpr_atm compute_min_deadline(timer_shard *shard) { |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 152 | return grpc_timer_heap_is_empty(&shard->heap) |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 153 | ? saturating_add(shard->queue_deadline_cap, 1) |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 154 | : grpc_timer_heap_top(&shard->heap)->deadline; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 155 | } |
| 156 | |
/* Initialize the timer subsystem. 'now' fixes both the clock type all later
   deadlines must use and the epoch for timespec<->atm conversions. */
void grpc_timer_list_init(gpr_timespec now) {
  uint32_t i;

  g_shared_mutables.initialized = true;
  gpr_mu_init(&g_shared_mutables.mu);
  /* All deadlines passed to grpc_timer_init must use this clock type. */
  g_clock_type = now.clock_type;
  g_start_time = now;
  g_shared_mutables.min_timer = timespec_to_atm_round_down(now);
  gpr_tls_init(&g_last_seen_min_timer);
  /* 0 forces the first check on each thread to consult shared state. */
  gpr_tls_set(&g_last_seen_min_timer, 0);
  grpc_register_tracer(&grpc_timer_trace);
  grpc_register_tracer(&grpc_timer_check_trace);

  for (i = 0; i < NUM_SHARDS; i++) {
    timer_shard *shard = &g_shards[i];
    gpr_mu_init(&shard->mu);
    grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
                                  0.5);
    shard->queue_deadline_cap = g_shared_mutables.min_timer;
    shard->shard_queue_index = i;
    grpc_timer_heap_init(&shard->heap);
    /* Empty circular list: sentinel points at itself. */
    shard->list.next = shard->list.prev = &shard->list;
    shard->min_deadline = compute_min_deadline(shard);
    g_shard_queue[i] = shard;
  }
}
| 183 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 184 | void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 185 | int i; |
ncteisen | 4b36a3d | 2017-03-13 19:08:06 -0700 | [diff] [blame] | 186 | run_some_expired_timers( |
Craig Tiller | c900063 | 2017-03-24 14:28:48 -0700 | [diff] [blame] | 187 | exec_ctx, GPR_ATM_MAX, NULL, |
ncteisen | 4b36a3d | 2017-03-13 19:08:06 -0700 | [diff] [blame] | 188 | GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown")); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 189 | for (i = 0; i < NUM_SHARDS; i++) { |
Sree Kuchibhotla | bd3ea98 | 2017-07-07 00:31:11 -0700 | [diff] [blame] | 190 | timer_shard *shard = &g_shards[i]; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 191 | gpr_mu_destroy(&shard->mu); |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 192 | grpc_timer_heap_destroy(&shard->heap); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 193 | } |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 194 | gpr_mu_destroy(&g_shared_mutables.mu); |
| 195 | gpr_tls_destroy(&g_last_seen_min_timer); |
| 196 | g_shared_mutables.initialized = false; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 197 | } |
| 198 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 199 | static double ts_to_dbl(gpr_timespec ts) { |
| 200 | return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 201 | } |
| 202 | |
/* Insert 'timer' just before the sentinel 'head' node, i.e. at the tail of
   the circular doubly-linked list. (The list is unordered; see timer_shard.) */
static void list_join(grpc_timer *head, grpc_timer *timer) {
  timer->next = head;
  timer->prev = head->prev;
  timer->next->prev = timer->prev->next = timer;
}
| 209 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 210 | static void list_remove(grpc_timer *timer) { |
| 211 | timer->next->prev = timer->prev; |
| 212 | timer->prev->next = timer->next; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 213 | } |
| 214 | |
Craig Tiller | 7536af0 | 2015-12-22 13:49:30 -0800 | [diff] [blame] | 215 | static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) { |
Sree Kuchibhotla | bd3ea98 | 2017-07-07 00:31:11 -0700 | [diff] [blame] | 216 | timer_shard *temp; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 217 | temp = g_shard_queue[first_shard_queue_index]; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 218 | g_shard_queue[first_shard_queue_index] = |
| 219 | g_shard_queue[first_shard_queue_index + 1]; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 220 | g_shard_queue[first_shard_queue_index + 1] = temp; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 221 | g_shard_queue[first_shard_queue_index]->shard_queue_index = |
| 222 | first_shard_queue_index; |
| 223 | g_shard_queue[first_shard_queue_index + 1]->shard_queue_index = |
| 224 | first_shard_queue_index + 1; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 225 | } |
| 226 | |
/* Re-position 'shard' inside g_shard_queue after its min_deadline changed,
   restoring the sort-by-min_deadline invariant by bubbling it forward or
   backward one slot at a time.
   REQUIRES: g_shared_mutables.mu locked (callers in this file hold it). */
static void note_deadline_change(timer_shard *shard) {
  /* Deadline got earlier: bubble toward the front of the queue. */
  while (shard->shard_queue_index > 0 &&
         shard->min_deadline <
             g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
  }
  /* Deadline got later: bubble toward the back of the queue. */
  while (shard->shard_queue_index < NUM_SHARDS - 1 &&
         shard->min_deadline >
             g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
    swap_adjacent_shards_in_queue(shard->shard_queue_index);
  }
}
| 239 | |
Vijay Pai | 58c33ba | 2017-09-01 14:08:42 -0700 | [diff] [blame] | 240 | void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; } |
| 241 | |
/* Start a timer: schedule 'closure' to run (with GRPC_ERROR_NONE) once
   'deadline' passes, or immediately if it already has. Both 'deadline' and
   'now' must use the clock type fixed at grpc_timer_list_init. */
void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
                     gpr_timespec deadline, grpc_closure *closure,
                     gpr_timespec now) {
  int is_first_timer = 0;
  /* Shard is chosen by hashing the timer's address. */
  timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  GPR_ASSERT(deadline.clock_type == g_clock_type);
  GPR_ASSERT(now.clock_type == g_clock_type);
  timer->closure = closure;
  /* Round up so the timer never fires before its requested deadline. */
  gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);

  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
                       "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
            timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec,
            now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb);
  }

  if (!g_shared_mutables.initialized) {
    /* Timer system not (or no longer) initialized: fail the closure at once. */
    timer->pending = false;
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                           "Attempt to create timer before initialization"));
    return;
  }

  gpr_mu_lock(&shard->mu);
  timer->pending = true;
  if (gpr_time_cmp(deadline, now) <= 0) {
    /* Already expired: run the closure immediately, never enqueue. */
    timer->pending = false;
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
    gpr_mu_unlock(&shard->mu);
    /* early out */
    return;
  }

  /* Feed the relative deadline into the shard's stats, which drive the
     queue_deadline_cap computation in refill_heap. */
  grpc_time_averaged_stats_add_sample(&shard->stats,
                                      ts_to_dbl(gpr_time_sub(deadline, now)));
  if (deadline_atm < shard->queue_deadline_cap) {
    /* Near-term timer: goes into the ordered heap. */
    is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
  } else {
    /* Far-out timer: parked on the unordered overflow list. */
    timer->heap_index = INVALID_HEAP_INDEX;
    list_join(&shard->list, timer);
  }
  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
                       " => is_first_timer=%s",
            (int)(shard - g_shards), shard->queue_deadline_cap,
            is_first_timer ? "true" : "false");
  }
  gpr_mu_unlock(&shard->mu);

  /* Deadline may have decreased, we need to adjust the master queue. Note
     that there is a potential racy unlocked region here. There could be a
     reordering of multiple grpc_timer_init calls, at this point, but the < test
     below should ensure that we err on the side of caution. There could
     also be a race with grpc_timer_check, which might beat us to the lock. In
     that case, it is possible that the timer that we added will have already
     run by the time we hold the lock, but that too is a safe error.
     Finally, it's possible that the grpc_timer_check that intervened failed to
     trigger the new timer because the min_deadline hadn't yet been reduced.
     In that case, the timer will simply have to wait for the next
     grpc_timer_check. */
  if (is_first_timer) {
    gpr_mu_lock(&g_shared_mutables.mu);
    if (GRPC_TRACER_ON(grpc_timer_trace)) {
      gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
              shard->min_deadline);
    }
    if (deadline_atm < shard->min_deadline) {
      gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
      shard->min_deadline = deadline_atm;
      note_deadline_change(shard);
      /* If this shard now fronts the queue with a new global minimum, publish
         it and kick a poller so the earlier deadline is noticed. */
      if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) {
        gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm);
        grpc_kick_poller();
      }
    }
    gpr_mu_unlock(&g_shared_mutables.mu);
  }
}
| 322 | |
/* Reset this thread's cached minimum-deadline so its next timer check
   consults the shared minimum instead of the stale thread-local value. */
void grpc_timer_consume_kick(void) {
  /* force re-evaluation of last seen min */
  gpr_tls_set(&g_last_seen_min_timer, 0);
}
| 327 | |
/* Cancel a timer: if it is still pending, schedule its closure with
   GRPC_ERROR_CANCELLED and remove it from its shard's heap or list.
   If it already fired (or the timer system is shut down), this is a no-op. */
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
  if (!g_shared_mutables.initialized) {
    /* must have already been cancelled, also the shard mutex is invalid */
    return;
  }

  /* Same address-hash as grpc_timer_init, so we find the same shard. */
  timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
  gpr_mu_lock(&shard->mu);
  if (GRPC_TRACER_ON(grpc_timer_trace)) {
    gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
            timer->pending ? "true" : "false");
  }
  if (timer->pending) {
    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
    timer->pending = false;
    /* INVALID_HEAP_INDEX marks timers parked on the overflow list. */
    if (timer->heap_index == INVALID_HEAP_INDEX) {
      list_remove(timer);
    } else {
      grpc_timer_heap_remove(&shard->heap, timer);
    }
  }
  gpr_mu_unlock(&shard->mu);
}
| 351 | |
/* Rebalances the timer shard by computing a new 'queue_deadline_cap' and
   moving all relevant timers in shard->list (i.e. timers with deadlines
   earlier than 'queue_deadline_cap') into shard->heap.
   Returns 'true' if shard->heap has at least ONE element
   REQUIRES: shard->mu locked */
static int refill_heap(timer_shard *shard, gpr_atm now) {
  /* Compute the new queue window width and bound by the limits: */
  double computed_deadline_delta =
      grpc_time_averaged_stats_update_average(&shard->stats) *
      ADD_DEADLINE_SCALE;
  double deadline_delta =
      GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
                MAX_QUEUE_WINDOW_DURATION);
  grpc_timer *timer, *next;

  /* Compute the new cap and put all timers under it into the queue: */
  /* deadline_delta is in seconds; * 1000.0 converts to the ms-based atm
     scale. saturating_add prevents overflow near GPR_ATM_MAX. */
  shard->queue_deadline_cap =
      saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
                     (gpr_atm)(deadline_delta * 1000.0));

  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
            (int)(shard - g_shards), shard->queue_deadline_cap);
  }
  /* 'next' is saved up-front because list_remove unlinks 'timer'. */
  for (timer = shard->list.next; timer != &shard->list; timer = next) {
    next = timer->next;

    if (timer->deadline < shard->queue_deadline_cap) {
      if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
        gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
                timer->deadline);
      }
      list_remove(timer);
      grpc_timer_heap_add(&shard->heap, timer);
    }
  }
  return !grpc_timer_heap_is_empty(&shard->heap);
}
| 390 | |
/* This pops the next non-cancelled timer with deadline <= now from the
   queue, or returns NULL if there isn't one.
   The popped timer is marked not-pending so a concurrent cancel (after this
   shard's lock is released) will not double-schedule its closure.
   REQUIRES: shard->mu locked */
static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) {
  grpc_timer *timer;
  for (;;) {
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
              (int)(shard - g_shards),
              grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
    }
    if (grpc_timer_heap_is_empty(&shard->heap)) {
      /* Nothing due inside the current window: done. */
      if (now < shard->queue_deadline_cap) return NULL;
      /* Window has passed: advance the cap and pull list timers into the
         heap; if still empty, there is nothing to pop. */
      if (!refill_heap(shard, now)) return NULL;
    }
    timer = grpc_timer_heap_top(&shard->heap);
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG,
              " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
              timer->deadline, now);
    }
    /* Heap top is the earliest deadline; if it isn't due, nothing is. */
    if (timer->deadline > now) return NULL;
    if (GRPC_TRACER_ON(grpc_timer_trace)) {
      gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
              now - timer->deadline);
    }
    timer->pending = false;
    grpc_timer_heap_pop(&shard->heap);
    return timer;
  }
}
| 422 | |
| 423 | /* REQUIRES: shard->mu unlocked */ |
Sree Kuchibhotla | bd3ea98 | 2017-07-07 00:31:11 -0700 | [diff] [blame] | 424 | static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, |
Craig Tiller | 7b2dd93 | 2017-03-16 16:25:12 -0700 | [diff] [blame] | 425 | gpr_atm now, gpr_atm *new_min_deadline, |
Craig Tiller | c027e77 | 2016-05-03 16:27:00 -0700 | [diff] [blame] | 426 | grpc_error *error) { |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 427 | size_t n = 0; |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 428 | grpc_timer *timer; |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 429 | gpr_mu_lock(&shard->mu); |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 430 | while ((timer = pop_one(shard, now))) { |
ncteisen | 274bbbe | 2017-06-08 14:57:11 -0700 | [diff] [blame] | 431 | GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 432 | n++; |
| 433 | } |
| 434 | *new_min_deadline = compute_min_deadline(shard); |
| 435 | gpr_mu_unlock(&shard->mu); |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 436 | return n; |
| 437 | } |
| 438 | |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 439 | static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, |
| 440 | gpr_atm now, |
| 441 | gpr_atm *next, |
| 442 | grpc_error *error) { |
| 443 | grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 444 | |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 445 | gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer); |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 446 | gpr_tls_set(&g_last_seen_min_timer, min_timer); |
| 447 | if (now < min_timer) { |
| 448 | if (next != NULL) *next = GPR_MIN(*next, min_timer); |
Craig Tiller | cdb41dc | 2017-06-02 23:04:37 +0000 | [diff] [blame] | 449 | return GRPC_TIMERS_CHECKED_AND_EMPTY; |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 450 | } |
| 451 | |
| 452 | if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) { |
| 453 | gpr_mu_lock(&g_shared_mutables.mu); |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 454 | result = GRPC_TIMERS_CHECKED_AND_EMPTY; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 455 | |
Craig Tiller | 84f75d4 | 2017-05-03 13:06:35 -0700 | [diff] [blame] | 456 | if (GRPC_TRACER_ON(grpc_timer_check_trace)) { |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 457 | gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR, |
| 458 | (int)(g_shard_queue[0] - g_shards), |
| 459 | g_shard_queue[0]->min_deadline); |
| 460 | } |
| 461 | |
Craig Tiller | 041bf64 | 2017-03-27 09:26:07 -0700 | [diff] [blame] | 462 | while (g_shard_queue[0]->min_deadline < now || |
| 463 | (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) { |
Craig Tiller | 7b2dd93 | 2017-03-16 16:25:12 -0700 | [diff] [blame] | 464 | gpr_atm new_min_deadline; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 465 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 466 | /* For efficiency, we pop as many available timers as we can from the |
| 467 | shard. This may violate perfect timer deadline ordering, but that |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 468 | shouldn't be a big deal because we don't make ordering guarantees. */ |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 469 | if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, |
| 470 | error) > 0) { |
| 471 | result = GRPC_TIMERS_FIRED; |
| 472 | } |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 473 | |
Craig Tiller | 84f75d4 | 2017-05-03 13:06:35 -0700 | [diff] [blame] | 474 | if (GRPC_TRACER_ON(grpc_timer_check_trace)) { |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 475 | gpr_log(GPR_DEBUG, |
| 476 | " .. result --> %d" |
| 477 | ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR |
| 478 | ", now=%" PRIdPTR, |
| 479 | result, (int)(g_shard_queue[0] - g_shards), |
Craig Tiller | a0bfee9 | 2017-03-27 09:21:11 -0700 | [diff] [blame] | 480 | g_shard_queue[0]->min_deadline, new_min_deadline, now); |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 481 | } |
| 482 | |
David Garcia Quintas | f747bbc | 2015-10-04 23:09:47 -0700 | [diff] [blame] | 483 | /* An grpc_timer_init() on the shard could intervene here, adding a new |
| 484 | timer that is earlier than new_min_deadline. However, |
| 485 | grpc_timer_init() will block on the master_lock before it can call |
| 486 | set_min_deadline, so this one will complete first and then the Addtimer |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 487 | will reduce the min_deadline (perhaps unnecessarily). */ |
| 488 | g_shard_queue[0]->min_deadline = new_min_deadline; |
| 489 | note_deadline_change(g_shard_queue[0]); |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 490 | } |
| 491 | |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 492 | if (next) { |
Craig Tiller | 7b2dd93 | 2017-03-16 16:25:12 -0700 | [diff] [blame] | 493 | *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 494 | } |
| 495 | |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 496 | gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, |
| 497 | g_shard_queue[0]->min_deadline); |
| 498 | gpr_mu_unlock(&g_shared_mutables.mu); |
| 499 | gpr_spinlock_unlock(&g_shared_mutables.checker_mu); |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 500 | } |
| 501 | |
Craig Tiller | f707d62 | 2016-05-06 14:26:12 -0700 | [diff] [blame] | 502 | GRPC_ERROR_UNREF(error); |
Craig Tiller | c027e77 | 2016-05-03 16:27:00 -0700 | [diff] [blame] | 503 | |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 504 | return result; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 505 | } |
| 506 | |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 507 | grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, |
| 508 | gpr_timespec now, gpr_timespec *next) { |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 509 | // prelude |
Craig Tiller | a82950e | 2015-09-22 12:33:20 -0700 | [diff] [blame] | 510 | GPR_ASSERT(now.clock_type == g_clock_type); |
Craig Tiller | 185f6c9 | 2017-03-17 08:33:19 -0700 | [diff] [blame] | 511 | gpr_atm now_atm = timespec_to_atm_round_down(now); |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 512 | |
| 513 | /* fetch from a thread-local first: this avoids contention on a globally |
| 514 | mutable cacheline in the common case */ |
| 515 | gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer); |
| 516 | if (now_atm < min_timer) { |
| 517 | if (next != NULL) { |
| 518 | *next = |
| 519 | atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer)); |
| 520 | } |
Craig Tiller | 84f75d4 | 2017-05-03 13:06:35 -0700 | [diff] [blame] | 521 | if (GRPC_TRACER_ON(grpc_timer_check_trace)) { |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 522 | gpr_log(GPR_DEBUG, |
Craig Tiller | 97d4011 | 2017-03-29 16:46:13 -0700 | [diff] [blame] | 523 | "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR, |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 524 | now_atm, min_timer); |
| 525 | } |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 526 | return GRPC_TIMERS_CHECKED_AND_EMPTY; |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 527 | } |
| 528 | |
Craig Tiller | 18e1506 | 2017-03-20 09:35:39 -0700 | [diff] [blame] | 529 | grpc_error *shutdown_error = |
Craig Tiller | c027e77 | 2016-05-03 16:27:00 -0700 | [diff] [blame] | 530 | gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0 |
| 531 | ? GRPC_ERROR_NONE |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 532 | : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"); |
Craig Tiller | 1a76926 | 2017-03-27 12:52:59 -0700 | [diff] [blame] | 533 | |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 534 | // tracing |
Craig Tiller | 84f75d4 | 2017-05-03 13:06:35 -0700 | [diff] [blame] | 535 | if (GRPC_TRACER_ON(grpc_timer_check_trace)) { |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 536 | char *next_str; |
| 537 | if (next == NULL) { |
| 538 | next_str = gpr_strdup("NULL"); |
| 539 | } else { |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 540 | gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 541 | next->tv_nsec, timespec_to_atm_round_down(*next)); |
| 542 | } |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 543 | gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR |
Craig Tiller | afb168b | 2017-03-21 12:48:57 -0700 | [diff] [blame] | 544 | "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, |
| 545 | now.tv_sec, now.tv_nsec, now_atm, next_str, |
| 546 | gpr_tls_get(&g_last_seen_min_timer), |
| 547 | gpr_atm_no_barrier_load(&g_shared_mutables.min_timer)); |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 548 | gpr_free(next_str); |
| 549 | } |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 550 | // actual code |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 551 | grpc_timer_check_result r; |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 552 | gpr_atm next_atm; |
Craig Tiller | 18e1506 | 2017-03-20 09:35:39 -0700 | [diff] [blame] | 553 | if (next == NULL) { |
| 554 | r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error); |
| 555 | } else { |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 556 | next_atm = timespec_to_atm_round_down(*next); |
Craig Tiller | 18e1506 | 2017-03-20 09:35:39 -0700 | [diff] [blame] | 557 | r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error); |
| 558 | *next = atm_to_timespec(next_atm); |
| 559 | } |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 560 | // tracing |
Craig Tiller | 84f75d4 | 2017-05-03 13:06:35 -0700 | [diff] [blame] | 561 | if (GRPC_TRACER_ON(grpc_timer_check_trace)) { |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 562 | char *next_str; |
| 563 | if (next == NULL) { |
| 564 | next_str = gpr_strdup("NULL"); |
| 565 | } else { |
Craig Tiller | 0b4c531 | 2017-03-24 15:23:57 -0700 | [diff] [blame] | 566 | gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 567 | next->tv_nsec, next_atm); |
| 568 | } |
Craig Tiller | 27cb323 | 2017-06-02 16:05:25 -0700 | [diff] [blame] | 569 | gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str); |
Craig Tiller | 2a1949e | 2017-03-21 09:29:12 -0700 | [diff] [blame] | 570 | gpr_free(next_str); |
| 571 | } |
Craig Tiller | b2ef1cf | 2017-05-30 09:25:48 -0700 | [diff] [blame] | 572 | return r; |
ctiller | 3bf466f | 2014-12-19 16:21:57 -0800 | [diff] [blame] | 573 | } |
murgatroid99 | 9030c81 | 2016-09-16 13:25:08 -0700 | [diff] [blame] | 574 | |
| 575 | #endif /* GRPC_TIMER_USE_GENERIC */ |