/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;
	ev_uint16_t nread;
	ev_uint16_t nwrite;
	ev_uint16_t nclose;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
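/* (EVMAP_USE_HT is not defined in this file; in libevent it is expected to
 * come from event-internal.h, which typically enables the hashtable
 * implementation for Windows builds only.) */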
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}
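/* Illustrative note: on a 32-bit unsigned value, (h >> 2) | (h << 30) is a
 * rotate-right by two, so the hash is h + rotr32(h, 2).  For example, a
 * SOCKET value of 12 (binary 1100) hashes to 12 + 3 = 15, folding higher
 * bits down into the low-order positions the hashtable cares about. */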

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)

#define GET_IO_SLOT(x, map, slot, type)	\
	do {	\
		struct event_map_entry key_, *ent_;	\
		key_.fd = slot;	\
		ent_ = HT_FIND(event_io_map, map, &key_);	\
		(x) = ent_ ? &ent_->ent.type : NULL;	\
	} while (0)

#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {	\
		struct event_map_entry key_, *ent_;	\
		key_.fd = slot;	\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map,	\
		    event_map_entry, &key_, ptr,	\
		    {	\
			    ent_ = *ptr;	\
		    },	\
		    {	\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len);	\
			    if (EVUTIL_UNLIKELY(ent_ == NULL))	\
				    return (-1);	\
			    ent_->fd = slot;	\
			    (ctor)(&ent_->ent.type);	\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr)	\
		    });	\
		(x) = &ent_->ent.type;	\
	} while (0)
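/* Note on the macro above: 'ptr' is not declared here; it is the variable
 * name handed to HT_FIND_OR_INSERT_ (see ht-internal.h), which declares it
 * itself and points it at the hashtable slot where the entry lives (or would
 * be inserted), so both the found and not-found blocks can use it. */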

void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)	\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {	\
		if ((map)->entries[slot] == NULL) {	\
			(map)->entries[slot] =	\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len);	\
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL))	\
				return (-1);	\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}	\
		(x) = (struct type *)((map)->entries[slot]);	\
	} while (0)
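/* Usage sketch (for illustration only): with the macros above,
 *
 *	struct evmap_signal *ctx;
 *	GET_SIGNAL_SLOT(ctx, &base->sigmap, signum, evmap_signal);
 *
 * leaves ctx pointing at the entry for 'signum', or NULL if that slot has
 * never been populated, while the _AND_CTOR variant allocates and runs the
 * constructor on first use so the caller always gets a valid entry. */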

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
    to store a value in 'slot'.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		if (slot > INT_MAX / 2)
			return (-1);

		while (nentries <= slot)
			nentries <<= 1;

		if (nentries > INT_MAX / msize)
			return (-1);

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}
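/* Worked example: growing an empty map (nentries == 0) for slot 70 starts
 * nentries at 32 and doubles it to 64 and then 128, so 128 zero-filled
 * entries are allocated and slots up to 127 need no further growth. */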

void
evmap_signal_initmap_(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

void
evmap_signal_clear_(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}


/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	LIST_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
	entry->nclose = 0;
}


/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}
	if (ev->ev_events & EV_CLOSED) {
		if (--nclose == 0)
			res |= EV_CLOSED;
		EVUTIL_ASSERT(nclose >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1) {
			retval = -1;
		} else {
			retval = 1;
		}
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	ctx->nclose = nclose;
	LIST_REMOVE(ev, ev_io_next);

	return (retval);
}

void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}

/* code specific to signals */

static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}


int
evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig < 0 || sig >= NSIG)
		return (-1);

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	if (LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);

	return (1);
}

int
evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig < 0 || sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	LIST_REMOVE(ev, ev_signal_next);

	if (LIST_FIRST(&ctx->events) == NULL) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	return (1);
}

void
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	if (sig < 0 || sig >= map->nentries)
		return;
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (!ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock_(ev, EV_SIGNAL, ncalls);
}

void *
evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}
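/* Layout note: every evmap_io entry is allocated with evsel->fdinfo_len extra
 * bytes directly after the struct:
 *
 *	[ struct evmap_io | backend-specific fdinfo (fdinfo_len bytes) ]
 *
 * so the "extra"/fdinfo pointers computed here and in evmap_io_add_/del_ are
 * simply the address one struct past the entry.  The changelist backend keeps
 * its struct event_changelist_fdinfo in that space. */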

/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
    struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ, EV_WRITE, or EV_CLOSED events.
 * For each such fd, call fn(base, fd, evmap_io, arg), where fn is the
 * user-provided function, base is the event_base, fd is the file descriptor,
 * evmap_io is an evmap_io structure containing a list of events pending on
 * the file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd.  Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be
 * processed in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}

/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
    struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}

/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
 * EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}

/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events. */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}

int
evmap_reinit_(struct event_base *base)
{
	int result = 0;

	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	return 0;
}

/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}

/* Helper for evmap_delete_all_: delete every event pending on an fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}

/* Helper for evmap_delete_all_: delete every event pending on a signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}

void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
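/* Illustrative note: idxplus1 == 0 means "no entry in changelist->changes for
 * this fd", and idxplus1 == k means the entry is changelist->changes[k - 1].
 * Because the fdinfo space is zero-initialized, a fresh fd starts out with no
 * entry. */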

void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}

/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif

void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL))
		change->read_change = evchange;
	if (events & EV_WRITE)
		change->write_change = evchange;
	if (events & EV_CLOSED)
		change->close_change = evchange;

	event_changelist_check(base);
	return (0);
}

int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = del;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = del;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = del;
	}

	event_changelist_check(base);
	return (0);
}

/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * a given fd are set up correctly, and that the nread, nwrite, and nclose
 * counts on that fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0, n_close = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
		if (ev->ev_events & EV_CLOSED)
			++n_close;
	}

	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);
	EVUTIL_ASSERT(n_close == io_info->nclose);

	return 0;
}

/* Helper for evmap_check_integrity_: verify that all of the events pending
 * on a given signal are set up correctly. */
static int
evmap_signal_check_integrity_fn(struct event_base *base,
    int signum, struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	/* First, make sure the list itself isn't corrupt. */
	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);

	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == signum);
		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
	}
	return 0;
}

void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}

/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;
	void *arg;
};

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given fd. */
static int
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct evmap_foreach_event_helper *h = arg;
	struct event *ev;
	int r;
	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given signal. */
static int
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	struct evmap_foreach_event_helper *h = arg;
	int r;
	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

int
evmap_foreach_event_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	struct evmap_foreach_event_helper h;
	int r;
	h.fn = fn;
	h.arg = arg;
	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
		return r;
	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
}