xref: /libevent-2.1.12/evmap.c (revision ebfd8a89)
1 /*
2  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 #include "event2/event-config.h"
27 #include "evconfig-private.h"
28 
29 #ifdef _WIN32
30 #include <winsock2.h>
31 #define WIN32_LEAN_AND_MEAN
32 #include <windows.h>
33 #undef WIN32_LEAN_AND_MEAN
34 #endif
35 #include <sys/types.h>
36 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
37 #include <sys/time.h>
38 #endif
39 #include <sys/queue.h>
40 #include <stdio.h>
41 #include <stdlib.h>
42 #ifndef _WIN32
43 #include <unistd.h>
44 #endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
49 
50 #include "event-internal.h"
51 #include "evmap-internal.h"
52 #include "mm-internal.h"
53 #include "changelist-internal.h"
54 
/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;	/* all events registered on this fd */
	ev_uint16_t nread;		/* how many of 'events' want EV_READ */
	ev_uint16_t nwrite;		/* how many of 'events' want EV_WRITE */
};
63 
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;	/* all events registered on this signal */
};
69 
/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
/* Hashtable node mapping one socket to its evmap_io.  The backend's per-fd
 * "fdinfo" bytes are allocated immediately after this structure (see
 * GET_IO_SLOT_AND_CTOR below). */
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
			   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};
86 
87 /* Helper used by the event_io_map hashtable code; tries to return a good hash
88  * of the fd in e->fd. */
89 static inline unsigned
90 hashsocket(struct event_map_entry *e)
91 {
92 	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
93 	 * matter.  Our hashtable implementation really likes low-order bits,
94 	 * though, so let's do the rotate-and-add trick. */
95 	unsigned h = (unsigned) e->fd;
96 	h += (h >> 2) | (h << 30);
97 	return h;
98 }
99 
100 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
101  * have the same e->fd. */
102 static inline int
103 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
104 {
105 	return e1->fd == e2->fd;
106 }
107 
/* Instantiate the fd->event_map_entry hashtable (prototypes and code),
 * using libevent's allocators and a 0.5 load factor. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
			0.5, mm_malloc, mm_realloc, mm_free)
111 
/* Set 'x' to the 'type' entry for fd 'slot' in hashtable 'map', or to NULL
 * if there is no entry for 'slot'.
 *
 * Note: no semicolon after "while (0)" — callers supply it.  The stray
 * semicolon previously here defeated the do/while(0) idiom, producing an
 * extra empty statement at every use and a syntax error if the macro were
 * ever used in an unbraced if/else. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0)
119 
/* As GET_IO_SLOT, but insert and construct a new hashtable entry for 'slot'
 * if none exists: allocate an event_map_entry plus 'fdinfo_len' extra bytes
 * for the backend's per-fd data, then initialize it with 'ctor'.  Makes the
 * enclosing function return -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
				});					\
		(x) = &ent_->ent.type;					\
	} while (0)
139 
/* Initialize an empty fd->evmap_io hashtable. */
void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}
144 
/* Free every entry in the fd->evmap_io hashtable, then the table storage. */
void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	/* Fetch the successor (and unlink) before freeing, so the
	 * iteration stays valid. */
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
155 #endif
156 
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' (plus
   'fdinfo_len' extra bytes for the backend's per-fd data), and initializing
   the new value by calling the function 'ctor' on it.  Makes the enclosing
   function return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
178 
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions: fds index a plain array of
   pointers, exactly like signal numbers do. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
/* Offset of the backend's per-fd info within each evmap_io allocation. */
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif
197 
198 
199 /** Expand 'map' with new entries of width 'msize' until it is big enough
200 	to store a value in 'slot'.
201  */
202 static int
203 evmap_make_space(struct event_signal_map *map, int slot, int msize)
204 {
205 	if (map->nentries <= slot) {
206 		int nentries = map->nentries ? map->nentries : 32;
207 		void **tmp;
208 
209 		while (nentries <= slot)
210 			nentries <<= 1;
211 
212 		tmp = (void **)mm_realloc(map->entries, nentries * msize);
213 		if (tmp == NULL)
214 			return (-1);
215 
216 		memset(&tmp[map->nentries], 0,
217 		    (nentries - map->nentries) * msize);
218 
219 		map->nentries = nentries;
220 		map->entries = tmp;
221 	}
222 
223 	return (0);
224 }
225 
226 void
227 evmap_signal_initmap_(struct event_signal_map *ctx)
228 {
229 	ctx->nentries = 0;
230 	ctx->entries = NULL;
231 }
232 
233 void
234 evmap_signal_clear_(struct event_signal_map *ctx)
235 {
236 	if (ctx->entries != NULL) {
237 		int i;
238 		for (i = 0; i < ctx->nentries; ++i) {
239 			if (ctx->entries[i] != NULL)
240 				mm_free(ctx->entries[i]);
241 		}
242 		mm_free(ctx->entries);
243 		ctx->entries = NULL;
244 	}
245 	ctx->nentries = 0;
246 }
247 
248 
249 /* code specific to file descriptors */
250 
251 /** Constructor for struct evmap_io */
252 static void
253 evmap_io_init(struct evmap_io *entry)
254 {
255 	LIST_INIT(&entry->events);
256 	entry->nread = 0;
257 	entry->nwrite = 0;
258 }
259 
260 
/* Register 'ev' as interested in read/write events on 'fd'.
 * return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Array-based map: grow it so 'fd' is a valid index. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find or create the per-fd entry; on allocation failure this
	 * macro makes the function return -1. */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	/* 'old' = the interest set the backend already knows about. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	/* 'res' = flags that become newly active with this event. */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	/* The counters are stored in 16 bits; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	/* In debug mode, reject mixing edge-triggered and level-triggered
	 * events on the same fd (checked against the first listed event). */
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* Backend per-fd data lives right after the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	/* Commit the new counts and link the event in only after the
	 * backend call (if any) succeeded. */
	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}
333 
/* Unregister 'ev' from the io map for 'fd'.
 * return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	/* Array-based map: an fd past the end was never added. */
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	/* 'old' = the interest set the backend currently knows about. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	/* 'res' = flags that go inactive once this event is removed. */
	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) {
			retval = -1;
		} else {
			retval = 1;
		}
	}

	/* Update the counts and unlink the event even if the backend
	 * delete failed; 'retval' reports the failure to the caller. */
	ctx->nread = nread;
	ctx->nwrite = nwrite;
	LIST_REMOVE(ev, ev_io_next);

	return (retval);
}
391 
392 void
393 evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
394 {
395 	struct event_io_map *io = &base->io;
396 	struct evmap_io *ctx;
397 	struct event *ev;
398 
399 #ifndef EVMAP_USE_HT
400 	EVUTIL_ASSERT(fd < io->nentries);
401 #endif
402 	GET_IO_SLOT(ctx, io, fd, evmap_io);
403 
404 	EVUTIL_ASSERT(ctx);
405 	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
406 		if (ev->ev_events & events)
407 			event_active_nolock_(ev, ev->ev_events & events, 1);
408 	}
409 }
410 
411 /* code specific to signals */
412 
/** Constructor for struct evmap_signal: start with an empty event list. */
static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}
418 
419 
420 int
421 evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
422 {
423 	const struct eventop *evsel = base->evsigsel;
424 	struct event_signal_map *map = &base->sigmap;
425 	struct evmap_signal *ctx = NULL;
426 
427 	if (sig >= map->nentries) {
428 		if (evmap_make_space(
429 			map, sig, sizeof(struct evmap_signal *)) == -1)
430 			return (-1);
431 	}
432 	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
433 	    base->evsigsel->fdinfo_len);
434 
435 	if (LIST_EMPTY(&ctx->events)) {
436 		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
437 		    == -1)
438 			return (-1);
439 	}
440 
441 	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
442 
443 	return (1);
444 }
445 
446 int
447 evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
448 {
449 	const struct eventop *evsel = base->evsigsel;
450 	struct event_signal_map *map = &base->sigmap;
451 	struct evmap_signal *ctx;
452 
453 	if (sig >= map->nentries)
454 		return (-1);
455 
456 	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
457 
458 	LIST_REMOVE(ev, ev_signal_next);
459 
460 	if (LIST_FIRST(&ctx->events) == NULL) {
461 		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
462 			return (-1);
463 	}
464 
465 	return (1);
466 }
467 
468 void
469 evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
470 {
471 	struct event_signal_map *map = &base->sigmap;
472 	struct evmap_signal *ctx;
473 	struct event *ev;
474 
475 	EVUTIL_ASSERT(sig < map->nentries);
476 	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
477 
478 	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
479 		event_active_nolock_(ev, EV_SIGNAL, ncalls);
480 }
481 
482 void *
483 evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
484 {
485 	struct evmap_io *ctx;
486 	GET_IO_SLOT(ctx, map, fd, evmap_io);
487 	if (ctx)
488 		return ((char*)ctx) + sizeof(struct evmap_io);
489 	else
490 		return NULL;
491 }
492 
/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	/* Hashtable variant: visit every node in the table. */
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	/* Array variant: visit every non-NULL slot. */
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}
534 
535 /* Callback type for evmap_signal_foreach_signal */
536 typedef int (*evmap_signal_foreach_signal_cb)(
537 	struct event_base *, int, struct evmap_signal *, void *);
538 
539 /* Multipurpose helper function: Iterate over every signal number in the
540  * event_base for which we could have signal events.  For each such signal,
541  * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
542  * function, base is the event_base, signum is the signal number, evmap_signal
543  * is an evmap_signal structure containing a list of events pending on the
544  * signal, and arg is the user-supplied argument.
545  *
546  * If fn returns 0, continue on to the next signal. Otherwise, return the same
547  * value that fn returned.
548  */
549 static int
550 evmap_signal_foreach_signal(struct event_base *base,
551     evmap_signal_foreach_signal_cb fn,
552     void *arg)
553 {
554 	struct event_signal_map *sigmap = &base->sigmap;
555 	int r = 0;
556 	int signum;
557 
558 	for (signum = 0; signum < sigmap->nentries; ++signum) {
559 		struct evmap_signal *ctx = sigmap->entries[signum];
560 		if (!ctx)
561 			continue;
562 		if ((r = fn(base, signum, ctx, arg)))
563 			break;
564 	}
565 	return r;
566 }
567 
/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
 * EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;	/* out-param: set to -1 on backend failure */
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	/* The backend's per-fd info lives right after the evmap_io. */
	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	/* Wipe any stale backend state before re-adding the fd. */
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	/* Events on one fd are expected to be uniformly edge-triggered or
	 * not (see the debug check in evmap_io_add_), so inspecting the
	 * first listed event suffices. */
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	/* Always return 0 so iteration continues over every fd. */
	return 0;
}
598 
599 /* Helper for evmap_reinit_: tell the backend to add every signal for which we
600  * have pending events.  */
601 static int
602 evmap_signal_reinit_iter_fn(struct event_base *base,
603     int signum, struct evmap_signal *ctx, void *arg)
604 {
605 	const struct eventop *evsel = base->evsigsel;
606 	int *result = arg;
607 
608 	if (!LIST_EMPTY(&ctx->events)) {
609 		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
610 			*result = -1;
611 	}
612 	return 0;
613 }
614 
615 int
616 evmap_reinit_(struct event_base *base)
617 {
618 	int result = 0;
619 
620 	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
621 	if (result < 0)
622 		return -1;
623 	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
624 	if (result < 0)
625 		return -1;
626 	return 0;
627 }
628 
629 /* Helper for evmap_delete_all_: delete every event in an event_dlist. */
630 static int
631 delete_all_in_dlist(struct event_dlist *dlist)
632 {
633 	struct event *ev;
634 	while ((ev = LIST_FIRST(dlist)))
635 		event_del(ev);
636 	return 0;
637 }
638 
/* Helper for evmap_delete_all_: delete every event pending on an fd.
 * Always returns 0, so the enclosing iteration visits every fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}
646 
/* Helper for evmap_delete_all_: delete every event pending on a signal.
 * Always returns 0, so the enclosing iteration visits every signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}
654 
/* Delete every io and signal event registered with 'base'. */
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}
661 
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 * It is stored in the fdinfo bytes just past the fd's evmap_io (or the
 * signal's evmap_signal) — see event_change_get_fdinfo.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
669 
670 void
671 event_changelist_init_(struct event_changelist *changelist)
672 {
673 	changelist->changes = NULL;
674 	changelist->changes_size = 0;
675 	changelist->n_changes = 0;
676 }
677 
/** Helper: return the changelist_fdinfo corresponding to a given change.
 * The fdinfo bytes live directly past change->fd's evmap_signal (for signal
 * changes) or evmap_io (for io changes). */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
695 
/** Callback helper for event_changelist_assert_ok: check that an fd's
 * changelist entry, if it has one, points back at that fd. */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	/* The fdinfo is stored just past the evmap_io structure. */
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}
712 
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward direction: every change entry names a valid fd whose
	 * fdinfo points back at that entry. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse direction: every fd whose fdinfo claims a changelist
	 * entry must match that entry. */
	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

/* event_changelist_check: run the full consistency check only when
 * DEBUG_CHANGELIST is defined; otherwise compile to a no-op. */
#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif
740 
/* Discard every pending change, resetting each fd's/signal's fdinfo index
 * to the "no entry" state. */
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;	/* mark "no changelist entry" */
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
761 
762 void
763 event_changelist_freemem_(struct event_changelist *changelist)
764 {
765 	if (changelist->changes)
766 		mm_free(changelist->changes);
767 	event_changelist_init_(changelist); /* zero it all out. */
768 }
769 
770 /** Increase the size of 'changelist' to hold more changes. */
771 static int
772 event_changelist_grow(struct event_changelist *changelist)
773 {
774 	int new_size;
775 	struct event_change *new_changes;
776 	if (changelist->changes_size < 64)
777 		new_size = 64;
778 	else
779 		new_size = changelist->changes_size * 2;
780 
781 	new_changes = mm_realloc(changelist->changes,
782 	    new_size * sizeof(struct event_change));
783 
784 	if (EVUTIL_UNLIKELY(new_changes == NULL))
785 		return (-1);
786 
787 	changelist->changes = new_changes;
788 	changelist->changes_size = new_size;
789 
790 	return (0);
791 }
792 
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL if the changelist cannot
 * grow.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet: append a new zeroed
		 * entry and record its index (+1) in the fdinfo. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the pending change already recorded for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
827 
/* Backend "add" implementation that records the request in the changelist
 * instead of applying it immediately.  'p' is the fd's/signal's
 * event_changelist_fdinfo.  Returns 0 on success, -1 if the changelist
 * cannot grow. */
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
858 
/* Backend "del" implementation that records the request in the changelist
 * instead of applying it immediately.  'p' is the fd's/signal's
 * event_changelist_fdinfo.  Returns 0 on success, -1 if the changelist
 * cannot grow. */
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
904 
/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * given fd are set up correctly, and that the nread and nwrite counts on that
 * fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
	}

	/* The cached counters must agree with the actual list contents. */
	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);

	return 0;
}
935 
936 /* Helper for evmap_check_integrity_: verify that all of the events pending
937  * on given signal are set up correctly. */
938 static int
939 evmap_signal_check_integrity_fn(struct event_base *base,
940     int signum, struct evmap_signal *sig_info, void *arg)
941 {
942 	struct event *ev;
943 	/* First, make sure the list itself isn't corrupt. */
944 	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
945 
946 	LIST_FOREACH(ev, &sig_info->events, ev_io_next) {
947 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
948 		EVUTIL_ASSERT(ev->ev_fd == signum);
949 		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
950 		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE)));
951 	}
952 	return 0;
953 }
954 
/* Sanity-check every io and signal evmap entry, and — when the changelist
 * backend is in use — the changelist itself. */
void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}
964 
/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;	/* callback invoked per event */
	void *arg;			/* opaque user argument for fn */
};
971 
972 /* Helper for evmap_foreach_event_: calls a provided function on every event
973  * pending on a given fd.  */
974 static int
975 evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
976     struct evmap_io *io_info, void *arg)
977 {
978 	struct evmap_foreach_event_helper *h = arg;
979 	struct event *ev;
980 	int r;
981 	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
982 		if ((r = h->fn(base, ev, h->arg)))
983 			return r;
984 	}
985 	return 0;
986 }
987 
988 /* Helper for evmap_foreach_event_: calls a provided function on every event
989  * pending on a given signal.  */
990 static int
991 evmap_signal_foreach_event_fn(struct event_base *base, int signum,
992     struct evmap_signal *sig_info, void *arg)
993 {
994 	struct event *ev;
995 	struct evmap_foreach_event_helper *h = arg;
996 	int r;
997 	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
998 		if ((r = h->fn(base, ev, h->arg)))
999 			return r;
1000 	}
1001 	return 0;
1002 }
1003 
1004 int
1005 evmap_foreach_event_(struct event_base *base,
1006     event_base_foreach_event_cb fn, void *arg)
1007 {
1008 	struct evmap_foreach_event_helper h;
1009 	int r;
1010 	h.fn = fn;
1011 	h.arg = arg;
1012 	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
1013 		return r;
1014 	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
1015 }
1016 
1017