1 /* $NetBSD: evmap.c,v 1.3 2017/01/31 23:17:39 christos Exp $ */ 2 /* 3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: evmap.c,v 1.3 2017/01/31 23:17:39 christos Exp $");
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;	/* All events interested in this fd. */
	ev_uint16_t nread;		/* Count of events wanting EV_READ. */
	ev_uint16_t nwrite;		/* Count of events wanting EV_WRITE. */
	ev_uint16_t nclose;		/* Count of events wanting EV_CLOSED. */
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;	/* All events waiting on this signal. */
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;	/* Hashtable linkage. */
	evutil_socket_t fd;			/* Hash key: the socket. */
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
	/* NOTE(review): backend-private fdinfo bytes are allocated directly
	 * after this struct; see GET_IO_SLOT_AND_CTOR's mm_calloc call. */
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd.
 */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter. Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
    0.5, mm_malloc, mm_realloc, mm_free)

/* Look up the evmap_io for 'slot' in the hashtable 'map'; set (x) to it, or
 * to NULL if there is no entry.
 * NOTE(review): the trailing semicolon after while(0) makes this macro unsafe
 * in an unbraced if/else; confirm before using it in new call sites. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (/*CONSTCOND*/0);

/* As GET_IO_SLOT, but allocate (entry + fdinfo_len trailing bytes) and run
 * 'ctor' when 'slot' is absent.  Makes the enclosing FUNCTION return -1 on
 * allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
		    });							\
		(x) = &ent_->ent.type;					\
	} while (/*CONSTCOND*/0)

void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	/* Remove each entry before freeing it, so the table never holds a
	 * dangling pointer. */
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (/*CONSTCOND*/0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
    to store a value in 'slot'.
205 */ 206 static int 207 evmap_make_space(struct event_signal_map *map, int slot, int msize) 208 { 209 if (map->nentries <= slot) { 210 int nentries = map->nentries ? map->nentries : 32; 211 void **tmp; 212 213 while (nentries <= slot) 214 nentries <<= 1; 215 216 tmp = (void **)mm_realloc(map->entries, nentries * msize); 217 if (tmp == NULL) 218 return (-1); 219 220 memset(&tmp[map->nentries], 0, 221 (nentries - map->nentries) * msize); 222 223 map->nentries = nentries; 224 map->entries = tmp; 225 } 226 227 return (0); 228 } 229 230 void 231 evmap_signal_initmap_(struct event_signal_map *ctx) 232 { 233 ctx->nentries = 0; 234 ctx->entries = NULL; 235 } 236 237 void 238 evmap_signal_clear_(struct event_signal_map *ctx) 239 { 240 if (ctx->entries != NULL) { 241 int i; 242 for (i = 0; i < ctx->nentries; ++i) { 243 if (ctx->entries[i] != NULL) 244 mm_free(ctx->entries[i]); 245 } 246 mm_free(ctx->entries); 247 ctx->entries = NULL; 248 } 249 ctx->nentries = 0; 250 } 251 252 253 /* code specific to file descriptors */ 254 255 /** Constructor for struct evmap_io */ 256 static void 257 evmap_io_init(struct evmap_io *entry) 258 { 259 LIST_INIT(&entry->events); 260 entry->nread = 0; 261 entry->nwrite = 0; 262 entry->nclose = 0; 263 } 264 265 266 /* return -1 on error, 0 on success if nothing changed in the event backend, 267 * and 1 on success if something did. 
*/ 268 int 269 evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev) 270 { 271 const struct eventop *evsel = base->evsel; 272 struct event_io_map *io = &base->io; 273 struct evmap_io *ctx = NULL; 274 int nread, nwrite, nclose, retval = 0; 275 short res = 0, old = 0; 276 struct event *old_ev; 277 278 EVUTIL_ASSERT(fd == ev->ev_fd); 279 280 if (fd < 0) 281 return 0; 282 283 #ifndef EVMAP_USE_HT 284 if (fd >= io->nentries) { 285 if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1) 286 return (-1); 287 } 288 #endif 289 GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init, 290 evsel->fdinfo_len); 291 292 nread = ctx->nread; 293 nwrite = ctx->nwrite; 294 nclose = ctx->nclose; 295 296 if (nread) 297 old |= EV_READ; 298 if (nwrite) 299 old |= EV_WRITE; 300 if (nclose) 301 old |= EV_CLOSED; 302 303 if (ev->ev_events & EV_READ) { 304 if (++nread == 1) 305 res |= EV_READ; 306 } 307 if (ev->ev_events & EV_WRITE) { 308 if (++nwrite == 1) 309 res |= EV_WRITE; 310 } 311 if (ev->ev_events & EV_CLOSED) { 312 if (++nclose == 1) 313 res |= EV_CLOSED; 314 } 315 if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) { 316 event_warnx("Too many events reading or writing on fd %d", 317 (int)fd); 318 return -1; 319 } 320 if (EVENT_DEBUG_MODE_IS_ON() && 321 (old_ev = LIST_FIRST(&ctx->events)) && 322 (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) { 323 event_warnx("Tried to mix edge-triggered and non-edge-triggered" 324 " events on fd %d", (int)fd); 325 return -1; 326 } 327 328 if (res) { 329 void *extra = ((char*)ctx) + sizeof(struct evmap_io); 330 /* XXX(niels): we cannot mix edge-triggered and 331 * level-triggered, we should probably assert on 332 * this. 
*/ 333 if (evsel->add(base, ev->ev_fd, 334 old, (ev->ev_events & EV_ET) | res, extra) == -1) 335 return (-1); 336 retval = 1; 337 } 338 339 ctx->nread = (ev_uint16_t) nread; 340 ctx->nwrite = (ev_uint16_t) nwrite; 341 ctx->nclose = (ev_uint16_t) nclose; 342 LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next); 343 344 return (retval); 345 } 346 347 /* return -1 on error, 0 on success if nothing changed in the event backend, 348 * and 1 on success if something did. */ 349 int 350 evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev) 351 { 352 const struct eventop *evsel = base->evsel; 353 struct event_io_map *io = &base->io; 354 struct evmap_io *ctx; 355 int nread, nwrite, nclose, retval = 0; 356 short res = 0, old = 0; 357 358 if (fd < 0) 359 return 0; 360 361 EVUTIL_ASSERT(fd == ev->ev_fd); 362 363 #ifndef EVMAP_USE_HT 364 if (fd >= io->nentries) 365 return (-1); 366 #endif 367 368 GET_IO_SLOT(ctx, io, fd, evmap_io); 369 370 nread = ctx->nread; 371 nwrite = ctx->nwrite; 372 nclose = ctx->nclose; 373 374 if (nread) 375 old |= EV_READ; 376 if (nwrite) 377 old |= EV_WRITE; 378 if (nclose) 379 old |= EV_CLOSED; 380 381 if (ev->ev_events & EV_READ) { 382 if (--nread == 0) 383 res |= EV_READ; 384 EVUTIL_ASSERT(nread >= 0); 385 } 386 if (ev->ev_events & EV_WRITE) { 387 if (--nwrite == 0) 388 res |= EV_WRITE; 389 EVUTIL_ASSERT(nwrite >= 0); 390 } 391 if (ev->ev_events & EV_CLOSED) { 392 if (--nclose == 0) 393 res |= EV_CLOSED; 394 EVUTIL_ASSERT(nclose >= 0); 395 } 396 397 if (res) { 398 void *extra = ((char*)ctx) + sizeof(struct evmap_io); 399 if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) { 400 retval = -1; 401 } else { 402 retval = 1; 403 } 404 } 405 406 ctx->nread = nread; 407 ctx->nwrite = nwrite; 408 ctx->nclose = nclose; 409 LIST_REMOVE(ev, ev_io_next); 410 411 return (retval); 412 } 413 414 void 415 evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events) 416 { 417 struct event_io_map *io = &base->io; 418 struct evmap_io 
*ctx; 419 struct event *ev; 420 421 #ifndef EVMAP_USE_HT 422 if (fd < 0 || fd >= io->nentries) 423 return; 424 #endif 425 GET_IO_SLOT(ctx, io, fd, evmap_io); 426 427 if (NULL == ctx) 428 return; 429 LIST_FOREACH(ev, &ctx->events, ev_io_next) { 430 if (ev->ev_events & events) 431 event_active_nolock_(ev, ev->ev_events & events, 1); 432 } 433 } 434 435 /* code specific to signals */ 436 437 static void 438 evmap_signal_init(struct evmap_signal *entry) 439 { 440 LIST_INIT(&entry->events); 441 } 442 443 444 int 445 evmap_signal_add_(struct event_base *base, int sig, struct event *ev) 446 { 447 const struct eventop *evsel = base->evsigsel; 448 struct event_signal_map *map = &base->sigmap; 449 struct evmap_signal *ctx = NULL; 450 451 if (sig >= map->nentries) { 452 if (evmap_make_space( 453 map, sig, sizeof(struct evmap_signal *)) == -1) 454 return (-1); 455 } 456 GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init, 457 base->evsigsel->fdinfo_len); 458 459 if (LIST_EMPTY(&ctx->events)) { 460 if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL) 461 == -1) 462 return (-1); 463 } 464 465 LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next); 466 467 return (1); 468 } 469 470 int 471 evmap_signal_del_(struct event_base *base, int sig, struct event *ev) 472 { 473 const struct eventop *evsel = base->evsigsel; 474 struct event_signal_map *map = &base->sigmap; 475 struct evmap_signal *ctx; 476 477 if (sig >= map->nentries) 478 return (-1); 479 480 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); 481 482 LIST_REMOVE(ev, ev_signal_next); 483 484 if (LIST_FIRST(&ctx->events) == NULL) { 485 if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1) 486 return (-1); 487 } 488 489 return (1); 490 } 491 492 void 493 evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls) 494 { 495 struct event_signal_map *map = &base->sigmap; 496 struct evmap_signal *ctx; 497 struct event *ev; 498 499 if (sig < 0 || sig >= map->nentries) 500 return; 501 
GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal); 502 503 if (!ctx) 504 return; 505 LIST_FOREACH(ev, &ctx->events, ev_signal_next) 506 event_active_nolock_(ev, EV_SIGNAL, ncalls); 507 } 508 509 void * 510 evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd) 511 { 512 struct evmap_io *ctx; 513 GET_IO_SLOT(ctx, map, fd, evmap_io); 514 if (ctx) 515 return ((char*)ctx) + sizeof(struct evmap_io); 516 else 517 return NULL; 518 } 519 520 /* Callback type for evmap_io_foreach_fd */ 521 typedef int (*evmap_io_foreach_fd_cb)( 522 struct event_base *, evutil_socket_t, struct evmap_io *, void *); 523 524 /* Multipurpose helper function: Iterate over every file descriptor event_base 525 * for which we could have EV_READ or EV_WRITE events. For each such fd, call 526 * fn(base, signum, evmap_io, arg), where fn is the user-provided 527 * function, base is the event_base, signum is the signal number, evmap_io 528 * is an evmap_io structure containing a list of events pending on the 529 * file descriptor, and arg is the user-supplied argument. 530 * 531 * If fn returns 0, continue on to the next signal. Otherwise, return the same 532 * value that fn returned. 533 * 534 * Note that there is no guarantee that the file descriptors will be processed 535 * in any particular order. 
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
	/* The two #ifdef arms open the same loop: one walks the hashtable,
	 * the other the dense array; the body below is shared. */
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}

/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}

/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
 * EV_ET.
 */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	/* Backend-private fdinfo lives right after the evmap_io entry. */
	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	/* The new backend must not see the old backend's fdinfo state. */
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	/* Record failure through *result, but keep iterating: always
	 * return 0 so every fd gets re-added. */
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}

/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events. */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}

/* Re-register every pending fd and signal with the (possibly new) backend,
 * e.g. after a fork.  Returns 0 on success, -1 if any add failed. */
int
evmap_reinit_(struct event_base *base)
{
	int result = 0;

	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	return 0;
}

/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	/* event_del unlinks ev from this list, so keep taking the head. */
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}

/* Helper for evmap_delete_all_: delete every event pending on an fd.
 */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}

/* Helper for evmap_delete_all_: delete every event pending on a signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}

/* Delete every fd and signal event registered with 'base'. */
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};

/* Reset 'changelist' to an empty, unallocated state. */
void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change.
 */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	/* The fdinfo block sits immediately after the evmap entry; which
	 * map to consult depends on whether this is a signal change. */
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

/** Callback helper for event_changelist_assert_ok: check that any fd with a
 * changelist entry points back at the right slot in the changes array. */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}

/** Make sure that the changelist is consistent with the evmap structures.
 */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Forward direction: every change's fdinfo must point back at it. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse direction: every fd's fdinfo index must name a change
	 * whose fd matches. */
	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif

/* Drop every queued change without applying it, clearing each fd's
 * back-pointer into the changes array. */
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

/* Release the changes array and reset the changelist to its initial state. */
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes.
 */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	/* Start at 64 entries, then double. */
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL if the changelist cannot be
 * grown.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet: append a fresh one and
		 * record its position (+1) in the fdinfo back-pointer. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

/* eventop 'add' implementation that queues the change instead of applying
 * it immediately.  Returns 0 on success, -1 on allocation failure. */
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_CLOSED) {
		change->close_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}

/* eventop 'del' implementation that queues the change instead of applying
 * it immediately.  Returns 0 on success, -1 on allocation failure. */
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}

/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * given fd are set up correctly, and that the nread and nwrite counts on that
 * fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0, n_close = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
		if (ev->ev_events & EV_CLOSED)
			++n_close;
	}

	/* The per-kind refcounts must agree with the actual list contents. */
	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);
	EVUTIL_ASSERT(n_close == io_info->nclose);

	return 0;
}

/* Helper for evmap_check_integrity_: verify that all of the events pending
 * on given signal are set up correctly.
*/ 980 static int 981 evmap_signal_check_integrity_fn(struct event_base *base, 982 int signum, struct evmap_signal *sig_info, void *arg) 983 { 984 struct event *ev; 985 /* First, make sure the list itself isn't corrupt. */ 986 EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next); 987 988 LIST_FOREACH(ev, &sig_info->events, ev_io_next) { 989 EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED); 990 EVUTIL_ASSERT(ev->ev_fd == signum); 991 EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL)); 992 EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))); 993 } 994 return 0; 995 } 996 997 void 998 evmap_check_integrity_(struct event_base *base) 999 { 1000 evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL); 1001 evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL); 1002 1003 if (base->evsel->add == event_changelist_add_) 1004 event_changelist_assert_ok(base); 1005 } 1006 1007 /* Helper type for evmap_foreach_event_: Bundles a function to call on every 1008 * event, and the user-provided void* to use as its third argument. */ 1009 struct evmap_foreach_event_helper { 1010 event_base_foreach_event_cb fn; 1011 void *arg; 1012 }; 1013 1014 /* Helper for evmap_foreach_event_: calls a provided function on every event 1015 * pending on a given fd. */ 1016 static int 1017 evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd, 1018 struct evmap_io *io_info, void *arg) 1019 { 1020 struct evmap_foreach_event_helper *h = arg; 1021 struct event *ev; 1022 int r; 1023 LIST_FOREACH(ev, &io_info->events, ev_io_next) { 1024 if ((r = h->fn(base, ev, h->arg))) 1025 return r; 1026 } 1027 return 0; 1028 } 1029 1030 /* Helper for evmap_foreach_event_: calls a provided function on every event 1031 * pending on a given signal. 
*/ 1032 static int 1033 evmap_signal_foreach_event_fn(struct event_base *base, int signum, 1034 struct evmap_signal *sig_info, void *arg) 1035 { 1036 struct event *ev; 1037 struct evmap_foreach_event_helper *h = arg; 1038 int r; 1039 LIST_FOREACH(ev, &sig_info->events, ev_signal_next) { 1040 if ((r = h->fn(base, ev, h->arg))) 1041 return r; 1042 } 1043 return 0; 1044 } 1045 1046 int 1047 evmap_foreach_event_(struct event_base *base, 1048 event_base_foreach_event_cb fn, void *arg) 1049 { 1050 struct evmap_foreach_event_helper h; 1051 int r; 1052 h.fn = fn; 1053 h.arg = arg; 1054 if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h))) 1055 return r; 1056 return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h); 1057 } 1058 1059