/*	$NetBSD: buffer.c,v 1.7 2024/08/18 20:47:20 christos Exp $	*/

/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef EVENT__HAVE_VASPRINTF
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
 * stdio.h.  This comes from evconfig-private.h.
 */
#endif

#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef EVENT__HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef EVENT__HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef EVENT__HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef EVENT__HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif
#ifdef EVENT__HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif


#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"
#include "event-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* send file support */
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	    0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
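
/* Layout note: each chain is one allocation with the struct header followed
 * by its payload.  'misalign' bytes of already-drained data precede 'off'
 * bytes of live data, so writable space begins at buffer + misalign + off,
 * and CHAIN_SPACE_LEN() is whatever remains after that point (zero for
 * immutable chains, which must never be written through). */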

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do {			\
	(ptr)->pos = -1;			\
	(ptr)->internal_.chain = NULL;		\
	(ptr)->internal_.pos_in_chain = 0;	\
} while (0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar);
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
		return (NULL);

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	if (size < EVBUFFER_CHAIN_MAX / 2) {
		to_alloc = MIN_BUFFER_SIZE;
		while (to_alloc < size) {
			to_alloc <<= 1;
		}
	} else {
		to_alloc = size;
	}

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);

	chain->refcnt = 1;

	return (chain);
}

static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference,
			chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
			chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent,
			chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are empty and not pinned, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}

/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There is no data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}

static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
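	/* 'mask' selects the flag bits tested on each callback entry, and
	 * 'masked_val' is the pattern those bits must match for the entry to
	 * run in this pass: deferred passes run enabled entries without
	 * EVBUFFER_CB_NODEFER, while an immediate pass with deferral pending
	 * runs only the NODEFER entries. */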
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	    cbent != LIST_END(&buffer->callbacks);
	    cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
			EVBUFFER_UNLOCK(buffer);
		}
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}

void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
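	/* Drop the caller's reference; once the count reaches zero the
	 * chains and callbacks are torn down.  The call below also releases
	 * the lock acquired above. */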
	evbuffer_decref_and_unlock_(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

size_t
evbuffer_add_iovec(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vec)
{
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;
}

static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	struct evbuffer_chain **chainp = buf->last_with_datap;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*chainp)
		return 0;

	while ((*chainp)->next) {
		chainp = &(*chainp)->next;
		if ((*chainp)->off)
			buf->last_with_datap = chainp;
		++n;
	}
	return n;
}

int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it.
		 */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over.
	 */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain **chp;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	chp = evbuffer_free_trailing_empty_chains(dst);
	*chp = src->first;

	if (src->last_with_datap == &src->first)
		dst->last_with_datap = chp;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
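
/*
 * Usage sketch (illustrative, not part of the original file): the move
 * functions below transfer whole chains between buffers whenever they can,
 * so draining one evbuffer into another avoids copying the payload bytes.
 *
 *	struct evbuffer *src = evbuffer_new();
 *	struct evbuffer *dst = evbuffer_new();
 *	evbuffer_add(src, "hello", 5);
 *	if (evbuffer_add_buffer(dst, src) == 0) {
 *		// src is now empty; dst holds the 5 bytes.  The chains were
 *		// relinked rather than copied, since neither buffer was
 *		// frozen and no chain was read-pinned.
 *	}
 *	evbuffer_free(src);
 *	evbuffer_free(dst);
 */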

int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type cannot be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it.
		 */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		    remaining >= chain->off;
		    chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		EVUTIL_ASSERT(remaining <= chain->off);
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}

ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case.
	 */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
			result = -1;
			goto done;
		}
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);

		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoiding memcpy as
 * much as possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (chain != src->first) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

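	/* At this point the whole chains that fit were relinked into dst,
	 * and the leftover 'datlen' bytes were copied out of the partial
	 * chain above and drained from src by advancing its misalign. */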
	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread; /*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		if (chain->buffer) {
			memcpy(buffer, chain->buffer + chain->misalign,
			    chain->off);
			size -= chain->off;
			buffer += chain->off;
		}
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ?
		    CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline size_t
evbuffer_strspn(struct evbuffer_ptr *ptr, const char *chrset)
{
	size_t count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
next:
			++count;
		}
		i = 0;

		if (! chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}

static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t off = it->internal_.pos_in_chain;

	if (chain == NULL)
		return -1;

	return (unsigned char)chain->buffer[chain->misalign + off];
}

struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}

#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	if (*buf->last_with_datap == NULL) {
		chain = buf->last;
	} else {
		chain = *buf->last_with_datap;
	}

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data.
	 */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (datlen == 0) {
		result = 0;
		goto done;
	}
	if (buf->freeze_start) {
		goto done;
	}
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);

		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data.
			 */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first && chain->off)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	EVUTIL_ASSERT(datlen <= tmp->buffer_len);
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}

#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}

/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.
	 * Otherwise the space savings are probably offset by the time lost
	 * in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND ||
	    datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) {
		/* It's not worth resizing this chain.  Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty). */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}

/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it.
*/ 2088 chain->misalign = 0; 2089 avail += chain->buffer_len; 2090 ++used; 2091 } 2092 if (avail >= datlen) { 2093 /* There is already enough space. Just return */ 2094 return (0); 2095 } 2096 if (used == n) 2097 break; 2098 } 2099 2100 /* There wasn't enough space in the first n chains with space in 2101 * them. Either add a new chain with enough space, or replace all 2102 * empty chains with one that has enough space, depending on n. */ 2103 if (used < n) { 2104 /* The loop ran off the end of the chains before it hit n 2105 * chains; we can add another. */ 2106 EVUTIL_ASSERT(chain == NULL); 2107 2108 tmp = evbuffer_chain_new(datlen - avail); 2109 if (tmp == NULL) 2110 return (-1); 2111 2112 buf->last->next = tmp; 2113 buf->last = tmp; 2114 /* (we would only set last_with_data if we added the first 2115 * chain. But if the buffer had no chains, we would have 2116 * just allocated a new chain earlier) */ 2117 return (0); 2118 } else { 2119 /* Nuke _all_ the empty chains. */ 2120 int rmv_all = 0; /* True iff we removed last_with_data. */ 2121 chain = *buf->last_with_datap; 2122 if (!chain->off) { 2123 EVUTIL_ASSERT(chain == buf->first); 2124 rmv_all = 1; 2125 avail = 0; 2126 } else { 2127 /* can't overflow, since only mutable chains have 2128 * huge misaligns. */ 2129 avail = (size_t) CHAIN_SPACE_LEN(chain); 2130 chain = chain->next; 2131 } 2132 2133 2134 for (; chain; chain = next) { 2135 next = chain->next; 2136 EVUTIL_ASSERT(chain->off == 0); 2137 evbuffer_chain_free(chain); 2138 } 2139 EVUTIL_ASSERT(datlen >= avail); 2140 tmp = evbuffer_chain_new(datlen - avail); 2141 if (tmp == NULL) { 2142 if (rmv_all) { 2143 ZERO_CHAIN(buf); 2144 } else { 2145 buf->last = *buf->last_with_datap; 2146 (*buf->last_with_datap)->next = NULL; 2147 } 2148 return (-1); 2149 } 2150 2151 if (rmv_all) { 2152 buf->first = buf->last = tmp; 2153 buf->last_with_datap = &buf->first; 2154 } else { 2155 (*buf->last_with_datap)->next = tmp; 2156 buf->last = tmp; 2157 } 2158 return (0); 2159 } 2160 } 2161 2162 int 2163 evbuffer_expand(struct evbuffer *buf, size_t datlen) 2164 { 2165 struct evbuffer_chain *chain; 2166 2167 EVBUFFER_LOCK(buf); 2168 chain = evbuffer_expand_singlechain(buf, datlen); 2169 EVBUFFER_UNLOCK(buf); 2170 return chain ? 0 : -1; 2171 } 2172 2173 /* 2174 * Reads data from a file descriptor into a buffer. 2175 */ 2176 2177 #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32) 2178 #define USE_IOVEC_IMPL 2179 #endif 2180 2181 #ifdef USE_IOVEC_IMPL 2182 2183 #ifdef EVENT__HAVE_SYS_UIO_H 2184 /* number of iovec we use for writev, fragmentation is going to determine 2185 * how much we end up writing */ 2186 2187 #define DEFAULT_WRITE_IOVEC 128 2188 2189 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC 2190 #define NUM_WRITE_IOVEC UIO_MAXIOV 2191 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC 2192 #define NUM_WRITE_IOVEC IOV_MAX 2193 #else 2194 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC 2195 #endif 2196 2197 #define IOV_TYPE struct iovec 2198 #define IOV_PTR_FIELD iov_base 2199 #define IOV_LEN_FIELD iov_len 2200 #define IOV_LEN_TYPE size_t 2201 #else 2202 #define NUM_WRITE_IOVEC 16 2203 #define IOV_TYPE WSABUF 2204 #define IOV_PTR_FIELD buf 2205 #define IOV_LEN_FIELD len 2206 #define IOV_LEN_TYPE unsigned long 2207 #endif 2208 #endif 2209 #define NUM_READ_IOVEC 4 2210 2211 #define EVBUFFER_MAX_READ 4096 2212 2213 /** Helper function to figure out which space to use for reading data into 2214 an evbuffer. Internal use only. 
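The caller must already have arranged for free space (for example,
evbuffer_read() below calls evbuffer_expand_fast_() first); this
helper only describes that space and never allocates.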
2215 2216 @param buf The buffer to read into 2217 @param howmuch How much we want to read. 2218 @param vecs An array of two or more iovecs or WSABUFs. 2219 @param n_vecs_avail The length of vecs 2220 @param chainp A pointer to a variable to hold the first chain we're 2221 reading into. 2222 @param exact Boolean: if true, we do not provide more than 'howmuch' 2223 space in the vectors, even if more space is available. 2224 @return The number of buffers we're using. 2225 */ 2226 int 2227 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, 2228 struct evbuffer_iovec *vecs, int n_vecs_avail, 2229 struct evbuffer_chain ***chainp, int exact) 2230 { 2231 struct evbuffer_chain *chain; 2232 struct evbuffer_chain **firstchainp; 2233 size_t so_far; 2234 int i; 2235 ASSERT_EVBUFFER_LOCKED(buf); 2236 2237 if (howmuch < 0) 2238 return -1; 2239 2240 so_far = 0; 2241 /* Let firstchain be the first chain with any space on it */ 2242 firstchainp = buf->last_with_datap; 2243 EVUTIL_ASSERT(*firstchainp); 2244 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 2245 firstchainp = &(*firstchainp)->next; 2246 } 2247 2248 chain = *firstchainp; 2249 EVUTIL_ASSERT(chain); 2250 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 2251 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 2252 if (avail > (howmuch - so_far) && exact) 2253 avail = howmuch - so_far; 2254 vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); 2255 vecs[i].iov_len = avail; 2256 so_far += avail; 2257 chain = chain->next; 2258 } 2259 2260 *chainp = firstchainp; 2261 return i; 2262 } 2263 2264 static int 2265 get_n_bytes_readable_on_socket(evutil_socket_t fd) 2266 { 2267 #if defined(FIONREAD) && defined(_WIN32) 2268 unsigned long lng = EVBUFFER_MAX_READ; 2269 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 2270 return -1; 2271 /* Can overflow, but mostly harmlessly. XXXX */ 2272 return (int)lng; 2273 #elif defined(FIONREAD) 2274 int n = EVBUFFER_MAX_READ; 2275 if (ioctl(fd, FIONREAD, &n) < 0) 2276 return -1; 2277 return n; 2278 #else 2279 return EVBUFFER_MAX_READ; 2280 #endif 2281 } 2282 2283 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 2284 * as howmuch? */ 2285 int 2286 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2287 { 2288 struct evbuffer_chain **chainp; 2289 int n; 2290 int result; 2291 2292 #ifdef USE_IOVEC_IMPL 2293 int nvecs, i, remaining; 2294 #else 2295 struct evbuffer_chain *chain; 2296 unsigned char *p; 2297 #endif 2298 2299 EVBUFFER_LOCK(buf); 2300 2301 if (buf->freeze_end) { 2302 result = -1; 2303 goto done; 2304 } 2305 2306 n = get_n_bytes_readable_on_socket(fd); 2307 if (n <= 0 || n > EVBUFFER_MAX_READ) 2308 n = EVBUFFER_MAX_READ; 2309 if (howmuch < 0 || howmuch > n) 2310 howmuch = n; 2311 2312 #ifdef USE_IOVEC_IMPL 2313 /* Since we can use iovecs, we're willing to use the last 2314 * NUM_READ_IOVEC chains. */ 2315 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2316 result = -1; 2317 goto done; 2318 } else { 2319 IOV_TYPE vecs[NUM_READ_IOVEC]; 2320 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2321 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2322 NUM_READ_IOVEC, &chainp, 1); 2323 #else 2324 /* We aren't using the native struct iovec. Therefore, 2325 we are on win32. 
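* Fill a portable evbuffer_iovec array first, then translate each
* entry into the WSABUF that WSARecv() expects using
* WSABUF_FROM_EVBUFFER_IOV() below.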
*/ 2326 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2327 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2328 &chainp, 1); 2329 2330 for (i=0; i < nvecs; ++i) 2331 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2332 #endif 2333 2334 #ifdef _WIN32 2335 { 2336 DWORD bytesRead; 2337 DWORD flags=0; 2338 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2339 /* The read failed. It might be a close, 2340 * or it might be an error. */ 2341 if (WSAGetLastError() == WSAECONNABORTED) 2342 n = 0; 2343 else 2344 n = -1; 2345 } else 2346 n = bytesRead; 2347 } 2348 #else 2349 n = readv(fd, vecs, nvecs); 2350 #endif 2351 } 2352 2353 #else /*!USE_IOVEC_IMPL*/ 2354 /* If we don't have FIONREAD, we might waste some space here */ 2355 /* XXX we _will_ waste some space here if there is any space left 2356 * over on buf->last. */ 2357 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { 2358 result = -1; 2359 goto done; 2360 } 2361 2362 /* We can append new data at this point */ 2363 p = chain->buffer + chain->misalign + chain->off; 2364 2365 #ifndef _WIN32 2366 n = read(fd, p, howmuch); 2367 #else 2368 n = recv(fd, p, howmuch, 0); 2369 #endif 2370 #endif /* USE_IOVEC_IMPL */ 2371 2372 if (n == -1) { 2373 result = -1; 2374 goto done; 2375 } 2376 if (n == 0) { 2377 result = 0; 2378 goto done; 2379 } 2380 2381 #ifdef USE_IOVEC_IMPL 2382 remaining = n; 2383 for (i=0; i < nvecs; ++i) { 2384 /* can't overflow, since only mutable chains have 2385 * huge misaligns. */ 2386 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); 2387 /* XXXX This is a kludge that can waste space in perverse 2388 * situations. */ 2389 if (space > EVBUFFER_CHAIN_MAX) 2390 space = EVBUFFER_CHAIN_MAX; 2391 if ((ev_ssize_t)space < remaining) { 2392 (*chainp)->off += space; 2393 remaining -= (int)space; 2394 } else { 2395 (*chainp)->off += remaining; 2396 buf->last_with_datap = chainp; 2397 break; 2398 } 2399 chainp = &(*chainp)->next; 2400 } 2401 #else 2402 chain->off += n; 2403 advance_last_with_data(buf); 2404 #endif 2405 buf->total_len += n; 2406 buf->n_add_for_cb += n; 2407 2408 /* Tell someone about changes in this buffer */ 2409 evbuffer_invoke_callbacks_(buf); 2410 result = n; 2411 done: 2412 EVBUFFER_UNLOCK(buf); 2413 return result; 2414 } 2415 2416 #ifdef USE_IOVEC_IMPL 2417 static inline int 2418 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, 2419 ev_ssize_t howmuch) 2420 { 2421 IOV_TYPE iov[NUM_WRITE_IOVEC]; 2422 struct evbuffer_chain *chain = buffer->first; 2423 int n, i = 0; 2424 2425 if (howmuch < 0) 2426 return -1; 2427 2428 ASSERT_EVBUFFER_LOCKED(buffer); 2429 /* XXX make this top out at some maximal data length? if the 2430 * buffer has (say) 1MB in it, split over 128 chains, there's 2431 * no way it all gets written in one go. */ 2432 while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { 2433 #ifdef USE_SENDFILE 2434 /* we cannot write the file info via writev */ 2435 if (chain->flags & EVBUFFER_SENDFILE) 2436 break; 2437 #endif 2438 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); 2439 if ((size_t)howmuch >= chain->off) { 2440 /* XXXcould be problematic when windows supports mmap*/ 2441 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; 2442 howmuch -= chain->off; 2443 } else { 2444 /* XXXcould be problematic when windows supports mmap*/ 2445 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; 2446 break; 2447 } 2448 chain = chain->next; 2449 } 2450 if (! 
i) 2451 return 0; 2452 2453 #ifdef _WIN32 2454 { 2455 DWORD bytesSent; 2456 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) 2457 n = -1; 2458 else 2459 n = bytesSent; 2460 } 2461 #else 2462 n = writev(fd, iov, i); 2463 #endif 2464 return (n); 2465 } 2466 #endif 2467 2468 #ifdef USE_SENDFILE 2469 static inline int 2470 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, 2471 ev_ssize_t howmuch) 2472 { 2473 struct evbuffer_chain *chain = buffer->first; 2474 struct evbuffer_chain_file_segment *info = 2475 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, 2476 chain); 2477 const int source_fd = info->segment->fd; 2478 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) 2479 int res; 2480 ev_off_t len = chain->off; 2481 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) 2482 ev_ssize_t res; 2483 off_t offset = chain->misalign; 2484 #endif 2485 2486 ASSERT_EVBUFFER_LOCKED(buffer); 2487 2488 #if defined(SENDFILE_IS_MACOSX) 2489 res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); 2490 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2491 return (-1); 2492 2493 return (len); 2494 #elif defined(SENDFILE_IS_FREEBSD) 2495 res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); 2496 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2497 return (-1); 2498 2499 return (len); 2500 #elif defined(SENDFILE_IS_LINUX) 2501 /* TODO(niels): implement splice */ 2502 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2503 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2504 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ 2505 return (0); 2506 } 2507 return (res); 2508 #elif defined(SENDFILE_IS_SOLARIS) 2509 { 2510 const off_t offset_orig = offset; 2511 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2512 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2513 if (offset - offset_orig) 2514 return offset - offset_orig; 2515 /* if this is EAGAIN or EINTR and no bytes were 2516 * written, return 0 */ 2517 return (0); 2518 } 2519 return (res); 2520 } 2521 #endif 2522 } 2523 #endif 2524 2525 int 2526 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, 2527 ev_ssize_t howmuch) 2528 { 2529 int n = -1; 2530 2531 EVBUFFER_LOCK(buffer); 2532 2533 if (buffer->freeze_start) { 2534 goto done; 2535 } 2536 2537 if (howmuch < 0 || (size_t)howmuch > buffer->total_len) 2538 howmuch = buffer->total_len; 2539 2540 if (howmuch > 0) { 2541 #ifdef USE_SENDFILE 2542 struct evbuffer_chain *chain = buffer->first; 2543 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) 2544 n = evbuffer_write_sendfile(buffer, fd, howmuch); 2545 else { 2546 #endif 2547 #ifdef USE_IOVEC_IMPL 2548 n = evbuffer_write_iovec(buffer, fd, howmuch); 2549 #elif defined(_WIN32) 2550 /* XXX(nickm) Don't disable this code until we know if 2551 * the WSARecv code above works. 
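* Until then, this fallback pulls the data into one contiguous
* chain and sends it with a single send() call.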
*/
2552 void *p = evbuffer_pullup(buffer, howmuch);
2553 EVUTIL_ASSERT(p || !howmuch);
2554 n = send(fd, p, howmuch, 0);
2555 #else
2556 void *p = evbuffer_pullup(buffer, howmuch);
2557 EVUTIL_ASSERT(p || !howmuch);
2558 n = write(fd, p, howmuch);
2559 #endif
2560 #ifdef USE_SENDFILE
2561 }
2562 #endif
2563 }
2564
2565 if (n > 0)
2566 evbuffer_drain(buffer, n);
2567
2568 done:
2569 EVBUFFER_UNLOCK(buffer);
2570 return (n);
2571 }
2572
2573 int
2574 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2575 {
2576 return evbuffer_write_atmost(buffer, fd, -1);
2577 }
2578
2579 unsigned char *
2580 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2581 {
2582 unsigned char *search;
2583 struct evbuffer_ptr ptr;
2584
2585 EVBUFFER_LOCK(buffer);
2586
2587 ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2588 if (ptr.pos < 0) {
2589 search = NULL;
2590 } else {
2591 search = evbuffer_pullup(buffer, ptr.pos + len);
2592 if (search)
2593 search += ptr.pos;
2594 }
2595 EVBUFFER_UNLOCK(buffer);
2596 return search;
2597 }
2598
2599 /* Subtract <b>howfar</b> from the position of <b>pos</b> within
2600 * <b>buf</b>. Returns 0 on success, -1 on failure.
2601 *
2602 * This isn't exposed yet, because of potential inefficiency issues.
2603 * Maybe it should be. */
2604 static int
2605 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
2606 size_t howfar)
2607 {
2608 if (pos->pos < 0)
2609 return -1;
2610 if (howfar > (size_t)pos->pos)
2611 return -1;
2612 if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
2613 pos->internal_.pos_in_chain -= howfar;
2614 pos->pos -= howfar;
2615 return 0;
2616 } else {
2617 const size_t newpos = pos->pos - howfar;
2618 /* Here's the inefficient part: it walks over the
2619 * chains until we hit newpos. */
2620 return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
2621 }
2622 }
2623
2624 int
2625 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
2626 size_t position, enum evbuffer_ptr_how how)
2627 {
2628 size_t left = position;
2629 struct evbuffer_chain *chain = NULL;
2630 int result = 0;
2631
2632 EVBUFFER_LOCK(buf);
2633
2634 switch (how) {
2635 case EVBUFFER_PTR_SET:
2636 chain = buf->first;
2637 pos->pos = position;
2638 position = 0;
2639 break;
2640 case EVBUFFER_PTR_ADD:
2641 /* this avoids iterating over all previous chains if
2642 we just want to advance the position */
2643 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
2644 EVBUFFER_UNLOCK(buf);
2645 return -1;
2646 }
2647 chain = pos->internal_.chain;
2648 pos->pos += position;
2649 position = pos->internal_.pos_in_chain;
2650 break;
2651 }
2652
2653 EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
2654 while (chain && position + left >= chain->off) {
2655 left -= chain->off - position;
2656 chain = chain->next;
2657 position = 0;
2658 }
2659 if (chain) {
2660 pos->internal_.chain = chain;
2661 pos->internal_.pos_in_chain = position + left;
2662 } else if (left == 0) {
2663 /* The first byte in the (nonexistent) chain after the last chain */
2664 pos->internal_.chain = NULL;
2665 pos->internal_.pos_in_chain = 0;
2666 } else {
2667 PTR_NOT_FOUND(pos);
2668 result = -1;
2669 }
2670
2671 EVBUFFER_UNLOCK(buf);
2672
2673 return result;
2674 }
2675
2676 /**
2677 Compare the bytes in buf at position pos to the len bytes in mem. Return
2678 less than 0, 0, or greater than 0 as memcmp does.
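Note that it also returns -1 if pos or len reaches past the end of
buf, so a negative result does not always mean the buffer's bytes
compare less than mem.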
2679 */ 2680 static int 2681 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, 2682 const char *mem, size_t len) 2683 { 2684 struct evbuffer_chain *chain; 2685 size_t position; 2686 int r; 2687 2688 ASSERT_EVBUFFER_LOCKED(buf); 2689 2690 if (pos->pos < 0 || 2691 EV_SIZE_MAX - len < (size_t)pos->pos || 2692 pos->pos + len > buf->total_len) 2693 return -1; 2694 2695 chain = pos->internal_.chain; 2696 position = pos->internal_.pos_in_chain; 2697 while (len && chain) { 2698 size_t n_comparable; 2699 if (len + position > chain->off) 2700 n_comparable = chain->off - position; 2701 else 2702 n_comparable = len; 2703 r = memcmp(chain->buffer + chain->misalign + position, mem, 2704 n_comparable); 2705 if (r) 2706 return r; 2707 mem += n_comparable; 2708 len -= n_comparable; 2709 position = 0; 2710 chain = chain->next; 2711 } 2712 2713 return 0; 2714 } 2715 2716 struct evbuffer_ptr 2717 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) 2718 { 2719 return evbuffer_search_range(buffer, what, len, start, NULL); 2720 } 2721 2722 struct evbuffer_ptr 2723 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) 2724 { 2725 struct evbuffer_ptr pos; 2726 struct evbuffer_chain *chain, *last_chain = NULL; 2727 const unsigned char *p; 2728 char first; 2729 2730 EVBUFFER_LOCK(buffer); 2731 2732 if (start) { 2733 memcpy(&pos, start, sizeof(pos)); 2734 chain = pos.internal_.chain; 2735 } else { 2736 pos.pos = 0; 2737 chain = pos.internal_.chain = buffer->first; 2738 pos.internal_.pos_in_chain = 0; 2739 } 2740 2741 if (end) 2742 last_chain = end->internal_.chain; 2743 2744 if (!len || len > EV_SSIZE_MAX) 2745 goto done; 2746 2747 first = what[0]; 2748 2749 while (chain) { 2750 const unsigned char *start_at = 2751 chain->buffer + chain->misalign + 2752 pos.internal_.pos_in_chain; 2753 p = memchr(start_at, first, 2754 chain->off - pos.internal_.pos_in_chain); 2755 if (p) { 2756 pos.pos += p - start_at; 2757 pos.internal_.pos_in_chain += p - start_at; 2758 if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { 2759 if (end && pos.pos + (ev_ssize_t)len > end->pos) 2760 goto not_found; 2761 else 2762 goto done; 2763 } 2764 ++pos.pos; 2765 ++pos.internal_.pos_in_chain; 2766 if (pos.internal_.pos_in_chain == chain->off) { 2767 chain = pos.internal_.chain = chain->next; 2768 pos.internal_.pos_in_chain = 0; 2769 } 2770 } else { 2771 if (chain == last_chain) 2772 goto not_found; 2773 pos.pos += chain->off - pos.internal_.pos_in_chain; 2774 chain = pos.internal_.chain = chain->next; 2775 pos.internal_.pos_in_chain = 0; 2776 } 2777 } 2778 2779 not_found: 2780 PTR_NOT_FOUND(&pos); 2781 done: 2782 EVBUFFER_UNLOCK(buffer); 2783 return pos; 2784 } 2785 2786 int 2787 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, 2788 struct evbuffer_ptr *start_at, 2789 struct evbuffer_iovec *vec, int n_vec) 2790 { 2791 struct evbuffer_chain *chain; 2792 int idx = 0; 2793 ev_ssize_t len_so_far = 0; 2794 2795 /* Avoid locking in trivial edge cases */ 2796 if (start_at && start_at->internal_.chain == NULL) 2797 return 0; 2798 2799 EVBUFFER_LOCK(buffer); 2800 2801 if (start_at) { 2802 chain = start_at->internal_.chain; 2803 len_so_far = chain->off 2804 - start_at->internal_.pos_in_chain; 2805 idx = 1; 2806 if (n_vec > 0) { 2807 vec[0].iov_base = (void *)(chain->buffer + chain->misalign 2808 + start_at->internal_.pos_in_chain); 2809 vec[0].iov_len = len_so_far; 2810 } 2811 
chain = chain->next; 2812 } else { 2813 chain = buffer->first; 2814 } 2815 2816 if (n_vec == 0 && len < 0) { 2817 /* If no vectors are provided and they asked for "everything", 2818 * pretend they asked for the actual available amount. */ 2819 len = buffer->total_len; 2820 if (start_at) { 2821 len -= start_at->pos; 2822 } 2823 } 2824 2825 while (chain) { 2826 if (len >= 0 && len_so_far >= len) 2827 break; 2828 if (idx<n_vec) { 2829 vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); 2830 vec[idx].iov_len = chain->off; 2831 } else if (len<0) { 2832 break; 2833 } 2834 ++idx; 2835 len_so_far += chain->off; 2836 chain = chain->next; 2837 } 2838 2839 EVBUFFER_UNLOCK(buffer); 2840 2841 return idx; 2842 } 2843 2844 2845 int 2846 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) 2847 { 2848 char *buffer; 2849 size_t space; 2850 int sz, result = -1; 2851 va_list aq; 2852 struct evbuffer_chain *chain; 2853 2854 2855 EVBUFFER_LOCK(buf); 2856 2857 if (buf->freeze_end) { 2858 goto done; 2859 } 2860 2861 /* make sure that at least some space is available */ 2862 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) 2863 goto done; 2864 2865 for (;;) { 2866 #if 0 2867 size_t used = chain->misalign + chain->off; 2868 buffer = (char *)chain->buffer + chain->misalign + chain->off; 2869 EVUTIL_ASSERT(chain->buffer_len >= used); 2870 space = chain->buffer_len - used; 2871 #endif 2872 buffer = (char*) CHAIN_SPACE_PTR(chain); 2873 space = (size_t) CHAIN_SPACE_LEN(chain); 2874 2875 #ifndef va_copy 2876 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) 2877 #endif 2878 va_copy(aq, ap); 2879 2880 sz = evutil_vsnprintf(buffer, space, fmt, aq); 2881 2882 va_end(aq); 2883 2884 if (sz < 0) 2885 goto done; 2886 if (INT_MAX >= EVBUFFER_CHAIN_MAX && 2887 (size_t)sz >= EVBUFFER_CHAIN_MAX) 2888 goto done; 2889 if ((size_t)sz < space) { 2890 chain->off += sz; 2891 buf->total_len += sz; 2892 buf->n_add_for_cb += sz; 2893 2894 advance_last_with_data(buf); 2895 evbuffer_invoke_callbacks_(buf); 2896 result = sz; 2897 goto done; 2898 } 2899 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) 2900 goto done; 2901 } 2902 /* NOTREACHED */ 2903 2904 done: 2905 EVBUFFER_UNLOCK(buf); 2906 return result; 2907 } 2908 2909 int 2910 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) 
2911 { 2912 int res = -1; 2913 va_list ap; 2914 2915 va_start(ap, fmt); 2916 res = evbuffer_add_vprintf(buf, fmt, ap); 2917 va_end(ap); 2918 2919 return (res); 2920 } 2921 2922 int 2923 evbuffer_add_reference(struct evbuffer *outbuf, 2924 const void *data, size_t datlen, 2925 evbuffer_ref_cleanup_cb cleanupfn, void *extra) 2926 { 2927 struct evbuffer_chain *chain; 2928 struct evbuffer_chain_reference *info; 2929 int result = -1; 2930 2931 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); 2932 if (!chain) 2933 return (-1); 2934 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; 2935 chain->buffer = (unsigned char *)data; 2936 chain->buffer_len = datlen; 2937 chain->off = datlen; 2938 2939 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); 2940 info->cleanupfn = cleanupfn; 2941 info->extra = extra; 2942 2943 EVBUFFER_LOCK(outbuf); 2944 if (outbuf->freeze_end) { 2945 /* don't call chain_free; we do not want to actually invoke 2946 * the cleanup function */ 2947 mm_free(chain); 2948 goto done; 2949 } 2950 evbuffer_chain_insert(outbuf, chain); 2951 outbuf->n_add_for_cb += datlen; 2952 2953 evbuffer_invoke_callbacks_(outbuf); 2954 2955 result = 0; 2956 done: 2957 EVBUFFER_UNLOCK(outbuf); 2958 2959 return result; 2960 } 2961 2962 /* TODO(niels): we may want to add to automagically convert to mmap, in 2963 * case evbuffer_remove() or evbuffer_pullup() are being used. 2964 */ 2965 struct evbuffer_file_segment * 2966 evbuffer_file_segment_new( 2967 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2968 { 2969 struct evbuffer_file_segment *seg = 2970 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2971 if (!seg) 2972 return NULL; 2973 seg->refcnt = 1; 2974 seg->fd = fd; 2975 seg->flags = flags; 2976 seg->file_offset = offset; 2977 seg->cleanup_cb = NULL; 2978 seg->cleanup_cb_arg = NULL; 2979 #ifdef _WIN32 2980 #ifndef lseek 2981 #define lseek _lseeki64 2982 #endif 2983 #ifndef fstat 2984 #define fstat _fstat 2985 #endif 2986 #ifndef stat 2987 #define stat _stat 2988 #endif 2989 #endif 2990 if (length == -1) { 2991 struct stat st; 2992 if (fstat(fd, &st) < 0) 2993 goto err; 2994 length = st.st_size; 2995 } 2996 seg->length = length; 2997 2998 if (offset < 0 || length < 0 || 2999 ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || 3000 (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) 3001 goto err; 3002 3003 #if defined(USE_SENDFILE) 3004 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 3005 seg->can_sendfile = 1; 3006 goto done; 3007 } 3008 #endif 3009 3010 if (evbuffer_file_segment_materialize(seg)<0) 3011 goto err; 3012 3013 #if defined(USE_SENDFILE) 3014 done: 3015 #endif 3016 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 3017 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 3018 } 3019 return seg; 3020 err: 3021 mm_free(seg); 3022 return NULL; 3023 } 3024 3025 #ifdef EVENT__HAVE_MMAP 3026 static long 3027 get_page_size(void) 3028 { 3029 #ifdef SC_PAGE_SIZE 3030 return sysconf(SC_PAGE_SIZE); 3031 #elif defined(_SC_PAGE_SIZE) 3032 return sysconf(_SC_PAGE_SIZE); 3033 #else 3034 return 1; 3035 #endif 3036 } 3037 #endif 3038 3039 /* DOCDOC */ 3040 /* Requires lock */ 3041 static int 3042 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) 3043 { 3044 const unsigned flags = seg->flags; 3045 const int fd = seg->fd; 3046 const ev_off_t length = seg->length; 3047 const ev_off_t offset = seg->file_offset; 3048 3049 if (seg->contents) 3050 return 0; /* already materialized */ 3051 3052 #if defined(EVENT__HAVE_MMAP) 3053 if (!(flags & 
EVBUF_FS_DISABLE_MMAP)) {
3054 off_t offset_rounded = 0, offset_leftover = 0;
3055 void *mapped;
3056 if (offset) {
3057 /* mmap implementations don't generally like us
3058 * to have an offset that isn't a round multiple of the page size. */
3059 long page_size = get_page_size();
3060 if (page_size == -1)
3061 goto err;
3062 offset_leftover = offset % page_size;
3063 offset_rounded = offset - offset_leftover;
3064 }
3065 mapped = mmap(NULL, length + offset_leftover,
3066 PROT_READ,
3067 #ifdef MAP_NOCACHE
3068 MAP_NOCACHE | /* ??? */
3069 #endif
3070 #ifdef MAP_FILE
3071 MAP_FILE |
3072 #endif
3073 MAP_PRIVATE,
3074 fd, offset_rounded);
3075 if (mapped == MAP_FAILED) {
3076 event_warn("%s: mmap(%d, %d, %zu) failed",
3077 __func__, fd, 0, (size_t)(offset + length));
3078 } else {
3079 seg->mapping = mapped;
3080 seg->contents = (char*)mapped+offset_leftover;
3081 seg->mmap_offset = 0;
3082 seg->is_mapping = 1;
3083 goto done;
3084 }
3085 }
3086 #endif
3087 #ifdef _WIN32
3088 if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
3089 intptr_t h = _get_osfhandle(fd);
3090 HANDLE m;
3091 ev_uint64_t total_size = length+offset;
3092 if ((HANDLE)h == INVALID_HANDLE_VALUE)
3093 goto err;
3094 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
3095 (total_size >> 32), total_size & 0xfffffffful,
3096 NULL);
3097 if (m != NULL) { /* CreateFileMapping returns NULL on failure. Does h leak? */
3098 seg->mapping_handle = m;
3099 seg->mmap_offset = offset;
3100 seg->is_mapping = 1;
3101 goto done;
3102 }
3103 }
3104 #endif
3105 {
3106 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
3107 ev_off_t read_so_far = 0;
3108 char *mem;
3109 int e;
3110 ev_ssize_t n = 0;
3111 if (!(mem = mm_malloc(length)))
3112 goto err;
3113 if (start_pos < 0) {
3114 mm_free(mem);
3115 goto err;
3116 }
3117 if (lseek(fd, offset, SEEK_SET) < 0) {
3118 mm_free(mem);
3119 goto err;
3120 }
3121 while (read_so_far < length) {
3122 n = read(fd, mem+read_so_far, length-read_so_far);
3123 if (n <= 0)
3124 break;
3125 read_so_far += n;
3126 }
3127
3128 e = errno;
3129 pos = lseek(fd, start_pos, SEEK_SET);
3130 if (n < 0 || (n == 0 && length > read_so_far)) {
3131 mm_free(mem);
3132 errno = e;
3133 goto err;
3134 } else if (pos < 0) {
3135 mm_free(mem);
3136 goto err;
3137 }
3138
3139 seg->contents = mem;
3140 }
3141
3142 done:
3143 return 0;
3144 err:
3145 return -1;
3146 }
3147
3148 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
3149 evbuffer_file_segment_cleanup_cb cb, void* arg)
3150 {
3151 EVUTIL_ASSERT(seg->refcnt > 0);
3152 seg->cleanup_cb = cb;
3153 seg->cleanup_cb_arg = arg;
3154 }
3155
3156 void
3157 evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
3158 {
3159 int refcnt;
3160 EVLOCK_LOCK(seg->lock, 0);
3161 refcnt = --seg->refcnt;
3162 EVLOCK_UNLOCK(seg->lock, 0);
3163 if (refcnt > 0)
3164 return;
3165 EVUTIL_ASSERT(refcnt == 0);
3166
3167 if (seg->is_mapping) {
3168 #ifdef _WIN32
3169 CloseHandle(seg->mapping_handle);
3170 #elif defined (EVENT__HAVE_MMAP)
3171 off_t offset_leftover;
3172 offset_leftover = seg->file_offset % get_page_size();
3173 if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
3174 event_warn("%s: munmap failed", __func__);
3175 #endif
3176 } else if (seg->contents) {
3177 mm_free(seg->contents);
3178 }
3179
3180 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
3181 close(seg->fd);
3182 }
3183
3184 if (seg->cleanup_cb) {
3185 (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
3186 seg->flags, seg->cleanup_cb_arg);
3187 seg->cleanup_cb = NULL;
3188 seg->cleanup_cb_arg = NULL;
3189 }
3190
3191
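/* No references remain: release the lock and the segment itself. */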
EVTHREAD_FREE_LOCK(seg->lock, 0); 3192 mm_free(seg); 3193 } 3194 3195 int 3196 evbuffer_add_file_segment(struct evbuffer *buf, 3197 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3198 { 3199 struct evbuffer_chain *chain; 3200 struct evbuffer_chain_file_segment *extra; 3201 int can_use_sendfile = 0; 3202 3203 EVBUFFER_LOCK(buf); 3204 EVLOCK_LOCK(seg->lock, 0); 3205 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3206 can_use_sendfile = 1; 3207 } else { 3208 if (!seg->contents) { 3209 if (evbuffer_file_segment_materialize(seg)<0) { 3210 EVLOCK_UNLOCK(seg->lock, 0); 3211 EVBUFFER_UNLOCK(buf); 3212 return -1; 3213 } 3214 } 3215 } 3216 EVLOCK_UNLOCK(seg->lock, 0); 3217 3218 if (buf->freeze_end) 3219 goto err; 3220 3221 if (length < 0) { 3222 if (offset > seg->length) 3223 goto err; 3224 length = seg->length - offset; 3225 } 3226 3227 /* Can we actually add this? */ 3228 if (offset+length > seg->length) 3229 goto err; 3230 3231 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3232 if (!chain) 3233 goto err; 3234 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3235 3236 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3237 if (can_use_sendfile && seg->can_sendfile) { 3238 chain->flags |= EVBUFFER_SENDFILE; 3239 chain->misalign = seg->file_offset + offset; 3240 chain->off = length; 3241 chain->buffer_len = chain->misalign + length; 3242 } else if (seg->is_mapping) { 3243 #ifdef _WIN32 3244 ev_uint64_t total_offset = seg->mmap_offset+offset; 3245 ev_uint64_t offset_rounded=0, offset_remaining=0; 3246 LPVOID data; 3247 if (total_offset) { 3248 SYSTEM_INFO si; 3249 memset(&si, 0, sizeof(si)); /* cargo cult */ 3250 GetSystemInfo(&si); 3251 offset_remaining = total_offset % si.dwAllocationGranularity; 3252 offset_rounded = total_offset - offset_remaining; 3253 } 3254 data = MapViewOfFile( 3255 seg->mapping_handle, 3256 FILE_MAP_READ, 3257 offset_rounded >> 32, 3258 offset_rounded & 0xfffffffful, 3259 length + offset_remaining); 3260 if (data == NULL) { 3261 mm_free(chain); 3262 goto err; 3263 } 3264 chain->buffer = (unsigned char*) data; 3265 chain->buffer_len = length+offset_remaining; 3266 chain->misalign = offset_remaining; 3267 chain->off = length; 3268 #else 3269 chain->buffer = (unsigned char*)(seg->contents + offset); 3270 chain->buffer_len = length; 3271 chain->off = length; 3272 #endif 3273 } else { 3274 chain->buffer = (unsigned char*)(seg->contents + offset); 3275 chain->buffer_len = length; 3276 chain->off = length; 3277 } 3278 3279 EVLOCK_LOCK(seg->lock, 0); 3280 ++seg->refcnt; 3281 EVLOCK_UNLOCK(seg->lock, 0); 3282 extra->segment = seg; 3283 buf->n_add_for_cb += length; 3284 evbuffer_chain_insert(buf, chain); 3285 3286 evbuffer_invoke_callbacks_(buf); 3287 3288 EVBUFFER_UNLOCK(buf); 3289 3290 return 0; 3291 err: 3292 EVBUFFER_UNLOCK(buf); 3293 evbuffer_file_segment_free(seg); /* Lowers the refcount */ 3294 return -1; 3295 } 3296 3297 int 3298 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3299 { 3300 struct evbuffer_file_segment *seg; 3301 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3302 int r; 3303 3304 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3305 if (!seg) 3306 return -1; 3307 r = evbuffer_add_file_segment(buf, seg, 0, length); 3308 if (r == 0) 3309 evbuffer_file_segment_free(seg); 3310 return r; 3311 } 3312 3313 int 3314 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3315 { 3316 EVBUFFER_LOCK(buffer); 3317 3318 if 
(!LIST_EMPTY(&buffer->callbacks)) 3319 evbuffer_remove_all_callbacks(buffer); 3320 3321 if (cb) { 3322 struct evbuffer_cb_entry *ent = 3323 evbuffer_add_cb(buffer, NULL, cbarg); 3324 if (!ent) { 3325 EVBUFFER_UNLOCK(buffer); 3326 return -1; 3327 } 3328 ent->cb.cb_obsolete = cb; 3329 ent->flags |= EVBUFFER_CB_OBSOLETE; 3330 } 3331 EVBUFFER_UNLOCK(buffer); 3332 return 0; 3333 } 3334 3335 struct evbuffer_cb_entry * 3336 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3337 { 3338 struct evbuffer_cb_entry *e; 3339 if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 3340 return NULL; 3341 EVBUFFER_LOCK(buffer); 3342 e->cb.cb_func = cb; 3343 e->cbarg = cbarg; 3344 e->flags = EVBUFFER_CB_ENABLED; 3345 LIST_INSERT_HEAD(&buffer->callbacks, e, next); 3346 EVBUFFER_UNLOCK(buffer); 3347 return e; 3348 } 3349 3350 int 3351 evbuffer_remove_cb_entry(struct evbuffer *buffer, 3352 struct evbuffer_cb_entry *ent) 3353 { 3354 EVBUFFER_LOCK(buffer); 3355 LIST_REMOVE(ent, next); 3356 EVBUFFER_UNLOCK(buffer); 3357 mm_free(ent); 3358 return 0; 3359 } 3360 3361 int 3362 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3363 { 3364 struct evbuffer_cb_entry *cbent; 3365 int result = -1; 3366 EVBUFFER_LOCK(buffer); 3367 LIST_FOREACH(cbent, &buffer->callbacks, next) { 3368 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 3369 result = evbuffer_remove_cb_entry(buffer, cbent); 3370 goto done; 3371 } 3372 } 3373 done: 3374 EVBUFFER_UNLOCK(buffer); 3375 return result; 3376 } 3377 3378 int 3379 evbuffer_cb_set_flags(struct evbuffer *buffer, 3380 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3381 { 3382 /* the user isn't allowed to mess with these. */ 3383 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3384 EVBUFFER_LOCK(buffer); 3385 cb->flags |= flags; 3386 EVBUFFER_UNLOCK(buffer); 3387 return 0; 3388 } 3389 3390 int 3391 evbuffer_cb_clear_flags(struct evbuffer *buffer, 3392 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3393 { 3394 /* the user isn't allowed to mess with these. 
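* Internal-only bits are silently masked out rather than treated
* as an error.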
*/ 3395 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3396 EVBUFFER_LOCK(buffer); 3397 cb->flags &= ~flags; 3398 EVBUFFER_UNLOCK(buffer); 3399 return 0; 3400 } 3401 3402 int 3403 evbuffer_freeze(struct evbuffer *buffer, int start) 3404 { 3405 EVBUFFER_LOCK(buffer); 3406 if (start) 3407 buffer->freeze_start = 1; 3408 else 3409 buffer->freeze_end = 1; 3410 EVBUFFER_UNLOCK(buffer); 3411 return 0; 3412 } 3413 3414 int 3415 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3416 { 3417 EVBUFFER_LOCK(buffer); 3418 if (start) 3419 buffer->freeze_start = 0; 3420 else 3421 buffer->freeze_end = 0; 3422 EVBUFFER_UNLOCK(buffer); 3423 return 0; 3424 } 3425 3426 #if 0 3427 void 3428 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3429 { 3430 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3431 cb->size_before_suspend = evbuffer_get_length(buffer); 3432 cb->flags |= EVBUFFER_CB_SUSPENDED; 3433 } 3434 } 3435 3436 void 3437 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3438 { 3439 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3440 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3441 size_t sz = cb->size_before_suspend; 3442 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3443 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3444 cb->size_before_suspend = 0; 3445 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3446 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3447 } 3448 } 3449 } 3450 #endif 3451 3452 int 3453 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, 3454 int max_cbs) 3455 { 3456 int r = 0; 3457 EVBUFFER_LOCK(buffer); 3458 if (buffer->deferred_cbs) { 3459 if (max_cbs < 1) { 3460 r = -1; 3461 goto done; 3462 } 3463 cbs[0] = &buffer->deferred; 3464 r = 1; 3465 } 3466 done: 3467 EVBUFFER_UNLOCK(buffer); 3468 return r; 3469 } 3470
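
/*
 * Usage sketch (illustrative only; not part of the library): a minimal
 * pass through the public API implemented above, assuming `fd' is a
 * connected socket supplied by the caller.
 *
 *	struct evbuffer *buf = evbuffer_new();
 *	if (buf != NULL) {
 *		evbuffer_add_printf(buf, "GET / HTTP/1.0\r\n\r\n");
 *		while (evbuffer_get_length(buf) > 0 &&
 *		    evbuffer_write(buf, fd) > 0)
 *			;	/* evbuffer_write() drains what it sends */
 *		evbuffer_free(buf);
 *	}
 *
 * A read side would call evbuffer_read(buf, fd, -1), then drain or
 * inspect the result with evbuffer_remove(), evbuffer_copyout(), or
 * evbuffer_search().
 */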