/*	$NetBSD: buffer.c,v 1.5 2021/04/07 03:36:48 christos Exp $	*/

/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: buffer.c,v 1.5 2021/04/07 03:36:48 christos Exp $");
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <io.h>
#endif

#ifdef EVENT__HAVE_VASPRINTF
/* If we have vasprintf, we need to define _GNU_SOURCE before we include
 * stdio.h.  This comes from evconfig-private.h.
 */
#endif
#include <sys/types.h>

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef EVENT__HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef EVENT__HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef EVENT__HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef EVENT__HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif
#ifdef EVENT__HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"
#include "event-internal.h"

/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED	((void *)-1)
#endif

/* send file support */
#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	       0x00040000

/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
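/* Editor's note (illustrative, not part of the upstream source): a chain's
 * buffer is laid out as [misalign drained bytes][off bytes of live data]
 * [free space], so for a mutable chain the macros above amount to:
 *
 *	unsigned char *write_ptr = ch->buffer + ch->misalign + ch->off;
 *	size_t free_space = ch->buffer_len - (ch->misalign + ch->off);
 *
 * Immutable chains report zero writable space instead.
 */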
#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)

/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do {			\
	(ptr)->pos = -1;			\
	(ptr)->internal_.chain = NULL;		\
	(ptr)->internal_.pos_in_chain = 0;	\
} while (0)

static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);
static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar);
static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);

static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
		return (NULL);

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	if (size < EVBUFFER_CHAIN_MAX / 2) {
		to_alloc = MIN_BUFFER_SIZE;
		while (to_alloc < size) {
			to_alloc <<= 1;
		}
	} else {
		to_alloc = size;
	}

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);

	chain->refcnt = 1;

	return (chain);
}
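/* Illustrative sketch (not part of the upstream source): small requests are
 * rounded up so the total allocation is a power of two, so assuming
 * MIN_BUFFER_SIZE is 1024:
 *
 *	struct evbuffer_chain *ch = evbuffer_chain_new(100);
 *	// one mm_malloc(1024): header plus payload in a single block, and
 *	// ch->buffer_len == 1024 - EVBUFFER_CHAIN_SIZE
 *
 * Requests of EVBUFFER_CHAIN_MAX/2 and above are allocated exactly.
 */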
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}

static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif

/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}

/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}

static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}

void
evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}

static inline void
evbuffer_chain_incref(struct evbuffer_chain *chain)
{
	++chain->refcnt;
}

struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	LIST_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}

int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

void
evbuffer_incref_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
evbuffer_incref_and_lock_(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}

int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}

void
evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
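/* Illustrative usage (not part of the upstream source), assuming an
 * already-initialized event_base named "base":
 *
 *	struct evbuffer *buf = evbuffer_new();
 *	if (buf) {
 *		evbuffer_enable_locking(buf, NULL);  // NULL: allocate own lock
 *		evbuffer_defer_callbacks(buf, base); // callbacks run from base
 *	}
 *
 * As evbuffer_enable_locking above shows, passing NULL makes the buffer
 * allocate and own a recursive lock; a caller-supplied lock is merely
 * borrowed.
 */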
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	     cbent != LIST_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}

void
evbuffer_invoke_callbacks_(struct evbuffer *buffer)
{
	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
			evbuffer_incref_and_lock_(buffer);
			if (buffer->parent)
				bufferevent_incref_(buffer->parent);
			EVBUFFER_UNLOCK(buffer);
		}
	}

	evbuffer_run_callbacks(buffer, 0);
}

static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}

static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
		LIST_REMOVE(cbent, next);
		mm_free(cbent);
	}
}

void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
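/* Illustrative sketch (not part of the upstream source): a callback
 * registered with the public evbuffer_add_cb() sees the accounting that
 * evbuffer_run_callbacks() fills in above:
 *
 *	static void
 *	trace_cb(struct evbuffer *b, const struct evbuffer_cb_info *info,
 *	    void *arg)
 *	{
 *		// info->orig_size: length before this batch of changes;
 *		// info->n_added / info->n_deleted: bytes added and drained.
 *	}
 */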
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}

size_t
evbuffer_add_iovec(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vec)
{
	int n;
	size_t res;
	size_t to_alloc;

	EVBUFFER_LOCK(buf);

	res = to_alloc = 0;

	for (n = 0; n < n_vec; n++) {
		to_alloc += vec[n].iov_len;
	}

	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
		goto done;
	}

	for (n = 0; n < n_vec; n++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */

		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
			goto done;
		}

		res += vec[n].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return res;
}

int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = (void *)CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t)CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
		    &chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;
}

static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	struct evbuffer_chain **chainp = buf->last_with_datap;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*chainp)
		return 0;

	while ((*chainp)->next) {
		chainp = &(*chainp)->next;
		if ((*chainp)->off)
			buf->last_with_datap = chainp;
		++n;
	}
	return n;
}
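/* Illustrative usage (not part of the upstream source): the reserve/commit
 * pair (evbuffer_reserve_space above, evbuffer_commit_space below) lets
 * callers fill buffer space in place and publish only what was actually
 * written:
 *
 *	struct evbuffer_iovec v[2];
 *	int n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	if (n > 0) {
 *		size_t written = produce(v, n); // hypothetical producer
 *		v[0].iov_len = written;         // assume it fit in v[0]
 *		evbuffer_commit_space(buf, v, 1);
 *	}
 */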
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it.
		 */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}

/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains.  The first pinned chain is saved in first, and the
 * last in last.  If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over.
	 */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}

static inline void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}

static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}

static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain **chp;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	chp = evbuffer_free_trailing_empty_chains(dst);
	*chp = src->first;

	if (src->last_with_datap == &src->first)
		dst->last_with_datap = chp;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}

static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty or dangling chains */
			continue;
		}

		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}

static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
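/* Illustrative usage (not part of the upstream source): evbuffer_add_buffer
 * below moves chains rather than copying bytes; on success the source is
 * left empty (pinned chains aside):
 *
 *	evbuffer_add(src, "hello", 5);
 *	evbuffer_add_buffer(dst, src);
 *	// evbuffer_get_length(src) == 0; "hello" now lives in dst
 */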
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	size_t in_total_len, out_total_len;
	struct evbuffer_chain *chain;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;
	chain = inbuf->first;

	if (in_total_len == 0)
		goto done;

	if (outbuf->freeze_end || outbuf == inbuf) {
		result = -1;
		goto done;
	}

	for (; chain; chain = chain->next) {
		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
			/* chain type can not be referenced */
			result = -1;
			goto done;
		}
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
	}
	APPEND_CHAIN_MULTICAST(outbuf, inbuf);

	outbuf->n_add_for_cb += in_total_len;
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
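/* Illustrative usage (not part of the upstream source): unlike
 * evbuffer_add_buffer, evbuffer_add_buffer_reference above leaves the
 * source intact but marks its chains immutable, so one source can feed
 * several outputs without copying:
 *
 *	evbuffer_add_buffer_reference(out1, src);
 *	evbuffer_add_buffer_reference(out2, src);
 *	// out1 and out2 share src's memory; src can no longer be
 *	// modified in place
 */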
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}

int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		EVUTIL_ASSERT(remaining <= chain->off);
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}

ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}
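/* Illustrative note (not part of the upstream source): as the code above
 * shows, evbuffer_remove is copyout-then-drain, so these two forms are
 * broadly equivalent:
 *
 *	char tmp[128];
 *	int n = evbuffer_remove(buf, tmp, sizeof(tmp));
 *
 *	ev_ssize_t m = evbuffer_copyout(buf, tmp, sizeof(tmp));
 *	if (m > 0)
 *		evbuffer_drain(buf, m); // copyout alone leaves data in place
 */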
ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
			result = -1;
			goto done;
		}
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);

		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/* reads data from the src buffer to the dst buffer, avoiding memcpy when
 * possible. */
/* XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (chain != src->first) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}

unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		if (chain->buffer) {
			memcpy(buffer, chain->buffer + chain->misalign, chain->off);
			size -= chain->off;
			buffer += chain->off;
		}
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}

static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
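/* Illustrative usage (not part of the upstream source): evbuffer_pullup
 * above linearizes the first n bytes so they can be parsed with plain
 * pointer arithmetic, copying only when they span multiple chains:
 *
 *	unsigned char *hdr = evbuffer_pullup(buf, 8);
 *	if (hdr) {
 *		// first 8 bytes are now contiguous; the buffer still owns
 *		// this memory, so do not free it
 *	}
 */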
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}

static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t i = it->internal_.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->internal_.chain = chain;
			it->internal_.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}

static inline size_t
evbuffer_strspn(
	struct evbuffer_ptr *ptr, const char *chrset)
{
	size_t count = 0;
	struct evbuffer_chain *chain = ptr->internal_.chain;
	size_t i = ptr->internal_.pos_in_chain;

	if (!chain)
		return 0;

	while (1) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		for (; i < chain->off; ++i) {
			const char *p = chrset;
			while (*p) {
				if (buffer[i] == *p++)
					goto next;
			}
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		next:
			++count;
		}
		i = 0;

		if (! chain->next) {
			ptr->internal_.chain = chain;
			ptr->internal_.pos_in_chain = i;
			ptr->pos += count;
			return count;
		}

		chain = chain->next;
	}
}


static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->internal_.chain;
	size_t off = it->internal_.pos_in_chain;

	if (chain == NULL)
		return -1;

	return (unsigned char)chain->buffer[chain->misalign + off];
}

struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}

char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
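/* Illustrative usage (not part of the upstream source): evbuffer_readln
 * above is the usual way to read CRLF-terminated lines, e.g. from an
 * HTTP-style stream:
 *
 *	size_t len;
 *	char *line;
 *	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
 *		// "line" is NUL-terminated and the EOL is already drained
 *		free(line); // the returned buffer is freed by the caller
 *	}
 */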
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	if (*buf->last_with_datap == NULL) {
		chain = buf->last;
	} else {
		chain = *buf->last_with_datap;
	}

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data.
	 */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
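/* Illustrative note (not part of the upstream source): evbuffer_add above
 * first tries the current last chain, then a realign of that chain, and
 * only then allocates a new chain, e.g.:
 *
 *	evbuffer_add(buf, "GET / HTTP/1.0\r\n\r\n", 18);
 */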
*/ 1880 memcpy(chain->buffer, 1881 (const char*)data + datlen - chain->misalign, 1882 (size_t)chain->misalign); 1883 chain->off += (size_t)chain->misalign; 1884 buf->total_len += (size_t)chain->misalign; 1885 buf->n_add_for_cb += (size_t)chain->misalign; 1886 datlen -= (size_t)chain->misalign; 1887 chain->misalign = 0; 1888 } 1889 } 1890 1891 /* we need to add another chain */ 1892 if ((tmp = evbuffer_chain_new(datlen)) == NULL) 1893 goto done; 1894 buf->first = tmp; 1895 if (buf->last_with_datap == &buf->first && chain->off) 1896 buf->last_with_datap = &tmp->next; 1897 1898 tmp->next = chain; 1899 1900 tmp->off = datlen; 1901 EVUTIL_ASSERT(datlen <= tmp->buffer_len); 1902 tmp->misalign = tmp->buffer_len - datlen; 1903 1904 memcpy(tmp->buffer + tmp->misalign, data, datlen); 1905 buf->total_len += datlen; 1906 buf->n_add_for_cb += datlen; 1907 1908 out: 1909 evbuffer_invoke_callbacks_(buf); 1910 result = 0; 1911 done: 1912 EVBUFFER_UNLOCK(buf); 1913 return result; 1914 } 1915 1916 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */ 1917 static void 1918 evbuffer_chain_align(struct evbuffer_chain *chain) 1919 { 1920 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE)); 1921 EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY)); 1922 memmove(chain->buffer, chain->buffer + chain->misalign, chain->off); 1923 chain->misalign = 0; 1924 } 1925 1926 #define MAX_TO_COPY_IN_EXPAND 4096 1927 #define MAX_TO_REALIGN_IN_EXPAND 2048 1928 1929 /** Helper: return true iff we should realign chain to fit datalen bytes of 1930 data in it. */ 1931 static int 1932 evbuffer_chain_should_realign(struct evbuffer_chain *chain, 1933 size_t datlen) 1934 { 1935 return chain->buffer_len - chain->off >= datlen && 1936 (chain->off < chain->buffer_len / 2) && 1937 (chain->off <= MAX_TO_REALIGN_IN_EXPAND); 1938 } 1939 1940 /* Expands the available space in the event buffer to at least datlen, all in 1941 * a single chunk. Return that chunk. */ 1942 static struct evbuffer_chain * 1943 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen) 1944 { 1945 struct evbuffer_chain *chain, **chainp; 1946 struct evbuffer_chain *result = NULL; 1947 ASSERT_EVBUFFER_LOCKED(buf); 1948 1949 chainp = buf->last_with_datap; 1950 1951 /* XXX If *chainp is no longer writeable, but has enough space in its 1952 * misalign, this might be a bad idea: we could still use *chainp, not 1953 * (*chainp)->next. */ 1954 if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0) 1955 chainp = &(*chainp)->next; 1956 1957 /* 'chain' now points to the first chain with writable space (if any) 1958 * We will either use it, realign it, replace it, or resize it. */ 1959 chain = *chainp; 1960 1961 if (chain == NULL || 1962 (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { 1963 /* We can't use the last_with_data chain at all. Just add a 1964 * new one that's big enough. */ 1965 goto insert_new; 1966 } 1967 1968 /* If we can fit all the data, then we don't have to do anything */ 1969 if (CHAIN_SPACE_LEN(chain) >= datlen) { 1970 result = chain; 1971 goto ok; 1972 } 1973 1974 /* If the chain is completely empty, just replace it by adding a new 1975 * empty chain. */ 1976 if (chain->off == 0) { 1977 goto insert_new; 1978 } 1979 1980 /* If the misalignment plus the remaining space fulfills our data 1981 * needs, we could just force an alignment to happen. Afterwards, we 1982 * have enough space. But only do this if we're saving a lot of space 1983 * and not moving too much data. 
Otherwise the space savings are 1984 * probably offset by the time lost in copying. 1985 */ 1986 if (evbuffer_chain_should_realign(chain, datlen)) { 1987 evbuffer_chain_align(chain); 1988 result = chain; 1989 goto ok; 1990 } 1991 1992 /* At this point, we can either resize the last chunk with space in 1993 * it, use the next chunk after it, or If we add a new chunk, we waste 1994 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we 1995 * resize, we have to copy chain->off bytes. 1996 */ 1997 1998 /* Would expanding this chunk be affordable and worthwhile? */ 1999 if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 || 2000 chain->off > MAX_TO_COPY_IN_EXPAND || 2001 datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { 2002 /* It's not worth resizing this chain. Can the next one be 2003 * used? */ 2004 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { 2005 /* Yes, we can just use the next chain (which should 2006 * be empty. */ 2007 result = chain->next; 2008 goto ok; 2009 } else { 2010 /* No; append a new chain (which will free all 2011 * terminal empty chains.) */ 2012 goto insert_new; 2013 } 2014 } else { 2015 /* Okay, we're going to try to resize this chain: Not doing so 2016 * would waste at least 1/8 of its current allocation, and we 2017 * can do so without having to copy more than 2018 * MAX_TO_COPY_IN_EXPAND bytes. */ 2019 /* figure out how much space we need */ 2020 size_t length = chain->off + datlen; 2021 struct evbuffer_chain *tmp = evbuffer_chain_new(length); 2022 if (tmp == NULL) 2023 goto err; 2024 2025 /* copy the data over that we had so far */ 2026 tmp->off = chain->off; 2027 memcpy(tmp->buffer, chain->buffer + chain->misalign, 2028 chain->off); 2029 /* fix up the list */ 2030 EVUTIL_ASSERT(*chainp == chain); 2031 result = *chainp = tmp; 2032 2033 if (buf->last == chain) 2034 buf->last = tmp; 2035 2036 tmp->next = chain->next; 2037 evbuffer_chain_free(chain); 2038 goto ok; 2039 } 2040 2041 insert_new: 2042 result = evbuffer_chain_insert_new(buf, datlen); 2043 if (!result) 2044 goto err; 2045 ok: 2046 EVUTIL_ASSERT(result); 2047 EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen); 2048 err: 2049 return result; 2050 } 2051 2052 /* Make sure that datlen bytes are available for writing in the last n 2053 * chains. Never copies or moves data. */ 2054 int 2055 evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n) 2056 { 2057 struct evbuffer_chain *chain = buf->last, *tmp, *next; 2058 size_t avail; 2059 int used; 2060 2061 ASSERT_EVBUFFER_LOCKED(buf); 2062 EVUTIL_ASSERT(n >= 2); 2063 2064 if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { 2065 /* There is no last chunk, or we can't touch the last chunk. 2066 * Just add a new chunk. */ 2067 chain = evbuffer_chain_new(datlen); 2068 if (chain == NULL) 2069 return (-1); 2070 2071 evbuffer_chain_insert(buf, chain); 2072 return (0); 2073 } 2074 2075 used = 0; /* number of chains we're using space in. */ 2076 avail = 0; /* how much space they have. */ 2077 /* How many bytes can we stick at the end of buffer as it is? Iterate 2078 * over the chains at the end of the buffer, tring to see how much 2079 * space we have in the first n. */ 2080 for (chain = *buf->last_with_datap; chain; chain = chain->next) { 2081 if (chain->off) { 2082 size_t space = (size_t) CHAIN_SPACE_LEN(chain); 2083 EVUTIL_ASSERT(chain == *buf->last_with_datap); 2084 if (space) { 2085 avail += space; 2086 ++used; 2087 } 2088 } else { 2089 /* No data in chain; realign it. 
*/ 2090 chain->misalign = 0; 2091 avail += chain->buffer_len; 2092 ++used; 2093 } 2094 if (avail >= datlen) { 2095 /* There is already enough space. Just return. */ 2096 return (0); 2097 } 2098 if (used == n) 2099 break; 2100 } 2101 2102 /* There wasn't enough space in the first n chains with space in 2103 * them. Either add a new chain with enough space, or replace all 2104 * empty chains with one that has enough space, depending on n. */ 2105 if (used < n) { 2106 /* The loop ran off the end of the chains before it hit n 2107 * chains; we can add another. */ 2108 EVUTIL_ASSERT(chain == NULL); 2109 2110 tmp = evbuffer_chain_new(datlen - avail); 2111 if (tmp == NULL) 2112 return (-1); 2113 2114 buf->last->next = tmp; 2115 buf->last = tmp; 2116 /* (we would only set last_with_data if we added the first 2117 * chain. But if the buffer had no chains, we would have 2118 * just allocated a new chain earlier) */ 2119 return (0); 2120 } else { 2121 /* Nuke _all_ the empty chains. */ 2122 int rmv_all = 0; /* True iff we removed last_with_data. */ 2123 chain = *buf->last_with_datap; 2124 if (!chain->off) { 2125 EVUTIL_ASSERT(chain == buf->first); 2126 rmv_all = 1; 2127 avail = 0; 2128 } else { 2129 /* can't overflow, since only mutable chains have 2130 * huge misaligns. */ 2131 avail = (size_t) CHAIN_SPACE_LEN(chain); 2132 chain = chain->next; 2133 } 2134 2135 2136 for (; chain; chain = next) { 2137 next = chain->next; 2138 EVUTIL_ASSERT(chain->off == 0); 2139 evbuffer_chain_free(chain); 2140 } 2141 EVUTIL_ASSERT(datlen >= avail); 2142 tmp = evbuffer_chain_new(datlen - avail); 2143 if (tmp == NULL) { 2144 if (rmv_all) { 2145 ZERO_CHAIN(buf); 2146 } else { 2147 buf->last = *buf->last_with_datap; 2148 (*buf->last_with_datap)->next = NULL; 2149 } 2150 return (-1); 2151 } 2152 2153 if (rmv_all) { 2154 buf->first = buf->last = tmp; 2155 buf->last_with_datap = &buf->first; 2156 } else { 2157 (*buf->last_with_datap)->next = tmp; 2158 buf->last = tmp; 2159 } 2160 return (0); 2161 } 2162 } 2163 2164 int 2165 evbuffer_expand(struct evbuffer *buf, size_t datlen) 2166 { 2167 struct evbuffer_chain *chain; 2168 2169 EVBUFFER_LOCK(buf); 2170 chain = evbuffer_expand_singlechain(buf, datlen); 2171 EVBUFFER_UNLOCK(buf); 2172 return chain ? 0 : -1; 2173 } 2174 2175 /* 2176 * Reads data from a file descriptor into a buffer. 2177 */ 2178 2179 #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32) 2180 #define USE_IOVEC_IMPL 2181 #endif 2182 2183 #ifdef USE_IOVEC_IMPL 2184 2185 #ifdef EVENT__HAVE_SYS_UIO_H 2186 /* Number of iovecs we use for writev; fragmentation determines 2187 * how much we end up writing. */ 2188 2189 #define DEFAULT_WRITE_IOVEC 128 2190 2191 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC 2192 #define NUM_WRITE_IOVEC UIO_MAXIOV 2193 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC 2194 #define NUM_WRITE_IOVEC IOV_MAX 2195 #else 2196 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC 2197 #endif 2198 2199 #define IOV_TYPE struct iovec 2200 #define IOV_PTR_FIELD iov_base 2201 #define IOV_LEN_FIELD iov_len 2202 #define IOV_LEN_TYPE size_t 2203 #else 2204 #define NUM_WRITE_IOVEC 16 2205 #define IOV_TYPE WSABUF 2206 #define IOV_PTR_FIELD buf 2207 #define IOV_LEN_FIELD len 2208 #define IOV_LEN_TYPE unsigned long 2209 #endif 2210 #endif 2211 #define NUM_READ_IOVEC 4 2212 2213 #define EVBUFFER_MAX_READ 4096 2214 2215 /** Helper function to figure out which space to use for reading data into 2216 an evbuffer. Internal use only.
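Example (illustrative only): given two writable chains with 3000 and 5000 free bytes, howmuch == 4096, and exact set, this fills vecs[0].iov_len = 3000 and vecs[1].iov_len = 1096, records the first of those chains through chainp, and returns 2.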
2217 2218 @param buf The buffer to read into 2219 @param howmuch How much we want to read. 2220 @param vecs An array of two or more iovecs or WSABUFs. 2221 @param n_vecs_avail The length of vecs 2222 @param chainp A pointer to a variable to hold the first chain we're 2223 reading into. 2224 @param exact Boolean: if true, we do not provide more than 'howmuch' 2225 space in the vectors, even if more space is available. 2226 @return The number of buffers we're using. 2227 */ 2228 int 2229 evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, 2230 struct evbuffer_iovec *vecs, int n_vecs_avail, 2231 struct evbuffer_chain ***chainp, int exact) 2232 { 2233 struct evbuffer_chain *chain; 2234 struct evbuffer_chain **firstchainp; 2235 size_t so_far; 2236 int i; 2237 ASSERT_EVBUFFER_LOCKED(buf); 2238 2239 if (howmuch < 0) 2240 return -1; 2241 2242 so_far = 0; 2243 /* Let firstchain be the first chain with any space on it */ 2244 firstchainp = buf->last_with_datap; 2245 EVUTIL_ASSERT(*firstchainp); 2246 if (CHAIN_SPACE_LEN(*firstchainp) == 0) { 2247 firstchainp = &(*firstchainp)->next; 2248 } 2249 2250 chain = *firstchainp; 2251 EVUTIL_ASSERT(chain); 2252 for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) { 2253 size_t avail = (size_t) CHAIN_SPACE_LEN(chain); 2254 if (avail > (howmuch - so_far) && exact) 2255 avail = howmuch - so_far; 2256 vecs[i].iov_base = (void *)CHAIN_SPACE_PTR(chain); 2257 vecs[i].iov_len = avail; 2258 so_far += avail; 2259 chain = chain->next; 2260 } 2261 2262 *chainp = firstchainp; 2263 return i; 2264 } 2265 2266 static int 2267 get_n_bytes_readable_on_socket(evutil_socket_t fd) 2268 { 2269 #if defined(FIONREAD) && defined(_WIN32) 2270 unsigned long lng = EVBUFFER_MAX_READ; 2271 if (ioctlsocket(fd, FIONREAD, &lng) < 0) 2272 return -1; 2273 /* Can overflow, but mostly harmlessly. XXXX */ 2274 return (int)lng; 2275 #elif defined(FIONREAD) 2276 int n = EVBUFFER_MAX_READ; 2277 if (ioctl(fd, FIONREAD, &n) < 0) 2278 return -1; 2279 return n; 2280 #else 2281 return EVBUFFER_MAX_READ; 2282 #endif 2283 } 2284 2285 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t 2286 * as howmuch? */ 2287 int 2288 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch) 2289 { 2290 struct evbuffer_chain **chainp; 2291 int n; 2292 int result; 2293 2294 #ifdef USE_IOVEC_IMPL 2295 int nvecs, i, remaining; 2296 #else 2297 struct evbuffer_chain *chain; 2298 unsigned char *p; 2299 #endif 2300 2301 EVBUFFER_LOCK(buf); 2302 2303 if (buf->freeze_end) { 2304 result = -1; 2305 goto done; 2306 } 2307 2308 n = get_n_bytes_readable_on_socket(fd); 2309 if (n <= 0 || n > EVBUFFER_MAX_READ) 2310 n = EVBUFFER_MAX_READ; 2311 if (howmuch < 0 || howmuch > n) 2312 howmuch = n; 2313 2314 #ifdef USE_IOVEC_IMPL 2315 /* Since we can use iovecs, we're willing to use the last 2316 * NUM_READ_IOVEC chains. */ 2317 if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) { 2318 result = -1; 2319 goto done; 2320 } else { 2321 IOV_TYPE vecs[NUM_READ_IOVEC]; 2322 #ifdef EVBUFFER_IOVEC_IS_NATIVE_ 2323 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs, 2324 NUM_READ_IOVEC, &chainp, 1); 2325 #else 2326 /* We aren't using the native struct iovec. Therefore, 2327 we are on win32. 
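We fill an evbuffer_iovec array first, then translate each entry into a WSABUF with WSABUF_FROM_EVBUFFER_IOV below before handing the array to WSARecv.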
*/ 2328 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC]; 2329 nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2, 2330 &chainp, 1); 2331 2332 for (i=0; i < nvecs; ++i) 2333 WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]); 2334 #endif 2335 2336 #ifdef _WIN32 2337 { 2338 DWORD bytesRead; 2339 DWORD flags=0; 2340 if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) { 2341 /* The read failed. It might be a close, 2342 * or it might be an error. */ 2343 if (WSAGetLastError() == WSAECONNABORTED) 2344 n = 0; 2345 else 2346 n = -1; 2347 } else 2348 n = bytesRead; 2349 } 2350 #else 2351 n = readv(fd, vecs, nvecs); 2352 #endif 2353 } 2354 2355 #else /*!USE_IOVEC_IMPL*/ 2356 /* If we don't have FIONREAD, we might waste some space here */ 2357 /* XXX we _will_ waste some space here if there is any space left 2358 * over on buf->last. */ 2359 if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) { 2360 result = -1; 2361 goto done; 2362 } 2363 2364 /* We can append new data at this point */ 2365 p = chain->buffer + chain->misalign + chain->off; 2366 2367 #ifndef _WIN32 2368 n = read(fd, p, howmuch); 2369 #else 2370 n = recv(fd, p, howmuch, 0); 2371 #endif 2372 #endif /* USE_IOVEC_IMPL */ 2373 2374 if (n == -1) { 2375 result = -1; 2376 goto done; 2377 } 2378 if (n == 0) { 2379 result = 0; 2380 goto done; 2381 } 2382 2383 #ifdef USE_IOVEC_IMPL 2384 remaining = n; 2385 for (i=0; i < nvecs; ++i) { 2386 /* can't overflow, since only mutable chains have 2387 * huge misaligns. */ 2388 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp); 2389 /* XXXX This is a kludge that can waste space in perverse 2390 * situations. */ 2391 if (space > EVBUFFER_CHAIN_MAX) 2392 space = EVBUFFER_CHAIN_MAX; 2393 if ((ev_ssize_t)space < remaining) { 2394 (*chainp)->off += space; 2395 remaining -= (int)space; 2396 } else { 2397 (*chainp)->off += remaining; 2398 buf->last_with_datap = chainp; 2399 break; 2400 } 2401 chainp = &(*chainp)->next; 2402 } 2403 #else 2404 chain->off += n; 2405 advance_last_with_data(buf); 2406 #endif 2407 buf->total_len += n; 2408 buf->n_add_for_cb += n; 2409 2410 /* Tell someone about changes in this buffer */ 2411 evbuffer_invoke_callbacks_(buf); 2412 result = n; 2413 done: 2414 EVBUFFER_UNLOCK(buf); 2415 return result; 2416 } 2417 2418 #ifdef USE_IOVEC_IMPL 2419 static inline int 2420 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd, 2421 ev_ssize_t howmuch) 2422 { 2423 IOV_TYPE iov[NUM_WRITE_IOVEC]; 2424 struct evbuffer_chain *chain = buffer->first; 2425 int n, i = 0; 2426 2427 if (howmuch < 0) 2428 return -1; 2429 2430 ASSERT_EVBUFFER_LOCKED(buffer); 2431 /* XXX make this top out at some maximal data length? If the 2432 * buffer has (say) 1MB in it, split over 128 chains, there's 2433 * no way it all gets written in one go. */ 2434 while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) { 2435 #ifdef USE_SENDFILE 2436 /* we cannot write the file data via writev */ 2437 if (chain->flags & EVBUFFER_SENDFILE) 2438 break; 2439 #endif 2440 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign); 2441 if ((size_t)howmuch >= chain->off) { 2442 /* XXX could be problematic when Windows supports mmap */ 2443 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off; 2444 howmuch -= chain->off; 2445 } else { 2446 /* XXX could be problematic when Windows supports mmap */ 2447 iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch; 2448 break; 2449 } 2450 chain = chain->next; 2451 } 2452 if (!
i) 2453 return 0; 2454 2455 #ifdef _WIN32 2456 { 2457 DWORD bytesSent; 2458 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL)) 2459 n = -1; 2460 else 2461 n = bytesSent; 2462 } 2463 #else 2464 n = writev(fd, iov, i); 2465 #endif 2466 return (n); 2467 } 2468 #endif 2469 2470 #ifdef USE_SENDFILE 2471 static inline int 2472 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd, 2473 ev_ssize_t howmuch) 2474 { 2475 struct evbuffer_chain *chain = buffer->first; 2476 struct evbuffer_chain_file_segment *info = 2477 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, 2478 chain); 2479 const int source_fd = info->segment->fd; 2480 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD) 2481 int res; 2482 ev_off_t len = chain->off; 2483 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS) 2484 ev_ssize_t res; 2485 off_t offset = chain->misalign; 2486 #endif 2487 2488 ASSERT_EVBUFFER_LOCKED(buffer); 2489 2490 #if defined(SENDFILE_IS_MACOSX) 2491 res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0); 2492 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2493 return (-1); 2494 2495 return (len); 2496 #elif defined(SENDFILE_IS_FREEBSD) 2497 res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0); 2498 if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno)) 2499 return (-1); 2500 2501 return (len); 2502 #elif defined(SENDFILE_IS_LINUX) 2503 /* TODO(niels): implement splice */ 2504 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2505 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2506 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */ 2507 return (0); 2508 } 2509 return (res); 2510 #elif defined(SENDFILE_IS_SOLARIS) 2511 { 2512 const off_t offset_orig = offset; 2513 res = sendfile(dest_fd, source_fd, &offset, chain->off); 2514 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) { 2515 if (offset - offset_orig) 2516 return offset - offset_orig; 2517 /* if this is EAGAIN or EINTR and no bytes were 2518 * written, return 0 */ 2519 return (0); 2520 } 2521 return (res); 2522 } 2523 #endif 2524 } 2525 #endif 2526 2527 int 2528 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd, 2529 ev_ssize_t howmuch) 2530 { 2531 int n = -1; 2532 2533 EVBUFFER_LOCK(buffer); 2534 2535 if (buffer->freeze_start) { 2536 goto done; 2537 } 2538 2539 if (howmuch < 0 || (size_t)howmuch > buffer->total_len) 2540 howmuch = buffer->total_len; 2541 2542 if (howmuch > 0) { 2543 #ifdef USE_SENDFILE 2544 struct evbuffer_chain *chain = buffer->first; 2545 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE)) 2546 n = evbuffer_write_sendfile(buffer, fd, howmuch); 2547 else { 2548 #endif 2549 #ifdef USE_IOVEC_IMPL 2550 n = evbuffer_write_iovec(buffer, fd, howmuch); 2551 #elif defined(_WIN32) 2552 /* XXX(nickm) Don't disable this code until we know if 2553 * the WSARecv code above works. 
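Note that evbuffer_pullup() below may have to copy up to howmuch bytes into a single chain before we can send them; the iovec path avoids that copy.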
*/ 2554 void *p = evbuffer_pullup(buffer, howmuch); 2555 EVUTIL_ASSERT(p || !howmuch); 2556 n = send(fd, p, howmuch, 0); 2557 #else 2558 void *p = evbuffer_pullup(buffer, howmuch); 2559 EVUTIL_ASSERT(p || !howmuch); 2560 n = write(fd, p, howmuch); 2561 #endif 2562 #ifdef USE_SENDFILE 2563 } 2564 #endif 2565 } 2566 2567 if (n > 0) 2568 evbuffer_drain(buffer, n); 2569 2570 done: 2571 EVBUFFER_UNLOCK(buffer); 2572 return (n); 2573 } 2574 2575 int 2576 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) 2577 { 2578 return evbuffer_write_atmost(buffer, fd, -1); 2579 } 2580 2581 unsigned char * 2582 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len) 2583 { 2584 unsigned char *search; 2585 struct evbuffer_ptr ptr; 2586 2587 EVBUFFER_LOCK(buffer); 2588 2589 ptr = evbuffer_search(buffer, (const char *)what, len, NULL); 2590 if (ptr.pos < 0) { 2591 search = NULL; 2592 } else { 2593 search = evbuffer_pullup(buffer, ptr.pos + len); 2594 if (search) 2595 search += ptr.pos; 2596 } 2597 EVBUFFER_UNLOCK(buffer); 2598 return search; 2599 } 2600 2601 /* Subtract <b>howfar</b> from the position of <b>pos</b> within 2602 * <b>buf</b>. Returns 0 on success, -1 on failure. 2603 * 2604 * This isn't exposed yet, because of potential inefficiency issues. 2605 * Maybe it should be. */ 2606 static int 2607 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos, 2608 size_t howfar) 2609 { 2610 if (pos->pos < 0) 2611 return -1; 2612 if (howfar > (size_t)pos->pos) 2613 return -1; 2614 if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) { 2615 pos->internal_.pos_in_chain -= howfar; 2616 pos->pos -= howfar; 2617 return 0; 2618 } else { 2619 const size_t newpos = pos->pos - howfar; 2620 /* Here's the inefficient part: it walks over the 2621 * chains until we hit newpos. */ 2622 return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET); 2623 } 2624 } 2625 2626 int 2627 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos, 2628 size_t position, enum evbuffer_ptr_how how) 2629 { 2630 size_t left = position; 2631 struct evbuffer_chain *chain = NULL; 2632 int result = 0; 2633 2634 EVBUFFER_LOCK(buf); 2635 2636 switch (how) { 2637 case EVBUFFER_PTR_SET: 2638 chain = buf->first; 2639 pos->pos = position; 2640 position = 0; 2641 break; 2642 case EVBUFFER_PTR_ADD: 2643 /* this avoids iterating over all previous chains if 2644 we just want to advance the position */ 2645 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) { 2646 EVBUFFER_UNLOCK(buf); 2647 return -1; 2648 } 2649 chain = pos->internal_.chain; 2650 pos->pos += position; 2651 position = pos->internal_.pos_in_chain; 2652 break; 2653 } 2654 2655 EVUTIL_ASSERT(EV_SIZE_MAX - left >= position); 2656 while (chain && position + left >= chain->off) { 2657 left -= chain->off - position; 2658 chain = chain->next; 2659 position = 0; 2660 } 2661 if (chain) { 2662 pos->internal_.chain = chain; 2663 pos->internal_.pos_in_chain = position + left; 2664 } else if (left == 0) { 2665 /* The first byte in the (nonexistent) chain after the last chain */ 2666 pos->internal_.chain = NULL; 2667 pos->internal_.pos_in_chain = 0; 2668 } else { 2669 PTR_NOT_FOUND(pos); 2670 result = -1; 2671 } 2672 2673 EVBUFFER_UNLOCK(buf); 2674 2675 return result; 2676 } 2677 2678 /** 2679 Compare the bytes in buf at position pos to the len bytes in mem. Return 2680 less than 0, 0, or greater than 0 as memcmp does.
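Returns -1 if pos is invalid or fewer than len bytes follow it in buf; in that case the return value does not indicate ordering.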
2681 */ 2682 static int 2683 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos, 2684 const char *mem, size_t len) 2685 { 2686 struct evbuffer_chain *chain; 2687 size_t position; 2688 int r; 2689 2690 ASSERT_EVBUFFER_LOCKED(buf); 2691 2692 if (pos->pos < 0 || 2693 EV_SIZE_MAX - len < (size_t)pos->pos || 2694 pos->pos + len > buf->total_len) 2695 return -1; 2696 2697 chain = pos->internal_.chain; 2698 position = pos->internal_.pos_in_chain; 2699 while (len && chain) { 2700 size_t n_comparable; 2701 if (len + position > chain->off) 2702 n_comparable = chain->off - position; 2703 else 2704 n_comparable = len; 2705 r = memcmp(chain->buffer + chain->misalign + position, mem, 2706 n_comparable); 2707 if (r) 2708 return r; 2709 mem += n_comparable; 2710 len -= n_comparable; 2711 position = 0; 2712 chain = chain->next; 2713 } 2714 2715 return 0; 2716 } 2717 2718 struct evbuffer_ptr 2719 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start) 2720 { 2721 return evbuffer_search_range(buffer, what, len, start, NULL); 2722 } 2723 2724 struct evbuffer_ptr 2725 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end) 2726 { 2727 struct evbuffer_ptr pos; 2728 struct evbuffer_chain *chain, *last_chain = NULL; 2729 const unsigned char *p; 2730 char first; 2731 2732 EVBUFFER_LOCK(buffer); 2733 2734 if (start) { 2735 memcpy(&pos, start, sizeof(pos)); 2736 chain = pos.internal_.chain; 2737 } else { 2738 pos.pos = 0; 2739 chain = pos.internal_.chain = buffer->first; 2740 pos.internal_.pos_in_chain = 0; 2741 } 2742 2743 if (end) 2744 last_chain = end->internal_.chain; 2745 2746 if (!len || len > EV_SSIZE_MAX) 2747 goto done; 2748 2749 first = what[0]; 2750 2751 while (chain) { 2752 const unsigned char *start_at = 2753 chain->buffer + chain->misalign + 2754 pos.internal_.pos_in_chain; 2755 p = memchr(start_at, first, 2756 chain->off - pos.internal_.pos_in_chain); 2757 if (p) { 2758 pos.pos += p - start_at; 2759 pos.internal_.pos_in_chain += p - start_at; 2760 if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) { 2761 if (end && pos.pos + (ev_ssize_t)len > end->pos) 2762 goto not_found; 2763 else 2764 goto done; 2765 } 2766 ++pos.pos; 2767 ++pos.internal_.pos_in_chain; 2768 if (pos.internal_.pos_in_chain == chain->off) { 2769 chain = pos.internal_.chain = chain->next; 2770 pos.internal_.pos_in_chain = 0; 2771 } 2772 } else { 2773 if (chain == last_chain) 2774 goto not_found; 2775 pos.pos += chain->off - pos.internal_.pos_in_chain; 2776 chain = pos.internal_.chain = chain->next; 2777 pos.internal_.pos_in_chain = 0; 2778 } 2779 } 2780 2781 not_found: 2782 PTR_NOT_FOUND(&pos); 2783 done: 2784 EVBUFFER_UNLOCK(buffer); 2785 return pos; 2786 } 2787 2788 int 2789 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len, 2790 struct evbuffer_ptr *start_at, 2791 struct evbuffer_iovec *vec, int n_vec) 2792 { 2793 struct evbuffer_chain *chain; 2794 int idx = 0; 2795 ev_ssize_t len_so_far = 0; 2796 2797 /* Avoid locking in trivial edge cases */ 2798 if (start_at && start_at->internal_.chain == NULL) 2799 return 0; 2800 2801 EVBUFFER_LOCK(buffer); 2802 2803 if (start_at) { 2804 chain = start_at->internal_.chain; 2805 len_so_far = chain->off 2806 - start_at->internal_.pos_in_chain; 2807 idx = 1; 2808 if (n_vec > 0) { 2809 vec[0].iov_base = (void *)(chain->buffer + chain->misalign 2810 + start_at->internal_.pos_in_chain); 2811 vec[0].iov_len = len_so_far; 2812 } 2813 
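/* Only this first vector can be partial; later chains are taken whole. */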
chain = chain->next; 2814 } else { 2815 chain = buffer->first; 2816 } 2817 2818 if (n_vec == 0 && len < 0) { 2819 /* If no vectors are provided and they asked for "everything", 2820 * pretend they asked for the actual available amount. */ 2821 len = buffer->total_len; 2822 if (start_at) { 2823 len -= start_at->pos; 2824 } 2825 } 2826 2827 while (chain) { 2828 if (len >= 0 && len_so_far >= len) 2829 break; 2830 if (idx<n_vec) { 2831 vec[idx].iov_base = (void *)(chain->buffer + chain->misalign); 2832 vec[idx].iov_len = chain->off; 2833 } else if (len<0) { 2834 break; 2835 } 2836 ++idx; 2837 len_so_far += chain->off; 2838 chain = chain->next; 2839 } 2840 2841 EVBUFFER_UNLOCK(buffer); 2842 2843 return idx; 2844 } 2845 2846 2847 int 2848 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap) 2849 { 2850 char *buffer; 2851 size_t space; 2852 int sz, result = -1; 2853 va_list aq; 2854 struct evbuffer_chain *chain; 2855 2856 2857 EVBUFFER_LOCK(buf); 2858 2859 if (buf->freeze_end) { 2860 goto done; 2861 } 2862 2863 /* make sure that at least some space is available */ 2864 if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL) 2865 goto done; 2866 2867 for (;;) { 2868 #if 0 2869 size_t used = chain->misalign + chain->off; 2870 buffer = (char *)chain->buffer + chain->misalign + chain->off; 2871 EVUTIL_ASSERT(chain->buffer_len >= used); 2872 space = chain->buffer_len - used; 2873 #endif 2874 buffer = (char*) CHAIN_SPACE_PTR(chain); 2875 space = (size_t) CHAIN_SPACE_LEN(chain); 2876 2877 #ifndef va_copy 2878 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list)) 2879 #endif 2880 va_copy(aq, ap); 2881 2882 sz = evutil_vsnprintf(buffer, space, fmt, aq); 2883 2884 va_end(aq); 2885 2886 if (sz < 0) 2887 goto done; 2888 if (INT_MAX >= EVBUFFER_CHAIN_MAX && 2889 (size_t)sz >= EVBUFFER_CHAIN_MAX) 2890 goto done; 2891 if ((size_t)sz < space) { 2892 chain->off += sz; 2893 buf->total_len += sz; 2894 buf->n_add_for_cb += sz; 2895 2896 advance_last_with_data(buf); 2897 evbuffer_invoke_callbacks_(buf); 2898 result = sz; 2899 goto done; 2900 } 2901 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL) 2902 goto done; 2903 } 2904 /* NOTREACHED */ 2905 2906 done: 2907 EVBUFFER_UNLOCK(buf); 2908 return result; 2909 } 2910 2911 int 2912 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...) 
2913 { 2914 int res = -1; 2915 va_list ap; 2916 2917 va_start(ap, fmt); 2918 res = evbuffer_add_vprintf(buf, fmt, ap); 2919 va_end(ap); 2920 2921 return (res); 2922 } 2923 2924 int 2925 evbuffer_add_reference(struct evbuffer *outbuf, 2926 const void *data, size_t datlen, 2927 evbuffer_ref_cleanup_cb cleanupfn, void *extra) 2928 { 2929 struct evbuffer_chain *chain; 2930 struct evbuffer_chain_reference *info; 2931 int result = -1; 2932 2933 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference)); 2934 if (!chain) 2935 return (-1); 2936 chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE; 2937 chain->buffer = __UNCONST(data); 2938 chain->buffer_len = datlen; 2939 chain->off = datlen; 2940 2941 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain); 2942 info->cleanupfn = cleanupfn; 2943 info->extra = extra; 2944 2945 EVBUFFER_LOCK(outbuf); 2946 if (outbuf->freeze_end) { 2947 /* don't call chain_free; we do not want to actually invoke 2948 * the cleanup function */ 2949 mm_free(chain); 2950 goto done; 2951 } 2952 evbuffer_chain_insert(outbuf, chain); 2953 outbuf->n_add_for_cb += datlen; 2954 2955 evbuffer_invoke_callbacks_(outbuf); 2956 2957 result = 0; 2958 done: 2959 EVBUFFER_UNLOCK(outbuf); 2960 2961 return result; 2962 } 2963 2964 /* TODO(niels): we may want to add to automagically convert to mmap, in 2965 * case evbuffer_remove() or evbuffer_pullup() are being used. 2966 */ 2967 struct evbuffer_file_segment * 2968 evbuffer_file_segment_new( 2969 int fd, ev_off_t offset, ev_off_t length, unsigned flags) 2970 { 2971 struct evbuffer_file_segment *seg = 2972 mm_calloc(sizeof(struct evbuffer_file_segment), 1); 2973 if (!seg) 2974 return NULL; 2975 seg->refcnt = 1; 2976 seg->fd = fd; 2977 seg->flags = flags; 2978 seg->file_offset = offset; 2979 seg->cleanup_cb = NULL; 2980 seg->cleanup_cb_arg = NULL; 2981 #ifdef _WIN32 2982 #ifndef lseek 2983 #define lseek _lseeki64 2984 #endif 2985 #ifndef fstat 2986 #define fstat _fstat 2987 #endif 2988 #ifndef stat 2989 #define stat _stat 2990 #endif 2991 #endif 2992 if (length == -1) { 2993 struct stat st; 2994 if (fstat(fd, &st) < 0) 2995 goto err; 2996 length = st.st_size; 2997 } 2998 seg->length = length; 2999 3000 if (offset < 0 || length < 0 || 3001 ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) || 3002 (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length)) 3003 goto err; 3004 3005 #if defined(USE_SENDFILE) 3006 if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) { 3007 seg->can_sendfile = 1; 3008 goto done; 3009 } 3010 #endif 3011 3012 if (evbuffer_file_segment_materialize(seg)<0) 3013 goto err; 3014 3015 #if defined(USE_SENDFILE) 3016 done: 3017 #endif 3018 if (!(flags & EVBUF_FS_DISABLE_LOCKING)) { 3019 EVTHREAD_ALLOC_LOCK(seg->lock, 0); 3020 } 3021 return seg; 3022 err: 3023 mm_free(seg); 3024 return NULL; 3025 } 3026 3027 #ifdef EVENT__HAVE_MMAP 3028 static long 3029 get_page_size(void) 3030 { 3031 #ifdef SC_PAGE_SIZE 3032 return sysconf(SC_PAGE_SIZE); 3033 #elif defined(_SC_PAGE_SIZE) 3034 return sysconf(_SC_PAGE_SIZE); 3035 #else 3036 return 1; 3037 #endif 3038 } 3039 #endif 3040 3041 /* DOCDOC */ 3042 /* Requires lock */ 3043 static int 3044 evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg) 3045 { 3046 const unsigned flags = seg->flags; 3047 const int fd = seg->fd; 3048 const ev_off_t length = seg->length; 3049 const ev_off_t offset = seg->file_offset; 3050 3051 if (seg->contents) 3052 return 0; /* already materialized */ 3053 3054 #if defined(EVENT__HAVE_MMAP) 3055 if (!(flags & 
EVBUF_FS_DISABLE_MMAP)) { 3056 off_t offset_rounded = 0, offset_leftover = 0; 3057 void *mapped; 3058 if (offset) { 3059 /* mmap implementations don't generally like us 3060 * to have an offset that isn't a round multiple of the page size. */ 3061 long page_size = get_page_size(); 3062 if (page_size == -1) 3063 goto err; 3064 offset_leftover = offset % page_size; 3065 offset_rounded = offset - offset_leftover; 3066 } 3067 mapped = mmap(NULL, length + offset_leftover, 3068 PROT_READ, 3069 #ifdef MAP_NOCACHE 3070 MAP_NOCACHE | /* ??? */ 3071 #endif 3072 #ifdef MAP_FILE 3073 MAP_FILE | 3074 #endif 3075 MAP_PRIVATE, 3076 fd, offset_rounded); 3077 if (mapped == MAP_FAILED) { 3078 event_warn("%s: mmap(%d, %d, %zu) failed", 3079 __func__, fd, 0, (size_t)(offset + length)); 3080 } else { 3081 seg->mapping = mapped; 3082 seg->contents = (char*)mapped+offset_leftover; 3083 seg->mmap_offset = 0; 3084 seg->is_mapping = 1; 3085 goto done; 3086 } 3087 } 3088 #endif 3089 #ifdef _WIN32 3090 if (!(flags & EVBUF_FS_DISABLE_MMAP)) { 3091 intptr_t h = _get_osfhandle(fd); 3092 HANDLE m; 3093 ev_uint64_t total_size = length+offset; 3094 if ((HANDLE)h == INVALID_HANDLE_VALUE) 3095 goto err; 3096 m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY, 3097 (total_size >> 32), total_size & 0xfffffffful, 3098 NULL); 3099 if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */ 3100 seg->mapping_handle = m; 3101 seg->mmap_offset = offset; 3102 seg->is_mapping = 1; 3103 goto done; 3104 } 3105 } 3106 #endif 3107 { 3108 ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos; 3109 ev_off_t read_so_far = 0; 3110 char *mem; 3111 int e; 3112 ev_ssize_t n = 0; 3113 if (!(mem = mm_malloc(length))) 3114 goto err; 3115 if (start_pos < 0) { 3116 mm_free(mem); 3117 goto err; 3118 } 3119 if (lseek(fd, offset, SEEK_SET) < 0) { 3120 mm_free(mem); 3121 goto err; 3122 } 3123 while (read_so_far < length) { 3124 n = read(fd, mem+read_so_far, length-read_so_far); 3125 if (n <= 0) 3126 break; 3127 read_so_far += n; 3128 } 3129 3130 e = errno; 3131 pos = lseek(fd, start_pos, SEEK_SET); 3132 if (n < 0 || (n == 0 && length > read_so_far)) { 3133 mm_free(mem); 3134 errno = e; 3135 goto err; 3136 } else if (pos < 0) { 3137 mm_free(mem); 3138 goto err; 3139 } 3140 3141 seg->contents = mem; 3142 } 3143 3144 done: 3145 return 0; 3146 err: 3147 return -1; 3148 } 3149 3150 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg, 3151 evbuffer_file_segment_cleanup_cb cb, void* arg) 3152 { 3153 EVUTIL_ASSERT(seg->refcnt > 0); 3154 seg->cleanup_cb = cb; 3155 seg->cleanup_cb_arg = arg; 3156 } 3157 3158 void 3159 evbuffer_file_segment_free(struct evbuffer_file_segment *seg) 3160 { 3161 int refcnt; 3162 EVLOCK_LOCK(seg->lock, 0); 3163 refcnt = --seg->refcnt; 3164 EVLOCK_UNLOCK(seg->lock, 0); 3165 if (refcnt > 0) 3166 return; 3167 EVUTIL_ASSERT(refcnt == 0); 3168 3169 if (seg->is_mapping) { 3170 #ifdef _WIN32 3171 CloseHandle(seg->mapping_handle); 3172 #elif defined (EVENT__HAVE_MMAP) 3173 off_t offset_leftover; 3174 offset_leftover = seg->file_offset % get_page_size(); 3175 if (munmap(seg->mapping, seg->length + offset_leftover) == -1) 3176 event_warn("%s: munmap failed", __func__); 3177 #endif 3178 } else if (seg->contents) { 3179 mm_free(seg->contents); 3180 } 3181 3182 if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) { 3183 close(seg->fd); 3184 } 3185 3186 if (seg->cleanup_cb) { 3187 (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg, 3188 seg->flags, seg->cleanup_cb_arg); 3189 seg->cleanup_cb = NULL; 3190 seg->cleanup_cb_arg = NULL; 3191 } 3192 3193
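/* The refcount fell to zero above, so no other thread still holds a reference; it is now safe to free the lock and the segment itself. */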
EVTHREAD_FREE_LOCK(seg->lock, 0); 3194 mm_free(seg); 3195 } 3196 3197 int 3198 evbuffer_add_file_segment(struct evbuffer *buf, 3199 struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length) 3200 { 3201 struct evbuffer_chain *chain; 3202 struct evbuffer_chain_file_segment *extra; 3203 int can_use_sendfile = 0; 3204 3205 EVBUFFER_LOCK(buf); 3206 EVLOCK_LOCK(seg->lock, 0); 3207 if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) { 3208 can_use_sendfile = 1; 3209 } else { 3210 if (!seg->contents) { 3211 if (evbuffer_file_segment_materialize(seg)<0) { 3212 EVLOCK_UNLOCK(seg->lock, 0); 3213 EVBUFFER_UNLOCK(buf); 3214 return -1; 3215 } 3216 } 3217 } 3218 EVLOCK_UNLOCK(seg->lock, 0); 3219 3220 if (buf->freeze_end) 3221 goto err; 3222 3223 if (length < 0) { 3224 if (offset > seg->length) 3225 goto err; 3226 length = seg->length - offset; 3227 } 3228 3229 /* Can we actually add this? */ 3230 if (offset+length > seg->length) 3231 goto err; 3232 3233 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment)); 3234 if (!chain) 3235 goto err; 3236 extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain); 3237 3238 chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT; 3239 if (can_use_sendfile && seg->can_sendfile) { 3240 chain->flags |= EVBUFFER_SENDFILE; 3241 chain->misalign = seg->file_offset + offset; 3242 chain->off = length; 3243 chain->buffer_len = chain->misalign + length; 3244 } else if (seg->is_mapping) { 3245 #ifdef _WIN32 3246 ev_uint64_t total_offset = seg->mmap_offset+offset; 3247 ev_uint64_t offset_rounded=0, offset_remaining=0; 3248 LPVOID data; 3249 if (total_offset) { 3250 SYSTEM_INFO si; 3251 memset(&si, 0, sizeof(si)); /* cargo cult */ 3252 GetSystemInfo(&si); 3253 offset_remaining = total_offset % si.dwAllocationGranularity; 3254 offset_rounded = total_offset - offset_remaining; 3255 } 3256 data = MapViewOfFile( 3257 seg->mapping_handle, 3258 FILE_MAP_READ, 3259 offset_rounded >> 32, 3260 offset_rounded & 0xfffffffful, 3261 length + offset_remaining); 3262 if (data == NULL) { 3263 mm_free(chain); 3264 goto err; 3265 } 3266 chain->buffer = (unsigned char*) data; 3267 chain->buffer_len = length+offset_remaining; 3268 chain->misalign = offset_remaining; 3269 chain->off = length; 3270 #else 3271 chain->buffer = (unsigned char*)(seg->contents + offset); 3272 chain->buffer_len = length; 3273 chain->off = length; 3274 #endif 3275 } else { 3276 chain->buffer = (unsigned char*)(seg->contents + offset); 3277 chain->buffer_len = length; 3278 chain->off = length; 3279 } 3280 3281 EVLOCK_LOCK(seg->lock, 0); 3282 ++seg->refcnt; 3283 EVLOCK_UNLOCK(seg->lock, 0); 3284 extra->segment = seg; 3285 buf->n_add_for_cb += length; 3286 evbuffer_chain_insert(buf, chain); 3287 3288 evbuffer_invoke_callbacks_(buf); 3289 3290 EVBUFFER_UNLOCK(buf); 3291 3292 return 0; 3293 err: 3294 EVBUFFER_UNLOCK(buf); 3295 evbuffer_file_segment_free(seg); /* Lowers the refcount */ 3296 return -1; 3297 } 3298 3299 int 3300 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length) 3301 { 3302 struct evbuffer_file_segment *seg; 3303 unsigned flags = EVBUF_FS_CLOSE_ON_FREE; 3304 int r; 3305 3306 seg = evbuffer_file_segment_new(fd, offset, length, flags); 3307 if (!seg) 3308 return -1; 3309 r = evbuffer_add_file_segment(buf, seg, 0, length); 3310 if (r == 0) 3311 evbuffer_file_segment_free(seg); 3312 return r; 3313 } 3314 3315 int 3316 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg) 3317 { 3318 EVBUFFER_LOCK(buffer); 3319 3320 if 
(!LIST_EMPTY(&buffer->callbacks)) 3321 evbuffer_remove_all_callbacks(buffer); 3322 3323 if (cb) { 3324 struct evbuffer_cb_entry *ent = 3325 evbuffer_add_cb(buffer, NULL, cbarg); 3326 if (!ent) { 3327 EVBUFFER_UNLOCK(buffer); 3328 return -1; 3329 } 3330 ent->cb.cb_obsolete = cb; 3331 ent->flags |= EVBUFFER_CB_OBSOLETE; 3332 } 3333 EVBUFFER_UNLOCK(buffer); 3334 return 0; 3335 } 3336 3337 struct evbuffer_cb_entry * 3338 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3339 { 3340 struct evbuffer_cb_entry *e; 3341 if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry)))) 3342 return NULL; 3343 EVBUFFER_LOCK(buffer); 3344 e->cb.cb_func = cb; 3345 e->cbarg = cbarg; 3346 e->flags = EVBUFFER_CB_ENABLED; 3347 LIST_INSERT_HEAD(&buffer->callbacks, e, next); 3348 EVBUFFER_UNLOCK(buffer); 3349 return e; 3350 } 3351 3352 int 3353 evbuffer_remove_cb_entry(struct evbuffer *buffer, 3354 struct evbuffer_cb_entry *ent) 3355 { 3356 EVBUFFER_LOCK(buffer); 3357 LIST_REMOVE(ent, next); 3358 EVBUFFER_UNLOCK(buffer); 3359 mm_free(ent); 3360 return 0; 3361 } 3362 3363 int 3364 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg) 3365 { 3366 struct evbuffer_cb_entry *cbent; 3367 int result = -1; 3368 EVBUFFER_LOCK(buffer); 3369 LIST_FOREACH(cbent, &buffer->callbacks, next) { 3370 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) { 3371 result = evbuffer_remove_cb_entry(buffer, cbent); 3372 goto done; 3373 } 3374 } 3375 done: 3376 EVBUFFER_UNLOCK(buffer); 3377 return result; 3378 } 3379 3380 int 3381 evbuffer_cb_set_flags(struct evbuffer *buffer, 3382 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3383 { 3384 /* the user isn't allowed to mess with these. */ 3385 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3386 EVBUFFER_LOCK(buffer); 3387 cb->flags |= flags; 3388 EVBUFFER_UNLOCK(buffer); 3389 return 0; 3390 } 3391 3392 int 3393 evbuffer_cb_clear_flags(struct evbuffer *buffer, 3394 struct evbuffer_cb_entry *cb, ev_uint32_t flags) 3395 { 3396 /* the user isn't allowed to mess with these. 
*/ 3397 flags &= ~EVBUFFER_CB_INTERNAL_FLAGS; 3398 EVBUFFER_LOCK(buffer); 3399 cb->flags &= ~flags; 3400 EVBUFFER_UNLOCK(buffer); 3401 return 0; 3402 } 3403 3404 int 3405 evbuffer_freeze(struct evbuffer *buffer, int start) 3406 { 3407 EVBUFFER_LOCK(buffer); 3408 if (start) 3409 buffer->freeze_start = 1; 3410 else 3411 buffer->freeze_end = 1; 3412 EVBUFFER_UNLOCK(buffer); 3413 return 0; 3414 } 3415 3416 int 3417 evbuffer_unfreeze(struct evbuffer *buffer, int start) 3418 { 3419 EVBUFFER_LOCK(buffer); 3420 if (start) 3421 buffer->freeze_start = 0; 3422 else 3423 buffer->freeze_end = 0; 3424 EVBUFFER_UNLOCK(buffer); 3425 return 0; 3426 } 3427 3428 #if 0 3429 void 3430 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3431 { 3432 if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) { 3433 cb->size_before_suspend = evbuffer_get_length(buffer); 3434 cb->flags |= EVBUFFER_CB_SUSPENDED; 3435 } 3436 } 3437 3438 void 3439 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb) 3440 { 3441 if ((cb->flags & EVBUFFER_CB_SUSPENDED)) { 3442 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND); 3443 size_t sz = cb->size_before_suspend; 3444 cb->flags &= ~(EVBUFFER_CB_SUSPENDED| 3445 EVBUFFER_CB_CALL_ON_UNSUSPEND); 3446 cb->size_before_suspend = 0; 3447 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) { 3448 cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg); 3449 } 3450 } 3451 } 3452 #endif 3453 3454 int 3455 evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs, 3456 int max_cbs) 3457 { 3458 int r = 0; 3459 EVBUFFER_LOCK(buffer); 3460 if (buffer->deferred_cbs) { 3461 if (max_cbs < 1) { 3462 r = -1; 3463 goto done; 3464 } 3465 cbs[0] = &buffer->deferred; 3466 r = 1; 3467 } 3468 done: 3469 EVBUFFER_UNLOCK(buffer); 3470 return r; 3471 } 3472
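/*
 * Example (illustrative sketch, not part of the library source): using the
 * public callback interface from event2/buffer.h to track how many bytes a
 * buffer receives.  The buffer "buf", the counter "total", and the callback
 * name "count_added_cb" are hypothetical caller-side choices; with default
 * (non-deferred) callbacks, the callback runs as data is added.
 *
 *	static void
 *	count_added_cb(struct evbuffer *buffer,
 *	    const struct evbuffer_cb_info *info, void *arg)
 *	{
 *		size_t *total = arg;
 *		*total += info->n_added;
 *	}
 *
 *	size_t total = 0;
 *	struct evbuffer_cb_entry *ent =
 *	    evbuffer_add_cb(buf, count_added_cb, &total);
 *	evbuffer_add(buf, "xyzzy", 5);	// total is now 5
 *	evbuffer_remove_cb_entry(buf, ent);
 */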