xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/buffer.c (revision 6d322f2f4598f0d8a138f10ea648ec4fabe41f8b)
1 /*	$NetBSD: buffer.c,v 1.1.1.1 2013/12/27 23:31:18 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include "event2/event-config.h"
31 #include "evconfig-private.h"
32 
33 #ifdef _WIN32
34 #include <winsock2.h>
35 #include <windows.h>
36 #include <io.h>
37 #endif
38 
39 #ifdef EVENT__HAVE_VASPRINTF
40 /* If we have vasprintf, we need to define _GNU_SOURCE before we include
41  * stdio.h.  This comes from evconfig-private.h.
42  */
43 #endif
44 
45 #include <sys/types.h>
46 
47 #ifdef EVENT__HAVE_SYS_TIME_H
48 #include <sys/time.h>
49 #endif
50 
51 #ifdef EVENT__HAVE_SYS_SOCKET_H
52 #include <sys/socket.h>
53 #endif
54 
55 #ifdef EVENT__HAVE_SYS_UIO_H
56 #include <sys/uio.h>
57 #endif
58 
59 #ifdef EVENT__HAVE_SYS_IOCTL_H
60 #include <sys/ioctl.h>
61 #endif
62 
63 #ifdef EVENT__HAVE_SYS_MMAN_H
64 #include <sys/mman.h>
65 #endif
66 
67 #ifdef EVENT__HAVE_SYS_SENDFILE_H
68 #include <sys/sendfile.h>
69 #endif
70 #ifdef EVENT__HAVE_SYS_STAT_H
71 #include <sys/stat.h>
72 #endif
73 
74 
75 #include <errno.h>
76 #include <stdio.h>
77 #include <stdlib.h>
78 #include <string.h>
79 #ifdef EVENT__HAVE_STDARG_H
80 #include <stdarg.h>
81 #endif
82 #ifdef EVENT__HAVE_UNISTD_H
83 #include <unistd.h>
84 #endif
85 #include <limits.h>
86 
87 #include "event2/event.h"
88 #include "event2/buffer.h"
89 #include "event2/buffer_compat.h"
90 #include "event2/bufferevent.h"
91 #include "event2/bufferevent_compat.h"
92 #include "event2/bufferevent_struct.h"
93 #include "event2/thread.h"
94 #include "log-internal.h"
95 #include "mm-internal.h"
96 #include "util-internal.h"
97 #include "evthread-internal.h"
98 #include "evbuffer-internal.h"
99 #include "bufferevent-internal.h"
100 
101 /* some systems do not have MAP_FAILED */
102 #ifndef MAP_FAILED
103 #define MAP_FAILED	((void *)-1)
104 #endif
105 
106 /* send file support */
107 #if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
108 #define USE_SENDFILE		1
109 #define SENDFILE_IS_LINUX	1
110 #elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
111 #define USE_SENDFILE		1
112 #define SENDFILE_IS_FREEBSD	1
113 #elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
114 #define USE_SENDFILE		1
115 #define SENDFILE_IS_MACOSX	1
116 #elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
117 #define USE_SENDFILE		1
118 #define SENDFILE_IS_SOLARIS	1
119 #endif
120 
121 /* Mask of user-selectable callback flags. */
122 #define EVBUFFER_CB_USER_FLAGS	    0xffff
123 /* Mask of all internal-use-only flags. */
124 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
125 
126 /* Flag set if the callback is using the cb_obsolete function pointer  */
127 #define EVBUFFER_CB_OBSOLETE	       0x00040000
128 
129 /* evbuffer_chain support */
130 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
131 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
132 	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
133 
134 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
135 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
136 
137 /* evbuffer_ptr support */
138 #define PTR_NOT_FOUND(ptr) do {			\
139 	(ptr)->pos = -1;					\
140 	(ptr)->internal_.chain = NULL;		\
141 	(ptr)->internal_.pos_in_chain = 0;	\
142 } while (0)
143 
144 static void evbuffer_chain_align(struct evbuffer_chain *chain);
145 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
146     size_t datalen);
147 static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
148 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
149     const struct evbuffer_ptr *pos, const char *mem, size_t len);
150 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
151     size_t datlen);
152 static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
153     size_t howfar);
154 static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
155 static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);
156 
157 static struct evbuffer_chain *
158 evbuffer_chain_new(size_t size)
159 {
160 	struct evbuffer_chain *chain;
161 	size_t to_alloc;
162 
163 	size += EVBUFFER_CHAIN_SIZE;
164 
165 	/* get the next largest memory that can hold the buffer */
166 	to_alloc = MIN_BUFFER_SIZE;
167 	while (to_alloc < size)
168 		to_alloc <<= 1;
169 
170 	/* we get everything in one chunk */
171 	if ((chain = mm_malloc(to_alloc)) == NULL)
172 		return (NULL);
173 
174 	memset(chain, 0, EVBUFFER_CHAIN_SIZE);
175 
176 	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
177 
178 	/* this way we can manipulate the buffer to different addresses,
179 	 * which is required for mmap for example.
180 	 */
181 	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
182 
183 	chain->refcnt = 1;
184 
185 	return (chain);
186 }
187 
/* Drop one reference to 'chain' and release it once nothing else holds it.
 * Pinned chains are not freed immediately: they are flagged
 * EVBUFFER_DANGLING and released later by evbuffer_chain_unpin_(). */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		/* User-supplied memory: run the user's cleanup callback. */
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		/* Chain views part of a file segment: drop our segment ref
		 * (and, on Windows, unmap our view of it first). */
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	mm_free(chain);
}
247 
248 static void
249 evbuffer_free_all_chains(struct evbuffer_chain *chain)
250 {
251 	struct evbuffer_chain *next;
252 	for (; chain; chain = next) {
253 		next = chain->next;
254 		evbuffer_chain_free(chain);
255 	}
256 }
257 
#ifndef NDEBUG
/* Debug helper: return 1 iff no chain from 'chain' onward holds data. */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
"unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
275 
276 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
277  * to replacing them all with a new chain.  Return a pointer to the place
278  * where the new chain will go.
279  *
280  * Internal; requires lock.  The caller must fix up buf->last and buf->first
281  * as needed; they might have been freed.
282  */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		/* Everything from *ch onward is empty and unpinned: free the
		 * run and leave 'ch' pointing at the now-NULL link. */
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
297 
298 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
299  * chains as necessary.  Requires lock.  Does not schedule callbacks.
300  */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains with data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		/* Drop empty, unpinned trailing chains; 'chp' is the link
		 * where the new chain gets attached. */
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	/* The inserted chain may already carry data (e.g. file segments). */
	buf->total_len += chain->off;
}
321 
322 static inline struct evbuffer_chain *
323 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
324 {
325 	struct evbuffer_chain *chain;
326 	if ((chain = evbuffer_chain_new(datlen)) == NULL)
327 		return NULL;
328 	evbuffer_chain_insert(buf, chain);
329 	return chain;
330 }
331 
332 void
333 evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
334 {
335 	EVUTIL_ASSERT((chain->flags & flag) == 0);
336 	chain->flags |= flag;
337 }
338 
339 void
340 evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
341 {
342 	EVUTIL_ASSERT((chain->flags & flag) != 0);
343 	chain->flags &= ~flag;
344 	if (chain->flags & EVBUFFER_DANGLING)
345 		evbuffer_chain_free(chain);
346 }
347 
348 static inline void
349 evbuffer_chain_incref(struct evbuffer_chain *chain)
350 {
351     ++chain->refcnt;
352 }
353 
354 struct evbuffer *
355 evbuffer_new(void)
356 {
357 	struct evbuffer *buffer;
358 
359 	buffer = mm_calloc(1, sizeof(struct evbuffer));
360 	if (buffer == NULL)
361 		return (NULL);
362 
363 	LIST_INIT(&buffer->callbacks);
364 	buffer->refcnt = 1;
365 	buffer->last_with_datap = &buffer->first;
366 
367 	return (buffer);
368 }
369 
370 int
371 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
372 {
373 	EVBUFFER_LOCK(buf);
374 	buf->flags |= (ev_uint32_t)flags;
375 	EVBUFFER_UNLOCK(buf);
376 	return 0;
377 }
378 
379 int
380 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
381 {
382 	EVBUFFER_LOCK(buf);
383 	buf->flags &= ~(ev_uint32_t)flags;
384 	EVBUFFER_UNLOCK(buf);
385 	return 0;
386 }
387 
388 void
389 evbuffer_incref_(struct evbuffer *buf)
390 {
391 	EVBUFFER_LOCK(buf);
392 	++buf->refcnt;
393 	EVBUFFER_UNLOCK(buf);
394 }
395 
396 void
397 evbuffer_incref_and_lock_(struct evbuffer *buf)
398 {
399 	EVBUFFER_LOCK(buf);
400 	++buf->refcnt;
401 }
402 
/* Make 'buffer''s callbacks run deferred through 'base' instead of being
 * invoked inline.  Always returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = base;
	buffer->deferred_cbs = 1;
	/* Queue the deferred run at a middle priority of the base. */
	event_deferred_cb_init_(&buffer->deferred,
	    event_base_get_npriorities(base) / 2,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
415 
/* Enable locking on 'buf'.  If 'lock' is NULL, a new recursive lock is
 * allocated and owned by the buffer (freed when the buffer is destroyed);
 * otherwise the caller-supplied lock is used without taking ownership.
 * Returns 0 on success; -1 if thread support is compiled out, a lock is
 * already set, or lock allocation fails. */
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef EVENT__DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}
439 
440 void
441 evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
442 {
443 	EVBUFFER_LOCK(buf);
444 	buf->parent = bev;
445 	EVBUFFER_UNLOCK(buf);
446 }
447 
/* Invoke every enabled callback on 'buffer' that matches the current mode.
 * 'running_deferred' is nonzero when called from the deferred-callback
 * trampoline.  When it is zero but deferred callbacks are enabled, only
 * the EVBUFFER_CB_NODEFER callbacks run now, and the add/del counters are
 * left intact for the later deferred run.  Requires lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	/* Select which callbacks fire: (flags & mask) must equal masked_val. */
	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (LIST_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* No net change since the last run: nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the size the buffer had before the pending changes. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = LIST_FIRST(&buffer->callbacks);
	     cbent != LIST_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = LIST_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			/* Legacy callback signature: old/new sizes only. */
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
505 
506 void
507 evbuffer_invoke_callbacks_(struct evbuffer *buffer)
508 {
509 	if (LIST_EMPTY(&buffer->callbacks)) {
510 		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
511 		return;
512 	}
513 
514 	if (buffer->deferred_cbs) {
515 		if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
516 			evbuffer_incref_and_lock_(buffer);
517 			if (buffer->parent)
518 				bufferevent_incref_(buffer->parent);
519 		}
520 		EVBUFFER_UNLOCK(buffer);
521 	}
522 
523 	evbuffer_run_callbacks(buffer, 0);
524 }
525 
/* Deferred-callback trampoline: runs the buffer's callbacks from the event
 * loop and drops the references taken when the run was scheduled.  'cb' is
 * unused; 'arg' is the evbuffer. */
static void
evbuffer_deferred_callback(struct event_callback *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	/* Releases the buffer reference taken at scheduling time. */
	evbuffer_decref_and_unlock_(buffer);
	if (parent)
		bufferevent_decref_(parent);
}
541 
542 static void
543 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
544 {
545 	struct evbuffer_cb_entry *cbent;
546 
547 	while ((cbent = LIST_FIRST(&buffer->callbacks))) {
548 		LIST_REMOVE(cbent, next);
549 		mm_free(cbent);
550 	}
551 }
552 
/* Drop one reference to 'buffer' (which the caller must have locked) and
 * unlock it.  When the count reaches zero: free every chain, remove all
 * callbacks, cancel any pending deferred run, then release the lock (and
 * free it if the buffer owns it) and the buffer itself. */
void
evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);

	/* refcnt hit zero, so no other holder can contend for the lock. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
579 
/* Release the caller's reference to 'buffer', destroying it when the
 * reference count reaches zero. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	evbuffer_decref_and_unlock_(buffer);
}
586 
/* Public wrapper: acquire 'buf''s lock (a no-op if locking is not
 * enabled on this buffer). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
592 
/* Public wrapper: release 'buf''s lock (a no-op if locking is not
 * enabled on this buffer). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
598 
599 size_t
600 evbuffer_get_length(const struct evbuffer *buffer)
601 {
602 	size_t result;
603 
604 	EVBUFFER_LOCK(buffer);
605 
606 	result = (buffer->total_len);
607 
608 	EVBUFFER_UNLOCK(buffer);
609 
610 	return result;
611 }
612 
613 size_t
614 evbuffer_get_contiguous_space(const struct evbuffer *buf)
615 {
616 	struct evbuffer_chain *chain;
617 	size_t result;
618 
619 	EVBUFFER_LOCK(buf);
620 	chain = buf->first;
621 	result = (chain != NULL ? chain->off : 0);
622 	EVBUFFER_UNLOCK(buf);
623 
624 	return result;
625 }
626 
627 size_t
628 evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec) {
629 	int n;
630 	size_t res;
631 	size_t to_alloc;
632 
633 	EVBUFFER_LOCK(buf);
634 
635 	res = to_alloc = 0;
636 
637 	for (n = 0; n < n_vec; n++) {
638 		to_alloc += vec[n].iov_len;
639 	}
640 
641 	if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
642 		goto done;
643 	}
644 
645 	for (n = 0; n < n_vec; n++) {
646 		/* XXX each 'add' call here does a bunch of setup that's
647 		 * obviated by evbuffer_expand_fast_, and some cleanup that we
648 		 * would like to do only once.  Instead we should just extract
649 		 * the part of the code that's needed. */
650 
651 		if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
652 			goto done;
653 		}
654 
655 		res += vec[n].iov_len;
656 	}
657 
658 done:
659     EVBUFFER_UNLOCK(buf);
660     return res;
661 }
662 
/* Reserve at least 'size' bytes of writable space in 'buf', described in
 * up to 'n_vecs' iovec-style extents.  Returns the number of extents
 * filled in, or -1 on error (frozen end, n_vecs < 1, or allocation
 * failure).  The space does not count as buffer content until
 * evbuffer_commit_space() is called. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		/* Caller wants one contiguous extent of >= 'size' bytes. */
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		/* The space may be split across several chains. */
		if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
			goto done;
		n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
695 
/* Advance buf->last_with_datap until it points at the link of the last
 * chain that actually holds data.  Returns the number of positions the
 * pointer moved.  Requires lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	/* Walk forward while the next chain exists and has data. */
	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
711 
/* Commit space previously handed out by evbuffer_reserve_space().  'vec'
 * must describe (prefixes of) the extents returned by the reserve call,
 * in order.  Returns 0 on success, -1 on error; on success the committed
 * bytes become buffer content and callbacks are notified. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	/* Let registered callbacks know bytes were added. */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
781 
782 static inline int
783 HAS_PINNED_R(struct evbuffer *buf)
784 {
785 	return (buf->last && CHAIN_PINNED_R(buf->last));
786 }
787 
788 static inline void
789 ZERO_CHAIN(struct evbuffer *dst)
790 {
791 	ASSERT_EVBUFFER_LOCKED(dst);
792 	dst->first = NULL;
793 	dst->last = NULL;
794 	dst->last_with_datap = &(dst)->first;
795 	dst->total_len = 0;
796 }
797 
798 /* Prepares the contents of src to be moved to another buffer by removing
799  * read-pinned chains. The first pinned chain is saved in first, and the
800  * last in last. If src has no read-pinned chains, first and last are set
801  * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
		struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* The pinned run begins at the last chain with data, or at the
	 * chain just after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy takes the pinned chain's place in src's list. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		/* Mark the pinned chain's bytes as consumed. */
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* No data in the pinned run: detach it from src entirely. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
845 
846 static inline void
847 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
848 		struct evbuffer_chain *last)
849 {
850 	ASSERT_EVBUFFER_LOCKED(src);
851 
852 	if (!pinned) {
853 		ZERO_CHAIN(src);
854 		return;
855 	}
856 
857 	src->first = pinned;
858 	src->last = last;
859 	src->last_with_datap = &src->first;
860 	src->total_len = 0;
861 }
862 
863 static inline void
864 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
865 {
866 	ASSERT_EVBUFFER_LOCKED(dst);
867 	ASSERT_EVBUFFER_LOCKED(src);
868 	dst->first = src->first;
869 	if (src->last_with_datap == &src->first)
870 		dst->last_with_datap = &dst->first;
871 	else
872 		dst->last_with_datap = src->last_with_datap;
873 	dst->last = src->last;
874 	dst->total_len = src->total_len;
875 }
876 
/* Link src's whole chain list after dst's last chain and update dst's
 * bookkeeping.  dst must be nonempty.  Requires both locks. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	/* If src's last data chain was its first, the last-with-data link
	 * must now point into dst's list rather than at src's header. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
890 
/* Append read-only "multicast" views of src's data chains to dst: each new
 * chain in dst shares the underlying bytes of a chain in src, pinning both
 * the source chain and the source evbuffer (via refcounts) until the view
 * is freed.  Source chains become immutable.  Requires both locks. */
static inline void
APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
{
	struct evbuffer_chain *tmp;
	struct evbuffer_chain *chain = src->first;
	struct evbuffer_multicast_parent *extra;

	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);

	for (; chain; chain = chain->next) {
		if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
			/* skip empty chains */
			continue;
		}

		/* New chain carries only the multicast-parent record; it
		 * points at the source chain's bytes rather than its own. */
		tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
		if (!tmp) {
			event_warn("%s: out of memory", __func__);
			return;
		}
		extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
		/* reference evbuffer containing source chain so it
		 * doesn't get released while the chain is still
		 * being referenced to */
		evbuffer_incref_(src);
		extra->source = src;
		/* reference source chain which now becomes immutable */
		evbuffer_chain_incref(chain);
		extra->parent = chain;
		chain->flags |= EVBUFFER_IMMUTABLE;
		tmp->buffer_len = chain->buffer_len;
		tmp->misalign = chain->misalign;
		tmp->off = chain->off;
		tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
		tmp->buffer = chain->buffer;
		evbuffer_chain_insert(dst, tmp);
	}
}
930 
/* Link src's whole chain list in front of dst's chains and fix up dst's
 * last_with_datap bookkeeping.  src must be nonempty.  Requires both
 * locks. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst held no data, so the last data chain comes from src. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's first (data) chain is now linked after src's last. */
		dst->last_with_datap = &src->last->next;
	}
}
948 
/* Move all data from 'inbuf' to the end of 'outbuf' without copying,
 * except that read-pinned chains stay behind in inbuf (their data is
 * copied into a fresh chain when necessary).  Returns 0 on success, -1 on
 * error (frozen buffers or allocation failure). */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving nothing, or moving a buffer onto itself, is a no-op. */
	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned chains so they are not moved. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	/* Reattach the pinned chains as inbuf's (now empty) contents. */
	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
994 
995 int
996 evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
997 {
998 	size_t in_total_len, out_total_len;
999 	struct evbuffer_chain *chain;
1000 	int result = 0;
1001 
1002 	EVBUFFER_LOCK2(inbuf, outbuf);
1003 	in_total_len = inbuf->total_len;
1004 	out_total_len = outbuf->total_len;
1005 	chain = inbuf->first;
1006 
1007 	if (in_total_len == 0)
1008 		goto done;
1009 
1010 	if (outbuf->freeze_end || outbuf == inbuf) {
1011 		result = -1;
1012 		goto done;
1013 	}
1014 
1015 	for (; chain; chain = chain->next) {
1016 		if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
1017 			/* chain type can not be referenced */
1018 			result = -1;
1019 			goto done;
1020 		}
1021 	}
1022 
1023 	if (out_total_len == 0) {
1024 		/* There might be an empty chain at the start of outbuf; free
1025 		 * it. */
1026 		evbuffer_free_all_chains(outbuf->first);
1027 	}
1028 	APPEND_CHAIN_MULTICAST(outbuf, inbuf);
1029 
1030 	outbuf->n_add_for_cb += in_total_len;
1031 	evbuffer_invoke_callbacks_(outbuf);
1032 
1033 done:
1034 	EVBUFFER_UNLOCK2(inbuf, outbuf);
1035 	return result;
1036 }
1037 
/* Move all data from 'inbuf' to the FRONT of 'outbuf' without copying,
 * keeping read-pinned chains behind in inbuf.  Returns 0 on success, -1
 * on error (frozen starts or allocation failure). */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Moving nothing, or moving a buffer onto itself, is a no-op. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned chains so they are not moved. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	/* Reattach the pinned chains as inbuf's (now empty) contents. */
	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks_(inbuf);
	evbuffer_invoke_callbacks_(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
1083 
/* Remove the first 'len' bytes from 'buf'.  Draining more than the buffer
 * holds empties it.  Returns 0 on success, or -1 if the front of the
 * buffer is frozen. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Fast path: everything goes and nothing is pinned, so just
		 * free every chain. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* If last_with_datap points at (or just past) the
			 * chain we are deleting, reset it to a safe value. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* Pinned chains are emptied, never freed. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		/* Skip the consumed bytes of the partially drained chain. */
		chain->misalign += remaining;
		chain->off -= remaining;
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1150 
1151 /* Reads data from an event buffer and drains the bytes read */
1152 int
1153 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
1154 {
1155 	ev_ssize_t n;
1156 	EVBUFFER_LOCK(buf);
1157 	n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
1158 	if (n > 0) {
1159 		if (evbuffer_drain(buf, n)<0)
1160 			n = -1;
1161 	}
1162 	EVBUFFER_UNLOCK(buf);
1163 	return (int)n;
1164 }
1165 
/** Copy out (without draining) up to 'datlen' bytes from the front of
 * 'buf' into 'data_out'.  Thin wrapper around evbuffer_copyout_from()
 * with a NULL start position; returns the number of bytes copied or
 * -1 on failure. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	return evbuffer_copyout_from(buf, NULL, data_out, datlen);
}
1171 
/** Copy up to 'datlen' bytes into 'data_out' without draining them.
 *
 * If 'pos' is non-NULL, copying starts at that position; otherwise it
 * starts at the front of 'buf'.  Returns the number of bytes copied
 * (possibly fewer than requested), or -1 if the front of the buffer
 * is frozen.
 */
ev_ssize_t
evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
    void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;
	size_t pos_in_chain;

	EVBUFFER_LOCK(buf);

	if (pos) {
		chain = pos->internal_.chain;
		pos_in_chain = pos->internal_.pos_in_chain;
		/* Clamp the request to the data that actually follows
		 * 'pos'. */
		if (datlen + pos->pos > buf->total_len)
			datlen = buf->total_len - pos->pos;
	} else {
		chain = buf->first;
		pos_in_chain = 0;
		if (datlen > buf->total_len)
			datlen = buf->total_len;
	}


	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy every chain that the request spans completely. */
	while (datlen && datlen >= chain->off - pos_in_chain) {
		size_t copylen = chain->off - pos_in_chain;
		memcpy(data,
		    chain->buffer + chain->misalign + pos_in_chain,
		    copylen);
		data += copylen;
		datlen -= copylen;

		chain = chain->next;
		pos_in_chain = 0;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* Copy the remaining partial chunk from the final chain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
		    datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1232 
/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/*  XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	/* Moving zero bytes, or moving a buffer onto itself, is a
	 * successful no-op. */
	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	/* We may not append to a frozen-end dst, nor remove from a
	 * frozen-start src. */
	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain: splice the fully-consumed
		 * chains [src->first .. previous] onto the tail of dst. */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks_(dst);
		evbuffer_invoke_callbacks_(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
1325 
/** Linearize the first 'size' bytes of 'buf' into one contiguous region
 * and return a pointer to it.  A negative 'size' means "the whole
 * buffer".  Returns NULL on failure: 'size' exceeds the buffered data,
 * a pinned chain blocks the copy, or allocation fails. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		/* The first chain is pinned in place: we may only append
		 * into its free space, never move or free it. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* Allocate a fresh chain large enough for all 'size'
		 * bytes and make it the new head. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Partially consume the chain holding the tail of the
		 * requested range. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Repair last_with_datap if it referred to a freed chain. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1437 
1438 /*
1439  * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
1440  * The returned buffer needs to be freed by the called.
1441  */
1442 char *
1443 evbuffer_readline(struct evbuffer *buffer)
1444 {
1445 	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
1446 }
1447 
1448 static inline ev_ssize_t
1449 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
1450 {
1451 	struct evbuffer_chain *chain = it->internal_.chain;
1452 	size_t i = it->internal_.pos_in_chain;
1453 	while (chain != NULL) {
1454 		char *buffer = (char *)chain->buffer + chain->misalign;
1455 		char *cp = memchr(buffer+i, chr, chain->off-i);
1456 		if (cp) {
1457 			it->internal_.chain = chain;
1458 			it->internal_.pos_in_chain = cp - buffer;
1459 			it->pos += (cp - buffer - i);
1460 			return it->pos;
1461 		}
1462 		it->pos += chain->off - i;
1463 		i = 0;
1464 		chain = chain->next;
1465 	}
1466 
1467 	return (-1);
1468 }
1469 
/* Return a pointer to the first '\r' or '\n' in the 'len' bytes at 's',
 * or NULL if neither occurs.  Scans in fixed-size chunks: benchmarking
 * found this faster in practice than two memchrs over the whole buffer,
 * a memchr per byte, or a hand-rolled scan. */
static inline char *
find_eol_char(char *s, size_t len)
{
	enum { EOL_SCAN_CHUNK = 128 };
	char *end = s + len;

	while (s < end) {
		size_t span = (size_t)(end - s);
		char *cr, *lf;

		if (span > EOL_SCAN_CHUNK)
			span = EOL_SCAN_CHUNK;
		cr = memchr(s, '\r', span);
		lf = memchr(s, '\n', span);
		/* When both appear in this chunk, report whichever comes
		 * first. */
		if (cr != NULL && lf != NULL)
			return (lf < cr) ? lf : cr;
		if (cr != NULL)
			return cr;
		if (lf != NULL)
			return lf;
		s += span;
	}

	return NULL;
}
1496 
1497 static ev_ssize_t
1498 evbuffer_find_eol_char(struct evbuffer_ptr *it)
1499 {
1500 	struct evbuffer_chain *chain = it->internal_.chain;
1501 	size_t i = it->internal_.pos_in_chain;
1502 	while (chain != NULL) {
1503 		char *buffer = (char *)chain->buffer + chain->misalign;
1504 		char *cp = find_eol_char(buffer+i, chain->off-i);
1505 		if (cp) {
1506 			it->internal_.chain = chain;
1507 			it->internal_.pos_in_chain = cp - buffer;
1508 			it->pos += (cp - buffer) - i;
1509 			return it->pos;
1510 		}
1511 		it->pos += chain->off - i;
1512 		i = 0;
1513 		chain = chain->next;
1514 	}
1515 
1516 	return (-1);
1517 }
1518 
1519 static inline int
1520 evbuffer_strspn(
1521 	struct evbuffer_ptr *ptr, const char *chrset)
1522 {
1523 	int count = 0;
1524 	struct evbuffer_chain *chain = ptr->internal_.chain;
1525 	size_t i = ptr->internal_.pos_in_chain;
1526 
1527 	if (!chain)
1528 		return 0;
1529 
1530 	while (1) {
1531 		char *buffer = (char *)chain->buffer + chain->misalign;
1532 		for (; i < chain->off; ++i) {
1533 			const char *p = chrset;
1534 			while (*p) {
1535 				if (buffer[i] == *p++)
1536 					goto next;
1537 			}
1538 			ptr->internal_.chain = chain;
1539 			ptr->internal_.pos_in_chain = i;
1540 			ptr->pos += count;
1541 			return count;
1542 		next:
1543 			++count;
1544 		}
1545 		i = 0;
1546 
1547 		if (! chain->next) {
1548 			ptr->internal_.chain = chain;
1549 			ptr->internal_.pos_in_chain = i;
1550 			ptr->pos += count;
1551 			return count;
1552 		}
1553 
1554 		chain = chain->next;
1555 	}
1556 }
1557 
1558 
1559 static inline int
1560 evbuffer_getchr(struct evbuffer_ptr *it)
1561 {
1562 	struct evbuffer_chain *chain = it->internal_.chain;
1563 	size_t off = it->internal_.pos_in_chain;
1564 
1565 	if (chain == NULL)
1566 		return -1;
1567 
1568 	return (unsigned char)chain->buffer[chain->misalign + off];
1569 }
1570 
/** Locate the end-of-line of style 'eol_style' at or after 'start'
 * (the front of the buffer when 'start' is NULL).  Returns a pointer
 * to the first byte of the EOL sequence; *eol_len_out (if given)
 * receives the number of bytes in that sequence.  When no EOL is
 * found, the returned position is "not found" (pos == -1). */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	/* Avoid locking in trivial edge cases */
	if (start && start->internal_.chain == NULL) {
		PTR_NOT_FOUND(&it);
		if (eol_len_out)
			*eol_len_out = extra_drain;
		return it;
	}

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it.internal_.chain = buffer->first;
		it.internal_.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		/* The EOL is the whole run of adjacent CR/LF bytes. */
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF: {
		ev_ssize_t start_pos = it.pos;
		/* Look for a LF ... */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		/* ... optionally preceded by a CR. */
		if (it.pos == start_pos)
			break; /* If the first character is \n, don't back up */
		/* This potentially does an extra linear walk over the first
		 * few chains.  Probably, that's not too expensive unless you
		 * have a really pathological setup. */
		memcpy(&it2, &it, sizeof(it));
		if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
			break;
		if (evbuffer_getchr(&it2) == '\r') {
			memcpy(&it, &it2, sizeof(it));
			extra_drain = 2;
		}
		break;
	}
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	case EVBUFFER_EOL_NUL:
		if (evbuffer_strchr(&it, '\0') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok)
		PTR_NOT_FOUND(&it);
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
1660 
/** Extract one line from the front of 'buffer', using 'eol_style' to
 * locate the end of line.  Returns a freshly allocated, NUL-terminated
 * string that the caller must free, or NULL if no complete line is
 * present, the buffer's start is frozen, or allocation fails.  On
 * success *n_read_out (if given) receives the line length excluding
 * the EOL; on failure it receives 0. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	/* The front of the buffer is frozen: we may not remove a line. */
	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	/* Copy out the line proper, then drop the EOL bytes. */
	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
1699 
1700 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
1701 
/* Adds data to an event buffer */

/** Append 'datlen' bytes from 'data_in' to the end of 'buf'.
 * Returns 0 on success, -1 on failure (frozen end or allocation
 * failure). */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	/* The end of the buffer is frozen: appending is forbidden. */
	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Free space left at the tail of the last chain. */
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	/* Grow geometrically (up to a cap) so that repeated small adds
	 * do not allocate one chain per call. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* Fill whatever space the old last chain still had ... */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	/* ... and put the rest into the new chain. */
	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks_(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1789 
1790 int
1791 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
1792 {
1793 	struct evbuffer_chain *chain, *tmp;
1794 	int result = -1;
1795 
1796 	EVBUFFER_LOCK(buf);
1797 
1798 	if (buf->freeze_start) {
1799 		goto done;
1800 	}
1801 
1802 	chain = buf->first;
1803 
1804 	if (chain == NULL) {
1805 		chain = evbuffer_chain_new(datlen);
1806 		if (!chain)
1807 			goto done;
1808 		evbuffer_chain_insert(buf, chain);
1809 	}
1810 
1811 	/* we cannot touch immutable buffers */
1812 	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1813 		/* If this chain is empty, we can treat it as
1814 		 * 'empty at the beginning' rather than 'empty at the end' */
1815 		if (chain->off == 0)
1816 			chain->misalign = chain->buffer_len;
1817 
1818 		if ((size_t)chain->misalign >= datlen) {
1819 			/* we have enough space to fit everything */
1820 			memcpy(chain->buffer + chain->misalign - datlen,
1821 			    data, datlen);
1822 			chain->off += datlen;
1823 			chain->misalign -= datlen;
1824 			buf->total_len += datlen;
1825 			buf->n_add_for_cb += datlen;
1826 			goto out;
1827 		} else if (chain->misalign) {
1828 			/* we can only fit some of the data. */
1829 			memcpy(chain->buffer,
1830 			    (char*)data + datlen - chain->misalign,
1831 			    (size_t)chain->misalign);
1832 			chain->off += (size_t)chain->misalign;
1833 			buf->total_len += (size_t)chain->misalign;
1834 			buf->n_add_for_cb += (size_t)chain->misalign;
1835 			datlen -= (size_t)chain->misalign;
1836 			chain->misalign = 0;
1837 		}
1838 	}
1839 
1840 	/* we need to add another chain */
1841 	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
1842 		goto done;
1843 	buf->first = tmp;
1844 	if (buf->last_with_datap == &buf->first)
1845 		buf->last_with_datap = &tmp->next;
1846 
1847 	tmp->next = chain;
1848 
1849 	tmp->off = datlen;
1850 	tmp->misalign = tmp->buffer_len - datlen;
1851 
1852 	memcpy(tmp->buffer + tmp->misalign, data, datlen);
1853 	buf->total_len += datlen;
1854 	buf->n_add_for_cb += (size_t)chain->misalign;
1855 
1856 out:
1857 	evbuffer_invoke_callbacks_(buf);
1858 	result = 0;
1859 done:
1860 	EVBUFFER_UNLOCK(buf);
1861 	return result;
1862 }
1863 
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Only legal on a chain we are allowed to move: neither immutable
	 * nor pinned. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* memmove, not memcpy: the regions may overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
1873 
1874 #define MAX_TO_COPY_IN_EXPAND 4096
1875 #define MAX_TO_REALIGN_IN_EXPAND 2048
1876 
1877 /** Helper: return true iff we should realign chain to fit datalen bytes of
1878     data in it. */
1879 static int
1880 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
1881     size_t datlen)
1882 {
1883 	return chain->buffer_len - chain->off >= datlen &&
1884 	    (chain->off < chain->buffer_len / 2) &&
1885 	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
1886 }
1887 
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk.  Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
	 * last chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty. */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
1998 
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* Only the free tail of this chain still counts. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		/* Free every fully-empty chain after last_with_data. */
		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed: restore a consistent chain
			 * list before reporting the error. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
2107 
2108 int
2109 evbuffer_expand(struct evbuffer *buf, size_t datlen)
2110 {
2111 	struct evbuffer_chain *chain;
2112 
2113 	EVBUFFER_LOCK(buf);
2114 	chain = evbuffer_expand_singlechain(buf, datlen);
2115 	EVBUFFER_UNLOCK(buf);
2116 	return chain ? 0 : -1;
2117 }
2118 
2119 /*
2120  * Reads data from a file descriptor into a buffer.
2121  */
2122 
2123 #if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
2124 #define USE_IOVEC_IMPL
2125 #endif
2126 
2127 #ifdef USE_IOVEC_IMPL
2128 
2129 #ifdef EVENT__HAVE_SYS_UIO_H
2130 /* number of iovec we use for writev, fragmentation is going to determine
2131  * how much we end up writing */
2132 
2133 #define DEFAULT_WRITE_IOVEC 128
2134 
2135 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
2136 #define NUM_WRITE_IOVEC UIO_MAXIOV
2137 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
2138 #define NUM_WRITE_IOVEC IOV_MAX
2139 #else
2140 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
2141 #endif
2142 
2143 #define IOV_TYPE struct iovec
2144 #define IOV_PTR_FIELD iov_base
2145 #define IOV_LEN_FIELD iov_len
2146 #define IOV_LEN_TYPE size_t
2147 #else
2148 #define NUM_WRITE_IOVEC 16
2149 #define IOV_TYPE WSABUF
2150 #define IOV_PTR_FIELD buf
2151 #define IOV_LEN_FIELD len
2152 #define IOV_LEN_TYPE unsigned long
2153 #endif
2154 #endif
2155 #define NUM_READ_IOVEC 4
2156 
2157 #define EVBUFFER_MAX_READ	4096
2158 
2159 /** Helper function to figure out which space to use for reading data into
2160     an evbuffer.  Internal use only.
2161 
2162     @param buf The buffer to read into
2163     @param howmuch How much we want to read.
2164     @param vecs An array of two or more iovecs or WSABUFs.
2165     @param n_vecs_avail The length of vecs
2166     @param chainp A pointer to a variable to hold the first chain we're
2167       reading into.
2168     @param exact Boolean: if true, we do not provide more than 'howmuch'
2169       space in the vectors, even if more space is available.
2170     @return The number of buffers we're using.
2171  */
int
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	/* Fill one iovec per chain until we have offered at least
	 * 'howmuch' bytes or run out of vectors.  With 'exact' set, the
	 * last vector is trimmed so the total never exceeds 'howmuch'. */
	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
2207 
/* Ask the kernel how many bytes are waiting to be read on socket 'fd'
 * (via FIONREAD where available, capped implicitly by the caller at
 * EVBUFFER_MAX_READ).  Returns -1 if the ioctl fails; returns
 * EVBUFFER_MAX_READ unconditionally when FIONREAD does not exist. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(_WIN32)
	/* Windows sockets use ioctlsocket() with an unsigned long out
	 * parameter. */
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
2225 
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
/* Read up to 'howmuch' bytes from socket 'fd' into 'buf' (a negative
 * 'howmuch' means "as much as is available"; in all cases the read is
 * capped at EVBUFFER_MAX_READ).  Returns the number of bytes read, 0 on
 * EOF/orderly close, or -1 on error or if the buffer's end is frozen. */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	/* Clamp the request to what the kernel reports as pending, and
	 * to EVBUFFER_MAX_READ in any case. */
	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef _WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef _WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Commit the bytes we actually received: walk the chains we
	 * read into, advancing each one's 'off' by the amount it got. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			/* Last chain that received data becomes the new
			 * last_with_datap. */
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2352 
#ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes from the front of 'buffer' to 'fd' with a
 * single scatter/gather call (writev or WSASend), using at most
 * NUM_WRITE_IOVEC leading chains.  Stops early at a sendfile-backed
 * chain, which cannot go through writev.  Returns the number of bytes
 * written, or -1 on error.  Caller must hold the buffer lock; the
 * buffer is not drained here. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;

#ifdef _WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
2404 
#ifdef USE_SENDFILE
/* Send the leading sendfile-backed chain of 'buffer' to 'dest_fd' with
 * the platform's sendfile(2) variant.  Note that 'howmuch' is currently
 * unused: every variant below sends the whole chain (chain->off bytes
 * starting at chain->misalign within the file).  Returns bytes written,
 * 0 when the call would merely block (retriable errno, no progress),
 * or -1 on a real error.  Caller must hold the buffer lock. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_file_segment *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
		chain);
	const int source_fd = info->segment->fd;
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	ev_off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	ev_off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	/* 'len' was updated in place by sendfile. */
	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(dest_fd, source_fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(dest_fd, source_fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress counts as success: report the
			 * number of bytes the offset advanced by. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif
2461 
/* Write up to 'howmuch' bytes (a negative value means "everything")
 * from the front of 'buffer' to 'fd', then drain the bytes that were
 * actually written.  Returns the number of bytes written, or -1 on
 * error or if the buffer's front is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		/* A sendfile-backed first chain must go through the
		 * sendfile path; writev cannot touch it. */
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(_WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove what we managed to write from the buffer. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
2507 
2508 int
2509 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2510 {
2511 	return evbuffer_write_atmost(buffer, fd, -1);
2512 }
2513 
2514 unsigned char *
2515 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2516 {
2517 	unsigned char *search;
2518 	struct evbuffer_ptr ptr;
2519 
2520 	EVBUFFER_LOCK(buffer);
2521 
2522 	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2523 	if (ptr.pos < 0) {
2524 		search = NULL;
2525 	} else {
2526 		search = evbuffer_pullup(buffer, ptr.pos + len);
2527 		if (search)
2528 			search += ptr.pos;
2529 	}
2530 	EVBUFFER_UNLOCK(buffer);
2531 	return search;
2532 }
2533 
2534 /* Subract <b>howfar</b> from the position of <b>pos</b> within
2535  * <b>buf</b>. Returns 0 on success, -1 on failure.
2536  *
2537  * This isn't exposed yet, because of potential inefficiency issues.
2538  * Maybe it should be. */
2539 static int
2540 evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
2541     size_t howfar)
2542 {
2543 	if (howfar > (size_t)pos->pos)
2544 		return -1;
2545 	if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
2546 		pos->internal_.pos_in_chain -= howfar;
2547 		pos->pos -= howfar;
2548 		return 0;
2549 	} else {
2550 		const size_t newpos = pos->pos - howfar;
2551 		/* Here's the inefficient part: it walks over the
2552 		 * chains until we hit newpos. */
2553 		return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
2554 	}
2555 }
2556 
/* Point *pos at byte 'position' of 'buf' (EVBUFFER_PTR_SET) or advance
 * it by 'position' bytes (EVBUFFER_PTR_ADD).  On success, updates the
 * pointer's chain/offset bookkeeping and returns 0.  If the target lies
 * beyond the end of the buffer, the pointer is marked not-found and -1
 * is returned. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;
	int result = 0;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->internal_.chain;
		pos->pos += position;
		position = pos->internal_.pos_in_chain;
		break;
	}

	/* Walk forward until the remaining 'left' bytes fit inside one
	 * chain; 'position' is the offset already consumed in the first
	 * chain of the walk. */
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->internal_.chain = chain;
		pos->internal_.pos_in_chain = position + left;
	} else if (left == 0) {
		/* The first byte in the (nonexistent) chain after the last chain */
		pos->internal_.chain = NULL;
		pos->internal_.pos_in_chain = 0;
	} else {
		PTR_NOT_FOUND(pos);
		result = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return result;
}
2603 
2604 /**
2605    Compare the bytes in buf at position pos to the len bytes in mem.  Return
2606    less than 0, 0, or greater than 0 as memcmp.
2607  */
2608 static int
2609 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
2610     const char *mem, size_t len)
2611 {
2612 	struct evbuffer_chain *chain;
2613 	size_t position;
2614 	int r;
2615 
2616 	ASSERT_EVBUFFER_LOCKED(buf);
2617 
2618 	if (pos->pos + len > buf->total_len)
2619 		return -1;
2620 
2621 	chain = pos->internal_.chain;
2622 	position = pos->internal_.pos_in_chain;
2623 	while (len && chain) {
2624 		size_t n_comparable;
2625 		if (len + position > chain->off)
2626 			n_comparable = chain->off - position;
2627 		else
2628 			n_comparable = len;
2629 		r = memcmp(chain->buffer + chain->misalign + position, mem,
2630 		    n_comparable);
2631 		if (r)
2632 			return r;
2633 		mem += n_comparable;
2634 		len -= n_comparable;
2635 		position = 0;
2636 		chain = chain->next;
2637 	}
2638 
2639 	return 0;
2640 }
2641 
2642 struct evbuffer_ptr
2643 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
2644 {
2645 	return evbuffer_search_range(buffer, what, len, start, NULL);
2646 }
2647 
/* Search 'buffer' for the first occurrence of the 'len'-byte string
 * 'what', starting at 'start' (NULL: the beginning) and not matching
 * past 'end' (NULL: the end of the buffer).  Returns a position whose
 * pos field is -1 when the string is not found. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos.internal_.chain;
	} else {
		pos.pos = 0;
		chain = pos.internal_.chain = buffer->first;
		pos.internal_.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->internal_.chain;

	/* Empty or absurdly long patterns can never match. */
	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos.internal_.pos_in_chain;
		/* Look for the pattern's first byte inside this chain,
		 * then verify the full pattern with a cross-chain
		 * comparison. */
		p = memchr(start_at, first,
		    chain->off - pos.internal_.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos.internal_.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* A match that extends past 'end' does
				 * not count. */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False alarm: advance one byte and retry. */
			++pos.pos;
			++pos.internal_.pos_in_chain;
			if (pos.internal_.pos_in_chain == chain->off) {
				chain = pos.internal_.chain = chain->next;
				pos.internal_.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos.internal_.pos_in_chain;
			chain = pos.internal_.chain = chain->next;
			pos.internal_.pos_in_chain = 0;
		}
	}

not_found:
	PTR_NOT_FOUND(&pos);
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
2711 
/* Expose up to 'len' readable bytes of 'buffer' (all bytes when len is
 * negative) as iovecs without copying or draining anything.  At most
 * 'n_vec' entries of 'vec' are filled; the return value is the number
 * of vectors needed, which may exceed n_vec.  If 'start_at' is given,
 * peeking begins there instead of at the front. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	/* Avoid locking in trivial edge cases */
	if (start_at && start_at->internal_.chain == NULL)
		return 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* The first vector covers the tail of the starting
		 * chain, from the start position to the chain's end. */
		chain = start_at->internal_.chain;
		len_so_far = chain->off
		    - start_at->internal_.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->internal_.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* Out of vectors and no length bound: stop
			 * counting. */
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
2766 
2767 
/* Append a vprintf-style formatted string to 'buf'.  Returns the number
 * of bytes appended, or -1 on error (formatting failure, allocation
 * failure, or a frozen buffer end). */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		/* Format into a copy of 'ap' so we can retry with the
		 * same arguments if the output didn't fit. */
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			/* It fit: commit the bytes and notify callbacks. */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks_(buf);
			result = sz;
			goto done;
		}
		/* Truncated: grow to at least sz+1 bytes (room for the
		 * NUL) and try again. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
2828 
/* Append a printf-style formatted string to 'buf'.  Returns the number
 * of bytes appended, or -1 on error. */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	va_list args;
	int added;

	va_start(args, fmt);
	added = evbuffer_add_vprintf(buf, fmt, args);
	va_end(args);

	return added;
}
2841 
/* Append 'datlen' bytes at 'data' to 'outbuf' by reference (zero-copy):
 * the chain points at the caller's memory and is marked immutable.
 * When the chain is eventually released, 'cleanupfn' (stored with
 * 'extra') lets the caller reclaim 'data'.  Returns 0 on success, -1 on
 * failure.  If the buffer's end is frozen, the chain is discarded
 * WITHOUT running the cleanup function. */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks_(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
2881 
/* TODO(niels): we may want to add to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
/* Create a refcounted file-segment object covering 'length' bytes of
 * 'fd' starting at 'offset'.  A length of -1 means "through the end of
 * the file" (determined with fstat).  Unless sendfile is compiled in
 * and allowed by 'flags', the contents are materialized immediately.
 * Returns NULL on failure; on success the segment's refcnt is 1 and a
 * lock is allocated unless EVBUF_FS_DISABLE_LOCKING is set. */
struct evbuffer_file_segment *
evbuffer_file_segment_new(
	int fd, ev_off_t offset, ev_off_t length, unsigned flags)
{
	struct evbuffer_file_segment *seg =
	    mm_calloc(sizeof(struct evbuffer_file_segment), 1);
	if (!seg)
		return NULL;
	seg->refcnt = 1;
	seg->fd = fd;
	seg->flags = flags;
	seg->file_offset = offset;
	seg->cleanup_cb = NULL;
	seg->cleanup_cb_arg = NULL;
#ifdef _WIN32
#ifndef lseek
#define lseek _lseeki64
#endif
#ifndef fstat
#define fstat _fstat
#endif
#ifndef stat
#define stat _stat
#endif
#endif
	if (length == -1) {
		struct stat st;
		if (fstat(fd, &st) < 0)
			goto err;
		/* NOTE(review): this uses the full file size even when
		 * offset != 0 -- confirm whether (st.st_size - offset)
		 * was intended. */
		length = st.st_size;
	}
	seg->length = length;

#if defined(USE_SENDFILE)
	if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
		/* Defer materialization: sendfile can stream straight
		 * from the fd when the data is drained to a socket. */
		seg->can_sendfile = 1;
		goto done;
	}
#endif

	if (evbuffer_file_segment_materialize(seg)<0)
		goto err;

#if defined(USE_SENDFILE)
done:
#endif
	if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
		EVTHREAD_ALLOC_LOCK(seg->lock, 0);
	}
	return seg;
err:
	mm_free(seg);
	return NULL;
}
2939 
/* Make the segment's bytes accessible through memory: try mmap (POSIX)
 * or CreateFileMapping (Windows) first, then fall back to reading the
 * requested range into a heap buffer.  No-op if already materialized.
 * Returns 0 on success, -1 on failure. */
/* Requires lock */
static int
evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
{
	const unsigned flags = seg->flags;
	const int fd = seg->fd;
	const ev_off_t length = seg->length;
	const ev_off_t offset = seg->file_offset;

	if (seg->contents)
		return 0; /* already materialized */

#if defined(EVENT__HAVE_MMAP)
	if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
		off_t offset_rounded = 0, offset_leftover = 0;
		void *mapped;
		if (offset) {
			/* mmap implementations don't generally like us
			 * to have an offset that isn't a round  */
#ifdef SC_PAGE_SIZE
			long page_size = sysconf(SC_PAGE_SIZE);
#elif defined(_SC_PAGE_SIZE)
			long page_size = sysconf(_SC_PAGE_SIZE);
#else
			long page_size = 1;
#endif
			if (page_size == -1)
				goto err;
			/* Round the offset down to a page boundary and
			 * remember the slack we have to skip. */
			offset_leftover = offset % page_size;
			offset_rounded = offset - offset_leftover;
		}
		mapped = mmap(NULL, length + offset_leftover,
		    PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE | /* ??? */
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_PRIVATE,
		    fd, offset_rounded);
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
		} else {
			/* 'contents' points past the page-alignment
			 * slack at the requested offset. */
			seg->mapping = mapped;
			seg->contents = (char*)mapped+offset_leftover;
			seg->mmap_offset = 0;
			seg->is_mapping = 1;
			goto done;
		}
	}
#endif
#ifdef _WIN32
	if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
		intptr_t h = _get_osfhandle(fd);
		HANDLE m;
		ev_uint64_t total_size = length+offset;
		if ((HANDLE)h == INVALID_HANDLE_VALUE)
			goto err;
		m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
		    (total_size >> 32), total_size & 0xfffffffful,
		    NULL);
		if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */
			/* Only the mapping handle is created here; the
			 * actual view is mapped later (see
			 * evbuffer_add_file_segment), so 'contents'
			 * stays NULL on this path. */
			seg->mapping_handle = m;
			seg->mmap_offset = offset;
			seg->is_mapping = 1;
			goto done;
		}
	}
#endif
	{
		/* Fallback: read the range into heap memory, restoring
		 * the fd's file position afterward. */
		ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
		ev_off_t read_so_far = 0;
		char *mem;
		int e;
		ev_ssize_t n = 0;
		if (!(mem = mm_malloc(length)))
			goto err;
		if (start_pos < 0) {
			mm_free(mem);
			goto err;
		}
		if (lseek(fd, offset, SEEK_SET) < 0) {
			mm_free(mem);
			goto err;
		}
		while (read_so_far < length) {
			n = read(fd, mem+read_so_far, length-read_so_far);
			if (n <= 0)
				break;
			read_so_far += n;
		}

		/* Preserve read()'s errno across the restoring lseek. */
		e = errno;
		pos = lseek(fd, start_pos, SEEK_SET);
		if (n < 0 || (n == 0 && length > read_so_far)) {
			/* Read error, or short file. */
			mm_free(mem);
			errno = e;
			goto err;
		} else if (pos < 0) {
			mm_free(mem);
			goto err;
		}

		seg->contents = mem;
	}

done:
	return 0;
err:
	return -1;
}
3054 
3055 void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
3056 	evbuffer_file_segment_cleanup_cb cb, void* arg)
3057 {
3058 	EVUTIL_ASSERT(seg->refcnt > 0);
3059 	seg->cleanup_cb = cb;
3060 	seg->cleanup_cb_arg = arg;
3061 }
3062 
/* Drop one reference to 'seg'.  When the count reaches zero: unmap or
 * free its contents, close the fd if EVBUF_FS_CLOSE_ON_FREE is set,
 * invoke any registered cleanup callback, and free the object. */
void
evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
{
	int refcnt;
	EVLOCK_LOCK(seg->lock, 0);
	refcnt = --seg->refcnt;
	EVLOCK_UNLOCK(seg->lock, 0);
	if (refcnt > 0)
		return;
	EVUTIL_ASSERT(refcnt == 0);

	if (seg->is_mapping) {
#ifdef _WIN32
		CloseHandle(seg->mapping_handle);
#elif defined (EVENT__HAVE_MMAP)
		/* NOTE(review): the mmap in materialize covers
		 * length + offset_leftover bytes, but only seg->length
		 * is unmapped here -- confirm this is intended (munmap
		 * works at page granularity). */
		if (munmap(seg->mapping, seg->length) == -1)
			event_warn("%s: munmap failed", __func__);
#endif
	} else if (seg->contents) {
		mm_free(seg->contents);
	}

	if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
		close(seg->fd);
	}

	if (seg->cleanup_cb) {
		(*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
		    seg->flags, seg->cleanup_cb_arg);
		seg->cleanup_cb = NULL;
		seg->cleanup_cb_arg = NULL;
	}

	EVTHREAD_FREE_LOCK(seg->lock, 0);
	mm_free(seg);
}
3099 
/* Append 'length' bytes of file segment 'seg', starting 'offset' bytes
 * into the segment (length == -1 means "through the segment's end"),
 * to 'buf'.  Takes its own reference on 'seg' for the new chain and
 * drops it again on the error paths below.  Returns 0 on success, -1
 * on failure. */
int
evbuffer_add_file_segment(struct evbuffer *buf,
    struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_file_segment *extra;
	int can_use_sendfile = 0;

	EVBUFFER_LOCK(buf);
	EVLOCK_LOCK(seg->lock, 0);
	if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
		can_use_sendfile = 1;
	} else {
		/* Data will be read through memory, so make sure the
		 * segment's bytes are actually mapped/loaded. */
		if (!seg->contents) {
			if (evbuffer_file_segment_materialize(seg)<0) {
				EVLOCK_UNLOCK(seg->lock, 0);
				EVBUFFER_UNLOCK(buf);
				return -1;
			}
		}
	}
	++seg->refcnt;
	EVLOCK_UNLOCK(seg->lock, 0);

	if (buf->freeze_end)
		goto err;

	if (length < 0) {
		if (offset > seg->length)
			goto err;
		length = seg->length - offset;
	}

	/* Can we actually add this? */
	if (offset+length > seg->length)
		goto err;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
	if (!chain)
		goto err;
	extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);

	chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
	if (can_use_sendfile && seg->can_sendfile) {
		/* sendfile path: the chain records only file offsets,
		 * no memory pointer. */
		chain->flags |= EVBUFFER_SENDFILE;
		chain->misalign = seg->file_offset + offset;
		chain->off = length;
		chain->buffer_len = chain->misalign + length;
	} else if (seg->is_mapping) {
#ifdef _WIN32
		/* Map a view of just the requested range, rounded down
		 * to the system allocation granularity. */
		ev_uint64_t total_offset = seg->mmap_offset+offset;
		ev_uint64_t offset_rounded=0, offset_remaining=0;
		LPVOID data;
		if (total_offset) {
			SYSTEM_INFO si;
			memset(&si, 0, sizeof(si)); /* cargo cult */
			GetSystemInfo(&si);
			offset_remaining = total_offset % si.dwAllocationGranularity;
			offset_rounded = total_offset - offset_remaining;
		}
		data = MapViewOfFile(
			seg->mapping_handle,
			FILE_MAP_READ,
			offset_rounded >> 32,
			offset_rounded & 0xfffffffful,
			length + offset_remaining);
		if (data == NULL) {
			mm_free(chain);
			goto err;
		}
		chain->buffer = (unsigned char*) data;
		chain->buffer_len = length+offset_remaining;
		chain->misalign = offset_remaining;
		chain->off = length;
#else
		chain->buffer = (unsigned char*)(seg->contents + offset);
		chain->buffer_len = length;
		chain->off = length;
#endif
	} else {
		chain->buffer = (unsigned char*)(seg->contents + offset);
		chain->buffer_len = length;
		chain->off = length;
	}

	extra->segment = seg;
	buf->n_add_for_cb += length;
	evbuffer_chain_insert(buf, chain);

	evbuffer_invoke_callbacks_(buf);

	EVBUFFER_UNLOCK(buf);

	return 0;
err:
	EVBUFFER_UNLOCK(buf);
	/* Undo the reference we took above. */
	evbuffer_file_segment_free(seg);
	return -1;
}
3199 
3200 int
3201 evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
3202 {
3203 	struct evbuffer_file_segment *seg;
3204 	unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
3205 	int r;
3206 
3207 	seg = evbuffer_file_segment_new(fd, offset, length, flags);
3208 	if (!seg)
3209 		return -1;
3210 	r = evbuffer_add_file_segment(buf, seg, 0, length);
3211 	if (r == 0)
3212 		evbuffer_file_segment_free(seg);
3213 	return r;
3214 }
3215 
3216 void
3217 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
3218 {
3219 	EVBUFFER_LOCK(buffer);
3220 
3221 	if (!LIST_EMPTY(&buffer->callbacks))
3222 		evbuffer_remove_all_callbacks(buffer);
3223 
3224 	if (cb) {
3225 		struct evbuffer_cb_entry *ent =
3226 		    evbuffer_add_cb(buffer, NULL, cbarg);
3227 		ent->cb.cb_obsolete = cb;
3228 		ent->flags |= EVBUFFER_CB_OBSOLETE;
3229 	}
3230 	EVBUFFER_UNLOCK(buffer);
3231 }
3232 
3233 struct evbuffer_cb_entry *
3234 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3235 {
3236 	struct evbuffer_cb_entry *e;
3237 	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
3238 		return NULL;
3239 	EVBUFFER_LOCK(buffer);
3240 	e->cb.cb_func = cb;
3241 	e->cbarg = cbarg;
3242 	e->flags = EVBUFFER_CB_ENABLED;
3243 	LIST_INSERT_HEAD(&buffer->callbacks, e, next);
3244 	EVBUFFER_UNLOCK(buffer);
3245 	return e;
3246 }
3247 
3248 int
3249 evbuffer_remove_cb_entry(struct evbuffer *buffer,
3250 			 struct evbuffer_cb_entry *ent)
3251 {
3252 	EVBUFFER_LOCK(buffer);
3253 	LIST_REMOVE(ent, next);
3254 	EVBUFFER_UNLOCK(buffer);
3255 	mm_free(ent);
3256 	return 0;
3257 }
3258 
3259 int
3260 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
3261 {
3262 	struct evbuffer_cb_entry *cbent;
3263 	int result = -1;
3264 	EVBUFFER_LOCK(buffer);
3265 	LIST_FOREACH(cbent, &buffer->callbacks, next) {
3266 		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
3267 			result = evbuffer_remove_cb_entry(buffer, cbent);
3268 			goto done;
3269 		}
3270 	}
3271 done:
3272 	EVBUFFER_UNLOCK(buffer);
3273 	return result;
3274 }
3275 
3276 int
3277 evbuffer_cb_set_flags(struct evbuffer *buffer,
3278 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3279 {
3280 	/* the user isn't allowed to mess with these. */
3281 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3282 	EVBUFFER_LOCK(buffer);
3283 	cb->flags |= flags;
3284 	EVBUFFER_UNLOCK(buffer);
3285 	return 0;
3286 }
3287 
3288 int
3289 evbuffer_cb_clear_flags(struct evbuffer *buffer,
3290 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3291 {
3292 	/* the user isn't allowed to mess with these. */
3293 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3294 	EVBUFFER_LOCK(buffer);
3295 	cb->flags &= ~flags;
3296 	EVBUFFER_UNLOCK(buffer);
3297 	return 0;
3298 }
3299 
3300 int
3301 evbuffer_freeze(struct evbuffer *buffer, int start)
3302 {
3303 	EVBUFFER_LOCK(buffer);
3304 	if (start)
3305 		buffer->freeze_start = 1;
3306 	else
3307 		buffer->freeze_end = 1;
3308 	EVBUFFER_UNLOCK(buffer);
3309 	return 0;
3310 }
3311 
3312 int
3313 evbuffer_unfreeze(struct evbuffer *buffer, int start)
3314 {
3315 	EVBUFFER_LOCK(buffer);
3316 	if (start)
3317 		buffer->freeze_start = 0;
3318 	else
3319 		buffer->freeze_end = 0;
3320 	EVBUFFER_UNLOCK(buffer);
3321 	return 0;
3322 }
3323 
#if 0
/* NOTE(review): compiled-out draft of a callback suspend/unsuspend
 * mechanism (batches buffer-size changes while suspended and reports
 * them on unsuspend).  Kept for reference only; confirm whether it
 * should be revived or deleted. */
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif
3349 
3350