/*	$NetBSD: bufferevent_async.c,v 1.6 2020/05/25 20:47:33 christos Exp $	*/

/*
 * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef _WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#endif

#include <sys/queue.h>

#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "event2/util.h"
#include "event-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
#include "iocp-internal.h"

#ifndef SO_UPDATE_CONNECT_CONTEXT
/* Mingw is sometimes missing this */
#define SO_UPDATE_CONNECT_CONTEXT 0x7010
#endif

/* prototypes */
static int be_async_enable(struct bufferevent *, short);
static int be_async_disable(struct bufferevent *, short);
static void be_async_destruct(struct bufferevent *);
static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);

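/* An IOCP-based bufferevent.  The three event_overlapped members track the
 * outstanding ConnectEx, read, and write operations; read_in_progress and
 * write_in_progress hold the number of bytes requested for the pending
 * overlapped operation (0 when none is pending). */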
struct bufferevent_async {
	struct bufferevent_private bev;
	struct event_overlapped connect_overlapped;
	struct event_overlapped read_overlapped;
	struct event_overlapped write_overlapped;
	size_t read_in_progress;
	size_t write_in_progress;
	unsigned ok : 1;
	unsigned read_added : 1;
	unsigned write_added : 1;
};

const struct bufferevent_ops bufferevent_ops_async = {
	"socket_async",
	evutil_offsetof(struct bufferevent_async, bev.bev),
	be_async_enable,
	be_async_disable,
	NULL, /* Unlink */
	be_async_destruct,
	bufferevent_generic_adj_timeouts_,
	be_async_flush,
	be_async_ctrl,
};

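/* Helpers to recover the enclosing bufferevent_async from a generic
 * bufferevent or from one of its embedded event_overlapped structures. */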
static inline struct bufferevent_async *
upcast(struct bufferevent *bev)
{
	struct bufferevent_async *bev_a;
	if (bev->be_ops != &bufferevent_ops_async)
		return NULL;
	bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
	return bev_a;
}

static inline struct bufferevent_async *
upcast_connect(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}

static inline struct bufferevent_async *
upcast_read(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}

static inline struct bufferevent_async *
upcast_write(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}

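/* The add/del helpers below keep a "virtual" event registered on the
 * event_base for each kind of outstanding overlapped operation, so that the
 * event loop does not exit while IOCP I/O is still pending even if no
 * regular events are installed. */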
static void
bev_async_del_write(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;

	if (beva->write_added) {
		beva->write_added = 0;
		event_base_del_virtual_(bev->ev_base);
	}
}

static void
bev_async_del_read(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;

	if (beva->read_added) {
		beva->read_added = 0;
		event_base_del_virtual_(bev->ev_base);
	}
}

static void
bev_async_add_write(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;

	if (!beva->write_added) {
		beva->write_added = 1;
		event_base_add_virtual_(bev->ev_base);
	}
}

static void
bev_async_add_read(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;

	if (!beva->read_added) {
		beva->read_added = 1;
		event_base_add_virtual_(bev->ev_base);
	}
}

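/* Launch an overlapped write for as much of the output buffer as the rate
 * limit allows, unless a write is already in progress, we are still
 * connecting, writing is disabled or suspended, or the buffer is empty. */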
static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't write if there's a write in progress, or we do not
	 * want to write, or when there's nothing left to write. */
	if (beva->write_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_WRITE) ||
	    !evbuffer_get_length(bev->output)) {
		bev_async_del_write(beva);
		return;
	}

	at_most = evbuffer_get_length(bev->output);

	/* This is safe so long as bufferevent_get_write_max never returns
	 * more than INT_MAX.  That's true for now. XXXX */
	limit = (int)bufferevent_get_write_max_(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.write_suspended) {
		bev_async_del_write(beva);
		return;
	}

	/* XXXX doesn't respect low-water mark very well. */
	bufferevent_incref_(bev);
	if (evbuffer_launch_write_(bev->output, at_most,
	    &beva->write_overlapped)) {
		bufferevent_decref_(bev);
		beva->ok = 0;
		bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
	} else {
		beva->write_in_progress = at_most;
		bufferevent_decrement_write_buckets_(&beva->bev, at_most);
		bev_async_add_write(beva);
	}
}

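/* Launch an overlapped read, sized by the read high-water mark (or the
 * default of 16384 bytes) and capped by the rate limit, unless a read is
 * already in progress, we are still connecting, or reading is disabled or
 * suspended. */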
static void
bev_async_consider_reading(struct bufferevent_async *beva)
{
	size_t cur_size;
	size_t read_high;
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't read if there is a read in progress, or we do not
	 * want to read. */
	if (beva->read_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_READ)) {
		bev_async_del_read(beva);
		return;
	}

	/* Don't read if we're full */
	cur_size = evbuffer_get_length(bev->input);
	read_high = bev->wm_read.high;
	if (read_high) {
		if (cur_size >= read_high) {
			bev_async_del_read(beva);
			return;
		}
		at_most = read_high - cur_size;
	} else {
		at_most = 16384; /* FIXME totally magic. */
	}

	/* XXXX This over-commits. */
	/* XXXX see also the note above on the cast of bufferevent_get_write_max_() */
	limit = (int)bufferevent_get_read_max_(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.read_suspended) {
		bev_async_del_read(beva);
		return;
	}

	bufferevent_incref_(bev);
	if (evbuffer_launch_read_(bev->input, at_most, &beva->read_overlapped)) {
		beva->ok = 0;
		bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
		bufferevent_decref_(bev);
	} else {
		beva->read_in_progress = at_most;
		bufferevent_decrement_read_buckets_(&beva->bev, at_most);
		bev_async_add_read(beva);
	}

	return;
}

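/* evbuffer callbacks installed by bufferevent_async_new_(): they notice when
 * data is added to the output buffer or drained from the input buffer and
 * re-arm the corresponding overlapped operation if appropriate. */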
static void
be_async_outbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_async = upcast(bev);

	/* If we added data to the outbuf and were not writing before,
	 * we may want to write now. */

	bufferevent_incref_and_lock_(bev);

	if (cbinfo->n_added)
		bev_async_consider_writing(bev_async);

	bufferevent_decref_and_unlock_(bev);
}

static void
be_async_inbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_async = upcast(bev);

	/* If we drained data from the inbuf and were not reading before,
	 * we may want to read now. */

	bufferevent_incref_and_lock_(bev);

	if (cbinfo->n_deleted)
		bev_async_consider_reading(bev_async);

	bufferevent_decref_and_unlock_(bev);
}

static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *bev_async = upcast(buf);

	if (!bev_async->ok)
		return -1;

	if (bev_async->bev.connecting) {
		/* Don't launch anything during connection attempts. */
		return 0;
	}

	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* If we newly enable reading or writing, and we aren't reading or
	   writing already, consider launching a new read or write. */

	if (what & EV_READ)
		bev_async_consider_reading(bev_async);
	if (what & EV_WRITE)
		bev_async_consider_writing(bev_async);
	return 0;
}

static int
be_async_disable(struct bufferevent *bev, short what)
{
	struct bufferevent_async *bev_async = upcast(bev);
	/* XXXX If we disable reading or writing, we may want to consider
	 * canceling any in-progress read or write operation, though it might
	 * not work. */

	if (what & EV_READ) {
		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
		bev_async_del_read(bev_async);
	}
	if (what & EV_WRITE) {
		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
		bev_async_del_write(bev_async);
	}

	return 0;
}

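/* Destructor: requires that no overlapped I/O is still in flight, drops any
 * virtual events, and closes the socket if BEV_OPT_CLOSE_ON_FREE is set. */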
static void
be_async_destruct(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	evutil_socket_t fd;

	EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
			!upcast(bev)->read_in_progress);

	bev_async_del_read(bev_async);
	bev_async_del_write(bev_async);

	fd = evbuffer_overlapped_get_fd_(bev->input);
	if (fd != (evutil_socket_t)INVALID_SOCKET &&
		(bev_p->options & BEV_OPT_CLOSE_ON_FREE)) {
		evutil_closesocket(fd);
		evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
	}
}

/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
 * we use WSAGetOverlappedResult to translate. */
static void
bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
{
	DWORD bytes, flags;
	evutil_socket_t fd;

	fd = evbuffer_overlapped_get_fd_(bev->input);
	WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}

static int
be_async_flush(struct bufferevent *bev, short what,
    enum bufferevent_flush_mode mode)
{
	return 0;
}

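/* Completion callbacks, invoked from the IOCP machinery when an overlapped
 * connect, read, or write finishes.  'ok' tells whether the operation
 * succeeded and 'nbytes' how many bytes were transferred; each callback
 * commits the result, reports errors or EOF, and considers launching the
 * next operation. */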
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_connect(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	evutil_socket_t sock;

	BEV_LOCK(bev);

	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;
	sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
	/* XXXX Handle error? */
	setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);

	if (ok)
		bufferevent_async_set_connected_(bev);
	else
		bev_async_set_wsa_error(bev, eo);

	bufferevent_run_eventcb_(bev,
			ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR, 0);

	event_base_del_virtual_(bev->ev_base);

	bufferevent_decref_and_unlock_(bev);
}

static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;
	ev_ssize_t amount_unread;
	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->read_in_progress);

	amount_unread = bev_a->read_in_progress - nbytes;
	evbuffer_commit_read_(bev->input, nbytes);
	bev_a->read_in_progress = 0;
	if (amount_unread)
		bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);

	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			BEV_RESET_GENERIC_READ_TIMEOUT(bev);
			bufferevent_trigger_nolock_(bev, EV_READ, 0);
			bev_async_consider_reading(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		} else if (!nbytes) {
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		}
	}

	bufferevent_decref_and_unlock_(bev);
}

static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;
	ev_ssize_t amount_unwritten;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->write_in_progress);

	amount_unwritten = bev_a->write_in_progress - nbytes;
	evbuffer_commit_write_(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	if (amount_unwritten)
		bufferevent_decrement_write_buckets_(&bev_a->bev,
		                                     -amount_unwritten);

	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
			bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
			bev_async_consider_writing(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		} else if (!nbytes) {
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		}
	}

	bufferevent_decref_and_unlock_(bev);
}

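/* Construct an IOCP-backed bufferevent over 'fd'.  The socket is associated
 * with the base's IOCP port, both evbuffers are created as overlapped
 * buffers, and the completion callbacks above are wired up.  Returns NULL if
 * the base has no IOCP port, if association fails, or on allocation
 * failure. */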
struct bufferevent *
bufferevent_async_new_(struct event_base *base,
    evutil_socket_t fd, int options)
{
	struct bufferevent_async *bev_a;
	struct bufferevent *bev;
	struct event_iocp_port *iocp;

	options |= BEV_OPT_THREADSAFE;

	if (!(iocp = event_base_get_iocp_(base)))
		return NULL;

	if (fd >= 0 && event_iocp_port_associate_(iocp, fd, 1)<0) {
		int err = GetLastError();
		/* We may have already associated this fd with a port.
		 * Let's hope it's this port, and that the error code
		 * for doing this never changes. */
		if (err != ERROR_INVALID_PARAMETER)
			return NULL;
	}

	if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
		return NULL;

	bev = &bev_a->bev.bev;
	if (!(bev->input = evbuffer_overlapped_new_(fd))) {
		mm_free(bev_a);
		return NULL;
	}
	if (!(bev->output = evbuffer_overlapped_new_(fd))) {
		evbuffer_free(bev->input);
		mm_free(bev_a);
		return NULL;
	}

	if (bufferevent_init_common_(&bev_a->bev, base, &bufferevent_ops_async,
		options)<0)
		goto err;

	evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
	evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);

	event_overlapped_init_(&bev_a->connect_overlapped, connect_complete);
	event_overlapped_init_(&bev_a->read_overlapped, read_complete);
	event_overlapped_init_(&bev_a->write_overlapped, write_complete);

	bufferevent_init_generic_timeout_cbs_(bev);

	bev_a->ok = fd >= 0;

	return bev;
err:
	bufferevent_free(&bev_a->bev.bev);
	return NULL;
}

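/* Mark the bufferevent as usable once a connect has completed, and kick off
 * any reads or writes that were enabled while we were still connecting. */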
void
bufferevent_async_set_connected_(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	bev_async->ok = 1;
	bufferevent_init_generic_timeout_cbs_(bev);
	/* Now's a good time to consider reading/writing */
	be_async_enable(bev, bev->enabled);
}

int
bufferevent_async_can_connect_(struct bufferevent *bev)
{
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns_();

	if (BEV_IS_ASYNC(bev) &&
	    event_base_get_iocp_(bev->ev_base) &&
	    ext && ext->ConnectEx)
		return 1;

	return 0;
}

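/* Start an overlapped connect with ConnectEx().  Returns 0 if the connect
 * completed immediately or is pending, -1 on failure. */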
int
bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
	const struct sockaddr *sa, int socklen)
{
	BOOL rc;
	struct bufferevent_async *bev_async = upcast(bev);
	struct sockaddr_storage ss;
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns_();

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() requires that the socket be bound to an address
	 * with bind() before use; otherwise it will fail. We attempt
	 * to issue a bind() here, taking into account that the error
	 * code is set to WSAEINVAL when the socket is already bound. */
	memset(&ss, 0, sizeof(ss));
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
	} else if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
	} else {
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	event_base_add_virtual_(bev->ev_base);
	bufferevent_incref_(bev);
	rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
			    &bev_async->connect_overlapped.overlapped);
	if (rc || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	event_base_del_virtual_(bev->ev_base);
	bufferevent_decref_(bev);

	return -1;
}

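/* Miscellaneous control operations: BEV_CTRL_GET_FD and BEV_CTRL_SET_FD
 * retrieve or replace the underlying socket (a new socket is associated with
 * the IOCP port first); BEV_CTRL_CANCEL_ALL closes the socket when
 * BEV_OPT_CLOSE_ON_FREE is set and marks the bufferevent unusable. */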
static int
be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    union bufferevent_ctrl_data *data)
{
	switch (op) {
	case BEV_CTRL_GET_FD:
		data->fd = evbuffer_overlapped_get_fd_(bev->input);
		return 0;
	case BEV_CTRL_SET_FD: {
		struct event_iocp_port *iocp;

		if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
			return 0;
		if (!(iocp = event_base_get_iocp_(bev->ev_base)))
			return -1;
		if (event_iocp_port_associate_(iocp, data->fd, 1) < 0)
			return -1;
		evbuffer_overlapped_set_fd_(bev->input, data->fd);
		evbuffer_overlapped_set_fd_(bev->output, data->fd);
		return 0;
	}
	case BEV_CTRL_CANCEL_ALL: {
		struct bufferevent_async *bev_a = upcast(bev);
		evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
		if (fd != (evutil_socket_t)INVALID_SOCKET &&
		    (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
			closesocket(fd);
			evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
		}
		bev_a->ok = 0;
		return 0;
	}
	case BEV_CTRL_GET_UNDERLYING:
	default:
		return -1;
	}
}