xref: /openbsd-src/usr.sbin/smtpd/ioev.c (revision 6a13ef69787db04ae501a22e92fa10865b44fd7c)
1 /*	$OpenBSD: ioev.c,v 1.40 2016/12/03 15:46:33 eric Exp $	*/
2 /*
3  * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/queue.h>
20 #include <sys/socket.h>
21 
22 #include <err.h>
23 #include <errno.h>
24 #include <event.h>
25 #include <fcntl.h>
26 #include <inttypes.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30 #include <unistd.h>
31 
32 #include "ioev.h"
33 #include "iobuf.h"
34 
35 #ifdef IO_SSL
36 #include <openssl/err.h>
37 #include <openssl/ssl.h>
38 #endif
39 
/* Connection lifecycle states for a struct io. */
enum {
	IO_STATE_NONE,		/* no connection activity yet */
	IO_STATE_CONNECT,	/* plain connect in progress */
	IO_STATE_CONNECT_SSL,	/* client-side SSL handshake in progress */
	IO_STATE_ACCEPT_SSL,	/* server-side SSL handshake in progress */
	IO_STATE_UP,		/* connection established and usable */

	IO_STATE_MAX,
};
49 
/* Flag bits stored in io->flags. */
#define IO_PAUSE_IN 		IO_IN	/* input paused via io_pause() */
#define IO_PAUSE_OUT		IO_OUT	/* output paused via io_pause() */
#define IO_READ			0x04	/* io is in read mode */
#define IO_WRITE		0x08	/* io is in write mode */
#define IO_RW			(IO_READ | IO_WRITE)
#define IO_RESET		0x10  /* internal */
#define IO_HELD			0x20  /* internal */
57 
struct io {
	int		 sock;		/* socket descriptor, -1 if unset */
	void		*arg;		/* opaque user argument for cb */
	void		(*cb)(struct io*, int, void *);	/* user event callback */
	struct iobuf	 iobuf;		/* buffered input and output */
	size_t		 lowat;		/* IO_LOWAT fires when queued output drops to this */
	int		 timeout;	/* event timeout in msec, -1 for none */
	int		 flags;		/* IO_* mode, pause and internal flags */
	int		 state;		/* IO_STATE_* connection state */
	struct event	 ev;		/* libevent event handle */
	void		*ssl;		/* SSL session, NULL for plain io */
	const char	*error; /* only valid immediately on callback */
};
71 
/* Debug helpers: printable representations of flags and events. */
const char* io_strflags(int);
const char* io_evstr(short);

/* Internal machinery; see the definitions below for details. */
void	_io_init(void);
void	io_hold(struct io *);
void	io_release(struct io *);
void	io_callback(struct io*, int);
void	io_dispatch(int, short, void *);
void	io_dispatch_connect(int, short, void *);
size_t	io_pending(struct io *);
size_t	io_queued(struct io*);
void	io_reset(struct io *, short, void (*)(int, short, void*));
void	io_frame_enter(const char *, struct io *, int);
void	io_frame_leave(struct io *);

#ifdef IO_SSL
void	ssl_error(const char *); /* XXX external */

static const char* io_ssl_error(void);
void	io_dispatch_accept_ssl(int, short, void *);
void	io_dispatch_connect_ssl(int, short, void *);
void	io_dispatch_read_ssl(int, short, void *);
void	io_dispatch_write_ssl(int, short, void *);
void	io_reload_ssl(struct io *io);
#endif

/* io currently being dispatched; NULL outside an event frame */
static struct io	*current = NULL;
/* monotonically increasing frame counter, for debug traces only */
static uint64_t		 frame = 0;
static int		_io_debug = 0;

/* Debug trace; enabled at runtime through the IO_DEBUG environment variable. */
#define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
103 
104 
/*
 * Return a human-readable description of the io (fd, timeout, flags,
 * buffered byte counts and, when present, SSL session parameters).
 * For debug logging only: the returned buffer is static and is
 * overwritten on each call.
 */
const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_SSL
	if (io->ssl) {
		(void)snprintf(ssl, sizeof ssl, " ssl=%s:%s:%d",
		    SSL_get_version(io->ssl),
		    SSL_get_cipher_name(io->ssl),
		    SSL_get_cipher_bits(io->ssl, NULL));
	}
#endif

	(void)snprintf(buf, sizeof buf,
	    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
	    io_pending(io), io_queued(io));

	return (buf);
}
128 
129 #define CASE(x) case x : return #x
130 
131 const char*
132 io_strevent(int evt)
133 {
134 	static char buf[32];
135 
136 	switch (evt) {
137 	CASE(IO_CONNECTED);
138 	CASE(IO_TLSREADY);
139 	CASE(IO_DATAIN);
140 	CASE(IO_LOWAT);
141 	CASE(IO_DISCONNECTED);
142 	CASE(IO_TIMEOUT);
143 	CASE(IO_ERROR);
144 	default:
145 		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
146 		return buf;
147 	}
148 }
149 
/*
 * Switch the descriptor to non-blocking mode.
 * Aborts the process if fcntl(2) fails, since the io layer cannot
 * operate correctly on a blocking descriptor.
 */
void
io_set_nonblocking(int fd)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	/* original messages said "io_set_blocking", a misleading name */
	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}
163 
/*
 * Disable SO_LINGER on the socket so that close(2) does not block and
 * pending data is discarded with a reset instead of lingering.
 * Aborts the process if setsockopt(2) fails.
 */
void
io_set_nolinger(int fd)
{
	struct linger    l;

	memset(&l, 0, sizeof(l));
	/* original message said "io_set_linger", a misleading name */
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
}
173 
174 /*
175  * Event framing must not rely on an io pointer to refer to the "same" io
176  * throughout the frame, because this is not always the case:
177  *
178  * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
179  * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
180  *
181  * In both cases, the problem is that the io is freed in the callback, so
182  * the pointer becomes invalid. If that happens, the user is required to
183  * call io_clear, so we can adapt the frame state there.
184  */
/*
 * Open an event frame around the dispatch of event "ev" on "io".
 * Frames may not nest.  The io is held so that io_reload() is deferred
 * to io_frame_leave(), once the user callback has run.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}
199 
/*
 * Close the current event frame: release the io (which triggers the
 * deferred io_reload() unless the event was already reset during the
 * frame) and advance the frame counter.  If the io was freed inside
 * the frame, io_free() has cleared "current" and nothing is released.
 */
void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization there:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most cases, the write call could be done
	 * immediately as part of the last read frame, thus avoiding to go
	 * through the event loop machinery. So, as an optimisation, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}
231 
232 void
233 _io_init()
234 {
235 	static int init = 0;
236 
237 	if (init)
238 		return;
239 
240 	init = 1;
241 	_io_debug = getenv("IO_DEBUG") != NULL;
242 }
243 
244 struct io *
245 io_new(void)
246 {
247 	struct io *io;
248 
249 	_io_init();
250 
251 	if ((io = calloc(1, sizeof(*io))) == NULL)
252 		return NULL;
253 
254 	io->sock = -1;
255 	io->timeout = -1;
256 
257 	if (iobuf_init(&io->iobuf, 0, 0) == -1) {
258 		free(io);
259 		return NULL;
260 	}
261 
262 	return io;
263 }
264 
/*
 * Destroy the io: tear down the SSL session, remove the pending event,
 * close the socket, then release the buffer and the structure itself.
 * Safe to call from within the io's own callback: clearing "current"
 * tells io_frame_leave() not to touch the freed pointer.
 */
void
io_free(struct io *io)
{
	io_debug("io_clear(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_SSL
	if (io->ssl) {
		SSL_free(io->ssl);
		io->ssl = NULL;
	}
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}

	iobuf_clear(&io->iobuf);
	free(io);
}
291 
292 void
293 io_hold(struct io *io)
294 {
295 	io_debug("io_enter(%p)\n", io);
296 
297 	if (io->flags & IO_HELD)
298 		errx(1, "io_hold: io is already held");
299 
300 	io->flags &= ~IO_RESET;
301 	io->flags |= IO_HELD;
302 }
303 
304 void
305 io_release(struct io *io)
306 {
307 	if (!(io->flags & IO_HELD))
308 		errx(1, "io_release: io is not held");
309 
310 	io->flags &= ~IO_HELD;
311 	if (!(io->flags & IO_RESET))
312 		io_reload(io);
313 }
314 
315 void
316 io_set_fd(struct io *io, int fd)
317 {
318 	io->sock = fd;
319 	if (fd != -1)
320 		io_reload(io);
321 }
322 
323 void
324 io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
325 {
326 	io->cb = cb;
327 	io->arg = arg;
328 }
329 
330 void
331 io_set_timeout(struct io *io, int msec)
332 {
333 	io_debug("io_set_timeout(%p, %d)\n", io, msec);
334 
335 	io->timeout = msec;
336 }
337 
338 void
339 io_set_lowat(struct io *io, size_t lowat)
340 {
341 	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);
342 
343 	io->lowat = lowat;
344 }
345 
346 void
347 io_pause(struct io *io, int dir)
348 {
349 	io_debug("io_pause(%p, %x)\n", io, dir);
350 
351 	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
352 	io_reload(io);
353 }
354 
355 void
356 io_resume(struct io *io, int dir)
357 {
358 	io_debug("io_resume(%p, %x)\n", io, dir);
359 
360 	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
361 	io_reload(io);
362 }
363 
364 void
365 io_set_read(struct io *io)
366 {
367 	int	mode;
368 
369 	io_debug("io_set_read(%p)\n", io);
370 
371 	mode = io->flags & IO_RW;
372 	if (!(mode == 0 || mode == IO_WRITE))
373 		errx(1, "io_set_read(): full-duplex or reading");
374 
375 	io->flags &= ~IO_RW;
376 	io->flags |= IO_READ;
377 	io_reload(io);
378 }
379 
380 void
381 io_set_write(struct io *io)
382 {
383 	int	mode;
384 
385 	io_debug("io_set_write(%p)\n", io);
386 
387 	mode = io->flags & IO_RW;
388 	if (!(mode == 0 || mode == IO_READ))
389 		errx(1, "io_set_write(): full-duplex or writing");
390 
391 	io->flags &= ~IO_RW;
392 	io->flags |= IO_WRITE;
393 	io_reload(io);
394 }
395 
396 const char *
397 io_error(struct io *io)
398 {
399 	return io->error;
400 }
401 
402 void *
403 io_ssl(struct io *io)
404 {
405 	return io->ssl;
406 }
407 
408 int
409 io_fileno(struct io *io)
410 {
411 	return io->sock;
412 }
413 
414 int
415 io_paused(struct io *io, int what)
416 {
417 	return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
418 }
419 
420 /*
421  * Buffered output functions
422  */
423 
424 int
425 io_write(struct io *io, const void *buf, size_t len)
426 {
427 	int r;
428 
429 	r = iobuf_queue(&io->iobuf, buf, len);
430 
431 	io_reload(io);
432 
433 	return r;
434 }
435 
436 int
437 io_writev(struct io *io, const struct iovec *iov, int iovcount)
438 {
439 	int r;
440 
441 	r = iobuf_queuev(&io->iobuf, iov, iovcount);
442 
443 	io_reload(io);
444 
445 	return r;
446 }
447 
/* Queue a NUL-terminated string (without the terminator) for output. */
int
io_print(struct io *io, const char *s)
{
	size_t	len;

	len = strlen(s);
	return (io_write(io, s, len));
}
453 
/* printf(3)-style formatted output, queued on the io. */
int
io_printf(struct io *io, const char *fmt, ...)
{
	int	ret;
	va_list	ap;

	va_start(ap, fmt);
	ret = io_vprintf(io, fmt, ap);
	va_end(ap);

	return (ret);
}
466 
/*
 * vprintf(3)-style formatted output: format into a temporary heap
 * buffer, queue it, then release it.  Returns -1 if formatting fails,
 * otherwise the result of io_write().
 */
int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{
	char	*str;
	int	 ret;

	if ((ret = vasprintf(&str, fmt, ap)) == -1)
		return (-1);

	ret = io_write(io, str, ret);
	free(str);

	return (ret);
}
482 
483 size_t
484 io_queued(struct io *io)
485 {
486 	return iobuf_queued(&io->iobuf);
487 }
488 
489 /*
490  * Buffered input functions
491  */
492 
493 void *
494 io_data(struct io *io)
495 {
496 	return iobuf_data(&io->iobuf);
497 }
498 
499 size_t
500 io_datalen(struct io *io)
501 {
502 	return iobuf_len(&io->iobuf);
503 }
504 
505 char *
506 io_getline(struct io *io, size_t *sz)
507 {
508 	return iobuf_getline(&io->iobuf, sz);
509 }
510 
511 void
512 io_drop(struct io *io, size_t sz)
513 {
514 	return iobuf_drop(&io->iobuf, sz);
515 }
516 
517 
/*
 * Direction tests on io->flags: an io with no explicit mode (0) or in
 * full-duplex mode (IO_RW) counts as both reading and writing.
 */
#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
520 
/*
 * Setup the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.
 */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

	iobuf_normalize(&io->iobuf);

#ifdef IO_SSL
	/* SSL io has its own state-dependent event selection */
	if (io->ssl) {
		io_reload_ssl(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	/* only poll for writability if there is output to flush */
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}
553 
/*
 * Set the requested event: replace any pending event with "events"
 * handled by "dispatch", arming the configured timeout when set.
 * Passing events == 0 simply cancels the pending event.
 */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval	tv, *ptv;

	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	/* io->timeout is in milliseconds; split into sec/usec for libevent */
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}
589 
590 size_t
591 io_pending(struct io *io)
592 {
593 	return iobuf_len(&io->iobuf);
594 }
595 
596 const char*
597 io_strflags(int flags)
598 {
599 	static char	buf[64];
600 
601 	buf[0] = '\0';
602 
603 	switch (flags & IO_RW) {
604 	case 0:
605 		(void)strlcat(buf, "rw", sizeof buf);
606 		break;
607 	case IO_READ:
608 		(void)strlcat(buf, "R", sizeof buf);
609 		break;
610 	case IO_WRITE:
611 		(void)strlcat(buf, "W", sizeof buf);
612 		break;
613 	case IO_RW:
614 		(void)strlcat(buf, "RW", sizeof buf);
615 		break;
616 	}
617 
618 	if (flags & IO_PAUSE_IN)
619 		(void)strlcat(buf, ",F_PI", sizeof buf);
620 	if (flags & IO_PAUSE_OUT)
621 		(void)strlcat(buf, ",F_PO", sizeof buf);
622 
623 	return buf;
624 }
625 
626 const char*
627 io_evstr(short ev)
628 {
629 	static char	buf[64];
630 	char		buf2[16];
631 	int		n;
632 
633 	n = 0;
634 	buf[0] = '\0';
635 
636 	if (ev == 0) {
637 		(void)strlcat(buf, "<NONE>", sizeof(buf));
638 		return buf;
639 	}
640 
641 	if (ev & EV_TIMEOUT) {
642 		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
643 		ev &= ~EV_TIMEOUT;
644 		n++;
645 	}
646 
647 	if (ev & EV_READ) {
648 		if (n)
649 			(void)strlcat(buf, "|", sizeof(buf));
650 		(void)strlcat(buf, "EV_READ", sizeof(buf));
651 		ev &= ~EV_READ;
652 		n++;
653 	}
654 
655 	if (ev & EV_WRITE) {
656 		if (n)
657 			(void)strlcat(buf, "|", sizeof(buf));
658 		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
659 		ev &= ~EV_WRITE;
660 		n++;
661 	}
662 
663 	if (ev & EV_SIGNAL) {
664 		if (n)
665 			(void)strlcat(buf, "|", sizeof(buf));
666 		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
667 		ev &= ~EV_SIGNAL;
668 		n++;
669 	}
670 
671 	if (ev) {
672 		if (n)
673 			(void)strlcat(buf, "|", sizeof(buf));
674 		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
675 		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
676 		(void)strlcat(buf, buf2, sizeof(buf));
677 	}
678 
679 	return buf;
680 }
681 
/*
 * Main libevent handler for a plain (non-SSL) io: flush queued output
 * on EV_WRITE, fill the input buffer on EV_READ, and translate the
 * results into user callbacks (IO_TIMEOUT, IO_LOWAT, IO_DATAIN,
 * IO_DISCONNECTED, IO_ERROR).
 */
void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				/* preserve errno across strerror(3) */
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		/* report when the output queue crosses down to lowat */
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				/* preserve errno across strerror(3) */
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}
736 
737 void
738 io_callback(struct io *io, int evt)
739 {
740 	io->cb(io, evt, io->arg);
741 }
742 
/*
 * Start a non-blocking connect(2) to "sa", optionally binding the
 * local end to "bsa" first.  On success, returns the new socket and
 * arms a write event so io_dispatch_connect() learns the outcome.
 * On failure, returns -1; io->error is set only if a socket had
 * already been created (socket(2) failure leaves it untouched).
 */
int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int	sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_nonblocking(sock);
	io_set_nolinger(sock);

	if (bsa && bind(sock, bsa, bsa->sa_len) == -1)
		goto fail;

	/* EINPROGRESS is the expected result of a non-blocking connect */
	if (connect(sock, sa, sa->sa_len) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		/* keep the original errno across close(2) */
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}
775 
/*
 * Handler for the write/timeout event armed by io_connect(): query
 * SO_ERROR to find out whether the pending connect(2) succeeded, then
 * report IO_CONNECTED, IO_TIMEOUT or IO_ERROR accordingly.  The
 * socket is closed on failure.
 */
void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1)  {
			/* fall back to errno if SO_ERROR is unavailable */
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}
810 
811 #ifdef IO_SSL
812 
/*
 * Return a static description of the most recent OpenSSL error on the
 * thread's error queue, or a placeholder string when the queue is
 * empty.  The buffer is overwritten on each call.
 */
static const char*
io_ssl_error(void)
{
	static char	buf[128];
	unsigned long	e;

	if ((e = ERR_peek_last_error()) == 0)
		return ("No SSL error");

	ERR_error_string(e, buf);
	return (buf);
}
827 
/*
 * Begin an SSL handshake over the io's socket using the caller-provided
 * SSL session.  The io must be in simplex mode: write mode starts a
 * client (connect) handshake, read mode a server (accept) handshake.
 * Returns 0 on success, -1 if the session cannot be bound to the fd.
 * Starting TLS twice, or from full-duplex/unset mode, is a fatal bug.
 */
int
io_start_tls(struct io *io, void *ssl)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->ssl)
		errx(1, "io_start_tls(): SSL already started");
	io->ssl = ssl;

	if (SSL_set_fd(io->ssl, io->sock) == 0) {
		ssl_error("io_start_ssl:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		/* client side: wait for writability to drive SSL_connect() */
		io->state = IO_STATE_CONNECT_SSL;
		SSL_set_connect_state(io->ssl);
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
	} else {
		/* server side: wait for readability to drive SSL_accept() */
		io->state = IO_STATE_ACCEPT_SSL;
		SSL_set_accept_state(io->ssl);
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
	}

	return (0);
}
858 
/*
 * Drive a server-side SSL handshake: retry SSL_accept(), rescheduling
 * on WANT_READ/WANT_WRITE, and report IO_TLSREADY once it completes or
 * IO_ERROR on any other failure.
 */
void
io_dispatch_accept_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_accept_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_accept_ssl:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
895 
/*
 * Drive a client-side SSL handshake: retry SSL_connect(), rescheduling
 * on WANT_READ/WANT_WRITE, and report IO_TLSREADY once it completes.
 * NOTE(review): failures here report IO_TLSERROR while the accept path
 * reports IO_ERROR — presumably intentional, but confirm with callers.
 */
void
io_dispatch_connect_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_connect_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_connect_ssl:SSL_connect");
		io_callback(io, IO_TLSERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
932 
/*
 * Read handler for an established SSL session.  After delivering data,
 * it loops while the SSL layer still holds buffered plaintext
 * (SSL_pending), since that data would not trigger another socket
 * event.  The loop stops if the callback freed the io (current != io)
 * or switched it out of read mode.
 */
void
io_dispatch_read_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_ssl(&io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_ssl);
		break;
	case IOBUF_WANT_WRITE:
		/* SSL renegotiation may need to write while reading */
		io_reset(io, EV_WRITE, io_dispatch_read_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* preserve errno across strerror(3) */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_read_ssl:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_ssl(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		if (current == io && IO_READING(io) && SSL_pending(io->ssl))
			goto again;
	}

    leave:
	io_frame_leave(io);
}
979 
/*
 * Write handler for an established SSL session: flush queued output
 * through the SSL layer, rescheduling on WANT_READ/WANT_WRITE, and
 * report IO_LOWAT when the output queue crosses down to the low-water
 * mark.
 */
void
io_dispatch_write_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	/* remember the queue size to detect the lowat crossing */
	w = io_queued(io);
	switch ((n = iobuf_write_ssl(&io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		/* SSL renegotiation may need to read while writing */
		io_reset(io, EV_READ, io_dispatch_write_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* preserve errno across strerror(3) */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_write_ssl:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_ssl(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}
1027 
/*
 * SSL counterpart of io_reload(): pick the event and dispatcher that
 * match the current handshake/connection state, honouring duplex mode
 * and i/o pauses once the session is up.  Any other state is a fatal
 * inconsistency.
 */
void
io_reload_ssl(struct io *io)
{
	short	ev = 0;
	void	(*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_SSL:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_ssl;
		break;
	case IO_STATE_ACCEPT_SSL:
		ev = EV_READ;
		dispatch = io_dispatch_accept_ssl;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_ssl;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_ssl;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_ssl(): bad state");
	}

	io_reset(io, ev, dispatch);
}
1063 
1064 #endif /* IO_SSL */
1065