xref: /openbsd-src/usr.sbin/smtpd/ioev.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: ioev.c,v 1.42 2019/06/12 17:42:53 eric Exp $	*/
2 /*
3  * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/queue.h>
20 #include <sys/socket.h>
21 
22 #include <err.h>
23 #include <errno.h>
24 #include <event.h>
25 #include <fcntl.h>
26 #include <inttypes.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30 #include <unistd.h>
31 
32 #include "ioev.h"
33 #include "iobuf.h"
34 
35 #ifdef IO_TLS
36 #include <openssl/err.h>
37 #include <openssl/ssl.h>
38 #endif
39 
40 enum {
41 	IO_STATE_NONE,
42 	IO_STATE_CONNECT,
43 	IO_STATE_CONNECT_TLS,
44 	IO_STATE_ACCEPT_TLS,
45 	IO_STATE_UP,
46 
47 	IO_STATE_MAX,
48 };
49 
50 #define IO_PAUSE_IN 		IO_IN
51 #define IO_PAUSE_OUT		IO_OUT
52 #define IO_READ			0x04
53 #define IO_WRITE		0x08
54 #define IO_RW			(IO_READ | IO_WRITE)
55 #define IO_RESET		0x10  /* internal */
56 #define IO_HELD			0x20  /* internal */
57 
58 struct io {
59 	int		 sock;
60 	void		*arg;
61 	void		(*cb)(struct io*, int, void *);
62 	struct iobuf	 iobuf;
63 	size_t		 lowat;
64 	int		 timeout;
65 	int		 flags;
66 	int		 state;
67 	struct event	 ev;
68 	void		*tls;
69 	const char	*error; /* only valid immediately on callback */
70 };
71 
72 const char* io_strflags(int);
73 const char* io_evstr(short);
74 
75 void	_io_init(void);
76 void	io_hold(struct io *);
77 void	io_release(struct io *);
78 void	io_callback(struct io*, int);
79 void	io_dispatch(int, short, void *);
80 void	io_dispatch_connect(int, short, void *);
81 size_t	io_pending(struct io *);
82 size_t	io_queued(struct io*);
83 void	io_reset(struct io *, short, void (*)(int, short, void*));
84 void	io_frame_enter(const char *, struct io *, int);
85 void	io_frame_leave(struct io *);
86 
87 #ifdef IO_TLS
88 void	ssl_error(const char *); /* XXX external */
89 
90 static const char* io_tls_error(void);
91 void	io_dispatch_accept_tls(int, short, void *);
92 void	io_dispatch_connect_tls(int, short, void *);
93 void	io_dispatch_read_tls(int, short, void *);
94 void	io_dispatch_write_tls(int, short, void *);
95 void	io_reload_tls(struct io *io);
96 #endif
97 
98 static struct io	*current = NULL;
99 static uint64_t		 frame = 0;
100 static int		_io_debug = 0;
101 
102 #define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
103 
104 
/*
 * Format a one-line human-readable description of the io: fd, timeout,
 * flag string, TLS session parameters (when active) and buffered
 * input/output sizes.  Returns a static buffer overwritten on each
 * call; not reentrant.
 */
const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_TLS
	if (io->tls) {
		(void)snprintf(ssl, sizeof ssl, " tls=%s:%s:%d",
		    SSL_get_version(io->tls),
		    SSL_get_cipher_name(io->tls),
		    SSL_get_cipher_bits(io->tls, NULL));
	}
#endif

	(void)snprintf(buf, sizeof buf,
	    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
	    io_pending(io), io_queued(io));

	return (buf);
}
128 
129 #define CASE(x) case x : return #x
130 
131 const char*
132 io_strevent(int evt)
133 {
134 	static char buf[32];
135 
136 	switch (evt) {
137 	CASE(IO_CONNECTED);
138 	CASE(IO_TLSREADY);
139 	CASE(IO_DATAIN);
140 	CASE(IO_LOWAT);
141 	CASE(IO_DISCONNECTED);
142 	CASE(IO_TIMEOUT);
143 	CASE(IO_ERROR);
144 	default:
145 		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
146 		return buf;
147 	}
148 }
149 
/*
 * Put the descriptor in non-blocking mode, preserving the other file
 * status flags.  Fatal on fcntl() failure.
 */
void
io_set_nonblocking(int fd)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}
163 
/*
 * Disable SO_LINGER on the socket so close() does not block waiting
 * for unsent data.  Fatal on setsockopt() failure.
 */
void
io_set_nolinger(int fd)
{
	struct linger    l;

	memset(&l, 0, sizeof(l));
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
}
173 
174 /*
175  * Event framing must not rely on an io pointer to refer to the "same" io
176  * throughout the frame, because this is not always the case:
177  *
178  * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
179  * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
180  *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_free, so we can adapt the frame state there.
184  */
/*
 * Open an event frame for the given io: record it as the current io
 * and hold it so that io_reload() is deferred until io_frame_leave().
 * Frames must not nest.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}
199 
/*
 * Close the current event frame.  If the io survived its callbacks
 * (it was not freed, which clears "current"), release the hold so the
 * events get reloaded for the next frame.
 */
void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization there:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most case, the write call could be done
	 * immediately as part of the last read frame, thus avoiding to go
	 * through the event loop machinery. So, as an optimisation, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}
231 
232 void
233 _io_init()
234 {
235 	static int init = 0;
236 
237 	if (init)
238 		return;
239 
240 	init = 1;
241 	_io_debug = getenv("IO_DEBUG") != NULL;
242 }
243 
/*
 * Allocate and initialize a new io structure with no socket attached
 * (sock = -1) and no timeout (-1 means none).  Returns NULL on
 * allocation failure.  The caller owns the result and must dispose of
 * it with io_free().
 */
struct io *
io_new(void)
{
	struct io *io;

	_io_init();

	if ((io = calloc(1, sizeof(*io))) == NULL)
		return NULL;

	io->sock = -1;
	io->timeout = -1;

	if (iobuf_init(&io->iobuf, 0, 0) == -1) {
		free(io);
		return NULL;
	}

	return io;
}
264 
265 void
266 io_free(struct io *io)
267 {
268 	io_debug("io_clear(%p)\n", io);
269 
270 	/* the current io is virtually dead */
271 	if (io == current)
272 		current = NULL;
273 
274 #ifdef IO_TLS
275 	SSL_free(io->tls);
276 	io->tls = NULL;
277 #endif
278 
279 	if (event_initialized(&io->ev))
280 		event_del(&io->ev);
281 	if (io->sock != -1) {
282 		close(io->sock);
283 		io->sock = -1;
284 	}
285 
286 	iobuf_clear(&io->iobuf);
287 	free(io);
288 }
289 
290 void
291 io_hold(struct io *io)
292 {
293 	io_debug("io_enter(%p)\n", io);
294 
295 	if (io->flags & IO_HELD)
296 		errx(1, "io_hold: io is already held");
297 
298 	io->flags &= ~IO_RESET;
299 	io->flags |= IO_HELD;
300 }
301 
/*
 * Release a held io.  Unless the events were already reset during the
 * frame (IO_RESET set), reload them now.
 */
void
io_release(struct io *io)
{
	if (!(io->flags & IO_HELD))
		errx(1, "io_release: io is not held");

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}
312 
/*
 * Attach (or detach, with -1) a socket to the io and rearm the events
 * if a valid descriptor was given.
 */
void
io_set_fd(struct io *io, int fd)
{
	io->sock = fd;
	if (fd != -1)
		io_reload(io);
}
320 
321 void
322 io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
323 {
324 	io->cb = cb;
325 	io->arg = arg;
326 }
327 
/*
 * Set the inactivity timeout in milliseconds; a negative value
 * disables the timeout.  Takes effect on the next event reset.
 */
void
io_set_timeout(struct io *io, int msec)
{
	io_debug("io_set_timeout(%p, %d)\n", io, msec);

	io->timeout = msec;
}
335 
/*
 * Set the output low-water mark: IO_LOWAT fires when the queued output
 * drops from above this size to at or below it.
 */
void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);

	io->lowat = lowat;
}
343 
/*
 * Pause input and/or output (IO_PAUSE_IN/IO_PAUSE_OUT bits of dir)
 * and rearm the events accordingly.
 */
void
io_pause(struct io *io, int dir)
{
	io_debug("io_pause(%p, %x)\n", io, dir);

	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
	io_reload(io);
}
352 
/*
 * Resume previously paused input and/or output and rearm the events.
 */
void
io_resume(struct io *io, int dir)
{
	io_debug("io_resume(%p, %x)\n", io, dir);

	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
	io_reload(io);
}
361 
/*
 * Switch the io to read mode.  Only valid from unset or write mode;
 * full-duplex is not supported.
 */
void
io_set_read(struct io *io)
{
	int	mode;

	io_debug("io_set_read(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_WRITE))
		errx(1, "io_set_read(): full-duplex or reading");

	io->flags &= ~IO_RW;
	io->flags |= IO_READ;
	io_reload(io);
}
377 
/*
 * Switch the io to write mode.  Only valid from unset or read mode;
 * full-duplex is not supported.
 */
void
io_set_write(struct io *io)
{
	int	mode;

	io_debug("io_set_write(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_READ))
		errx(1, "io_set_write(): full-duplex or writing");

	io->flags &= ~IO_RW;
	io->flags |= IO_WRITE;
	io_reload(io);
}
393 
/*
 * Return the last error string.  Only meaningful inside the callback
 * that reported the error (see struct io).
 */
const char *
io_error(struct io *io)
{
	return io->error;
}
399 
/* Return the TLS context attached to this io, or NULL. */
void *
io_tls(struct io *io)
{
	return io->tls;
}
405 
/* Return the underlying socket descriptor (-1 if none attached). */
int
io_fileno(struct io *io)
{
	return io->sock;
}
411 
/*
 * Test the pause state.  Note the exact-match semantics: this returns
 * true only when the set of paused directions equals "what", not when
 * it merely includes it.
 */
int
io_paused(struct io *io, int what)
{
	return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
}
417 
418 /*
419  * Buffered output functions
420  */
421 
/*
 * Queue len bytes from buf on the output buffer and rearm the events.
 * Returns the iobuf_queue() result (negative on failure).
 */
int
io_write(struct io *io, const void *buf, size_t len)
{
	int r;

	r = iobuf_queue(&io->iobuf, buf, len);

	io_reload(io);

	return r;
}
433 
/*
 * Queue an iovec array on the output buffer and rearm the events.
 * Returns the iobuf_queuev() result (negative on failure).
 */
int
io_writev(struct io *io, const struct iovec *iov, int iovcount)
{
	int r;

	r = iobuf_queuev(&io->iobuf, iov, iovcount);

	io_reload(io);

	return r;
}
445 
/* Queue a NUL-terminated string (terminator excluded) for output. */
int
io_print(struct io *io, const char *s)
{
	size_t	len;

	len = strlen(s);
	return io_write(io, s, len);
}
451 
/*
 * printf-style convenience wrapper around io_vprintf().
 * Returns the number of bytes queued, or -1 on failure.
 */
int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = io_vprintf(io, fmt, ap);
	va_end(ap);

	return r;
}
464 
/*
 * Format into a temporary heap buffer (vasprintf) and queue the result
 * for output.  Returns the number of bytes queued, or -1 on formatting
 * or queueing failure.
 */
int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{

	char *buf;
	int len;

	len = vasprintf(&buf, fmt, ap);
	if (len == -1)
		return -1;
	len = io_write(io, buf, len);
	free(buf);

	return len;
}
480 
/* Return the number of output bytes queued and not yet written. */
size_t
io_queued(struct io *io)
{
	return iobuf_queued(&io->iobuf);
}
486 
487 /*
488  * Buffered input functions
489  */
490 
/* Return a pointer to the buffered input data. */
void *
io_data(struct io *io)
{
	return iobuf_data(&io->iobuf);
}
496 
/* Return the number of buffered input bytes available. */
size_t
io_datalen(struct io *io)
{
	return iobuf_len(&io->iobuf);
}
502 
/*
 * Extract the next line from the input buffer; *sz receives its
 * length.  Returns NULL when no complete line is buffered.
 */
char *
io_getline(struct io *io, size_t *sz)
{
	return iobuf_getline(&io->iobuf, sz);
}
508 
509 void
510 io_drop(struct io *io, size_t sz)
511 {
512 	return iobuf_drop(&io->iobuf, sz);
513 }
514 
515 
516 #define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
517 #define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
518 
519 /*
520  * Setup the necessary events as required by the current io state,
521  * honouring duplex mode and i/o pauses.
522  */
/*
 * Setup the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.  Deferred while the io is
 * held by an active event frame; TLS sessions are dispatched to
 * io_reload_tls() instead.
 */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

	iobuf_normalize(&io->iobuf);

#ifdef IO_TLS
	if (io->tls) {
		io_reload_tls(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	/* only poll for write when there is actually data to flush */
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}
551 
552 /* Set the requested event. */
553 void
554 io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
555 {
556 	struct timeval	tv, *ptv;
557 
558 	io_debug("io_reset(%p, %s, %p) -> %s\n",
559 	    io, io_evstr(events), dispatch, io_strio(io));
560 
561 	/*
562 	 * Indicate that the event has already been reset so that reload
563 	 * is not called on frame_leave.
564 	 */
565 	io->flags |= IO_RESET;
566 
567 	if (event_initialized(&io->ev))
568 		event_del(&io->ev);
569 
570 	/*
571 	 * The io is paused by the user, so we don't want the timeout to be
572 	 * effective.
573 	 */
574 	if (events == 0)
575 		return;
576 
577 	event_set(&io->ev, io->sock, events, dispatch, io);
578 	if (io->timeout >= 0) {
579 		tv.tv_sec = io->timeout / 1000;
580 		tv.tv_usec = (io->timeout % 1000) * 1000;
581 		ptv = &tv;
582 	} else
583 		ptv = NULL;
584 
585 	event_add(&io->ev, ptv);
586 }
587 
/* Return the number of input bytes pending in the buffer. */
size_t
io_pending(struct io *io)
{
	return iobuf_len(&io->iobuf);
}
593 
/*
 * Format the io flags (rw mode and pause bits) into a short string.
 * Returns a static buffer, overwritten on each call.
 */
const char*
io_strflags(int flags)
{
	static char	buf[64];

	buf[0] = '\0';

	switch (flags & IO_RW) {
	case 0:
		(void)strlcat(buf, "rw", sizeof buf);
		break;
	case IO_READ:
		(void)strlcat(buf, "R", sizeof buf);
		break;
	case IO_WRITE:
		(void)strlcat(buf, "W", sizeof buf);
		break;
	case IO_RW:
		(void)strlcat(buf, "RW", sizeof buf);
		break;
	}

	if (flags & IO_PAUSE_IN)
		(void)strlcat(buf, ",F_PI", sizeof buf);
	if (flags & IO_PAUSE_OUT)
		(void)strlcat(buf, ",F_PO", sizeof buf);

	return buf;
}
623 
/*
 * Format a libevent event mask as a "|"-separated list of EV_* names.
 * Unrecognized bits are shown in hex.  Returns a static buffer,
 * overwritten on each call.
 */
const char*
io_evstr(short ev)
{
	static char	buf[64];
	char		buf2[16];
	int		n;

	n = 0;
	buf[0] = '\0';

	if (ev == 0) {
		(void)strlcat(buf, "<NONE>", sizeof(buf));
		return buf;
	}

	if (ev & EV_TIMEOUT) {
		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
		ev &= ~EV_TIMEOUT;
		n++;
	}

	if (ev & EV_READ) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_READ", sizeof(buf));
		ev &= ~EV_READ;
		n++;
	}

	if (ev & EV_WRITE) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
		ev &= ~EV_WRITE;
		n++;
	}

	if (ev & EV_SIGNAL) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
		ev &= ~EV_SIGNAL;
		n++;
	}

	/* leftover, unknown bits */
	if (ev) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
		(void)strlcat(buf, buf2, sizeof(buf));
	}

	return buf;
}
679 
/*
 * Plain-socket event handler: flush queued output on EV_WRITE and pull
 * new input on EV_READ, translating iobuf results into user callbacks
 * (IO_TIMEOUT, IO_LOWAT, IO_DATAIN, IO_DISCONNECTED, IO_ERROR).
 */
void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				/* preserve errno across strerror() */
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		/* fire IO_LOWAT when the queue crosses the low-water mark */
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}
734 
735 void
736 io_callback(struct io *io, int evt)
737 {
738 	io->cb(io, evt, io->arg);
739 }
740 
/*
 * Start a non-blocking TCP connection to sa, optionally binding the
 * local side to bsa first.  On success, attaches the socket to the io,
 * arms a write event for connection completion (io_dispatch_connect)
 * and returns the descriptor.  Returns -1 on failure.
 * NOTE(review): if socket() itself fails, io->error is left untouched
 * (possibly stale) — callers should rely on errno in that case.
 */
int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int	sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_nonblocking(sock);
	io_set_nolinger(sock);

	if (bsa && bind(sock, bsa, bsa->sa_len) == -1)
		goto fail;

	/* EINPROGRESS is the normal non-blocking connect outcome */
	if (connect(sock, sa, sa->sa_len) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}
773 
/*
 * Completion handler for a pending non-blocking connect.  Reads the
 * socket error status via SO_ERROR and reports IO_CONNECTED,
 * IO_TIMEOUT or IO_ERROR to the user.
 */
void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1)  {
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			/* connection failed: dispose of the socket */
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}
808 
809 #ifdef IO_TLS
810 
/*
 * Return a printable description of the last OpenSSL error, or a
 * placeholder when none is queued.  Uses a static buffer, overwritten
 * on each call.
 */
static const char*
io_tls_error(void)
{
	static char	buf[128];
	unsigned long	e;

	e = ERR_peek_last_error();
	if (e) {
		ERR_error_string(e, buf);
		return (buf);
	}

	return ("No TLS error");
}
825 
/*
 * Begin a TLS handshake on the io using the given SSL context.  The io
 * must be in plain read mode (server side, SSL_accept) or write mode
 * (client side, SSL_connect); full-duplex or unset mode is fatal.
 * Returns 0 on success, -1 if the context cannot be bound to the fd.
 */
int
io_start_tls(struct io *io, void *tls)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->tls)
		errx(1, "io_start_tls(): TLS already started");
	io->tls = tls;

	if (SSL_set_fd(io->tls, io->sock) == 0) {
		ssl_error("io_start_tls:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		/* client side: we initiate the handshake */
		io->state = IO_STATE_CONNECT_TLS;
		SSL_set_connect_state(io->tls);
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
	} else {
		/* server side: wait for the peer's ClientHello */
		io->state = IO_STATE_ACCEPT_TLS;
		SSL_set_accept_state(io->tls);
		io_reset(io, EV_READ, io_dispatch_accept_tls);
	}

	return (0);
}
856 
/*
 * Server-side TLS handshake step.  Drives SSL_accept(), re-arming for
 * read or write as the handshake requires, and reports IO_TLSREADY on
 * completion or IO_ERROR on failure.
 */
void
io_dispatch_accept_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_accept_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_tls);
		break;
	default:
		io->error = io_tls_error();
		ssl_error("io_dispatch_accept_tls:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
893 
894 void
895 io_dispatch_connect_tls(int fd, short event, void *humppa)
896 {
897 	struct io	*io = humppa;
898 	int		 e, ret;
899 
900 	io_frame_enter("io_dispatch_connect_tls", io, event);
901 
902 	if (event == EV_TIMEOUT) {
903 		io_callback(io, IO_TIMEOUT);
904 		goto leave;
905 	}
906 
907 	if ((ret = SSL_connect(io->tls)) > 0) {
908 		io->state = IO_STATE_UP;
909 		io_callback(io, IO_TLSREADY);
910 		goto leave;
911 	}
912 
913 	switch ((e = SSL_get_error(io->tls, ret))) {
914 	case SSL_ERROR_WANT_READ:
915 		io_reset(io, EV_READ, io_dispatch_connect_tls);
916 		break;
917 	case SSL_ERROR_WANT_WRITE:
918 		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
919 		break;
920 	default:
921 		io->error = io_tls_error();
922 		ssl_error("io_dispatch_connect_ssl:SSL_connect");
923 		io_callback(io, IO_TLSERROR);
924 		break;
925 	}
926 
927     leave:
928 	io_frame_leave(io);
929 }
930 
/*
 * TLS read path.  Pulls decrypted data into the input buffer,
 * re-arming for read or write as the TLS layer requires (renegotiation
 * may need a write).  Loops while SSL_pending() reports buffered
 * records, since those would not trigger another socket event.
 */
void
io_dispatch_read_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* preserve errno across strerror() */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_read_tls:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_tls(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		/* only continue if the callback left this io alive and reading */
		if (current == io && IO_READING(io) && SSL_pending(io->tls))
			goto again;
	}

    leave:
	io_frame_leave(io);
}
977 
/*
 * TLS write path.  Flushes queued output through the TLS layer,
 * re-arming for read or write as required (renegotiation may need a
 * read), and fires IO_LOWAT when the queue crosses the low-water mark.
 */
void
io_dispatch_write_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* preserve errno across strerror() */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_write_tls:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_tls(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}
1025 
/*
 * TLS counterpart of io_reload(): rearm the events according to the
 * TLS handshake state or, once up, the rw mode and pause flags.
 * A fully paused io is simply left unarmed.
 */
void
io_reload_tls(struct io *io)
{
	short	ev = 0;
	void	(*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_TLS:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_tls;
		break;
	case IO_STATE_ACCEPT_TLS:
		ev = EV_READ;
		dispatch = io_dispatch_accept_tls;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_tls;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_tls;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_tls(): bad state");
	}

	io_reset(io, ev, dispatch);
}
1061 
1062 #endif /* IO_TLS */
1063