xref: /openbsd-src/usr.sbin/smtpd/ioev.c (revision fb8aa7497fded39583f40e800732f9c046411717)
1 /*	$OpenBSD: ioev.c,v 1.26 2016/05/16 21:43:16 millert Exp $	*/
2 /*
3  * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/types.h>
19 #include <sys/queue.h>
20 #include <sys/socket.h>
21 
22 #include <err.h>
23 #include <errno.h>
24 #include <fcntl.h>
25 #include <inttypes.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 
31 #include "ioev.h"
32 #include "iobuf.h"
33 
34 #ifdef IO_SSL
35 #include <openssl/err.h>
36 #include <openssl/ssl.h>
37 #endif
38 
39 enum {
40 	IO_STATE_NONE,
41 	IO_STATE_CONNECT,
42 	IO_STATE_CONNECT_SSL,
43 	IO_STATE_ACCEPT_SSL,
44 	IO_STATE_UP,
45 
46 	IO_STATE_MAX,
47 };
48 
49 const char* io_strflags(int);
50 const char* io_evstr(short);
51 
52 void	_io_init(void);
53 void	io_hold(struct io *);
54 void	io_release(struct io *);
55 void	io_callback(struct io*, int);
56 void	io_dispatch(int, short, void *);
57 void	io_dispatch_connect(int, short, void *);
58 size_t	io_pending(struct io *);
59 size_t	io_queued(struct io*);
60 void	io_reset(struct io *, short, void (*)(int, short, void*));
61 void	io_frame_enter(const char *, struct io *, int);
62 void	io_frame_leave(struct io *);
63 
64 #ifdef IO_SSL
65 void	ssl_error(const char *); /* XXX external */
66 
67 static const char* io_ssl_error(void);
68 void	io_dispatch_accept_ssl(int, short, void *);
69 void	io_dispatch_connect_ssl(int, short, void *);
70 void	io_dispatch_read_ssl(int, short, void *);
71 void	io_dispatch_write_ssl(int, short, void *);
72 void	io_reload_ssl(struct io *io);
73 #endif
74 
75 static struct io	*current = NULL;
76 static uint64_t		 frame = 0;
77 static int		_io_debug = 0;
78 
79 #define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
80 
81 
/*
 * Render a one-line, human-readable description of the io for debug
 * traces.  Returns a pointer to a static buffer: the result is only
 * valid until the next call, and this is not reentrant.
 */
const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_SSL
	/* Append TLS protocol/cipher details when a TLS session is active. */
	if (io->ssl) {
		(void)snprintf(ssl, sizeof ssl, " ssl=%s:%s:%d",
		    SSL_get_version(io->ssl),
		    SSL_get_cipher_name(io->ssl),
		    SSL_get_cipher_bits(io->ssl, NULL));
	}
#endif

	/* Buffer counters are only meaningful when an iobuf is attached. */
	if (io->iobuf == NULL)
		(void)snprintf(buf, sizeof buf,
		    "<io:%p fd=%d to=%d fl=%s%s>",
		    io, io->sock, io->timeout, io_strflags(io->flags), ssl);
	else
		(void)snprintf(buf, sizeof buf,
		    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
		    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
		    io_pending(io), io_queued(io));

	return (buf);
}
110 
111 #define CASE(x) case x : return #x
112 
113 const char*
114 io_strevent(int evt)
115 {
116 	static char buf[32];
117 
118 	switch (evt) {
119 	CASE(IO_CONNECTED);
120 	CASE(IO_TLSREADY);
121 	CASE(IO_TLSVERIFIED);
122 	CASE(IO_DATAIN);
123 	CASE(IO_LOWAT);
124 	CASE(IO_DISCONNECTED);
125 	CASE(IO_TIMEOUT);
126 	CASE(IO_ERROR);
127 	default:
128 		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
129 		return buf;
130 	}
131 }
132 
/*
 * Set O_NONBLOCK on the given descriptor, preserving the other file
 * status flags.  Aborts the process if fcntl() fails.
 */
void
io_set_nonblocking(int fd)
{
	int	flags;

	/* error messages previously said "io_set_blocking" */
	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}
146 
/*
 * Disable SO_LINGER on the socket, so close() returns immediately
 * instead of blocking to flush unsent data.  Aborts on failure.
 */
void
io_set_nolinger(int fd)
{
	struct linger    l;

	memset(&l, 0, sizeof(l));
	/* error message previously said "io_set_linger" */
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
}
156 
157 /*
158  * Event framing must not rely on an io pointer to refer to the "same" io
159  * throughout the frame, because this is not always the case:
160  *
161  * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
162  * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
163  *
164  * In both case, the problem is that the io is freed in the callback, so
165  * the pointer becomes invalid. If that happens, the user is required to
166  * call io_clear, so we can adapt the frame state there.
167  */
/*
 * Open an event frame for the given io before user callbacks run.
 * Records the io as "current" so io_clear() can signal that the io was
 * freed inside the frame, and holds it so event re-arming is deferred
 * until io_frame_leave().  Frames must not nest.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}
182 
/*
 * Close the current event frame: unless the io was cleared from within
 * a callback (current == NULL), release the hold taken in
 * io_frame_enter(), which re-arms the events if needed.  Bumps the
 * frame counter used by the debug traces.
 */
void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization there:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most case, the write call could be done
	 * immediately as part of the last read frame, thus avoiding to go
	 * through the event loop machinery. So, as an optimisation, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}
214 
215 void
216 _io_init()
217 {
218 	static int init = 0;
219 
220 	if (init)
221 		return;
222 
223 	init = 1;
224 	_io_debug = getenv("IO_DEBUG") != NULL;
225 }
226 
227 void
228 io_init(struct io *io, int sock, void *arg,
229 	void(*cb)(struct io*, int), struct iobuf *iobuf)
230 {
231 	_io_init();
232 
233 	memset(io, 0, sizeof *io);
234 
235 	io->sock = sock;
236 	io->timeout = -1;
237 	io->arg = arg;
238 	io->iobuf = iobuf;
239 	io->cb = cb;
240 
241 	if (sock != -1)
242 		io_reload(io);
243 }
244 
/*
 * Tear down an io: drop the TLS session, delete the pending event and
 * close the socket.  The user must call this when freeing an io from
 * inside a callback, so the frame machinery can forget about it.
 */
void
io_clear(struct io *io)
{
	io_debug("io_clear(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_SSL
	if (io->ssl) {
		SSL_free(io->ssl);
		io->ssl = NULL;
	}
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}
}
268 
269 void
270 io_hold(struct io *io)
271 {
272 	io_debug("io_enter(%p)\n", io);
273 
274 	if (io->flags & IO_HELD)
275 		errx(1, "io_hold: io is already held");
276 
277 	io->flags &= ~IO_RESET;
278 	io->flags |= IO_HELD;
279 }
280 
281 void
282 io_release(struct io *io)
283 {
284 	if (!(io->flags & IO_HELD))
285 		errx(1, "io_release: io is not held");
286 
287 	io->flags &= ~IO_HELD;
288 	if (!(io->flags & IO_RESET))
289 		io_reload(io);
290 }
291 
292 void
293 io_set_timeout(struct io *io, int msec)
294 {
295 	io_debug("io_set_timeout(%p, %d)\n", io, msec);
296 
297 	io->timeout = msec;
298 }
299 
300 void
301 io_set_lowat(struct io *io, size_t lowat)
302 {
303 	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);
304 
305 	io->lowat = lowat;
306 }
307 
308 void
309 io_pause(struct io *io, int dir)
310 {
311 	io_debug("io_pause(%p, %x)\n", io, dir);
312 
313 	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
314 	io_reload(io);
315 }
316 
317 void
318 io_resume(struct io *io, int dir)
319 {
320 	io_debug("io_resume(%p, %x)\n", io, dir);
321 
322 	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
323 	io_reload(io);
324 }
325 
326 void
327 io_set_read(struct io *io)
328 {
329 	int	mode;
330 
331 	io_debug("io_set_read(%p)\n", io);
332 
333 	mode = io->flags & IO_RW;
334 	if (!(mode == 0 || mode == IO_WRITE))
335 		errx(1, "io_set_read(): full-duplex or reading");
336 
337 	io->flags &= ~IO_RW;
338 	io->flags |= IO_READ;
339 	io_reload(io);
340 }
341 
342 void
343 io_set_write(struct io *io)
344 {
345 	int	mode;
346 
347 	io_debug("io_set_write(%p)\n", io);
348 
349 	mode = io->flags & IO_RW;
350 	if (!(mode == 0 || mode == IO_READ))
351 		errx(1, "io_set_write(): full-duplex or writing");
352 
353 	io->flags &= ~IO_RW;
354 	io->flags |= IO_WRITE;
355 	io_reload(io);
356 }
357 
358 #define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
359 #define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
360 
361 /*
362  * Setup the necessary events as required by the current io state,
363  * honouring duplex mode and i/o pauses.
364  */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

#ifdef IO_SSL
	/* TLS ios have their own state-driven event selection. */
	if (io->ssl) {
		io_reload_ssl(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	/* only poll for write when there is actually queued output */
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}
391 
392 /* Set the requested event. */
/*
 * Replace the pending libevent registration for this io: delete the
 * current event, then register "dispatch" for "events" with the
 * configured timeout.  events == 0 only cancels the pending event
 * (and thus its timeout).
 */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval	tv, *ptv;

	/* NOTE(review): passing a function pointer for %p is technically
	 * non-portable, though fine on the supported platforms. */
	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	/* convert the millisecond timeout, if any, to a timeval */
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}
427 
428 size_t
429 io_pending(struct io *io)
430 {
431 	return iobuf_len(io->iobuf);
432 }
433 
434 size_t
435 io_queued(struct io *io)
436 {
437 	return iobuf_queued(io->iobuf);
438 }
439 
440 const char*
441 io_strflags(int flags)
442 {
443 	static char	buf[64];
444 
445 	buf[0] = '\0';
446 
447 	switch (flags & IO_RW) {
448 	case 0:
449 		(void)strlcat(buf, "rw", sizeof buf);
450 		break;
451 	case IO_READ:
452 		(void)strlcat(buf, "R", sizeof buf);
453 		break;
454 	case IO_WRITE:
455 		(void)strlcat(buf, "W", sizeof buf);
456 		break;
457 	case IO_RW:
458 		(void)strlcat(buf, "RW", sizeof buf);
459 		break;
460 	}
461 
462 	if (flags & IO_PAUSE_IN)
463 		(void)strlcat(buf, ",F_PI", sizeof buf);
464 	if (flags & IO_PAUSE_OUT)
465 		(void)strlcat(buf, ",F_PO", sizeof buf);
466 
467 	return buf;
468 }
469 
470 const char*
471 io_evstr(short ev)
472 {
473 	static char	buf[64];
474 	char		buf2[16];
475 	int		n;
476 
477 	n = 0;
478 	buf[0] = '\0';
479 
480 	if (ev == 0) {
481 		(void)strlcat(buf, "<NONE>", sizeof(buf));
482 		return buf;
483 	}
484 
485 	if (ev & EV_TIMEOUT) {
486 		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
487 		ev &= ~EV_TIMEOUT;
488 		n++;
489 	}
490 
491 	if (ev & EV_READ) {
492 		if (n)
493 			(void)strlcat(buf, "|", sizeof(buf));
494 		(void)strlcat(buf, "EV_READ", sizeof(buf));
495 		ev &= ~EV_READ;
496 		n++;
497 	}
498 
499 	if (ev & EV_WRITE) {
500 		if (n)
501 			(void)strlcat(buf, "|", sizeof(buf));
502 		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
503 		ev &= ~EV_WRITE;
504 		n++;
505 	}
506 
507 	if (ev & EV_SIGNAL) {
508 		if (n)
509 			(void)strlcat(buf, "|", sizeof(buf));
510 		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
511 		ev &= ~EV_SIGNAL;
512 		n++;
513 	}
514 
515 	if (ev) {
516 		if (n)
517 			(void)strlcat(buf, "|", sizeof(buf));
518 		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
519 		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
520 		(void)strlcat(buf, buf2, sizeof(buf));
521 	}
522 
523 	return buf;
524 }
525 
/*
 * Plaintext event handler: flush queued output and/or read available
 * input on the socket, translating the results into user callbacks
 * (IO_LOWAT, IO_DATAIN, IO_DISCONNECTED, IO_ERROR, IO_TIMEOUT).
 */
void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				/* save errno: strerror() may clobber it */
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		/* notify once the output queue drops below the low-water mark */
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		if ((n = iobuf_read(io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}
579 
580 void
581 io_callback(struct io *io, int evt)
582 {
583 	io->cb(io, evt);
584 }
585 
586 int
587 io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
588 {
589 	int	sock, errno_save;
590 
591 	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
592 		goto fail;
593 
594 	io_set_nonblocking(sock);
595 	io_set_nolinger(sock);
596 
597 	if (bsa && bind(sock, bsa, bsa->sa_len) == -1)
598 		goto fail;
599 
600 	if (connect(sock, sa, sa->sa_len) == -1)
601 		if (errno != EINPROGRESS)
602 			goto fail;
603 
604 	io->sock = sock;
605 	io_reset(io, EV_WRITE, io_dispatch_connect);
606 
607 	return (sock);
608 
609     fail:
610 	if (sock != -1) {
611 		errno_save = errno;
612 		close(sock);
613 		errno = errno_save;
614 		io->error = strerror(errno);
615 	}
616 	return (-1);
617 }
618 
/*
 * Completion handler for a pending non-blocking connect(): check the
 * socket-level error with SO_ERROR and report IO_CONNECTED, IO_TIMEOUT
 * or IO_ERROR accordingly.  On failure the socket is closed here.
 */
void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1)  {
			/* treat the getsockopt failure itself as the error */
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}
653 
654 #ifdef IO_SSL
655 
/*
 * Return a static string describing the most recent error on the
 * OpenSSL error stack, or a placeholder when the stack is empty.
 */
static const char*
io_ssl_error(void)
{
	static char	buf[128];
	unsigned long	e;

	if ((e = ERR_peek_last_error()) == 0)
		return ("No SSL error");

	ERR_error_string(e, buf);
	return (buf);
}
670 
/*
 * Begin TLS on the io's socket using the caller-provided SSL handle:
 * write mode starts a client handshake, read mode a server-side one.
 * The io must be half-duplex with a direction already set.  Returns 0
 * on success, -1 if the SSL object cannot be attached to the socket.
 * NOTE(review): on SSL_set_fd() failure io->ssl stays set; presumably
 * the caller clears/frees the io — confirm against callers.
 */
int
io_start_tls(struct io *io, void *ssl)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->ssl)
		errx(1, "io_start_tls(): SSL already started");
	io->ssl = ssl;

	if (SSL_set_fd(io->ssl, io->sock) == 0) {
		ssl_error("io_start_ssl:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		/* client side: we initiate the handshake */
		io->state = IO_STATE_CONNECT_SSL;
		SSL_set_connect_state(io->ssl);
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
	} else {
		/* server side: wait for the peer's ClientHello */
		io->state = IO_STATE_ACCEPT_SSL;
		SSL_set_accept_state(io->ssl);
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
	}

	return (0);
}
701 
/*
 * Drive a server-side TLS handshake.  Re-arms itself for EV_READ or
 * EV_WRITE while OpenSSL wants more handshake I/O; reports IO_TLSREADY
 * once the handshake completes, IO_ERROR on failure.
 */
void
io_dispatch_accept_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_accept_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	/* handshake not finished: keep pumping or fail */
	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_accept_ssl:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
738 
/*
 * Drive a client-side TLS handshake.  Re-arms itself for EV_READ or
 * EV_WRITE while OpenSSL wants more handshake I/O; reports IO_TLSREADY
 * once the handshake completes, IO_TLSERROR on failure.
 */
void
io_dispatch_connect_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_connect_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	/* handshake not finished: keep pumping or fail */
	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_connect_ssl:SSL_connect");
		io_callback(io, IO_TLSERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
775 
/*
 * Read handler for an established TLS session.  Feeds decrypted data
 * into the iobuf and fires IO_DATAIN; loops while OpenSSL still holds
 * buffered record data (SSL_pending), since no further EV_READ would
 * be triggered for it.  WANT_READ/WANT_WRITE re-arm the event as TLS
 * renegotiation may require writes during a read.
 */
void
io_dispatch_read_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	switch ((n = iobuf_read_ssl(io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* save errno: strerror() may clobber it */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_read_ssl:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_ssl(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		/* only loop if the callback kept this io alive and reading */
		if (current == io && IO_READING(io) && SSL_pending(io->ssl))
			goto again;
	}

    leave:
	io_frame_leave(io);
}
821 
/*
 * Write handler for an established TLS session.  Flushes queued output
 * through OpenSSL and fires IO_LOWAT when the queue drains below the
 * low-water mark.  WANT_READ/WANT_WRITE re-arm the event as TLS
 * renegotiation may require reads during a write.
 */
void
io_dispatch_write_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	/* remember the queue size to detect the low-water crossing */
	w = io_queued(io);
	switch ((n = iobuf_write_ssl(io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* save errno: strerror() may clobber it */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_write_ssl:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_ssl(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}
869 
870 void
871 io_reload_ssl(struct io *io)
872 {
873 	short	ev = 0;
874 	void	(*dispatch)(int, short, void*) = NULL;
875 
876 	switch (io->state) {
877 	case IO_STATE_CONNECT_SSL:
878 		ev = EV_WRITE;
879 		dispatch = io_dispatch_connect_ssl;
880 		break;
881 	case IO_STATE_ACCEPT_SSL:
882 		ev = EV_READ;
883 		dispatch = io_dispatch_accept_ssl;
884 		break;
885 	case IO_STATE_UP:
886 		ev = 0;
887 		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
888 			ev = EV_READ;
889 			dispatch = io_dispatch_read_ssl;
890 		}
891 		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
892 		    io_queued(io)) {
893 			ev = EV_WRITE;
894 			dispatch = io_dispatch_write_ssl;
895 		}
896 		if (!ev)
897 			return; /* paused */
898 		break;
899 	default:
900 		errx(1, "io_reload_ssl(): bad state");
901 	}
902 
903 	io_reset(io, ev, dispatch);
904 }
905 
906 #endif /* IO_SSL */
907