/*	$OpenBSD: ioev.c,v 1.19 2014/07/08 07:59:31 sobrado Exp $	*/
/*
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#include "ioev.h"
#include "iobuf.h"

#ifdef IO_SSL
#include <openssl/err.h>
#include <openssl/ssl.h>
#endif

enum {
	IO_STATE_NONE,
	IO_STATE_CONNECT,
	IO_STATE_CONNECT_SSL,
	IO_STATE_ACCEPT_SSL,
	IO_STATE_UP,

	IO_STATE_MAX,
};

const char* io_strflags(int);
const char* io_evstr(short);

void	_io_init(void);
void	io_hold(struct io *);
void	io_release(struct io *);
void	io_callback(struct io*, int);
void	io_dispatch(int, short, void *);
void	io_dispatch_connect(int, short, void *);
size_t	io_pending(struct io *);
size_t	io_queued(struct io*);
void	io_reset(struct io *, short, void (*)(int, short, void*));
void	io_frame_enter(const char *, struct io *, int);
void	io_frame_leave(struct io *);

#ifdef IO_SSL
void	ssl_error(const char *); /* XXX external */

static const char* io_ssl_error(void);
void	io_dispatch_accept_ssl(int, short, void *);
void	io_dispatch_connect_ssl(int, short, void *);
void	io_dispatch_read_ssl(int, short, void *);
void	io_dispatch_write_ssl(int, short, void *);
void	io_reload_ssl(struct io *io);
#endif

static struct io	*current = NULL;
static uint64_t		 frame = 0;
static int		_io_debug = 0;

#define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
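
/*
 * Verbose tracing through io_debug() is enabled at run time by setting
 * the IO_DEBUG environment variable (see _io_init() below).
 */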


const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_SSL
	if (io->ssl) {
		(void)snprintf(ssl, sizeof ssl, " ssl=%s:%s:%d",
		    SSL_get_cipher_version(io->ssl),
		    SSL_get_cipher_name(io->ssl),
		    SSL_get_cipher_bits(io->ssl, NULL));
	}
#endif

	if (io->iobuf == NULL)
		(void)snprintf(buf, sizeof buf,
		    "<io:%p fd=%d to=%d fl=%s%s>",
		    io, io->sock, io->timeout, io_strflags(io->flags), ssl);
	else
		(void)snprintf(buf, sizeof buf,
		    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
		    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
		    io_pending(io), io_queued(io));

	return (buf);
}

#define CASE(x) case x : return #x

const char*
io_strevent(int evt)
{
	static char	buf[32];

	switch (evt) {
	CASE(IO_CONNECTED);
	CASE(IO_TLSREADY);
	CASE(IO_TLSVERIFIED);
	CASE(IO_DATAIN);
	CASE(IO_LOWAT);
	CASE(IO_DISCONNECTED);
	CASE(IO_TIMEOUT);
	CASE(IO_ERROR);
	default:
		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
		return buf;
	}
}

void
io_set_blocking(int fd, int blocking)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL, 0)) == -1)
		err(1, "io_set_blocking:fcntl(F_GETFL)");

	if (blocking)
		flags &= ~O_NONBLOCK;
	else
		flags |= O_NONBLOCK;

	if ((flags = fcntl(fd, F_SETFL, flags)) == -1)
		err(1, "io_set_blocking:fcntl(F_SETFL)");
}

void
io_set_linger(int fd, int linger)
{
	struct linger	l;

	memset(&l, 0, sizeof(l));
	l.l_onoff = linger ? 1 : 0;
	l.l_linger = linger;
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_linger:setsockopt()");
}

/*
 * Event framing must not rely on an io pointer to refer to the "same" io
 * throughout the frame, because this is not always the case:
 *
 *   1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
 *   2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
 *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_clear, so we can adapt the frame state there.
 */
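
/*
 * For example, a callback that disposes of the io it was invoked on
 * (hypothetical caller code; "struct session" and the io being embedded
 * in it are assumptions) must clear the io before freeing it:
 *
 *	void
 *	session_io_cb(struct io *io, int evt)
 *	{
 *		struct session	*s = io->arg;
 *
 *		if (evt == IO_DISCONNECTED || evt == IO_ERROR) {
 *			io_clear(io);	<- resets "current", closes the fd
 *			free(s);	<- safe: the frame no longer uses io
 *		}
 *	}
 */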
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}

void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization here:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame. In most cases, the write call could be done
	 * immediately as part of the last read frame, thus avoiding going
	 * through the event loop machinery. So, as an optimisation, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}

void
_io_init()
{
	static int init = 0;

	if (init)
		return;

	init = 1;
	_io_debug = getenv("IO_DEBUG") != NULL;
}

void
io_init(struct io *io, int sock, void *arg,
	void(*cb)(struct io*, int), struct iobuf *iobuf)
{
	_io_init();

	memset(io, 0, sizeof *io);

	io->sock = sock;
	io->timeout = -1;
	io->arg = arg;
	io->iobuf = iobuf;
	io->cb = cb;

	if (sock != -1)
		io_reload(io);
}

void
io_clear(struct io *io)
{
	io_debug("io_clear(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_SSL
	if (io->ssl) {
		SSL_free(io->ssl);
		io->ssl = NULL;
	}
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}
}

void
io_hold(struct io *io)
{
	io_debug("io_hold(%p)\n", io);

	if (io->flags & IO_HELD)
		errx(1, "io_hold: io is already held");

	io->flags &= ~IO_RESET;
	io->flags |= IO_HELD;
}

void
io_release(struct io *io)
{
	if (!(io->flags & IO_HELD))
		errx(1, "io_release: io is not held");

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}

void
io_set_timeout(struct io *io, int msec)
{
	io_debug("io_set_timeout(%p, %d)\n", io, msec);

	io->timeout = msec;
}

void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);

	io->lowat = lowat;
}

void
io_pause(struct io *io, int dir)
{
	io_debug("io_pause(%p, %x)\n", io, dir);

	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
	io_reload(io);
}

void
io_resume(struct io *io, int dir)
{
	io_debug("io_resume(%p, %x)\n", io, dir);

	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
	io_reload(io);
}

void
io_set_read(struct io *io)
{
	int	mode;

	io_debug("io_set_read(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_WRITE))
		errx(1, "io_set_read(): full-duplex or reading");

	io->flags &= ~IO_RW;
	io->flags |= IO_READ;
	io_reload(io);
}

void
io_set_write(struct io *io)
{
	int	mode;

	io_debug("io_set_write(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_READ))
		errx(1, "io_set_write(): full-duplex or writing");

	io->flags &= ~IO_RW;
	io->flags |= IO_WRITE;
	io_reload(io);
}

#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
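
/*
 * Sketch of half-duplex use from a callback (hypothetical caller code;
 * iobuf_queue() is assumed to come from iobuf.h): read a request, queue
 * the reply and flip to write mode, then flip back to read mode once the
 * output buffer drains below the low watermark.
 *
 *	case IO_DATAIN:
 *		... parse a complete request from io->iobuf ...
 *		iobuf_queue(io->iobuf, reply, replylen);
 *		io_set_write(io);
 *		break;
 *	case IO_LOWAT:
 *		io_set_read(io);
 *		break;
 */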

/*
 * Set up the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.
 */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

#ifdef IO_SSL
	if (io->ssl) {
		io_reload_ssl(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}

/* Set the requested event. */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval	tv, *ptv;

	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}

size_t
io_pending(struct io *io)
{
	return iobuf_len(io->iobuf);
}

size_t
io_queued(struct io *io)
{
	return iobuf_queued(io->iobuf);
}

const char*
io_strflags(int flags)
{
	static char	buf[64];

	buf[0] = '\0';

	switch (flags & IO_RW) {
	case 0:
		(void)strlcat(buf, "rw", sizeof buf);
		break;
	case IO_READ:
		(void)strlcat(buf, "R", sizeof buf);
		break;
	case IO_WRITE:
		(void)strlcat(buf, "W", sizeof buf);
		break;
	case IO_RW:
		(void)strlcat(buf, "RW", sizeof buf);
		break;
	}

	if (flags & IO_PAUSE_IN)
		(void)strlcat(buf, ",F_PI", sizeof buf);
	if (flags & IO_PAUSE_OUT)
		(void)strlcat(buf, ",F_PO", sizeof buf);

	return buf;
}

const char*
io_evstr(short ev)
{
	static char	buf[64];
	char		buf2[16];
	int		n;

	n = 0;
	buf[0] = '\0';

	if (ev == 0) {
		(void)strlcat(buf, "<NONE>", sizeof(buf));
		return buf;
	}

	if (ev & EV_TIMEOUT) {
		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
		ev &= ~EV_TIMEOUT;
		n++;
	}

	if (ev & EV_READ) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_READ", sizeof(buf));
		ev &= ~EV_READ;
		n++;
	}

	if (ev & EV_WRITE) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
		ev &= ~EV_WRITE;
		n++;
	}

	if (ev & EV_SIGNAL) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
		ev &= ~EV_SIGNAL;
		n++;
	}

	if (ev) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
		(void)strlcat(buf, buf2, sizeof(buf));
	}

	return buf;
}

void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		if ((n = iobuf_read(io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

    leave:
	io_frame_leave(io);
}

void
io_callback(struct io *io, int evt)
{
	io->cb(io, evt);
}

int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int	sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_blocking(sock, 0);
	io_set_linger(sock, 0);

	if (bsa && bind(sock, bsa, bsa->sa_len) == -1)
		goto fail;

	if (connect(sock, sa, sa->sa_len) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}

void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1) {
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ?
			    IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}

#ifdef IO_SSL

static const char*
io_ssl_error(void)
{
	static char	buf[128];
	unsigned long	e;

	e = ERR_peek_last_error();
	if (e) {
		ERR_error_string(e, buf);
		return (buf);
	}

	return ("No SSL error");
}
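
/*
 * Sketch of starting a client-side TLS handshake from a callback
 * (hypothetical caller code; "ctx" is assumed to be an SSL_CTX prepared
 * elsewhere).  The io must be in simplex write mode, which selects the
 * SSL connect path in io_start_tls() below; simplex read mode selects
 * the accept (server) path instead.
 *
 *	case IO_CONNECTED:
 *		io_set_write(io);
 *		if (io_start_tls(io, SSL_new(ctx)) == -1)
 *			... handle error ...
 *		break;
 *	case IO_TLSREADY:
 *		... handshake completed, start talking ...
 *		break;
 */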

int
io_start_tls(struct io *io, void *ssl)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->ssl)
		errx(1, "io_start_tls(): SSL already started");
	io->ssl = ssl;

	if (SSL_set_fd(io->ssl, io->sock) == 0) {
		ssl_error("io_start_ssl:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		io->state = IO_STATE_CONNECT_SSL;
		SSL_set_connect_state(io->ssl);
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
	} else {
		io->state = IO_STATE_ACCEPT_SSL;
		SSL_set_accept_state(io->ssl);
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
	}

	return (0);
}

void
io_dispatch_accept_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_accept_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_accept_ssl:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_connect_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_connect_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->ssl)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->ssl, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_ssl);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_ssl);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_connect_ssl:SSL_connect");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_read_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

    again:
	switch ((n = iobuf_read_ssl(io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_read_ssl:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_ssl(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		if (current == io && IO_READING(io) && SSL_pending(io->ssl))
			goto again;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_write_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_ssl(io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_write_ssl:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_ssl(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}

void
io_reload_ssl(struct io *io)
{
	short	ev = 0;
	void	(*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_SSL:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_ssl;
		break;
	case IO_STATE_ACCEPT_SSL:
		ev = EV_READ;
		dispatch = io_dispatch_accept_ssl;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_ssl;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_ssl;
		}
		if (! ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_ssl(): bad state");
	}

	io_reset(io, ev, dispatch);
}

#endif /* IO_SSL */