Lines Matching full:io

54 struct io {
57 void (*cb)(struct io*, int, void *);
73 void io_hold(struct io *);
74 void io_release(struct io *);
75 void io_callback(struct io*, int);
78 size_t io_pending(struct io *);
79 size_t io_queued(struct io*);
80 void io_reset(struct io *, short, void (*)(int, short, void*));
81 void io_frame_enter(const char *, struct io *, int);
82 void io_frame_leave(struct io *);
90 void io_reload_tls(struct io *io);
93 static struct io *current = NULL;
101 io_strio(struct io *io)
108 if (io->tls) {
110 tls_conn_version(io->tls),
111 tls_conn_cipher(io->tls));
116 "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
117 io, io->sock, io->timeout, io_strflags(io->flags), ssl,
118 io_pending(io), io_queued(io));
169 * Event framing must not rely on an io pointer to refer to the "same" io
175 * In both cases, the problem is that the io is freed in the callback, so
180 io_frame_enter(const char *where, struct io *io, int ev)
184 frame, where, io_evstr(ev), io_strio(io));
189 current = io;
191 io_hold(io);
195 io_frame_leave(struct io *io)
199 if (current && current != io)
200 fatalx("io_frame_leave: io mismatch");
202 /* io has been cleared */
208 * the io is waiting to read a request, and when done, it queues
217 /* Reload the io if it has not been reset already. */
218 io_release(io);
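
The framing comment at line 169 above is about exactly this pair: a callback may free the io it was invoked on, so io_frame_enter()/io_hold() freeze event re-arming for the duration of the frame, and io_frame_leave()/io_release() reload the events only if the io still exists and was not reset in the meantime. The following is a minimal, self-contained sketch of that hold/release discipline; struct handle, the H_* flags and frame_enter()/frame_leave() are made-up stand-ins, not the io.c API.

#include <stdio.h>
#include <stdlib.h>

#define H_HELD	0x01	/* inside a frame: defer any event reload */
#define H_RESET	0x02	/* events were explicitly re-armed in the frame */

struct handle {
	int	 flags;
};

static struct handle	*current;	/* handle owning the running frame */

static void
frame_enter(struct handle *h)
{
	current = h;
	h->flags &= ~H_RESET;
	h->flags |= H_HELD;		/* io_hold(): freeze re-arming */
}

static void
frame_leave(struct handle *h)
{
	if (current == NULL)		/* the callback freed the handle */
		return;
	current = NULL;
	h->flags &= ~H_HELD;
	if (!(h->flags & H_RESET))	/* io_release(): reload if needed */
		printf("reload %p\n", (void *)h);
}

/* a callback that destroys its own handle, the case the comment warns about */
static void
destructive_cb(struct handle *h)
{
	if (h == current)
		current = NULL;		/* tell the frame its handle is gone */
	free(h);
}

int
main(void)
{
	struct handle	*h = calloc(1, sizeof(*h));

	if (h == NULL)
		return 1;
	frame_enter(h);
	destructive_cb(h);	/* frees h while the frame is still open */
	frame_leave(h);		/* safe: current is NULL, h is not touched */
	return 0;
}
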
238 struct io *
241 struct io *io;
245 if ((io = calloc(1, sizeof(*io))) == NULL)
248 io->sock = -1;
249 io->timeout = -1;
251 if (iobuf_init(&io->iobuf, 0, 0) == -1) {
252 free(io);
256 return io;
260 io_free(struct io *io)
262 io_debug("io_clear(%p)\n", io);
264 /* the current io is virtually dead */
265 if (io == current)
269 tls_free(io->tls);
270 io->tls = NULL;
273 if (event_initialized(&io->ev))
274 event_del(&io->ev);
275 if (io->sock != -1) {
276 close(io->sock);
277 io->sock = -1;
280 iobuf_clear(&io->iobuf);
281 free(io);
285 io_hold(struct io *io)
287 io_debug("io_enter(%p)\n", io);
289 if (io->flags & IO_HELD)
290 fatalx("io_hold: io is already held");
292 io->flags &= ~IO_RESET;
293 io->flags |= IO_HELD;
297 io_release(struct io *io)
299 if (!(io->flags & IO_HELD))
300 fatalx("io_release: io is not held");
302 io->flags &= ~IO_HELD;
303 if (!(io->flags & IO_RESET))
304 io_reload(io);
308 io_set_fd(struct io *io, int fd)
310 io->sock = fd;
312 io_reload(io);
316 io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
318 io->cb = cb;
319 io->arg = arg;
323 io_set_timeout(struct io *io, int msec)
325 io_debug("io_set_timeout(%p, %d)\n", io, msec);
327 io->timeout = msec;
331 io_set_lowat(struct io *io, size_t lowat)
333 io_debug("io_set_lowat(%p, %zu)\n", io, lowat);
335 io->lowat = lowat;
339 io_pause(struct io *io, int dir)
341 io_debug("io_pause(%p, %x)\n", io, dir);
343 io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
344 io_reload(io);
348 io_resume(struct io *io, int dir)
350 io_debug("io_resume(%p, %x)\n", io, dir);
352 io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
353 io_reload(io);
357 io_set_read(struct io *io)
361 io_debug("io_set_read(%p)\n", io);
363 mode = io->flags & IO_RW;
367 io->flags &= ~IO_RW;
368 io->flags |= IO_READ;
369 io_reload(io);
373 io_set_write(struct io *io)
377 io_debug("io_set_write(%p)\n", io);
379 mode = io->flags & IO_RW;
383 io->flags &= ~IO_RW;
384 io->flags |= IO_WRITE;
385 io_reload(io);
389 io_error(struct io *io)
391 return io->error;
395 io_tls(struct io *io)
397 return io->tls;
401 io_fileno(struct io *io)
403 return io->sock;
407 io_paused(struct io *io, int what)
409 return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
417 io_write(struct io *io, const void *buf, size_t len)
421 r = iobuf_queue(&io->iobuf, buf, len);
423 io_reload(io);
429 io_writev(struct io *io, const struct iovec *iov, int iovcount)
433 r = iobuf_queuev(&io->iobuf, iov, iovcount);
435 io_reload(io);
441 io_print(struct io *io, const char *s)
443 return io_write(io, s, strlen(s));
447 io_printf(struct io *io, const char *fmt, ...)
453 r = io_vprintf(io, fmt, ap);
460 io_vprintf(struct io *io, const char *fmt, va_list ap)
469 len = io_write(io, buf, len);
476 io_queued(struct io *io)
478 return iobuf_queued(&io->iobuf);
486 io_data(struct io *io)
488 return iobuf_data(&io->iobuf);
492 io_datalen(struct io *io)
494 return iobuf_len(&io->iobuf);
498 io_getline(struct io *io, size_t *sz)
500 return iobuf_getline(&io->iobuf, sz);
504 io_drop(struct io *io, size_t sz)
506 return iobuf_drop(&io->iobuf, sz);
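
The functions excerpted so far are the consumer-facing surface: allocate an io, attach a callback and a file descriptor, choose a direction, then react to the events reported through the callback and read or queue data with the buffer helpers. A sketch of that typical usage follows; it assumes the declarations from the corresponding header, and server_cb() plus the echo behaviour are purely illustrative.

static void
server_cb(struct io *io, int evt, void *arg)
{
	char	*line;
	size_t	 len;

	switch (evt) {
	case IO_DATAIN:
		/* consume complete lines from the input buffer */
		while ((line = io_getline(io, &len)) != NULL)
			io_printf(io, "echo: %s\r\n", line);
		break;
	case IO_LOWAT:
		/* output buffer drained below the watermark set with io_set_lowat() */
		break;
	case IO_TIMEOUT:
	case IO_DISCONNECTED:
	case IO_ERROR:
		io_free(io);
		break;
	}
}

static struct io *
session_new(int fd)
{
	struct io	*io;

	if ((io = io_new()) == NULL)
		return NULL;
	io_set_callback(io, server_cb, NULL);
	io_set_timeout(io, 30 * 1000);	/* io_set_timeout() takes milliseconds */
	io_set_fd(io, fd);		/* triggers the first io_reload() */
	io_set_read(io);
	return io;
}
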
510 #define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
511 #define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
514 * Set up the necessary events as required by the current io state,
518 io_reload(struct io *io)
522 /* io will be reloaded at release time */
523 if (io->flags & IO_HELD)
526 iobuf_normalize(&io->iobuf);
529 if (io->tls) {
530 io_reload_tls(io);
535 io_debug("io_reload(%p)\n", io);
538 if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
540 if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
543 io_reset(io, events, io_dispatch);
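
As the comment at line 514 says, io_reload() derives the libevent interest set from the current state: EV_READ while reading and not paused for input, EV_WRITE only while writing with data queued, and hands the mask to io_reset(). The fragment below sketches that recompute-and-rearm pattern against the libevent API used here (event_set()/event_add()/event_del()); struct conn, the F_* flags and conn_dispatch() are placeholder names, not io.c symbols.

#include <event.h>

#define F_READ		0x01
#define F_WRITE		0x02
#define F_PAUSE_IN	0x04
#define F_PAUSE_OUT	0x08

/* placeholder carrier for the pieces io_reload()/io_reset() operate on */
struct conn {
	struct event	 ev;
	int		 fd;
	int		 flags;
	size_t		 queued;	/* bytes waiting in the output buffer */
};

static void	conn_dispatch(int, short, void *);

static void
conn_reload(struct conn *c)
{
	short	 events = 0;

	if ((c->flags & F_READ) && !(c->flags & F_PAUSE_IN))
		events |= EV_READ;
	if ((c->flags & F_WRITE) && !(c->flags & F_PAUSE_OUT) && c->queued)
		events |= EV_WRITE;

	/* re-arm: drop the old registration, install the new interest set */
	if (event_initialized(&c->ev))	/* c comes from calloc(), so this is safe */
		event_del(&c->ev);
	event_set(&c->ev, c->fd, events, conn_dispatch, c);
	event_add(&c->ev, NULL);
}

static void
conn_dispatch(int fd, short ev, void *arg)
{
	/* read or write the socket, then call conn_reload() again */
}

io_reset() below is that same event_del()/event_set()/event_add() sequence plus the millisecond-to-timeval conversion for the optional timeout.
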
548 io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
553 io, io_evstr(events), dispatch, io_strio(io));
559 io->flags |= IO_RESET;
561 if (event_initialized(&io->ev))
562 event_del(&io->ev);
565 * The io is paused by the user, so we don't want the timeout to be
571 event_set(&io->ev, io->sock, events, dispatch, io);
572 if (io->timeout >= 0) {
573 tv.tv_sec = io->timeout / 1000;
574 tv.tv_usec = (io->timeout % 1000) * 1000;
579 event_add(&io->ev, ptv);
583 io_pending(struct io *io)
585 return iobuf_len(&io->iobuf);
677 struct io *io = humppa;
682 io_frame_enter("io_dispatch", io, ev);
685 io_callback(io, IO_TIMEOUT);
689 if (ev & EV_WRITE && (w = io_queued(io))) {
690 if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
694 io_callback(io, IO_DISCONNECTED);
697 io->error = strerror(errno);
699 io_callback(io, IO_ERROR);
703 if (w > io->lowat && w - n <= io->lowat)
704 io_callback(io, IO_LOWAT);
709 iobuf_normalize(&io->iobuf);
710 if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
712 io_callback(io, IO_DISCONNECTED);
715 io->error = strerror(errno);
717 io_callback(io, IO_ERROR);
722 io_callback(io, IO_DATAIN);
726 io_frame_leave(io);
730 io_callback(struct io *io, int evt)
732 io->cb(io, evt, io->arg);
736 io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
753 io->sock = sock;
754 io_reset(io, EV_WRITE, io_dispatch_connect);
763 io->error = strerror(errno);
771 struct io *io = humppa;
775 io_frame_enter("io_dispatch_connect", io, ev);
779 io->sock = -1;
780 io_callback(io, IO_TIMEOUT);
790 io->sock = -1;
791 io->error = strerror(e);
792 io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
795 io->state = IO_STATE_UP;
796 io_callback(io, IO_CONNECTED);
800 io_frame_leave(io);
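
io_connect() and io_dispatch_connect() above follow the standard non-blocking connect sequence: start the connect, schedule EV_WRITE, and read the deferred result once the socket becomes writable (the errno value e at line 791 is presumably obtained with getsockopt(SO_ERROR)). A generic, poll()-based sketch of that sequence, independent of the io layer:

#include <sys/types.h>
#include <sys/socket.h>

#include <errno.h>
#include <fcntl.h>
#include <poll.h>

/*
 * Non-blocking connect: start the connect, wait for writability, then
 * fetch the deferred result with SO_ERROR.  Returns 0 on success and -1
 * with errno set otherwise.  poll() stands in for the EV_WRITE event
 * that io_connect() schedules.
 */
static int
connect_nonblock(int sock, const struct sockaddr *sa, socklen_t salen,
    int timeout_msec)
{
	struct pollfd	 pfd;
	socklen_t	 elen;
	int		 fl, e;

	if ((fl = fcntl(sock, F_GETFL)) == -1 ||
	    fcntl(sock, F_SETFL, fl | O_NONBLOCK) == -1)
		return -1;

	if (connect(sock, sa, salen) == 0)
		return 0;		/* connected immediately */
	if (errno != EINPROGRESS)
		return -1;		/* hard failure */

	pfd.fd = sock;
	pfd.events = POLLOUT;		/* writable once the connect finished */
	switch (poll(&pfd, 1, timeout_msec)) {
	case -1:
		return -1;
	case 0:
		errno = ETIMEDOUT;	/* same condition IO_TIMEOUT reports */
		return -1;
	}

	e = 0;
	elen = sizeof(e);
	if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &e, &elen) == -1)
		return -1;
	if (e != 0) {
		errno = e;		/* ECONNREFUSED, ETIMEDOUT, ... */
		return -1;
	}
	return 0;
}
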
805 io_connect_tls(struct io *io, struct tls *tls, const char *hostname)
809 mode = io->flags & IO_RW;
813 if (io->tls)
816 if (tls_connect_socket(tls, io->sock, hostname) == -1) {
817 io->error = tls_error(tls);
821 io->tls = tls;
822 io->state = IO_STATE_CONNECT_TLS;
823 io_reset(io, EV_READ|EV_WRITE, io_dispatch_handshake_tls);
829 io_accept_tls(struct io *io, struct tls *tls)
833 mode = io->flags & IO_RW;
837 if (io->tls)
840 if (tls_accept_socket(tls, &io->tls, io->sock) == -1) {
841 io->error = tls_error(tls);
845 io->state = IO_STATE_ACCEPT_TLS;
846 io_reset(io, EV_READ|EV_WRITE, io_dispatch_handshake_tls);
854 struct io *io = humppa;
857 io_frame_enter("io_dispatch_handshake_tls", io, event);
860 io_callback(io, IO_TIMEOUT);
864 if ((ret = tls_handshake(io->tls)) == 0) {
865 io->state = IO_STATE_UP;
866 io_callback(io, IO_TLSREADY);
870 io_reset(io, EV_READ, io_dispatch_handshake_tls);
872 io_reset(io, EV_WRITE, io_dispatch_handshake_tls);
874 io->error = tls_error(io->tls);
875 io_callback(io, IO_ERROR);
879 io_frame_leave(io);
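
tls_handshake() returns TLS_WANT_POLLIN or TLS_WANT_POLLOUT when it needs the socket to become readable or writable, which is why io_dispatch_handshake_tls() above re-arms EV_READ or EV_WRITE and runs again. Sketched below is the same loop in a blocking, poll()-driven form against the public libtls API; the helper name is invented.

#include <poll.h>
#include <tls.h>

/*
 * Drive tls_handshake() to completion on a non-blocking socket by polling
 * for whichever direction libtls asks for.  Returns 0 on success and -1
 * on failure (tls_error(ctx) has the reason).
 */
static int
handshake_blocking(struct tls *ctx, int sock)
{
	struct pollfd	 pfd;

	pfd.fd = sock;
	for (;;) {
		switch (tls_handshake(ctx)) {
		case 0:
			return 0;		/* handshake complete */
		case TLS_WANT_POLLIN:
			pfd.events = POLLIN;	/* needs the socket readable */
			break;
		case TLS_WANT_POLLOUT:
			pfd.events = POLLOUT;	/* needs the socket writable */
			break;
		default:
			return -1;		/* fatal TLS error */
		}
		if (poll(&pfd, 1, -1) == -1)
			return -1;
	}
}

tls_read() and tls_write() use the same TLS_WANT_POLLIN/TLS_WANT_POLLOUT convention, which is what io_dispatch_read_tls() and io_dispatch_write_tls() below handle for the buffered case.
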
886 struct io *io = humppa;
889 io_frame_enter("io_dispatch_read_tls", io, event);
892 io_callback(io, IO_TIMEOUT);
897 iobuf_normalize(&io->iobuf);
898 switch ((n = iobuf_read_tls(&io->iobuf, io->tls))) {
900 io_reset(io, EV_READ, io_dispatch_read_tls);
903 io_reset(io, EV_WRITE, io_dispatch_read_tls);
906 io_callback(io, IO_DISCONNECTED);
909 io->error = tls_error(io->tls);
910 io_callback(io, IO_ERROR);
914 io_callback(io, IO_DATAIN);
915 if (current == io && IO_READING(io))
920 io_frame_leave(io);
926 struct io *io = humppa;
930 io_frame_enter("io_dispatch_write_tls", io, event);
933 io_callback(io, IO_TIMEOUT);
937 w = io_queued(io);
938 switch ((n = iobuf_write_tls(&io->iobuf, io->tls))) {
940 io_reset(io, EV_READ, io_dispatch_write_tls);
943 io_reset(io, EV_WRITE, io_dispatch_write_tls);
946 io_callback(io, IO_DISCONNECTED);
949 io->error = tls_error(io->tls);
950 io_callback(io, IO_ERROR);
954 w2 = io_queued(io);
955 if (w > io->lowat && w2 <= io->lowat)
956 io_callback(io, IO_LOWAT);
961 io_frame_leave(io);
965 io_reload_tls(struct io *io)
967 if (io->state != IO_STATE_UP)
970 if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
971 io_reset(io, EV_READ, io_dispatch_read_tls);
975 if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io)) {
976 io_reset(io, EV_WRITE, io_dispatch_write_tls);