/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv/tree.h"
#include "internal.h"
#include "heap-inl.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>


int uv_loop_init(uv_loop_t* loop) {
  void* saved_data;
  int err;

  /* Preserve the caller's data pointer across the zeroing of the loop. */
  saved_data = loop->data;
  memset(loop, 0, sizeof(*loop));
  loop->data = saved_data;

  heap_init((struct heap*) &loop->timer_heap);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  loop->active_handles = 0;
  loop->active_reqs.count = 0;
  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  uv__update_time(loop);
  loop->async_io_watcher.fd = -1;
  loop->async_wfd = -1;
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  uv__signal_global_once_init();
  err = uv_signal_init(loop, &loop->child_watcher);
  if (err)
    goto fail_signal_init;

  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
  QUEUE_INIT(&loop->process_handles);

  err = uv_rwlock_init(&loop->cloexec_lock);
  if (err)
    goto fail_rwlock_init;

  err = uv_mutex_init(&loop->wq_mutex);
  if (err)
    goto fail_mutex_init;

  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
  if (err)
    goto fail_async_init;

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV_HANDLE_INTERNAL;

  return 0;

  /* Unwind partial initialization in reverse order. */
fail_async_init:
  uv_mutex_destroy(&loop->wq_mutex);

fail_mutex_init:
  uv_rwlock_destroy(&loop->cloexec_lock);

fail_rwlock_init:
  uv__signal_loop_cleanup(loop);

fail_signal_init:
  uv__platform_loop_delete(loop);

  uv__free(loop->watchers);
  loop->nwatchers = 0;
  return err;
}


int uv_loop_fork(uv_loop_t* loop) {
  int err;
  unsigned int i;
  uv__io_t* w;

  err = uv__io_fork(loop);
  if (err)
    return err;

  err = uv__async_fork(loop);
  if (err)
    return err;
  err = uv__signal_loop_fork(loop);
  if (err)
    return err;

  /* Rearm all the watchers that aren't re-queued by the above. */
  for (i = 0; i < loop->nwatchers; i++) {
    w = loop->watchers[i];
    if (w == NULL)
      continue;

    if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
      w->events = 0; /* Force re-registration in uv__io_poll. */
      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }
  }

  return 0;
}


void uv__loop_close(uv_loop_t* loop) {
  uv__signal_loop_cleanup(loop);
  uv__platform_loop_delete(loop);
  uv__async_stop(loop);

  if (loop->emfile_fd != -1) {
    uv__close(loop->emfile_fd);
    loop->emfile_fd = -1;
  }

  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }

  uv_mutex_lock(&loop->wq_mutex);
  assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
  assert(!uv__has_active_reqs(loop));
  uv_mutex_unlock(&loop->wq_mutex);
  uv_mutex_destroy(&loop->wq_mutex);

  /* All thread pool work has finished at this point, so it is safe to
   * destroy the rwlock.
   */
  uv_rwlock_destroy(&loop->cloexec_lock);

#if 0
  assert(QUEUE_EMPTY(&loop->pending_queue));
  assert(QUEUE_EMPTY(&loop->watcher_queue));
  assert(loop->nfds == 0);
#endif

  uv__free(loop->watchers);
  loop->watchers = NULL;
  loop->nwatchers = 0;
}


int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
  if (option != UV_LOOP_BLOCK_SIGNAL)
    return UV_ENOSYS;

  if (va_arg(ap, int) != SIGPROF)
    return UV_EINVAL;

  loop->flags |= UV_LOOP_BLOCK_SIGPROF;
  return 0;
}
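

/* Usage sketch (illustrative only, not part of this file): how an embedder
 * would typically drive the public counterparts of the routines above.
 * uv_loop_init() sets the loop up, uv_run() drives it, and uv_loop_close()
 * (which calls uv__loop_close() internally) tears it down once nothing is
 * active. Guarded with #if 0 so it is never compiled into the library.
 */
#if 0
#include <uv.h>

int main(void) {
  uv_loop_t loop;

  if (uv_loop_init(&loop) != 0)
    return 1;

  /* Runs until there are no more active handles or requests. */
  uv_run(&loop, UV_RUN_DEFAULT);

  /* Fails with UV_EBUSY if handles are still open. */
  if (uv_loop_close(&loop) != 0)
    return 1;

  return 0;
}
#endif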