/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_KQUEUE
#endif

#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	int count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	int count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	int max_pollfd_map; /* number of pollfds_map currently allocated */
	int max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	int epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	int kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	int signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

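/*
 * All event loop state lives in this single static instance: the loop is
 * per-process, must be set up with eloop_init() before any other eloop
 * call, and is not thread-safe.
 */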
static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	int i;
	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
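
/*
 * Illustrative lifecycle sketch (not part of this file): a typical caller
 * initializes the loop, registers events, runs until eloop_terminate() is
 * called, and then cleans up. my_timeout is a caller-supplied
 * eloop_timeout_handler.
 *
 *	if (eloop_init() == 0) {
 *		eloop_register_timeout(5, 0, my_timeout, NULL, NULL);
 *		eloop_run();
 *		eloop_destroy();
 *	}
 */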

#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	int filter;
	struct kevent ke;

	switch (type) {
	case EVENT_TYPE_READ:
		filter = EVFILT_READ;
		break;
	case EVENT_TYPE_WRITE:
		filter = EVFILT_WRITE;
		break;
	default:
		filter = 0;
	}
	EV_SET(&ke, sock, filter, EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_KQUEUE */
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
                                     int sock, eloop_sock_handler handler,
                                     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	int next;
#endif
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if (new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		int nmax = eloop.count + 1 + 50;
		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = eloop.max_fd == 0 ? 16 : eloop.max_fd * 2;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for kqueue failed. "
				   "%s\n", __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
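	/*
	 * Register the fd with the kernel event queue and mirror the new
	 * entry into fd_table, which is indexed by fd so that dispatch can
	 * map a kernel event back to its handler in O(1).
	 */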
	if (eloop_sock_queue(sock, table->type) == -1)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
                                         int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif
	int i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, 0, EV_DELETE, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d "
			   "failed. %s\n", __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


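/*
 * Build the pollfd array from the three socket tables. A single fd may be
 * registered in more than one table, so pollfds_map (indexed by fd) is used
 * to merge such registrations into one pollfd entry with combined event
 * bits.
 */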
static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	int i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor; update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor; just use
			 * it if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	int i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	int i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	int i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE
static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}

static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	int i, r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

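/*
 * kqueue descriptors are not inherited across fork(), so a forked child
 * that wants to keep using the event loop must call eloop_sock_requeue()
 * to create a fresh kqueue and re-add every registered socket.
 */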
int eloop_sock_requeue(void)
{
	int r = 0;

	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed. %s\n",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) == -1)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) == -1)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) == -1)
		r = -1;

	return r;
}
#else /* CONFIG_ELOOP_KQUEUE */
int eloop_sock_requeue(void)
{
	return 0;
}
#endif /* !CONFIG_ELOOP_KQUEUE */

static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		int i;
		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}

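/*
 * Illustrative sketch (not part of this file): registering a read socket.
 * The handler runs from eloop_run() whenever the fd becomes readable;
 * sock_cb and fd are caller-supplied.
 *
 *	static void sock_cb(int sock, void *eloop_ctx, void *sock_ctx)
 *	{
 *		... read from sock ...
 *	}
 *
 *	eloop_register_read_sock(fd, sock_cb, NULL, NULL);
 */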

void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec) {
		/*
		 * Integer overflow - assume a timeout this long is
		 * effectively infinite, i.e., it would never fire.
		 */
		wpa_printf(MSG_DEBUG, "ELOOP: Too long timeout (secs=%u) to "
			   "ever happen - ignore it", secs);
		os_free(timeout);
		return 0;
	}
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


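/*
 * eloop_deplete_timeout() shortens an already registered timeout: if the
 * requested interval is earlier than the remaining time, the timeout is
 * re-registered with the shorter interval. Returns 1 if the timeout was
 * rescheduled, 0 if it was left as-is, and -1 if no matching timeout was
 * found.
 */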
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


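/*
 * eloop_replenish_timeout() is the dual of eloop_deplete_timeout(): it
 * extends a registered timeout when the requested interval is later than
 * the remaining time. Same return convention.
 */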
int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#ifndef CONFIG_NATIVE_WINDOWS
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there is a bug that ends up in a "
		   "busy loop that prevents clean shutdown. Killing program "
		   "forcefully.");
	exit(1);
}
#endif /* CONFIG_NATIVE_WINDOWS */


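/*
 * Signal handlers only record which signal fired; the registered callbacks
 * run later from eloop_run() context via eloop_process_pending_signals(),
 * which keeps the handler itself async-signal-safe.
 */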
static void eloop_handle_signal(int sig)
{
	int i;

#ifndef CONFIG_NATIVE_WINDOWS
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* CONFIG_NATIVE_WINDOWS */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	int i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#ifndef CONFIG_NATIVE_WINDOWS
		alarm(0);
#endif /* CONFIG_NATIVE_WINDOWS */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


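/*
 * Illustrative sketch (not part of this file): a terminate handler usually
 * just asks the loop to exit. term_cb is caller-supplied.
 *
 *	static void term_cb(int sig, void *ctx)
 *	{
 *		eloop_terminate();
 *	}
 *
 *	eloop_register_signal_terminate(term_cb, NULL);
 */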
int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}
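		/*
		 * If no timeout is scheduled, the wait below blocks
		 * indefinitely: poll()/epoll_wait() get a -1 timeout and
		 * select()/kevent() get a NULL timeout pointer.
		 */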

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* Check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but it would require four system calls:
	 * epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and close() of
	 * the epoll fd. select() performs better for this one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */