1 /* $NetBSD: events.c,v 1.2 2017/02/14 01:16:49 christos Exp $ */
2
3 /*++
4 /* NAME
5 /* events 3
6 /* SUMMARY
7 /* event manager
8 /* SYNOPSIS
9 /* #include <events.h>
10 /*
11 /* time_t event_time()
12 /*
13 /* void event_loop(delay)
14 /* int delay;
15 /*
16 /* time_t event_request_timer(callback, context, delay)
17 /* void (*callback)(int event, void *context);
18 /* void *context;
19 /* int delay;
20 /*
21 /* int event_cancel_timer(callback, context)
22 /* void (*callback)(int event, void *context);
23 /* void *context;
24 /*
25 /* void event_enable_read(fd, callback, context)
26 /* int fd;
27 /* void (*callback)(int event, void *context);
28 /* void *context;
29 /*
30 /* void event_enable_write(fd, callback, context)
31 /* int fd;
32 /* void (*callback)(int event, void *context);
33 /* void *context;
34 /*
35 /* void event_disable_readwrite(fd)
36 /* int fd;
37 /*
38 /* void event_drain(time_limit)
39 /* int time_limit;
40 /*
41 /* void event_fork(void)
42 /* DESCRIPTION
43 /* This module delivers I/O and timer events.
44 /* Multiple I/O streams and timers can be monitored simultaneously.
45 /* Events are delivered via callback routines provided by the
46 /* application. When requesting an event, the application can provide
47 /* private context that is passed back when the callback routine is
48 /* executed.
49 /*
50 /* event_time() returns a cached value of the current time.
51 /*
52 /* event_loop() monitors all I/O channels for which the application has
53 /* expressed interest, and monitors the timer request queue.
54 /* It notifies the application whenever events of interest happen.
55 /* A negative delay value causes the function to pause until something
56 /* happens; a positive delay value causes event_loop() to return when
57 /* the next event happens or when the delay time in seconds is over,
58 /* whichever happens first. A zero delay effectuates a poll.
59 /*
60 /* Note: in order to avoid race conditions, event_loop() must
61 /* not be called recursively.
62 /*
63 /* event_request_timer() causes the specified callback function to
64 /* be called with the specified context argument after \fIdelay\fR
65 /* seconds, or as soon as possible thereafter. The delay should
66 /* not be negative (the manifest EVENT_NULL_DELAY provides for
67 /* convenient zero-delay notification).
68 /* The event argument is equal to EVENT_TIME.
69 /* Only one timer request can be active per (callback, context) pair.
70 /* Calling event_request_timer() with an existing (callback, context)
71 /* pair does not schedule a new event, but updates the time of event
72 /* delivery. The result is the absolute time at which the timer is
73 /* scheduled to go off.
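/*
/* For example, a call-back can re-arm its own timer to obtain periodic
/* notification. In the sketch below, watchdog() and do_housekeeping()
/* are hypothetical application routines, not part of this interface:
/* .nf
/*
/*    static void watchdog(int event, void *context)
/*    {
/*        do_housekeeping(context);
/*        (void) event_request_timer(watchdog, context, 10);
/*    }
/* .fi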
74 /*
75 /* event_cancel_timer() cancels the specified (callback, context) request.
76 /* The application is allowed to cancel non-existing requests. The result
77 /* value is the amount of time left before the timer would have gone off,
78 /* or -1 in case of no pending timer.
79 /*
80 /* event_enable_read() (event_enable_write()) enables read (write) events
81 /* on the named I/O channel. It is up to the application to assemble
82 /* partial reads or writes.
83 /* An I/O channel cannot handle more than one request at the
84 /* same time. The application is allowed to enable an event that
85 /* is already enabled (same channel, same read or write operation,
86 /* but perhaps a different callback or context). On systems with
87 /* kernel-based event filters this is preferred usage, because
88 /* each disable and enable request would cost a system call.
89 /*
90 /* The manifest constants EVENT_NULL_CONTEXT and EVENT_NULL_TYPE
91 /* provide convenient null values.
92 /*
93 /* The callback routine has the following arguments:
94 /* .IP fd
95 /* The stream on which the event happened.
96 /* .IP event
97 /* An indication of the event type:
98 /* .RS
99 /* .IP EVENT_READ
100 /* read event,
101 /* .IP EVENT_WRITE
102 /* write event,
103 /* .IP EVENT_XCPT
104 /* exception (actually, any event other than read or write).
105 /* .RE
106 /* .IP context
107 /* Application context given to event_enable_read() (event_enable_write()).
108 /* .PP
109 /* event_disable_readwrite() disables further I/O events on the specified
110 /* I/O channel. The application is allowed to cancel non-existing
111 /* I/O event requests.
112 /*
113 /* event_drain() repeatedly calls event_loop() until no more timer
114 /* events or I/O events are pending or until the time limit is reached.
115 /* This routine must not be called from an event_whatever() callback
116 /* routine. Note: this function assumes that no new I/O events
117 /* will be registered.
118 /*
119 /* event_fork() must be called by a child process after it is
120 /* created with fork(), to re-initialize event processing.
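/*
/* For example, an application might register a read call-back and then
/* invoke event_loop() repeatedly. In the sketch below, handle_request()
/* and sock_fd are hypothetical application code:
/* .nf
/*
/*    event_enable_read(sock_fd, handle_request, (void *) 0);
/*    for (;;)
/*        event_loop(-1);
/* .fi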
121 /* DIAGNOSTICS
122 /* Panics: interface violations. Fatal errors: out of memory,
123 /* system call failure. Warnings: the number of available
124 /* file descriptors is much less than FD_SETSIZE.
125 /* BUGS
126 /* This module is based on event selection. It assumes that the
127 /* event_loop() routine is called frequently. This approach is
128 /* not suitable for applications with compute-bound loops that
129 /* take a significant amount of time.
130 /* LICENSE
131 /* .ad
132 /* .fi
133 /* The Secure Mailer license must be distributed with this software.
134 /* AUTHOR(S)
135 /* Wietse Venema
136 /* IBM T.J. Watson Research
137 /* P.O. Box 704
138 /* Yorktown Heights, NY 10598, USA
139 /*--*/
140
141 /* System libraries. */
142
143 #include "sys_defs.h"
144 #include <sys/time.h> /* XXX: 44BSD uses bzero() */
145 #include <time.h>
146 #include <errno.h>
147 #include <unistd.h>
148 #include <stddef.h> /* offsetof() */
149 #include <string.h> /* bzero() prototype for 44BSD */
150 #include <limits.h> /* INT_MAX */
151
152 #ifdef USE_SYS_SELECT_H
153 #include <sys/select.h>
154 #endif
155
156 /* Application-specific. */
157
158 #include "mymalloc.h"
159 #include "msg.h"
160 #include "iostuff.h"
161 #include "ring.h"
162 #include "events.h"
163
164 #if !defined(EVENTS_STYLE)
165 #error "must define EVENTS_STYLE"
166 #endif
167
168 /*
169 * Traditional BSD-style select(2). Works everywhere, but has a built-in
170 * upper bound on the number of file descriptors, and that limit is hard to
171 * change on Linux. Is sometimes emulated with SYSV-style poll(2) which
172 * doesn't have the file descriptor limit, but unfortunately does not help
173 * to improve the performance of servers with lots of connections.
174 */
175 #define EVENT_ALLOC_INCR 10
176
177 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
178 typedef fd_set EVENT_MASK;
179
180 #define EVENT_MASK_BYTE_COUNT(mask) sizeof(*(mask))
181 #define EVENT_MASK_ZERO(mask) FD_ZERO(mask)
182 #define EVENT_MASK_SET(fd, mask) FD_SET((fd), (mask))
183 #define EVENT_MASK_ISSET(fd, mask) FD_ISSET((fd), (mask))
184 #define EVENT_MASK_CLR(fd, mask) FD_CLR((fd), (mask))
185 #define EVENT_MASK_CMP(m1, m2) memcmp((m1), (m2), EVENT_MASK_BYTE_COUNT(m1))
186 #else
187
188 /*
189 * Kernel-based event filters (kqueue, /dev/poll, epoll). We use the
190 * following file descriptor mask structure which is expanded on the fly.
191 */
192 typedef struct {
193 char *data; /* bit mask */
194 size_t data_len; /* data byte count */
195 } EVENT_MASK;
196
197 /* Bits per byte, byte in vector, bit offset in byte, bytes per set. */
198 #define EVENT_MASK_NBBY (8)
199 #define EVENT_MASK_FD_BYTE(fd, mask) \
200 (((unsigned char *) (mask)->data)[(fd) / EVENT_MASK_NBBY])
201 #define EVENT_MASK_FD_BIT(fd) (1 << ((fd) % EVENT_MASK_NBBY))
202 #define EVENT_MASK_BYTES_NEEDED(len) \
203 (((len) + (EVENT_MASK_NBBY -1)) / EVENT_MASK_NBBY)
204 #define EVENT_MASK_BYTE_COUNT(mask) ((mask)->data_len)
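
/*
 * Example: for fd = 10, EVENT_MASK_FD_BYTE() selects byte 10 / 8 = 1 of the
 * mask, EVENT_MASK_FD_BIT() produces 1 << (10 % 8) = 0x04, and so
 * EVENT_MASK_SET(10, mask) turns on bit 2 of the second byte.
 */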
205
206 /* Memory management. */
207 #define EVENT_MASK_ALLOC(mask, bit_len) do { \
208 size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
209 (mask)->data = mymalloc(_byte_len); \
210 memset((mask)->data, 0, _byte_len); \
211 (mask)->data_len = _byte_len; \
212 } while (0)
213 #define EVENT_MASK_REALLOC(mask, bit_len) do { \
214 size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
215 size_t _old_len = (mask)->data_len; \
216 (mask)->data = myrealloc((mask)->data, _byte_len); \
217 if (_byte_len > _old_len) \
218 memset((mask)->data + _old_len, 0, _byte_len - _old_len); \
219 (mask)->data_len = _byte_len; \
220 } while (0)
221 #define EVENT_MASK_FREE(mask) myfree((mask)->data)
222
223 /* Set operations, modeled after FD_ZERO/SET/ISSET/CLR. */
224 #define EVENT_MASK_ZERO(mask) \
225 memset((mask)->data, 0, (mask)->data_len)
226 #define EVENT_MASK_SET(fd, mask) \
227 (EVENT_MASK_FD_BYTE((fd), (mask)) |= EVENT_MASK_FD_BIT(fd))
228 #define EVENT_MASK_ISSET(fd, mask) \
229 (EVENT_MASK_FD_BYTE((fd), (mask)) & EVENT_MASK_FD_BIT(fd))
230 #define EVENT_MASK_CLR(fd, mask) \
231 (EVENT_MASK_FD_BYTE((fd), (mask)) &= ~EVENT_MASK_FD_BIT(fd))
232 #define EVENT_MASK_CMP(m1, m2) \
233 memcmp((m1)->data, (m2)->data, EVENT_MASK_BYTE_COUNT(m1))
234 #endif
235
236 /*
237 * I/O events.
238 */
239 typedef struct EVENT_FDTABLE EVENT_FDTABLE;
240
241 struct EVENT_FDTABLE {
242 EVENT_NOTIFY_RDWR_FN callback;
243 char *context;
244 };
245 static EVENT_MASK event_rmask; /* enabled read events */
246 static EVENT_MASK event_wmask; /* enabled write events */
247 static EVENT_MASK event_xmask; /* for bad news mostly */
248 static int event_fdlimit; /* per-process open file limit */
249 static EVENT_FDTABLE *event_fdtable; /* one slot per file descriptor */
250 static int event_fdslots; /* number of file descriptor slots */
251 static int event_max_fd = -1; /* highest fd number seen */
252
253 /*
254 * FreeBSD kqueue supports no system call to find out what descriptors are
255 * registered in the kernel-based filter. To implement our own sanity checks
256 * we maintain our own descriptor bitmask.
257 *
258 * FreeBSD kqueue does support application context pointers. Unfortunately,
259 * changing that information would cost a system call, and some of the
260 * competitors don't support application context. To keep the implementation
261 * simple we maintain our own table with call-back information.
262 *
263 * FreeBSD kqueue silently unregisters a descriptor from its filter when the
264 * descriptor is closed, so our information could get out of sync with the
265 * kernel. But that will never happen, because we have to meticulously
266 * unregister a file descriptor before it is closed, to avoid errors on
267 * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
268 */
269 #if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE)
270 #include <sys/event.h>
271
272 /*
273 * Some early FreeBSD implementations don't have the EV_SET macro.
274 */
275 #ifndef EV_SET
276 #define EV_SET(kp, id, fi, fl, ffl, da, ud) do { \
277 (kp)->ident = (id); \
278 (kp)->filter = (fi); \
279 (kp)->flags = (fl); \
280 (kp)->fflags = (ffl); \
281 (kp)->data = (da); \
282 (kp)->udata = (ud); \
283 } while(0)
284 #endif
285
286 /*
287 * Macros to initialize the kernel-based filter; see event_init().
288 */
289 static int event_kq; /* handle to event filter */
290
291 #define EVENT_REG_INIT_HANDLE(er, n) do { \
292 er = event_kq = kqueue(); \
293 } while (0)
294 #define EVENT_REG_INIT_TEXT "kqueue"
295
296 #define EVENT_REG_FORK_HANDLE(er, n) do { \
297 (void) close(event_kq); \
298 EVENT_REG_INIT_HANDLE(er, (n)); \
299 } while (0)
300
301 /*
302 * Macros to update the kernel-based filter; see event_enable_read(),
303 * event_enable_write() and event_disable_readwrite().
304 */
305 #define EVENT_REG_FD_OP(er, fh, ev, op) do { \
306 struct kevent dummy; \
307 EV_SET(&dummy, (fh), (ev), (op), 0, 0, 0); \
308 (er) = kevent(event_kq, &dummy, 1, 0, 0, 0); \
309 } while (0)
310
311 #define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_ADD)
312 #define EVENT_REG_ADD_READ(e, f) EVENT_REG_ADD_OP((e), (f), EVFILT_READ)
313 #define EVENT_REG_ADD_WRITE(e, f) EVENT_REG_ADD_OP((e), (f), EVFILT_WRITE)
314 #define EVENT_REG_ADD_TEXT "kevent EV_ADD"
315
316 #define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_DELETE)
317 #define EVENT_REG_DEL_READ(e, f) EVENT_REG_DEL_OP((e), (f), EVFILT_READ)
318 #define EVENT_REG_DEL_WRITE(e, f) EVENT_REG_DEL_OP((e), (f), EVFILT_WRITE)
319 #define EVENT_REG_DEL_TEXT "kevent EV_DELETE"
320
321 /*
322 * Macros to retrieve event buffers from the kernel; see event_loop().
323 */
324 typedef struct kevent EVENT_BUFFER;
325
326 #define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
327 struct timespec ts; \
328 struct timespec *tsp; \
329 if ((delay) < 0) { \
330 tsp = 0; \
331 } else { \
332 tsp = &ts; \
333 ts.tv_nsec = 0; \
334 ts.tv_sec = (delay); \
335 } \
336 (event_count) = kevent(event_kq, (struct kevent *) 0, 0, (event_buf), \
337 (buflen), (tsp)); \
338 } while (0)
339 #define EVENT_BUFFER_READ_TEXT "kevent"
340
341 /*
342 * Macros to process event buffers from the kernel; see event_loop().
343 */
344 #define EVENT_GET_FD(bp) ((bp)->ident)
345 #define EVENT_GET_TYPE(bp) ((bp)->filter)
346 #define EVENT_TEST_READ(bp) (EVENT_GET_TYPE(bp) == EVFILT_READ)
347 #define EVENT_TEST_WRITE(bp) (EVENT_GET_TYPE(bp) == EVFILT_WRITE)
348
349 #endif
350
351 /*
352 * Solaris /dev/poll does not support application context, so we have to
353 * maintain our own. This has the benefit of avoiding an expensive system
354 * call just to change a call-back function or argument.
355 *
356 * Solaris /dev/poll does have a way to query if a specific descriptor is
357 * registered. However, we maintain a descriptor mask anyway because a) it
358 * avoids having to make an expensive system call to find out if something
359 * is registered, b) some EVENTS_STYLE_MUMBLE implementations need a
360 * descriptor bitmask anyway and c) we use the bitmask already to implement
361 * sanity checks.
362 */
363 #if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL)
364 #include <sys/devpoll.h>
365 #include <fcntl.h>
366
367 /*
368 * Macros to initialize the kernel-based filter; see event_init().
369 */
370 static int event_pollfd; /* handle to file descriptor set */
371
372 #define EVENT_REG_INIT_HANDLE(er, n) do { \
373 er = event_pollfd = open("/dev/poll", O_RDWR); \
374 if (event_pollfd >= 0) close_on_exec(event_pollfd, CLOSE_ON_EXEC); \
375 } while (0)
376 #define EVENT_REG_INIT_TEXT "open /dev/poll"
377
378 #define EVENT_REG_FORK_HANDLE(er, n) do { \
379 (void) close(event_pollfd); \
380 EVENT_REG_INIT_HANDLE(er, (n)); \
381 } while (0)
382
383 /*
384 * Macros to update the kernel-based filter; see event_enable_read(),
385 * event_enable_write() and event_disable_readwrite().
386 */
387 #define EVENT_REG_FD_OP(er, fh, ev) do { \
388 struct pollfd dummy; \
389 dummy.fd = (fh); \
390 dummy.events = (ev); \
391 (er) = write(event_pollfd, (void *) &dummy, \
392 sizeof(dummy)) != sizeof(dummy) ? -1 : 0; \
393 } while (0)
394
395 #define EVENT_REG_ADD_READ(e, f) EVENT_REG_FD_OP((e), (f), POLLIN)
396 #define EVENT_REG_ADD_WRITE(e, f) EVENT_REG_FD_OP((e), (f), POLLOUT)
397 #define EVENT_REG_ADD_TEXT "write /dev/poll"
398
399 #define EVENT_REG_DEL_BOTH(e, f) EVENT_REG_FD_OP((e), (f), POLLREMOVE)
400 #define EVENT_REG_DEL_TEXT "write /dev/poll"
401
402 /*
403 * Macros to retrieve event buffers from the kernel; see event_loop().
404 */
405 typedef struct pollfd EVENT_BUFFER;
406
407 #define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
408 struct dvpoll dvpoll; \
409 dvpoll.dp_fds = (event_buf); \
410 dvpoll.dp_nfds = (buflen); \
411 dvpoll.dp_timeout = (delay) < 0 ? -1 : (delay) * 1000; \
412 (event_count) = ioctl(event_pollfd, DP_POLL, &dvpoll); \
413 } while (0)
414 #define EVENT_BUFFER_READ_TEXT "ioctl DP_POLL"
415
416 /*
417 * Macros to process event buffers from the kernel; see event_loop().
418 */
419 #define EVENT_GET_FD(bp) ((bp)->fd)
420 #define EVENT_GET_TYPE(bp) ((bp)->revents)
421 #define EVENT_TEST_READ(bp) (EVENT_GET_TYPE(bp) & POLLIN)
422 #define EVENT_TEST_WRITE(bp) (EVENT_GET_TYPE(bp) & POLLOUT)
423
424 #endif
425
426 /*
427 * Linux epoll supports no system call to find out what descriptors are
428 * registered in the kernel-based filter. To implement our own sanity checks
429 * we maintain our own descriptor bitmask.
430 *
431 * Linux epoll does support application context pointers. Unfortunately,
432 * changing that information would cost a system call, and some of the
433 * competitors don't support application context. To keep the implementation
434 * simple we maintain our own table with call-back information.
435 *
436 * Linux epoll silently unregisters a descriptor from its filter when the
437 * descriptor is closed, so our information could get out of sync with the
438 * kernel. But that will never happen, because we have to meticulously
439 * unregister a file descriptor before it is closed, to avoid errors on
440 * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
441 */
442 #if (EVENTS_STYLE == EVENTS_STYLE_EPOLL)
443 #include <sys/epoll.h>
444
445 /*
446 * Macros to initialize the kernel-based filter; see event_init().
447 */
448 static int event_epollfd; /* epoll handle */
449
450 #define EVENT_REG_INIT_HANDLE(er, n) do { \
451 er = event_epollfd = epoll_create(n); \
452 if (event_epollfd >= 0) close_on_exec(event_epollfd, CLOSE_ON_EXEC); \
453 } while (0)
454 #define EVENT_REG_INIT_TEXT "epoll_create"
455
456 #define EVENT_REG_FORK_HANDLE(er, n) do { \
457 (void) close(event_epollfd); \
458 EVENT_REG_INIT_HANDLE(er, (n)); \
459 } while (0)
460
461 /*
462 * Macros to update the kernel-based filter; see event_enable_read(),
463 * event_enable_write() and event_disable_readwrite().
464 */
465 #define EVENT_REG_FD_OP(er, fh, ev, op) do { \
466 struct epoll_event dummy; \
467 dummy.events = (ev); \
468 dummy.data.fd = (fh); \
469 (er) = epoll_ctl(event_epollfd, (op), (fh), &dummy); \
470 } while (0)
471
472 #define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_ADD)
473 #define EVENT_REG_ADD_READ(e, f) EVENT_REG_ADD_OP((e), (f), EPOLLIN)
474 #define EVENT_REG_ADD_WRITE(e, f) EVENT_REG_ADD_OP((e), (f), EPOLLOUT)
475 #define EVENT_REG_ADD_TEXT "epoll_ctl EPOLL_CTL_ADD"
476
477 #define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_DEL)
478 #define EVENT_REG_DEL_READ(e, f) EVENT_REG_DEL_OP((e), (f), EPOLLIN)
479 #define EVENT_REG_DEL_WRITE(e, f) EVENT_REG_DEL_OP((e), (f), EPOLLOUT)
480 #define EVENT_REG_DEL_TEXT "epoll_ctl EPOLL_CTL_DEL"
481
482 /*
483 * Macros to retrieve event buffers from the kernel; see event_loop().
484 */
485 typedef struct epoll_event EVENT_BUFFER;
486
487 #define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
488 (event_count) = epoll_wait(event_epollfd, (event_buf), (buflen), \
489 (delay) < 0 ? -1 : (delay) * 1000); \
490 } while (0)
491 #define EVENT_BUFFER_READ_TEXT "epoll_wait"
492
493 /*
494 * Macros to process event buffers from the kernel; see event_loop().
495 */
496 #define EVENT_GET_FD(bp) ((bp)->data.fd)
497 #define EVENT_GET_TYPE(bp) ((bp)->events)
498 #define EVENT_TEST_READ(bp) (EVENT_GET_TYPE(bp) & EPOLLIN)
499 #define EVENT_TEST_WRITE(bp) (EVENT_GET_TYPE(bp) & EPOLLOUT)
500
501 #endif
502
503 /*
504 * Timer events. Timer requests are kept sorted, in a circular list. We use
505 * the RING abstraction, so we get to use a couple ugly macros.
506 *
507 * When a call-back function adds a timer request, we label the request with
508 * the event_loop() call instance that invoked the call-back. We use this to
509 * prevent zero-delay timer requests from running in a tight loop and
510 * starving I/O events.
511 */
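
/*
 * For illustration (tick() and do_a_little_work() are hypothetical): a
 * call-back that re-arms itself with a zero delay, as in
 *
 *    static void tick(int event, void *context)
 *    {
 *        do_a_little_work(context);
 *        (void) event_request_timer(tick, context, EVENT_NULL_DELAY);
 *    }
 *
 * fires at most once per event_loop() call. The re-armed request carries
 * the current loop instance label, so timer delivery stops there and
 * resumes only in the next event_loop() call, and zero-delay timers
 * cannot starve I/O events.
 */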
512 typedef struct EVENT_TIMER EVENT_TIMER;
513
514 struct EVENT_TIMER {
515 time_t when; /* when event is wanted */
516 EVENT_NOTIFY_TIME_FN callback; /* callback function */
517 char *context; /* callback context */
518 long loop_instance; /* event_loop() call instance */
519 RING ring; /* linkage */
520 };
521
522 static RING event_timer_head; /* timer queue head */
523 static long event_loop_instance; /* event_loop() call instance */
524
525 #define RING_TO_TIMER(r) \
526 ((EVENT_TIMER *) ((void *) (r) - offsetof(EVENT_TIMER, ring)))
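
/*
 * RING_TO_TIMER() recovers the enclosing EVENT_TIMER from a pointer to its
 * embedded RING member, by subtracting the member's offset from the member's
 * address (the usual "container of" idiom built on offsetof()).
 */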
527
528 #define FOREACH_QUEUE_ENTRY(entry, head) \
529 for (entry = ring_succ(head); entry != (head); entry = ring_succ(entry))
530
531 #define FIRST_TIMER(head) \
532 (ring_succ(head) != (head) ? RING_TO_TIMER(ring_succ(head)) : 0)
533
534 /*
535 * Other private data structures.
536 */
537 static time_t event_present; /* cached time of day */
538
539 #define EVENT_INIT_NEEDED() (event_present == 0)
540
541 /* event_init - set up tables and such */
542
543 static void event_init(void)
544 {
545 EVENT_FDTABLE *fdp;
546 int err;
547
548 if (!EVENT_INIT_NEEDED())
549 msg_panic("event_init: repeated call");
550
551 /*
552 * Initialize the file descriptor masks and the call-back table. Where
553 * possible we extend these data structures on the fly. With select(2)
554 * based implementations we can only handle FD_SETSIZE open files.
555 */
556 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
557 if ((event_fdlimit = open_limit(FD_SETSIZE)) < 0)
558 msg_fatal("unable to determine open file limit");
559 #else
560 if ((event_fdlimit = open_limit(INT_MAX)) < 0)
561 msg_fatal("unable to determine open file limit");
562 #endif
563 if (event_fdlimit < FD_SETSIZE / 2 && event_fdlimit < 256)
564 msg_warn("could allocate space for only %d open files", event_fdlimit);
565 event_fdslots = EVENT_ALLOC_INCR;
566 event_fdtable = (EVENT_FDTABLE *)
567 mymalloc(sizeof(EVENT_FDTABLE) * event_fdslots);
568 for (fdp = event_fdtable; fdp < event_fdtable + event_fdslots; fdp++) {
569 fdp->callback = 0;
570 fdp->context = 0;
571 }
572
573 /*
574 * Initialize the I/O event request masks.
575 */
576 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
577 EVENT_MASK_ZERO(&event_rmask);
578 EVENT_MASK_ZERO(&event_wmask);
579 EVENT_MASK_ZERO(&event_xmask);
580 #else
581 EVENT_MASK_ALLOC(&event_rmask, event_fdslots);
582 EVENT_MASK_ALLOC(&event_wmask, event_fdslots);
583 EVENT_MASK_ALLOC(&event_xmask, event_fdslots);
584
585 /*
586 * Initialize the kernel-based filter.
587 */
588 EVENT_REG_INIT_HANDLE(err, event_fdslots);
589 if (err < 0)
590 msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
591 #endif
592
593 /*
594 * Initialize timer stuff.
595 */
596 ring_init(&event_timer_head);
597 (void) time(&event_present);
598
599 /*
600 * Avoid an infinite initialization loop.
601 */
602 if (EVENT_INIT_NEEDED())
603 msg_panic("event_init: unable to initialize");
604 }
605
606 /* event_extend - make room for more descriptor slots */
607
608 static void event_extend(int fd)
609 {
610 const char *myname = "event_extend";
611 int old_slots = event_fdslots;
612 int new_slots = (event_fdslots > fd / 2 ?
613 2 * old_slots : fd + EVENT_ALLOC_INCR);
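
    /*
     * Grow the table geometrically (double it) while fd is within twice the
     * current size; otherwise jump directly to fd plus EVENT_ALLOC_INCR, so
     * that a very large descriptor number is accommodated in a single step.
     */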
614 EVENT_FDTABLE *fdp;
615
616 #ifdef EVENT_REG_UPD_HANDLE
617 int err;
618
619 #endif
620
621 if (msg_verbose > 2)
622 msg_info("%s: fd %d", myname, fd);
623 event_fdtable = (EVENT_FDTABLE *)
624 myrealloc((void *) event_fdtable, sizeof(EVENT_FDTABLE) * new_slots);
625 event_fdslots = new_slots;
626 for (fdp = event_fdtable + old_slots;
627 fdp < event_fdtable + new_slots; fdp++) {
628 fdp->callback = 0;
629 fdp->context = 0;
630 }
631
632 /*
633 * Initialize the I/O event request masks.
634 */
635 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
636 EVENT_MASK_REALLOC(&event_rmask, new_slots);
637 EVENT_MASK_REALLOC(&event_wmask, new_slots);
638 EVENT_MASK_REALLOC(&event_xmask, new_slots);
639 #endif
640 #ifdef EVENT_REG_UPD_HANDLE
641 EVENT_REG_UPD_HANDLE(err, new_slots);
642 if (err < 0)
643 msg_fatal("%s: %s: %m", myname, EVENT_REG_UPD_TEXT);
644 #endif
645 }
646
647 /* event_time - look up cached time of day */
648
649 time_t event_time(void)
650 {
651 if (EVENT_INIT_NEEDED())
652 event_init();
653
654 return (event_present);
655 }
656
657 /* event_drain - loop until all pending events are done */
658
659 void event_drain(int time_limit)
660 {
661 EVENT_MASK zero_mask;
662 time_t max_time;
663
664 if (EVENT_INIT_NEEDED())
665 return;
666
667 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
668 EVENT_MASK_ZERO(&zero_mask);
669 #else
670 EVENT_MASK_ALLOC(&zero_mask, event_fdslots);
671 #endif
672 (void) time(&event_present);
673 max_time = event_present + time_limit;
674 while (event_present < max_time
675 && (event_timer_head.pred != &event_timer_head
676 || EVENT_MASK_CMP(&zero_mask, &event_xmask) != 0)) {
677 event_loop(1);
678 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
679 if (EVENT_MASK_BYTE_COUNT(&zero_mask)
680 != EVENT_MASK_BYTES_NEEDED(event_fdslots))
681 EVENT_MASK_REALLOC(&zero_mask, event_fdslots);
682 #endif
683 }
684 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
685 EVENT_MASK_FREE(&zero_mask);
686 #endif
687 }
688
689 /* event_fork - resume event processing after fork() */
690
691 void event_fork(void)
692 {
693 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
694 EVENT_FDTABLE *fdp;
695 int err;
696 int fd;
697
698 /*
699 * No event was ever registered, so there's nothing to be done.
700 */
701 if (EVENT_INIT_NEEDED())
702 return;
703
704 /*
705 * Close the existing filter handle and open a new kernel-based filter.
706 */
707 EVENT_REG_FORK_HANDLE(err, event_fdslots);
708 if (err < 0)
709 msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
710
711 /*
712 * Populate the new kernel-based filter with events that were registered
713 * in the parent process.
714 */
715 for (fd = 0; fd <= event_max_fd; fd++) {
716 if (EVENT_MASK_ISSET(fd, &event_wmask)) {
717 EVENT_MASK_CLR(fd, &event_wmask);
718 fdp = event_fdtable + fd;
719 event_enable_write(fd, fdp->callback, fdp->context);
720 } else if (EVENT_MASK_ISSET(fd, &event_rmask)) {
721 EVENT_MASK_CLR(fd, &event_rmask);
722 fdp = event_fdtable + fd;
723 event_enable_read(fd, fdp->callback, fdp->context);
724 }
725 }
726 #endif
727 }
728
729 /* event_enable_read - enable read events */
730
731 void event_enable_read(int fd, EVENT_NOTIFY_RDWR_FN callback, void *context)
732 {
733 const char *myname = "event_enable_read";
734 EVENT_FDTABLE *fdp;
735 int err;
736
737 if (EVENT_INIT_NEEDED())
738 event_init();
739
740 /*
741 * Sanity checks.
742 */
743 if (fd < 0 || fd >= event_fdlimit)
744 msg_panic("%s: bad file descriptor: %d", myname, fd);
745
746 if (msg_verbose > 2)
747 msg_info("%s: fd %d", myname, fd);
748
749 if (fd >= event_fdslots)
750 event_extend(fd);
751
752 /*
753 * Disallow mixed (i.e. read and write) requests on the same descriptor.
754 */
755 if (EVENT_MASK_ISSET(fd, &event_wmask))
756 msg_panic("%s: fd %d: read/write I/O request", myname, fd);
757
758 /*
759 * Postfix 2.4 allows multiple event_enable_read() calls on the same
760 * descriptor without requiring event_disable_readwrite() calls between
761 * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
762 * wasteful to make system calls when we change only application
763 * call-back information. It has a noticeable effect on smtp-source
764 * performance.
765 */
766 if (EVENT_MASK_ISSET(fd, &event_rmask) == 0) {
767 EVENT_MASK_SET(fd, &event_xmask);
768 EVENT_MASK_SET(fd, &event_rmask);
769 if (event_max_fd < fd)
770 event_max_fd = fd;
771 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
772 EVENT_REG_ADD_READ(err, fd);
773 if (err < 0)
774 msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
775 #endif
776 }
777 fdp = event_fdtable + fd;
778 if (fdp->callback != callback || fdp->context != context) {
779 fdp->callback = callback;
780 fdp->context = context;
781 }
782 }
783
784 /* event_enable_write - enable write events */
785
786 void event_enable_write(int fd, EVENT_NOTIFY_RDWR_FN callback, void *context)
787 {
788 const char *myname = "event_enable_write";
789 EVENT_FDTABLE *fdp;
790 int err;
791
792 if (EVENT_INIT_NEEDED())
793 event_init();
794
795 /*
796 * Sanity checks.
797 */
798 if (fd < 0 || fd >= event_fdlimit)
799 msg_panic("%s: bad file descriptor: %d", myname, fd);
800
801 if (msg_verbose > 2)
802 msg_info("%s: fd %d", myname, fd);
803
804 if (fd >= event_fdslots)
805 event_extend(fd);
806
807 /*
808 * Disallow mixed (i.e. read and write) requests on the same descriptor.
809 */
810 if (EVENT_MASK_ISSET(fd, &event_rmask))
811 msg_panic("%s: fd %d: read/write I/O request", myname, fd);
812
813 /*
814 * Postfix 2.4 allows multiple event_enable_write() calls on the same
815 * descriptor without requiring event_disable_readwrite() calls between
816 * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
817 * incredibly wasteful to make unregister and register system calls when
818 * we change only application call-back information. It has a noticeable
819 * effect on smtp-source performance.
820 */
821 if (EVENT_MASK_ISSET(fd, &event_wmask) == 0) {
822 EVENT_MASK_SET(fd, &event_xmask);
823 EVENT_MASK_SET(fd, &event_wmask);
824 if (event_max_fd < fd)
825 event_max_fd = fd;
826 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
827 EVENT_REG_ADD_WRITE(err, fd);
828 if (err < 0)
829 msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
830 #endif
831 }
832 fdp = event_fdtable + fd;
833 if (fdp->callback != callback || fdp->context != context) {
834 fdp->callback = callback;
835 fdp->context = context;
836 }
837 }
838
839 /* event_disable_readwrite - disable request for read or write events */
840
841 void event_disable_readwrite(int fd)
842 {
843 const char *myname = "event_disable_readwrite";
844 EVENT_FDTABLE *fdp;
845 int err;
846
847 if (EVENT_INIT_NEEDED())
848 event_init();
849
850 /*
851 * Sanity checks.
852 */
853 if (fd < 0 || fd >= event_fdlimit)
854 msg_panic("%s: bad file descriptor: %d", myname, fd);
855
856 if (msg_verbose > 2)
857 msg_info("%s: fd %d", myname, fd);
858
859 /*
860 * Don't complain when there is nothing to cancel. The request may have
861 * been canceled from another thread.
862 */
863 if (fd >= event_fdslots)
864 return;
865 #if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
866 #ifdef EVENT_REG_DEL_BOTH
867 /* XXX Can't seem to disable READ and WRITE events selectively. */
868 if (EVENT_MASK_ISSET(fd, &event_rmask)
869 || EVENT_MASK_ISSET(fd, &event_wmask)) {
870 EVENT_REG_DEL_BOTH(err, fd);
871 if (err < 0)
872 msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
873 }
874 #else
875 if (EVENT_MASK_ISSET(fd, &event_rmask)) {
876 EVENT_REG_DEL_READ(err, fd);
877 if (err < 0)
878 msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
879 } else if (EVENT_MASK_ISSET(fd, &event_wmask)) {
880 EVENT_REG_DEL_WRITE(err, fd);
881 if (err < 0)
882 msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
883 }
884 #endif /* EVENT_REG_DEL_BOTH */
885 #endif /* != EVENTS_STYLE_SELECT */
886 EVENT_MASK_CLR(fd, &event_xmask);
887 EVENT_MASK_CLR(fd, &event_rmask);
888 EVENT_MASK_CLR(fd, &event_wmask);
889 fdp = event_fdtable + fd;
890 fdp->callback = 0;
891 fdp->context = 0;
892 }
893
894 /* event_request_timer - (re)set timer */
895
896 time_t event_request_timer(EVENT_NOTIFY_TIME_FN callback, void *context, int delay)
897 {
898 const char *myname = "event_request_timer";
899 RING *ring;
900 EVENT_TIMER *timer;
901
902 if (EVENT_INIT_NEEDED())
903 event_init();
904
905 /*
906 * Sanity checks.
907 */
908 if (delay < 0)
909 msg_panic("%s: invalid delay: %d", myname, delay);
910
911 /*
912 * Make sure we schedule this event at the right time.
913 */
914 time(&event_present);
915
916 /*
917 * See if they are resetting an existing timer request. If so, take the
918 * request away from the timer queue so that it can be inserted at the
919 * right place.
920 */
921 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
922 timer = RING_TO_TIMER(ring);
923 if (timer->callback == callback && timer->context == context) {
924 timer->when = event_present + delay;
925 timer->loop_instance = event_loop_instance;
926 ring_detach(ring);
927 if (msg_verbose > 2)
928 msg_info("%s: reset 0x%lx 0x%lx %d", myname,
929 (long) callback, (long) context, delay);
930 break;
931 }
932 }
933
934 /*
935 * If not found, schedule a new timer request.
936 */
937 if (ring == &event_timer_head) {
938 timer = (EVENT_TIMER *) mymalloc(sizeof(EVENT_TIMER));
939 timer->when = event_present + delay;
940 timer->callback = callback;
941 timer->context = context;
942 timer->loop_instance = event_loop_instance;
943 if (msg_verbose > 2)
944 msg_info("%s: set 0x%lx 0x%lx %d", myname,
945 (long) callback, (long) context, delay);
946 }
947
948 /*
949 * Timer requests are kept sorted to reduce lookup overhead in the event
950 * loop.
951 *
952 * XXX Append the new request after existing requests for the same time
953 * slot. The event_loop() routine depends on this to avoid starving I/O
954 * events when a call-back function schedules a zero-delay timer request.
955 */
956 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
957 if (timer->when < RING_TO_TIMER(ring)->when)
958 break;
959 }
960 ring_prepend(ring, &timer->ring);
961
962 return (timer->when);
963 }
964
965 /* event_cancel_timer - cancel timer */
966
967 int event_cancel_timer(EVENT_NOTIFY_TIME_FN callback, void *context)
968 {
969 const char *myname = "event_cancel_timer";
970 RING *ring;
971 EVENT_TIMER *timer;
972 int time_left = -1;
973
974 if (EVENT_INIT_NEEDED())
975 event_init();
976
977 /*
978 * See if they are canceling an existing timer request. Do not complain
979 * when the request is not found. It might have been canceled from some
980 * other thread.
981 */
982 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
983 timer = RING_TO_TIMER(ring);
984 if (timer->callback == callback && timer->context == context) {
985 if ((time_left = timer->when - event_present) < 0)
986 time_left = 0;
987 ring_detach(ring);
988 myfree((void *) timer);
989 break;
990 }
991 }
992 if (msg_verbose > 2)
993 msg_info("%s: 0x%lx 0x%lx %d", myname,
994 (long) callback, (long) context, time_left);
995 return (time_left);
996 }
997
998 /* event_loop - wait for the next event */
999
1000 void event_loop(int delay)
1001 {
1002 const char *myname = "event_loop";
1003 static int nested;
1004
1005 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
1006 fd_set rmask;
1007 fd_set wmask;
1008 fd_set xmask;
1009 struct timeval tv;
1010 struct timeval *tvp;
1011 int new_max_fd;
1012
1013 #else
1014 EVENT_BUFFER event_buf[100];
1015 EVENT_BUFFER *bp;
1016
1017 #endif
1018 int event_count;
1019 EVENT_TIMER *timer;
1020 int fd;
1021 EVENT_FDTABLE *fdp;
1022 int select_delay;
1023
1024 if (EVENT_INIT_NEEDED())
1025 event_init();
1026
1027 /*
1028 * XXX Also print the select() masks?
1029 */
1030 if (msg_verbose > 2) {
1031 RING *ring;
1032
1033 FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
1034 timer = RING_TO_TIMER(ring);
1035 msg_info("%s: time left %3d for 0x%lx 0x%lx", myname,
1036 (int) (timer->when - event_present),
1037 (long) timer->callback, (long) timer->context);
1038 }
1039 }
1040
1041 /*
1042 * Find out when the next timer would go off. Timer requests are sorted.
1043 * If any timer is scheduled, adjust the delay appropriately.
1044 */
1045 if ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
1046 event_present = time((time_t *) 0);
1047 if ((select_delay = timer->when - event_present) < 0) {
1048 select_delay = 0;
1049 } else if (delay >= 0 && select_delay > delay) {
1050 select_delay = delay;
1051 }
1052 } else {
1053 select_delay = delay;
1054 }
1055 if (msg_verbose > 2)
1056 msg_info("event_loop: select_delay %d", select_delay);
1057
1058 /*
1059 * Negative delay means: wait until something happens. Zero delay means:
1060 * poll. Positive delay means: wait at most this long.
1061 */
1062 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
1063 if (select_delay < 0) {
1064 tvp = 0;
1065 } else {
1066 tvp = &tv;
1067 tv.tv_usec = 0;
1068 tv.tv_sec = select_delay;
1069 }
1070
1071 /*
1072 * Pause until the next event happens. When select() has a problem, don't
1073 * go into a tight loop. Allow select() to be interrupted due to the
1074 * arrival of a signal.
1075 */
1076 rmask = event_rmask;
1077 wmask = event_wmask;
1078 xmask = event_xmask;
1079
1080 event_count = select(event_max_fd + 1, &rmask, &wmask, &xmask, tvp);
1081 if (event_count < 0) {
1082 if (errno != EINTR)
1083 msg_fatal("event_loop: select: %m");
1084 return;
1085 }
1086 #else
1087 EVENT_BUFFER_READ(event_count, event_buf,
1088 sizeof(event_buf) / sizeof(event_buf[0]),
1089 select_delay);
1090 if (event_count < 0) {
1091 if (errno != EINTR)
1092 msg_fatal("event_loop: " EVENT_BUFFER_READ_TEXT ": %m");
1093 return;
1094 }
1095 #endif
1096
1097 /*
1098 * Before entering the application call-back routines, make sure we
1099 * aren't being called from a call-back routine. Doing so would make us
1100 * vulnerable to all kinds of race conditions.
1101 */
1102 if (nested++ > 0)
1103 msg_panic("event_loop: recursive call");
1104
1105 /*
1106 * Deliver timer events. Allow the application to add/delete timer queue
1107 * requests while it is being called back. Requests are sorted: we keep
1108 * running over the timer request queue from the start, and stop when we
1109 * reach the future or the list end. We also stop when we reach a timer
1110 * request that was added by a call-back that was invoked from this
1111 * event_loop() call instance, for reasons that are explained below.
1112 *
1113 * To avoid dangling pointer problems 1) we must remove a request from the
1114 * timer queue before delivering its event to the application and 2) we
1115 * must look up the next timer request *after* calling the application.
1116 * The latter complicates the handling of zero-delay timer requests that
1117 * are added by event_loop() call-back functions.
1118 *
1119 * XXX When a timer event call-back function adds a new timer request,
1120 * event_request_timer() labels the request with the event_loop() call
1121 * instance that invoked the timer event call-back. We use this instance
1122 * label here to prevent zero-delay timer requests from running in a
1123 * tight loop and starving I/O events. To make this solution work,
1124 * event_request_timer() appends a new request after existing requests
1125 * for the same time slot.
1126 */
1127 event_present = time((time_t *) 0);
1128 event_loop_instance += 1;
1129
1130 while ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
1131 if (timer->when > event_present)
1132 break;
1133 if (timer->loop_instance == event_loop_instance)
1134 break;
1135 ring_detach(&timer->ring); /* first this */
1136 if (msg_verbose > 2)
1137 msg_info("%s: timer 0x%lx 0x%lx", myname,
1138 (long) timer->callback, (long) timer->context);
1139 timer->callback(EVENT_TIME, timer->context); /* then this */
1140 myfree((void *) timer);
1141 }
1142
1143 /*
1144 * Deliver I/O events. Allow the application to cancel event requests
1145 * while it is being called back. To this end, we keep an eye on the
1146 * contents of event_xmask, so that we deliver only events that are still
1147 * wanted. We do not change the event request masks. It is up to the
1148 * application to determine when a read or write is complete.
1149 */
1150 #if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
1151 if (event_count > 0) {
1152 for (new_max_fd = 0, fd = 0; fd <= event_max_fd; fd++) {
1153 if (FD_ISSET(fd, &event_xmask)) {
1154 new_max_fd = fd;
1155 /* In case event_fdtable is updated. */
1156 fdp = event_fdtable + fd;
1157 if (FD_ISSET(fd, &xmask)) {
1158 if (msg_verbose > 2)
1159 msg_info("%s: exception fd=%d act=0x%lx 0x%lx", myname,
1160 fd, (long) fdp->callback, (long) fdp->context);
1161 fdp->callback(EVENT_XCPT, fdp->context);
1162 } else if (FD_ISSET(fd, &wmask)) {
1163 if (msg_verbose > 2)
1164 msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
1165 fd, (long) fdp->callback, (long) fdp->context);
1166 fdp->callback(EVENT_WRITE, fdp->context);
1167 } else if (FD_ISSET(fd, &rmask)) {
1168 if (msg_verbose > 2)
1169 msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
1170 fd, (long) fdp->callback, (long) fdp->context);
1171 fdp->callback(EVENT_READ, fdp->context);
1172 }
1173 }
1174 }
1175 event_max_fd = new_max_fd;
1176 }
1177 #else
1178 for (bp = event_buf; bp < event_buf + event_count; bp++) {
1179 fd = EVENT_GET_FD(bp);
1180 if (fd < 0 || fd > event_max_fd)
1181 msg_panic("%s: bad file descriptor: %d", myname, fd);
1182 if (EVENT_MASK_ISSET(fd, &event_xmask)) {
1183 fdp = event_fdtable + fd;
1184 if (EVENT_TEST_READ(bp)) {
1185 if (msg_verbose > 2)
1186 msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
1187 fd, (long) fdp->callback, (long) fdp->context);
1188 fdp->callback(EVENT_READ, fdp->context);
1189 } else if (EVENT_TEST_WRITE(bp)) {
1190 if (msg_verbose > 2)
1191 msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
1192 fd, (long) fdp->callback,
1193 (long) fdp->context);
1194 fdp->callback(EVENT_WRITE, fdp->context);
1195 } else {
1196 if (msg_verbose > 2)
1197 msg_info("%s: other fd=%d act=0x%lx 0x%lx", myname,
1198 fd, (long) fdp->callback, (long) fdp->context);
1199 fdp->callback(EVENT_XCPT, fdp->context);
1200 }
1201 }
1202 }
1203 #endif
1204 nested--;
1205 }
1206
1207 #ifdef TEST
1208
1209 /*
1210 * Proof-of-concept test program for the event manager. Schedule a series of
1211 * events at one-second intervals and let them happen, while echoing any
1212 * lines read from stdin.
1213 */
1214 #include <stdio.h>
1215 #include <ctype.h>
1216 #include <stdlib.h>
1217
1218 /* timer_event - display event */
1219
1220 static void timer_event(int unused_event, void *context)
1221 {
1222 printf("%ld: %s\n", (long) event_present, context);
1223 fflush(stdout);
1224 }
1225
1226 /* echo - echo text received on stdin */
1227
1228 static void echo(int unused_event, void *unused_context)
1229 {
1230 char buf[BUFSIZ];
1231
1232 if (fgets(buf, sizeof(buf), stdin) == 0)
1233 exit(0);
1234 printf("Result: %s", buf);
1235 }
1236
1237 /* request - request a bunch of timer events */
1238
1239 static void request(int unused_event, void *unused_context)
1240 {
1241 event_request_timer(timer_event, "3 first", 3);
1242 event_request_timer(timer_event, "3 second", 3);
1243 event_request_timer(timer_event, "4 first", 4);
1244 event_request_timer(timer_event, "4 second", 4);
1245 event_request_timer(timer_event, "2 first", 2);
1246 event_request_timer(timer_event, "2 second", 2);
1247 event_request_timer(timer_event, "1 first", 1);
1248 event_request_timer(timer_event, "1 second", 1);
1249 event_request_timer(timer_event, "0 first", 0);
1250 event_request_timer(timer_event, "0 second", 0);
1251 }
1252
1253 int main(int argc, void **argv)
1254 {
1255 if (argv[1])
1256 msg_verbose = atoi(argv[1]);
1257 event_request_timer(request, (void *) 0, 0);
1258 event_enable_read(fileno(stdin), echo, (void *) 0);
1259 event_drain(10);
1260 exit(0);
1261 }
1262
1263 #endif
1264