xref: /netbsd-src/tests/lib/libc/sys/t_eventfd.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /* $NetBSD: t_eventfd.c,v 1.2 2021/09/19 15:51:28 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __COPYRIGHT("@(#) Copyright (c) 2020\
31  The NetBSD Foundation, inc. All rights reserved.");
32 __RCSID("$NetBSD: t_eventfd.c,v 1.2 2021/09/19 15:51:28 thorpej Exp $");
33 
#include <sys/types.h>
#include <sys/event.h>
#include <sys/eventfd.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
47 
48 #include <atf-c.h>
49 
/*
 * Shared state used to synchronize a test's main thread with its
 * helper thread.
 */
struct helper_context {
	int	efd;			/* eventfd under test */

	pthread_mutex_t mutex;		/* protects 'state' */
	pthread_cond_t cond;		/* signalled on 'state' changes */
	pthread_barrier_t barrier;	/* 2-party rendezvous point */
	int	state;			/* hand-shake value; see set_state() */
};
58 
/*
 * init_helper_context --
 *	Initialize the mutex / condvar / barrier used to hand-shake
 *	between the test's main thread and its helper thread.
 */
static void
init_helper_context(struct helper_context * const ctx)
{
	pthread_condattr_t condattr;

	memset(ctx, 0, sizeof(*ctx));

	ATF_REQUIRE(pthread_mutex_init(&ctx->mutex, NULL) == 0);

	/*
	 * Time condvar waits against CLOCK_MONOTONIC so that the
	 * relative timeout in wait_state() is immune to wall-clock
	 * adjustments.
	 */
	ATF_REQUIRE(pthread_condattr_init(&condattr) == 0);
	ATF_REQUIRE(pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) == 0);
	ATF_REQUIRE(pthread_cond_init(&ctx->cond, &condattr) == 0);
	ATF_REQUIRE(pthread_condattr_destroy(&condattr) == 0);

	/* Two parties: the main thread and one helper thread. */
	ATF_REQUIRE(pthread_barrier_init(&ctx->barrier, NULL, 2) == 0);
}
75 
76 static void
77 set_state(struct helper_context * const ctx, int const new)
78 {
79 	pthread_mutex_lock(&ctx->mutex);
80 	ctx->state = new;
81 	pthread_cond_signal(&ctx->cond);
82 	pthread_mutex_unlock(&ctx->mutex);
83 }
84 
85 static int
86 get_state(struct helper_context * const ctx)
87 {
88 	int rv;
89 
90 	pthread_mutex_lock(&ctx->mutex);
91 	rv = ctx->state;
92 	pthread_mutex_unlock(&ctx->mutex);
93 
94 	return rv;
95 }
96 
/*
 * wait_state --
 *	Wait up to 5 seconds for the shared state to become 'val'.
 *	Returns true if the state reached 'val' within the deadline,
 *	false on timeout (or condvar error).
 */
static bool
wait_state(struct helper_context * const ctx, int const val)
{
	struct timespec deadline;
	int error;
	bool rv;

	pthread_mutex_lock(&ctx->mutex);

	/* Absolute deadline on CLOCK_MONOTONIC, matching the condattr. */
	ATF_REQUIRE(clock_gettime(CLOCK_MONOTONIC, &deadline) == 0);
	deadline.tv_sec += 5;

	while (ctx->state != val) {
		error = pthread_cond_timedwait(&ctx->cond, &ctx->mutex,
		    &deadline);
		if (error) {
			/* ETIMEDOUT (or other failure); stop waiting. */
			break;
		}
	}
	/* Re-check under the mutex; handles both success and timeout. */
	rv = ctx->state == val;

	pthread_mutex_unlock(&ctx->mutex);

	return rv;
}
122 
123 static bool
124 wait_barrier(struct helper_context * const ctx)
125 {
126 	int rv = pthread_barrier_wait(&ctx->barrier);
127 
128 	return rv == 0 || rv == PTHREAD_BARRIER_SERIAL_THREAD;
129 }
130 
131 /*****************************************************************************/
132 
/*
 * eventfd_normal_helper --
 *	Helper thread for the eventfd_normal test.  Blocks in
 *	eventfd_read() while the main thread verifies the blocking
 *	behavior, then validates the values written by the main thread.
 */
static void *
eventfd_normal_helper(void * const v)
{
	struct helper_context * const ctx = v;
	eventfd_t efd_value;

	ATF_REQUIRE(wait_barrier(ctx));

	/* Read the value.  This will reset it to zero. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(eventfd_read(ctx->efd, &efd_value) == 0);

	/* Assert the value. */
	ATF_REQUIRE(efd_value == 0xcafebabe);

	/* Tell the main thread we are done reading. */
	set_state(ctx, 0);

	/* Wait for the main thread to prep the next test. */
	ATF_REQUIRE(wait_barrier(ctx));

	/* Read the value. */
	ATF_REQUIRE(eventfd_read(ctx->efd, &efd_value) == 0);

	/* Assert the value (sum of the two writes made by main). */
	ATF_REQUIRE(efd_value == 0xbeefcafe);

	ATF_REQUIRE(wait_barrier(ctx));

	return NULL;
}
163 
ATF_TC(eventfd_normal);
ATF_TC_HEAD(eventfd_normal, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates basic normal eventfd operation");
}
ATF_TC_BODY(eventfd_normal, tc)
{
	struct helper_context ctx;
	pthread_t helper;
	void *join_val;

	init_helper_context(&ctx);

	/* Initial count 0, default (blocking) mode. */
	ATF_REQUIRE((ctx.efd = eventfd(0, 0)) >= 0);

	ATF_REQUIRE(pthread_create(&helper, NULL,
				   eventfd_normal_helper, &ctx) == 0);

	/*
	 * Wait for the helper to block in read().  Give it some time
	 * so that if the read fails or returns immediately, we'll
	 * notice (state would have been reset to 0).
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/* Write a distinct value; helper will assert it. */
	ATF_REQUIRE(eventfd_write(ctx.efd, 0xcafebabe) == 0);

	/* Wait for helper to read the value. */
	ATF_REQUIRE(wait_state(&ctx, 0));

	/* Helper is now blocked in a barrier. */

	/* Test additive property of the efd value. */
	ATF_REQUIRE(eventfd_write(ctx.efd, 0x0000cafe) == 0);
	ATF_REQUIRE(eventfd_write(ctx.efd, 0xbeef0000) == 0);

	/* Satisfy the barrier; helper will read value and assert 0xbeefcafe. */
	ATF_REQUIRE(wait_barrier(&ctx));

	/* And wait for it to finish. */
	ATF_REQUIRE(wait_barrier(&ctx));

	/* Reap the helper. */
	ATF_REQUIRE(pthread_join(helper, &join_val) == 0);

	(void) close(ctx.efd);
}
216 
217 /*****************************************************************************/
218 
ATF_TC(eventfd_semaphore);
ATF_TC_HEAD(eventfd_semaphore, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates semaphore and non-blocking eventfd operation");
}
ATF_TC_BODY(eventfd_semaphore, tc)
{
	eventfd_t efd_value;
	int efd;

	/* Initial count 3; semaphore mode decrements by 1 per read. */
	ATF_REQUIRE((efd = eventfd(3, EFD_SEMAPHORE | EFD_NONBLOCK)) >= 0);

	/* 3 reads should succeed without blocking, each returning 1. */
	ATF_REQUIRE(eventfd_read(efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == 1);

	ATF_REQUIRE(eventfd_read(efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == 1);

	ATF_REQUIRE(eventfd_read(efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == 1);

	/*
	 * Counter is now 0; this read would block, but EFD_NONBLOCK
	 * turns it into an EAGAIN failure.
	 */
	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_read(efd, &efd_value) == -1);

	/* Add 1 to the semaphore. */
	ATF_REQUIRE(eventfd_write(efd, 1) == 0);

	/* One more read allowed. */
	ATF_REQUIRE(eventfd_read(efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == 1);

	/* And again: would block, so EAGAIN under EFD_NONBLOCK. */
	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_read(efd, &efd_value) == -1);

	(void) close(efd);
}
259 
260 /*****************************************************************************/
261 
ATF_TC(eventfd_select_poll_kevent_immed);
ATF_TC_HEAD(eventfd_select_poll_kevent_immed, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates select/poll/kevent behavior - immediate return");
}
ATF_TC_BODY(eventfd_select_poll_kevent_immed, tc)
{
	const struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
	struct timeval tv;
	struct pollfd fds[1];
	fd_set readfds, writefds, exceptfds;
	int efd;
	int kq;
	struct kevent kev[2];

	/* Count 0: not readable; far from max: writable. */
	ATF_REQUIRE((efd = eventfd(0, EFD_NONBLOCK)) >= 0);

	/* Register both read and write filters up front. */
	ATF_REQUIRE((kq = kqueue()) >= 0);
	EV_SET(&kev[0], efd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	EV_SET(&kev[1], efd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, kev, 2, NULL, 0, &ts) == 0);

	/*
	 * efd should be writable but not readable.  Pass all of the
	 * event bits; we should only get back POLLOUT | POLLWRNORM.
	 */
	fds[0].fd = efd;
	fds[0].events = POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI |
	    POLLOUT | POLLWRNORM | POLLWRBAND | POLLHUP;
	fds[0].revents = 0;
	ATF_REQUIRE(poll(fds, 1, 0) == 1);
	ATF_REQUIRE(fds[0].revents == (POLLOUT | POLLWRNORM));

	/*
	 * As above; efd should only be set in writefds upon return
	 * from the select() call.
	 */
	FD_ZERO(&readfds);
	FD_ZERO(&writefds);
	FD_ZERO(&exceptfds);
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	FD_SET(efd, &readfds);
	FD_SET(efd, &writefds);
	FD_SET(efd, &exceptfds);
	ATF_REQUIRE(select(efd + 1, &readfds, &writefds, &exceptfds, &tv) == 1);
	ATF_REQUIRE(!FD_ISSET(efd, &readfds));
	ATF_REQUIRE(FD_ISSET(efd, &writefds));
	ATF_REQUIRE(!FD_ISSET(efd, &exceptfds));

	/*
	 * Check that we get an EVFILT_WRITE event (and only that event)
	 * on efd.
	 */
	memset(kev, 0, sizeof(kev));
	ATF_REQUIRE(kevent(kq, NULL, 0, kev, 2, &ts) == 1);
	ATF_REQUIRE(kev[0].ident == (uintptr_t)efd);
	ATF_REQUIRE(kev[0].filter == EVFILT_WRITE);
	ATF_REQUIRE((kev[0].flags & (EV_EOF | EV_ERROR)) == 0);
	ATF_REQUIRE(kev[0].data == 0);

	/*
	 * Write the maximum value (UINT64_MAX - 1 is the largest count
	 * an eventfd may hold) into the eventfd.  This should result
	 * in the eventfd becoming readable but NOT writable.
	 */
	ATF_REQUIRE(eventfd_write(efd, UINT64_MAX - 1) == 0);

	fds[0].fd = efd;
	fds[0].events = POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI |
	    POLLOUT | POLLWRNORM | POLLWRBAND | POLLHUP;
	fds[0].revents = 0;
	ATF_REQUIRE(poll(fds, 1, 0) == 1);
	ATF_REQUIRE(fds[0].revents == (POLLIN | POLLRDNORM));

	FD_ZERO(&readfds);
	FD_ZERO(&writefds);
	FD_ZERO(&exceptfds);
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	FD_SET(efd, &readfds);
	FD_SET(efd, &writefds);
	FD_SET(efd, &exceptfds);
	ATF_REQUIRE(select(efd + 1, &readfds, &writefds, &exceptfds, &tv) == 1);
	ATF_REQUIRE(FD_ISSET(efd, &readfds));
	ATF_REQUIRE(!FD_ISSET(efd, &writefds));
	ATF_REQUIRE(!FD_ISSET(efd, &exceptfds));

	/*
	 * Check that we get an EVFILT_READ event (and only that event)
	 * on efd.  kevent reports the current counter value in 'data'.
	 */
	memset(kev, 0, sizeof(kev));
	ATF_REQUIRE(kevent(kq, NULL, 0, kev, 2, &ts) == 1);
	ATF_REQUIRE(kev[0].ident == (uintptr_t)efd);
	ATF_REQUIRE(kev[0].filter == EVFILT_READ);
	ATF_REQUIRE((kev[0].flags & (EV_EOF | EV_ERROR)) == 0);
	ATF_REQUIRE(kev[0].data == (int64_t)(UINT64_MAX - 1));

	(void) close(kq);
	(void) close(efd);
}
364 
365 /*****************************************************************************/
366 
/*
 * eventfd_select_poll_kevent_block_helper --
 *	Helper thread for the eventfd_select_poll_kevent_block test.
 *	For each of poll(), select(), and kevent() it first proves (via
 *	the EFD_NONBLOCK EAGAIN check) that the wait condition does not
 *	yet hold, then blocks in the API until the main thread makes
 *	the eventfd readable/writable.  Progress is reported back to
 *	the main thread through the barrier and state hand-shake.
 */
static void *
eventfd_select_poll_kevent_block_helper(void * const v)
{
	struct helper_context * const ctx = v;
	struct pollfd fds[1];
	fd_set selfds;
	eventfd_t efd_value;
	int kq;
	struct kevent kev[1];

	fds[0].fd = ctx->efd;
	fds[0].events = POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI;
	fds[0].revents = 0;

	/* Counter is 0, so the eventfd is not yet readable. */
	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_read(ctx->efd, &efd_value) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in poll() until the main thread writes a value. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(poll(fds, 1, INFTIM) == 1);
	ATF_REQUIRE(fds[0].revents == (POLLIN | POLLRDNORM));
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	/*
	 * The maximum value was written to the eventfd, so we
	 * should block waiting for writability.
	 */
	fds[0].fd = ctx->efd;
	fds[0].events = POLLOUT | POLLWRNORM;
	fds[0].revents = 0;

	/* Counter is at max, so another write must fail with EAGAIN. */
	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_write(ctx->efd, UINT64_MAX - 1) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in poll() until the main thread drains the counter. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(poll(fds, 1, INFTIM) == 1);
	ATF_REQUIRE(fds[0].revents == (POLLOUT | POLLWRNORM));
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	/*
	 * Now, the same dance again, with select().
	 */

	FD_ZERO(&selfds);
	FD_SET(ctx->efd, &selfds);

	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_read(ctx->efd, &efd_value) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in select() waiting for readability. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(select(ctx->efd + 1, &selfds, NULL, NULL, NULL) == 1);
	ATF_REQUIRE(FD_ISSET(ctx->efd, &selfds));
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	FD_ZERO(&selfds);
	FD_SET(ctx->efd, &selfds);

	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_write(ctx->efd, UINT64_MAX - 1) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in select() waiting for writability. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(select(ctx->efd + 1, NULL, &selfds, NULL, NULL) == 1);
	ATF_REQUIRE(FD_ISSET(ctx->efd, &selfds));
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	/*
	 * Now, the same dance again, with kevent().
	 */
	ATF_REQUIRE((kq = kqueue()) >= 0);

	EV_SET(&kev[0], ctx->efd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, kev, 1, NULL, 0, NULL) == 0);

	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_read(ctx->efd, &efd_value) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in kevent() waiting for readability. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(kevent(kq, NULL, 0, kev, 1, NULL) == 1);
	ATF_REQUIRE(kev[0].ident == (uintptr_t)ctx->efd);
	ATF_REQUIRE(kev[0].filter == EVFILT_READ);
	ATF_REQUIRE((kev[0].flags & (EV_EOF | EV_ERROR)) == 0);
	ATF_REQUIRE(kev[0].data == (int64_t)(UINT64_MAX - 1));
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	EV_SET(&kev[0], ctx->efd, EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0,
	       NULL);
	ATF_REQUIRE(kevent(kq, kev, 1, NULL, 0, NULL) == 0);

	ATF_REQUIRE_ERRNO(EAGAIN,
	    eventfd_write(ctx->efd, UINT64_MAX - 1) == -1);

	ATF_REQUIRE(wait_barrier(ctx));

	/* Block in kevent() waiting for writability. */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE(kevent(kq, NULL, 0, kev, 1, NULL) == 1);
	ATF_REQUIRE(kev[0].ident == (uintptr_t)ctx->efd);
	ATF_REQUIRE(kev[0].filter == EVFILT_WRITE);
	ATF_REQUIRE((kev[0].flags & (EV_EOF | EV_ERROR)) == 0);
	ATF_REQUIRE(kev[0].data == 0);
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	(void) close(kq);

	return NULL;
}
493 
ATF_TC(eventfd_select_poll_kevent_block);
ATF_TC_HEAD(eventfd_select_poll_kevent_block, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates select/poll/kevent behavior - return after blocking");
}
/*
 * Drives eventfd_select_poll_kevent_block_helper through six rounds:
 * poll/select/kevent, each once for readability (satisfied by writing
 * the max count) and once for writability (satisfied by draining the
 * counter with a read).
 */
ATF_TC_BODY(eventfd_select_poll_kevent_block, tc)
{
	struct helper_context ctx;
	pthread_t helper;
	eventfd_t efd_value;
	void *join_val;

	init_helper_context(&ctx);

	ATF_REQUIRE((ctx.efd = eventfd(0, EFD_NONBLOCK)) >= 0);

	ATF_REQUIRE(pthread_create(&helper, NULL,
				   eventfd_select_poll_kevent_block_helper,
				   &ctx) == 0);

	/*
	 * Wait for the helper to block in poll().  Give it some time
	 * so that if the poll returns immediately, we'll notice.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Write the max value to the eventfd so that it becomes readable
	 * and unblocks the helper waiting in poll().
	 */
	ATF_REQUIRE(eventfd_write(ctx.efd, UINT64_MAX - 1) == 0);

	/*
	 * Ensure the helper woke from the poll() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/*
	 * Wait for the helper to block in poll(), this time waiting
	 * for writability.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Now read the value, which will reset the eventfd to 0 and
	 * unblock the poll() call.
	 */
	ATF_REQUIRE(eventfd_read(ctx.efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == UINT64_MAX - 1);

	/*
	 * Ensure that the helper woke from the poll() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/*
	 * Wait for the helper to block in select(), waiting for readability.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Write the max value to the eventfd so that it becomes readable
	 * and unblocks the helper waiting in select().
	 */
	ATF_REQUIRE(eventfd_write(ctx.efd, UINT64_MAX - 1) == 0);

	/*
	 * Ensure the helper woke from the select() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/*
	 * Wait for the helper to block in select(), this time waiting
	 * for writability.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Now read the value, which will reset the eventfd to 0 and
	 * unblock the select() call.
	 */
	ATF_REQUIRE(eventfd_read(ctx.efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == UINT64_MAX - 1);

	/*
	 * Ensure that the helper woke from the select() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/*
	 * Wait for the helper to block in kevent(), waiting for readability.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Write the max value to the eventfd so that it becomes readable
	 * and unblocks the helper waiting in kevent().
	 */
	ATF_REQUIRE(eventfd_write(ctx.efd, UINT64_MAX - 1) == 0);

	/*
	 * Ensure the helper woke from the kevent() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/*
	 * Wait for the helper to block in kevent(), this time waiting
	 * for writability.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Now read the value, which will reset the eventfd to 0 and
	 * unblock the kevent() call.
	 */
	ATF_REQUIRE(eventfd_read(ctx.efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == UINT64_MAX - 1);

	/*
	 * Ensure that the helper woke from the kevent() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/* Reap the helper. */
	ATF_REQUIRE(pthread_join(helper, &join_val) == 0);

	(void) close(ctx.efd);
}
649 
650 /*****************************************************************************/
651 
/*
 * eventfd_restart_helper --
 *	Helper thread for the eventfd_restart test.  Blocks in
 *	eventfd_read() and expects to be woken with EBADF when the main
 *	thread closes the descriptor out from under it.
 */
static void *
eventfd_restart_helper(void * const v)
{
	struct helper_context * const ctx = v;
	eventfd_t efd_value;

	/*
	 * Issue a single read to ensure that the descriptor is valid.
	 * This will not block because it was created with an initial
	 * count of 1.
	 */
	ATF_REQUIRE(eventfd_read(ctx->efd, &efd_value) == 0);
	ATF_REQUIRE(efd_value == 1);

	ATF_REQUIRE(wait_barrier(ctx));

	/*
	 * Block in read.  The main thread will close the descriptor,
	 * which should unblock us and result in EBADF.
	 */
	ATF_REQUIRE(get_state(ctx) == 666);
	ATF_REQUIRE_ERRNO(EBADF, eventfd_read(ctx->efd, &efd_value) == -1);
	set_state(ctx, 0);

	ATF_REQUIRE(wait_barrier(ctx));

	return NULL;
}
680 
ATF_TC(eventfd_restart);
ATF_TC_HEAD(eventfd_restart, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "exercises the 'restart' fileop code path");
}
ATF_TC_BODY(eventfd_restart, tc)
{
	struct helper_context ctx;
	pthread_t helper;
	void *join_val;

	init_helper_context(&ctx);

	/* Initial count 1 so the helper's first read succeeds. */
	ATF_REQUIRE((ctx.efd = eventfd(1, 0)) >= 0);

	ATF_REQUIRE(pthread_create(&helper, NULL,
				   eventfd_restart_helper, &ctx) == 0);

	/*
	 * Wait for the helper to block in read().  Give it some time
	 * so that if the read returns immediately, we'll notice.
	 */
	set_state(&ctx, 666);
	ATF_REQUIRE(wait_barrier(&ctx));
	sleep(2);
	ATF_REQUIRE(get_state(&ctx) == 666);

	/*
	 * Close the descriptor.  This should unblock the reader,
	 * and cause it to receive EBADF.
	 */
	ATF_REQUIRE(close(ctx.efd) == 0);

	/*
	 * Ensure that the helper woke from the read() call.
	 */
	ATF_REQUIRE(wait_barrier(&ctx));
	ATF_REQUIRE(get_state(&ctx) == 0);

	/* Reap the helper. */
	ATF_REQUIRE(pthread_join(helper, &join_val) == 0);
}
724 
725 /*****************************************************************************/
726 
ATF_TC(eventfd_badflags);
ATF_TC_HEAD(eventfd_badflags, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates behavior when eventfd() called with bad flags");
}
ATF_TC_BODY(eventfd_badflags, tc)
{
	/*
	 * The complement of all valid flag bits must be rejected
	 * with EINVAL.
	 */
	ATF_REQUIRE_ERRNO(EINVAL,
	    eventfd(0, ~(EFD_SEMAPHORE | EFD_CLOEXEC | EFD_NONBLOCK)) == -1);
}
738 
739 /*****************************************************************************/
740 
ATF_TC(eventfd_bufsize);
ATF_TC_HEAD(eventfd_bufsize, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "validates expected buffer size behavior");
}
ATF_TC_BODY(eventfd_bufsize, tc)
{
	eventfd_t efd_value[2];
	int efd;

	ATF_REQUIRE((efd = eventfd(1, EFD_NONBLOCK)) >= 0);

	/* A read buffer smaller than 8 bytes is rejected with EINVAL. */
	ATF_REQUIRE_ERRNO(EINVAL,
	    read(efd, efd_value, sizeof(efd_value[0]) - 1) == -1);

	/*
	 * A larger buffer is accepted, but exactly sizeof(eventfd_t)
	 * bytes are transferred; the second element must be untouched.
	 */
	efd_value[0] = 0xdeadbeef;
	efd_value[1] = 0xdeadbeef;
	ATF_REQUIRE(read(efd, efd_value, sizeof(efd_value)) ==
	    sizeof(efd_value[0]));
	ATF_REQUIRE(efd_value[0] == 1);
	ATF_REQUIRE(efd_value[1] == 0xdeadbeef);

	/* Same size rules apply to writes. */
	ATF_REQUIRE_ERRNO(EINVAL,
	    write(efd, efd_value, sizeof(efd_value[0]) - 1) == -1);
	ATF_REQUIRE(write(efd, efd_value, sizeof(efd_value)) ==
	    sizeof(efd_value[0]));

	/* Only efd_value[0] (== 1) was written; read it back. */
	ATF_REQUIRE(read(efd, efd_value, sizeof(efd_value)) ==
	    sizeof(efd_value[0]));
	ATF_REQUIRE(efd_value[0] == 1);
	ATF_REQUIRE(efd_value[1] == 0xdeadbeef);

	(void) close(efd);
}
776 
777 /*****************************************************************************/
778 
/* Register all eventfd test cases with the ATF test program. */
ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp, eventfd_normal);
	ATF_TP_ADD_TC(tp, eventfd_semaphore);
	ATF_TP_ADD_TC(tp, eventfd_badflags);
	ATF_TP_ADD_TC(tp, eventfd_bufsize);
	ATF_TP_ADD_TC(tp, eventfd_select_poll_kevent_immed);
	ATF_TP_ADD_TC(tp, eventfd_select_poll_kevent_block);
	ATF_TP_ADD_TC(tp, eventfd_restart);

	return atf_no_error();
}
791