xref: /dpdk/lib/eal/freebsd/eal_interrupts.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/event.h>
8 #include <sys/queue.h>
9 #include <unistd.h>
10 
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
16 #include <rte_eal_trace.h>
17 
18 #include "eal_private.h"
19 #include "eal_alarm_private.h"
20 
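/* maximum number of kernel events fetched per kevent() wakeup in the interrupt thread */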
21 #define MAX_INTR_EVENTS 16
22 
23 /**
24  * union buffer for reading on different devices
25  */
26 union rte_intr_read_buffer {
27 	char charbuf[16];                /* for others */
28 };
29 
30 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
31 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
32 
33 struct rte_intr_callback {
34 	TAILQ_ENTRY(rte_intr_callback) next;
35 	rte_intr_callback_fn cb_fn;  /**< callback address */
36 	void *cb_arg;                /**< parameter for callback */
37 	uint8_t pending_delete;      /**< delete after callback is called */
38 	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
39 };
40 
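/**
 * One entry per interrupt file descriptor: the handle, the list of user
 * callbacks registered for it, and an 'active' flag that is set while the
 * interrupt thread is servicing the source (callback unregistration then
 * returns -EAGAIN).
 */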
41 struct rte_intr_source {
42 	TAILQ_ENTRY(rte_intr_source) next;
43 	struct rte_intr_handle intr_handle; /**< interrupt handle */
44 	struct rte_intr_cb_list callbacks;  /**< user callbacks */
45 	uint32_t active;
46 };
47 
48 /* global spinlock for interrupt data operation */
49 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
50 
51 /* interrupt sources list */
52 static struct rte_intr_source_list intr_sources;
53 
54 /* interrupt handling thread */
55 static pthread_t intr_thread;
56 
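/* kqueue instance used by the interrupt thread; -1 until rte_eal_intr_init() creates it */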
57 static volatile int kq = -1;
58 
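/*
 * Fill in a kevent for the given interrupt handle. Alarm handles become
 * one-shot EVFILT_TIMER events armed with the soonest pending alarm timeout;
 * all other handles use an EVFILT_READ filter on their fd. The caller sets
 * ke->flags (EV_ADD/EV_DELETE) beforehand. Returns 0 on success, or -1 if no
 * alarm timeout could be obtained.
 */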
59 static int
60 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
61 {
62 	/* alarm callbacks are a special case */
63 	if (ih->type == RTE_INTR_HANDLE_ALARM) {
64 		uint64_t timeout_ns;
65 
66 		/* get soonest alarm timeout */
67 		if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
68 			return -1;
69 
70 		ke->filter = EVFILT_TIMER;
71 		/* timers are one shot */
72 		ke->flags |= EV_ONESHOT;
73 		ke->fflags = NOTE_NSECONDS;
74 		ke->data = timeout_ns;
75 	} else {
76 		ke->filter = EVFILT_READ;
77 	}
78 	ke->ident = ih->fd;
79 
80 	return 0;
81 }
82 
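/*
 * Register a callback for an interrupt handle; the first callback registered
 * for an fd (and every alarm registration, which must re-arm the timer) also
 * adds a kevent for it to the global kqueue.
 *
 * Minimal usage sketch (hypothetical driver code, not part of this file;
 * my_dev and its embedded intr_handle are assumed names):
 *
 *	static void my_intr_handler(void *cb_arg)
 *	{
 *		struct my_dev *dev = cb_arg;
 *		... service the device ...
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle, my_intr_handler, dev);
 */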
83 int
84 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
85 		rte_intr_callback_fn cb, void *cb_arg)
86 {
87 	struct rte_intr_callback *callback;
88 	struct rte_intr_source *src;
89 	int ret = 0, add_event = 0;
90 
91 	/* first do parameter checking */
92 	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
93 		RTE_LOG(ERR, EAL,
94 			"Registering with invalid input parameter\n");
95 		return -EINVAL;
96 	}
97 	if (kq < 0) {
98 		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
99 		return -ENODEV;
100 	}
101 
102 	rte_spinlock_lock(&intr_lock);
103 
104 	/* find the source for this intr_handle */
105 	TAILQ_FOREACH(src, &intr_sources, next) {
106 		if (src->intr_handle.fd == intr_handle->fd)
107 			break;
108 	}
109 
110 	/* if this is an alarm interrupt and it already has a callback,
111 	 * then we don't want to create a new callback because the only
112 	 * thing on the list should be eal_alarm_callback() and we may
113 	 * be called just to reset the timer.
114 	 */
115 	if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
116 		 !TAILQ_EMPTY(&src->callbacks)) {
117 		callback = NULL;
118 	} else {
119 		/* allocate a new interrupt callback entity */
120 		callback = calloc(1, sizeof(*callback));
121 		if (callback == NULL) {
122 			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
123 			ret = -ENOMEM;
124 			goto fail;
125 		}
126 		callback->cb_fn = cb;
127 		callback->cb_arg = cb_arg;
128 		callback->pending_delete = 0;
129 		callback->ucb_fn = NULL;
130 
131 		if (src == NULL) {
132 			src = calloc(1, sizeof(*src));
133 			if (src == NULL) {
134 				RTE_LOG(ERR, EAL, "Can not allocate memory\n");
135 				ret = -ENOMEM;
136 				goto fail;
137 			} else {
138 				src->intr_handle = *intr_handle;
139 				TAILQ_INIT(&src->callbacks);
140 				TAILQ_INSERT_TAIL(&intr_sources, src, next);
141 			}
142 		}
143 
144 		/* this source had no callbacks yet, so add a kevent for its fd below */
145 		if (TAILQ_EMPTY(&src->callbacks))
146 			add_event = 1;
147 
148 		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
149 	}
150 
151 	/* add events to the queue. timer events are special as we need to
152 	 * re-set the timer.
153 	 */
154 	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
155 		struct kevent ke;
156 
157 		memset(&ke, 0, sizeof(ke));
158 		ke.flags = EV_ADD; /* mark for addition to the queue */
159 
160 		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
161 			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
162 			ret = -ENODEV;
163 			goto fail;
164 		}
165 
166 		/**
167 		 * add the intr file descriptor into wait list.
168 		 */
169 		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
170 			/* currently, nic_uio does not support interrupts, so
171 			 * this error will always be triggered. Don't report it
172 			 * to the user unless the debug log level is set.
173 			 */
174 			if (errno == ENODEV)
175 				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
176 					src->intr_handle.fd);
177 			else
178 				RTE_LOG(ERR, EAL, "Error adding fd %d "
179 						"kevent, %s\n",
180 						src->intr_handle.fd,
181 						strerror(errno));
182 			ret = -errno;
183 			goto fail;
184 		}
185 	}
186 	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
187 	rte_spinlock_unlock(&intr_lock);
188 
189 	return 0;
190 fail:
191 	/* clean up */
192 	if (src != NULL) {
193 		if (callback != NULL)
194 			TAILQ_REMOVE(&(src->callbacks), callback, next);
195 		if (TAILQ_EMPTY(&(src->callbacks))) {
196 			TAILQ_REMOVE(&intr_sources, src, next);
197 			free(src);
198 		}
199 	}
200 	free(callback);
201 	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
202 	rte_spinlock_unlock(&intr_lock);
203 	return ret;
204 }
205 
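/*
 * Mark all callbacks on the handle that match cb_fn/cb_arg for deferred
 * deletion; cb_arg == (void *)-1 matches any argument. Intended for use while
 * the source is active (i.e. from within a callback): the interrupt thread
 * later removes the callback and calls ucb_fn, if set, before freeing it.
 * Returns the number of callbacks marked, or a negative errno value.
 */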
206 int
207 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
208 				rte_intr_callback_fn cb_fn, void *cb_arg,
209 				rte_intr_unregister_callback_fn ucb_fn)
210 {
211 	int ret;
212 	struct rte_intr_source *src;
213 	struct rte_intr_callback *cb, *next;
214 
215 	/* do parameter checking first */
216 	if (intr_handle == NULL || intr_handle->fd < 0) {
217 		RTE_LOG(ERR, EAL,
218 		"Unregistering with invalid input parameter\n");
219 		return -EINVAL;
220 	}
221 
222 	if (kq < 0) {
223 		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
224 		return -ENODEV;
225 	}
226 
227 	rte_spinlock_lock(&intr_lock);
228 
229 	/* check if an interrupt source exists for this fd */
230 	TAILQ_FOREACH(src, &intr_sources, next)
231 		if (src->intr_handle.fd == intr_handle->fd)
232 			break;
233 
234 	/* No interrupt source registered for the fd */
235 	if (src == NULL) {
236 		ret = -ENOENT;
237 
238 	/* only usable if the source is active */
239 	} else if (src->active == 0) {
240 		ret = -EAGAIN;
241 
242 	} else {
243 		ret = 0;
244 
245 		/* walk through the callbacks and mark all that match. */
246 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
247 			next = TAILQ_NEXT(cb, next);
248 			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
249 					cb->cb_arg == cb_arg)) {
250 				cb->pending_delete = 1;
251 				cb->ucb_fn = ucb_fn;
252 				ret++;
253 			}
254 		}
255 	}
256 
257 	rte_spinlock_unlock(&intr_lock);
258 
259 	return ret;
260 }
261 
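/*
 * Immediately remove all callbacks on the handle that match cb_fn/cb_arg
 * (cb_arg == (void *)-1 matches any argument) and drop the fd from the kqueue.
 * Fails with -EAGAIN while the interrupt thread is servicing the source.
 * Returns the number of callbacks removed, or a negative errno value.
 */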
262 int
263 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
264 		rte_intr_callback_fn cb_fn, void *cb_arg)
265 {
266 	int ret;
267 	struct rte_intr_source *src;
268 	struct rte_intr_callback *cb, *next;
269 
270 	/* do parameter checking first */
271 	if (intr_handle == NULL || intr_handle->fd < 0) {
272 		RTE_LOG(ERR, EAL,
273 		"Unregistering with invalid input parameter\n");
274 		return -EINVAL;
275 	}
276 	if (kq < 0) {
277 		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
278 		return -ENODEV;
279 	}
280 
281 	rte_spinlock_lock(&intr_lock);
282 
283 	/* check if an interrupt source exists for this fd */
284 	TAILQ_FOREACH(src, &intr_sources, next)
285 		if (src->intr_handle.fd == intr_handle->fd)
286 			break;
287 
288 	/* No interrupt source registered for the fd */
289 	if (src == NULL) {
290 		ret = -ENOENT;
291 
292 	/* interrupt source is being serviced by the interrupt thread right now. */
293 	} else if (src->active != 0) {
294 		ret = -EAGAIN;
295 
296 	/* ok to remove. */
297 	} else {
298 		struct kevent ke;
299 
300 		ret = 0;
301 
302 		/* remove it from the kqueue */
303 		memset(&ke, 0, sizeof(ke));
304 		ke.flags = EV_DELETE; /* mark for deletion from the queue */
305 
306 		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
307 			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
308 			ret = -ENODEV;
309 			goto out;
310 		}
311 
312 		/**
313 		 * remove intr file descriptor from wait list.
314 		 */
315 		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
316 			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
317 				src->intr_handle.fd, strerror(errno));
318 			/* removing a non-existent event is an expected condition
319 			 * in some circumstances (e.g. oneshot events).
320 			 */
321 		}
322 
323 		/* walk through the callbacks and remove all that match. */
324 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
325 			next = TAILQ_NEXT(cb, next);
326 			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
327 					cb->cb_arg == cb_arg)) {
328 				TAILQ_REMOVE(&src->callbacks, cb, next);
329 				free(cb);
330 				ret++;
331 			}
332 		}
333 
334 		/* all callbacks for that source are removed. */
335 		if (TAILQ_EMPTY(&src->callbacks)) {
336 			TAILQ_REMOVE(&intr_sources, src, next);
337 			free(src);
338 		}
339 	}
340 out:
341 	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
342 		ret);
343 	rte_spinlock_unlock(&intr_lock);
344 
345 	return ret;
346 }
347 
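/*
 * Same as rte_intr_callback_unregister(), but retries with rte_pause() while
 * it returns -EAGAIN (source still being serviced), so the callbacks are
 * guaranteed to be gone when this returns successfully.
 */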
348 int
349 rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
350 		rte_intr_callback_fn cb_fn, void *cb_arg)
351 {
352 	int ret = 0;
353 
354 	while ((ret = rte_intr_callback_unregister(intr_handle, cb_fn, cb_arg)) == -EAGAIN)
355 		rte_pause();
356 
357 	return ret;
358 }
359 
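/*
 * The FreeBSD EAL does not currently implement interrupt enable/disable for
 * real devices (see the nic_uio note in rte_intr_callback_register() above):
 * only RTE_INTR_HANDLE_VDEV handles succeed here, everything else returns -1.
 * The same applies to rte_intr_disable() and rte_intr_ack() below.
 */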
360 int
361 rte_intr_enable(const struct rte_intr_handle *intr_handle)
362 {
363 	int rc = 0;
364 
365 	if (intr_handle == NULL)
366 		return -1;
367 
368 	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
369 		rc = 0;
370 		goto out;
371 	}
372 
373 	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
374 		rc = -1;
375 		goto out;
376 	}
377 
378 	switch (intr_handle->type) {
379 	/* not used at this moment */
380 	case RTE_INTR_HANDLE_ALARM:
381 		rc = -1;
382 		break;
383 	/* not used at this moment */
384 	case RTE_INTR_HANDLE_DEV_EVENT:
385 		rc = -1;
386 		break;
387 	/* unknown handle type */
388 	default:
389 		RTE_LOG(ERR, EAL,
390 			"Unknown handle type of fd %d\n",
391 					intr_handle->fd);
392 		rc = -1;
393 		break;
394 	}
395 
396 out:
397 	rte_eal_trace_intr_enable(intr_handle, rc);
398 	return rc;
399 }
400 
401 int
402 rte_intr_disable(const struct rte_intr_handle *intr_handle)
403 {
404 	int rc = 0;
405 
406 	if (intr_handle == NULL)
407 		return -1;
408 
409 	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
410 		rc = 0;
411 		goto out;
412 	}
413 
414 	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
415 		rc = -1;
416 		goto out;
417 	}
418 
419 	switch (intr_handle->type) {
420 	/* not used at this moment */
421 	case RTE_INTR_HANDLE_ALARM:
422 		rc = -1;
423 		break;
424 	/* not used at this moment */
425 	case RTE_INTR_HANDLE_DEV_EVENT:
426 		rc = -1;
427 		break;
428 	/* unknown handle type */
429 	default:
430 		RTE_LOG(ERR, EAL,
431 			"Unknown handle type of fd %d\n",
432 					intr_handle->fd);
433 		rc = -1;
434 		break;
435 	}
436 out:
437 	rte_eal_trace_intr_disable(intr_handle, rc);
438 	return rc;
439 }
440 
441 int
442 rte_intr_ack(const struct rte_intr_handle *intr_handle)
443 {
444 	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
445 		return 0;
446 
447 	return -1;
448 }
449 
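/*
 * Handle the kevents returned by one kevent() wakeup: for each event, look up
 * the source by fd, mark it active, drain the fd if the handle type requires
 * a read, invoke every registered callback with the lock released (a copy of
 * the callback is used, so the list may change underneath), then process any
 * pending deletions and free sources whose callback list became empty.
 */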
450 static void
451 eal_intr_process_interrupts(struct kevent *events, int nfds)
452 {
453 	struct rte_intr_callback active_cb;
454 	union rte_intr_read_buffer buf;
455 	struct rte_intr_callback *cb, *next;
456 	struct rte_intr_source *src;
457 	bool call = false;
458 	int n, bytes_read;
459 	struct kevent ke;
460 
461 	for (n = 0; n < nfds; n++) {
462 		int event_fd = events[n].ident;
463 
464 		rte_spinlock_lock(&intr_lock);
465 		TAILQ_FOREACH(src, &intr_sources, next)
466 			if (src->intr_handle.fd == event_fd)
467 				break;
468 		if (src == NULL) {
469 			rte_spinlock_unlock(&intr_lock);
470 			continue;
471 		}
472 
473 		/* mark this interrupt source as active and release the lock. */
474 		src->active = 1;
475 		rte_spinlock_unlock(&intr_lock);
476 
477 		/* set the length to be read for different handle types */
478 		switch (src->intr_handle.type) {
479 		case RTE_INTR_HANDLE_ALARM:
480 			bytes_read = 0;
481 			call = true;
482 			break;
483 		case RTE_INTR_HANDLE_VDEV:
484 		case RTE_INTR_HANDLE_EXT:
485 			bytes_read = 0;
486 			call = true;
487 			break;
488 		case RTE_INTR_HANDLE_DEV_EVENT:
489 			bytes_read = 0;
490 			call = true;
491 			break;
492 		default:
493 			bytes_read = 1;
494 			break;
495 		}
496 
497 		if (bytes_read > 0) {
498 			/**
499 			 * read out to clear the ready-to-be-read condition
500 			 * before the next kevent() call.
501 			 */
502 			bytes_read = read(event_fd, &buf, bytes_read);
503 			if (bytes_read < 0) {
504 				if (errno == EINTR || errno == EWOULDBLOCK)
505 					continue;
506 
507 				RTE_LOG(ERR, EAL, "Error reading from file "
508 					"descriptor %d: %s\n",
509 					event_fd,
510 					strerror(errno));
511 			} else if (bytes_read == 0)
512 				RTE_LOG(ERR, EAL, "Read nothing from file "
513 					"descriptor %d\n", event_fd);
514 			else
515 				call = true;
516 		}
517 
518 		/* grab the lock again to call callbacks and update status. */
519 		rte_spinlock_lock(&intr_lock);
520 
521 		if (call) {
522 			/* Finally, call all callbacks. */
523 			TAILQ_FOREACH(cb, &src->callbacks, next) {
524 
525 				/* make a copy and unlock. */
526 				active_cb = *cb;
527 				rte_spinlock_unlock(&intr_lock);
528 
529 				/* call the actual callback */
530 				active_cb.cb_fn(active_cb.cb_arg);
531 
532 				/* get the lock back. */
533 				rte_spinlock_lock(&intr_lock);
534 			}
535 		}
536 
537 		/* we are done with this interrupt source, release it. */
538 		src->active = 0;
539 
540 		/* check if any callbacks are supposed to be removed */
541 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
542 			next = TAILQ_NEXT(cb, next);
543 			if (cb->pending_delete) {
544 				/* remove it from the kqueue */
545 				memset(&ke, 0, sizeof(ke));
546 				/* mark for deletion from the queue */
547 				ke.flags = EV_DELETE;
548 
549 				if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
550 					RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
551 					rte_spinlock_unlock(&intr_lock);
552 					return;
553 				}
554 
555 				/**
556 				 * remove intr file descriptor from wait list.
557 				 */
558 				if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
559 					RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
560 						"%s\n", src->intr_handle.fd,
561 						strerror(errno));
562 					/* removing a non-existent event is an expected
563 					 * condition in some circumstances
564 					 * (e.g. oneshot events).
565 					 */
566 				}
567 
568 				TAILQ_REMOVE(&src->callbacks, cb, next);
569 				if (cb->ucb_fn)
570 					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
571 				free(cb);
572 			}
573 		}
574 
575 		/* all callbacks for that source are removed. */
576 		if (TAILQ_EMPTY(&src->callbacks)) {
577 			TAILQ_REMOVE(&intr_sources, src, next);
578 			free(src);
579 		}
580 
581 		rte_spinlock_unlock(&intr_lock);
582 	}
583 }
584 
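/*
 * Main loop of the interrupt control thread: block in kevent() on the global
 * kqueue and dispatch whatever becomes ready. The loop only terminates if
 * kevent() fails with an error other than EINTR, in which case the kqueue is
 * closed and kq is reset to -1.
 */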
585 static void *
586 eal_intr_thread_main(void *arg __rte_unused)
587 {
588 	struct kevent events[MAX_INTR_EVENTS];
589 	int nfds;
590 
591 	/* host thread, never break out */
592 	for (;;) {
593 		/* do not change anything, just wait */
594 		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
595 
596 		/* kevent failed */
597 		if (nfds < 0) {
598 			if (errno == EINTR)
599 				continue;
600 			RTE_LOG(ERR, EAL,
601 				"kevent wait failed\n");
602 			break;
603 		}
604 		/* kevent timeout, will never happen here */
605 		else if (nfds == 0)
606 			continue;
607 
608 		/* kevent has at least one fd ready to read */
609 		eal_intr_process_interrupts(events, nfds);
610 	}
611 	close(kq);
612 	kq = -1;
613 	return NULL;
614 }
615 
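/*
 * Initialize FreeBSD interrupt handling: create the global kqueue and spawn
 * the "eal-intr-thread" control thread that waits on it.
 */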
616 int
617 rte_eal_intr_init(void)
618 {
619 	int ret = 0;
620 
621 	/* init the global interrupt source head */
622 	TAILQ_INIT(&intr_sources);
623 
624 	kq = kqueue();
625 	if (kq < 0) {
626 		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
627 		return -1;
628 	}
629 
630 	/* create the host thread that waits for and handles interrupts */
631 	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
632 			eal_intr_thread_main, NULL);
633 	if (ret != 0) {
634 		rte_errno = -ret;
635 		RTE_LOG(ERR, EAL,
636 			"Failed to create thread for interrupt handling\n");
637 	}
638 
639 	return ret;
640 }
641 
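/*
 * The remaining epoll/eventfd based RX interrupt APIs are Linux-specific;
 * on FreeBSD they are stubbed out to return -ENOTSUP or harmless defaults.
 */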
642 int
643 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
644 		int epfd, int op, unsigned int vec, void *data)
645 {
646 	RTE_SET_USED(intr_handle);
647 	RTE_SET_USED(epfd);
648 	RTE_SET_USED(op);
649 	RTE_SET_USED(vec);
650 	RTE_SET_USED(data);
651 
652 	return -ENOTSUP;
653 }
654 
655 int
656 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
657 {
658 	RTE_SET_USED(intr_handle);
659 	RTE_SET_USED(nb_efd);
660 
661 	return 0;
662 }
663 
664 void
665 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
666 {
667 	RTE_SET_USED(intr_handle);
668 }
669 
670 int
671 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
672 {
673 	RTE_SET_USED(intr_handle);
674 	return 0;
675 }
676 
677 int
678 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
679 {
680 	RTE_SET_USED(intr_handle);
681 	return 1;
682 }
683 
684 int
685 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
686 {
687 	RTE_SET_USED(intr_handle);
688 	return 0;
689 }
690 
691 int
692 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
693 		int maxevents, int timeout)
694 {
695 	RTE_SET_USED(epfd);
696 	RTE_SET_USED(events);
697 	RTE_SET_USED(maxevents);
698 	RTE_SET_USED(timeout);
699 
700 	return -ENOTSUP;
701 }
702 
703 int
704 rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
705 			     int maxevents, int timeout)
706 {
707 	RTE_SET_USED(epfd);
708 	RTE_SET_USED(events);
709 	RTE_SET_USED(maxevents);
710 	RTE_SET_USED(timeout);
711 
712 	return -ENOTSUP;
713 }
714 
715 int
716 rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
717 {
718 	RTE_SET_USED(epfd);
719 	RTE_SET_USED(op);
720 	RTE_SET_USED(fd);
721 	RTE_SET_USED(event);
722 
723 	return -ENOTSUP;
724 }
725 
726 int
727 rte_intr_tls_epfd(void)
728 {
729 	return -ENOTSUP;
730 }
731 
732 void
733 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
734 {
735 	RTE_SET_USED(intr_handle);
736 }
737 
738 int rte_thread_is_intr(void)
739 {
740 	return pthread_equal(intr_thread, pthread_self());
741 }
742