1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
3 */
4
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/event.h>
8 #include <sys/queue.h>
9 #include <unistd.h>
10
11 #include <eal_trace_internal.h>
12 #include <rte_errno.h>
13 #include <rte_lcore.h>
14 #include <rte_spinlock.h>
15 #include <rte_common.h>
16 #include <rte_interrupts.h>
17
18 #include "eal_private.h"
19 #include "eal_alarm_private.h"
20
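/* maximum number of kevents fetched per kevent() call in the interrupt thread */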
21 #define MAX_INTR_EVENTS 16
22
23 /**
24 * union buffer for reading on different devices
25 */
26 union rte_intr_read_buffer {
27 char charbuf[16]; /* for others */
28 };
29
30 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
31 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
32
33 struct rte_intr_callback {
34 TAILQ_ENTRY(rte_intr_callback) next;
35 rte_intr_callback_fn cb_fn; /**< callback address */
36 void *cb_arg; /**< parameter for callback */
37 uint8_t pending_delete; /**< delete after callback is called */
38 rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
39 };
40
41 struct rte_intr_source {
42 TAILQ_ENTRY(rte_intr_source) next;
43 struct rte_intr_handle *intr_handle; /**< interrupt handle */
44 struct rte_intr_cb_list callbacks; /**< user callbacks */
45 uint32_t active;
46 };
47
48 /* global spinlock for interrupt data operation */
49 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
50
51 /* interrupt sources list */
52 static struct rte_intr_source_list intr_sources;
53
54 /* interrupt handling thread */
55 static rte_thread_t intr_thread;
56
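/* kqueue descriptor used by the interrupt thread; -1 until rte_eal_intr_init() creates it */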
57 static volatile int kq = -1;
58
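/*
 * Translate an interrupt handle into a kevent: alarm handles become a
 * one-shot EVFILT_TIMER armed with the soonest alarm timeout, all other
 * handles become an EVFILT_READ filter on their file descriptor.
 */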
59 static int
60 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
61 {
62 /* alarm callbacks are a special case */
63 if (rte_intr_type_get(ih) == RTE_INTR_HANDLE_ALARM) {
64 uint64_t timeout_ns;
65
66 /* get soonest alarm timeout */
67 if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
68 return -1;
69
70 ke->filter = EVFILT_TIMER;
71 /* timers are one shot */
72 ke->flags |= EV_ONESHOT;
73 ke->fflags = NOTE_NSECONDS;
74 ke->data = timeout_ns;
75 } else {
76 ke->filter = EVFILT_READ;
77 }
78 ke->ident = rte_intr_fd_get(ih);
79
80 return 0;
81 }
82
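/*
 * Register a callback for the given interrupt handle. A source is created
 * on the first registration for an fd, and the fd (or timer, for alarm
 * handles) is added to the kqueue when its first callback is attached or
 * whenever an alarm timer must be re-armed.
 *
 * Minimal usage sketch (my_intr_cb and my_dev are hypothetical):
 *
 *	static void my_intr_cb(void *arg);
 *	...
 *	ret = rte_intr_callback_register(intr_handle, my_intr_cb, my_dev);
 *	if (ret < 0)
 *		goto error;
 */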
83 int
84 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
85 rte_intr_callback_fn cb, void *cb_arg)
86 {
87 struct rte_intr_callback *callback;
88 struct rte_intr_source *src;
89 int ret = 0, add_event = 0;
90
91 /* first do parameter checking */
92 if (rte_intr_fd_get(intr_handle) < 0 || cb == NULL) {
93 EAL_LOG(ERR,
94 "Registering with invalid input parameter");
95 return -EINVAL;
96 }
97 if (kq < 0) {
98 EAL_LOG(ERR, "Kqueue is not active: %d", kq);
99 return -ENODEV;
100 }
101
102 rte_spinlock_lock(&intr_lock);
103
104 /* find the source for this intr_handle */
105 TAILQ_FOREACH(src, &intr_sources, next) {
106 if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
107 break;
108 }
109
110 /* if this is an alarm interrupt and it already has a callback,
111 * then we don't want to create a new callback because the only
112 * thing on the list should be eal_alarm_callback() and we may
113 * be called just to reset the timer.
114 */
115 if (src != NULL &&
116 rte_intr_type_get(src->intr_handle) == RTE_INTR_HANDLE_ALARM &&
117 !TAILQ_EMPTY(&src->callbacks)) {
118 callback = NULL;
119 } else {
120 /* allocate a new interrupt callback entity */
121 callback = calloc(1, sizeof(*callback));
122 if (callback == NULL) {
123 EAL_LOG(ERR, "Can not allocate memory");
124 ret = -ENOMEM;
125 goto fail;
126 }
127 callback->cb_fn = cb;
128 callback->cb_arg = cb_arg;
129 callback->pending_delete = 0;
130 callback->ucb_fn = NULL;
131
132 if (src == NULL) {
133 src = calloc(1, sizeof(*src));
134 if (src == NULL) {
135 EAL_LOG(ERR, "Can not allocate memory");
136 ret = -ENOMEM;
137 goto fail;
138 } else {
139 src->intr_handle = rte_intr_instance_dup(intr_handle);
140 if (src->intr_handle == NULL) {
141 EAL_LOG(ERR, "Can not create intr instance");
142 ret = -ENOMEM;
143 free(src);
144 src = NULL;
145 goto fail;
146 }
147 TAILQ_INIT(&src->callbacks);
148 TAILQ_INSERT_TAIL(&intr_sources, src, next);
149 }
150 }
151
152 /* no callbacks registered yet for this source, so add its event to the kqueue */
153 if (TAILQ_EMPTY(&src->callbacks))
154 add_event = 1;
155
156 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
157 }
158
159 /* add events to the queue. timer events are special as we need to
160 * re-set the timer.
161 */
162 if (add_event ||
163 rte_intr_type_get(src->intr_handle) == RTE_INTR_HANDLE_ALARM) {
164 struct kevent ke;
165
166 memset(&ke, 0, sizeof(ke));
167 ke.flags = EV_ADD; /* mark for addition to the queue */
168
169 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
170 EAL_LOG(ERR, "Cannot convert interrupt handle to kevent");
171 ret = -ENODEV;
172 goto fail;
173 }
174
175 /**
176 * add the intr file descriptor to the wait list.
177 */
178 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
179 /* currently, nic_uio does not support interrupts, so
180 * this error will always be triggered and reported to the
181 * user; don't output it unless the debug log level is set.
182 */
183 if (errno == ENODEV)
184 EAL_LOG(DEBUG, "Interrupt handle %d not supported",
185 rte_intr_fd_get(src->intr_handle));
186 else
187 EAL_LOG(ERR, "Error adding fd %d kevent, %s",
188 rte_intr_fd_get(src->intr_handle),
189 strerror(errno));
190 ret = -errno;
191 goto fail;
192 }
193 }
194 rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
195 rte_spinlock_unlock(&intr_lock);
196
197 return 0;
198 fail:
199 /* clean up */
200 if (src != NULL) {
201 if (callback != NULL)
202 TAILQ_REMOVE(&(src->callbacks), callback, next);
203 if (TAILQ_EMPTY(&(src->callbacks))) {
204 TAILQ_REMOVE(&intr_sources, src, next);
205 free(src);
206 }
207 }
208 free(callback);
209 rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
210 rte_spinlock_unlock(&intr_lock);
211 return ret;
212 }
213
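/*
 * Mark matching callbacks for deferred deletion; cb_arg == (void *)-1
 * matches any argument. This variant is only usable while the source is
 * being serviced (src->active != 0); the interrupt thread frees the
 * marked callbacks once it has finished invoking them. Returns the number
 * of callbacks marked, or a negative errno value.
 */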
214 int
215 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
216 rte_intr_callback_fn cb_fn, void *cb_arg,
217 rte_intr_unregister_callback_fn ucb_fn)
218 {
219 int ret;
220 struct rte_intr_source *src;
221 struct rte_intr_callback *cb, *next;
222
223 /* do parameter checking first */
224 if (rte_intr_fd_get(intr_handle) < 0) {
225 EAL_LOG(ERR,
226 "Unregistering with invalid input parameter");
227 return -EINVAL;
228 }
229
230 if (kq < 0) {
231 EAL_LOG(ERR, "Kqueue is not active");
232 return -ENODEV;
233 }
234
235 rte_spinlock_lock(&intr_lock);
236
237 /* check whether an interrupt source exists for this fd */
238 TAILQ_FOREACH(src, &intr_sources, next)
239 if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
240 break;
241
242 /* No interrupt source registered for the fd */
243 if (src == NULL) {
244 ret = -ENOENT;
245
246 /* only usable if the source is active */
247 } else if (src->active == 0) {
248 ret = -EAGAIN;
249
250 } else {
251 ret = 0;
252
253 /* walk through the callbacks and mark all that match. */
254 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
255 next = TAILQ_NEXT(cb, next);
256 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
257 cb->cb_arg == cb_arg)) {
258 cb->pending_delete = 1;
259 cb->ucb_fn = ucb_fn;
260 ret++;
261 }
262 }
263 }
264
265 rte_spinlock_unlock(&intr_lock);
266
267 return ret;
268 }
269
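/*
 * Remove matching callbacks immediately; cb_arg == (void *)-1 matches any
 * argument. Fails with -EAGAIN while the source is active, in which case
 * the _pending() or _sync() variants should be used instead.
 */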
270 int
271 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
272 rte_intr_callback_fn cb_fn, void *cb_arg)
273 {
274 int ret;
275 struct rte_intr_source *src;
276 struct rte_intr_callback *cb, *next;
277
278 /* do parameter checking first */
279 if (rte_intr_fd_get(intr_handle) < 0) {
280 EAL_LOG(ERR,
281 "Unregistering with invalid input parameter");
282 return -EINVAL;
283 }
284 if (kq < 0) {
285 EAL_LOG(ERR, "Kqueue is not active");
286 return -ENODEV;
287 }
288
289 rte_spinlock_lock(&intr_lock);
290
291 /* check whether an interrupt source exists for this fd */
292 TAILQ_FOREACH(src, &intr_sources, next)
293 if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
294 break;
295
296 /* No interrupt source registered for the fd */
297 if (src == NULL) {
298 ret = -ENOENT;
299
300 /* interrupt source has some active callbacks right now. */
301 } else if (src->active != 0) {
302 ret = -EAGAIN;
303
304 /* ok to remove. */
305 } else {
306 struct kevent ke;
307
308 ret = 0;
309
310 /* remove it from the kqueue */
311 memset(&ke, 0, sizeof(ke));
312 ke.flags = EV_DELETE; /* mark for deletion from the queue */
313
314 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
315 EAL_LOG(ERR, "Cannot convert to kevent");
316 ret = -ENODEV;
317 goto out;
318 }
319
320 /**
321 * remove the intr file descriptor from the wait list.
322 */
323 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
324 EAL_LOG(ERR, "Error removing fd %d kevent, %s",
325 rte_intr_fd_get(src->intr_handle),
326 strerror(errno));
327 /* removing a non-existent event is an expected condition
328 * in some circumstances (e.g. oneshot events).
329 */
330 }
331
332 /* walk through the callbacks and remove all that match. */
333 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
334 next = TAILQ_NEXT(cb, next);
335 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
336 cb->cb_arg == cb_arg)) {
337 TAILQ_REMOVE(&src->callbacks, cb, next);
338 free(cb);
339 ret++;
340 }
341 }
342
343 /* all callbacks for that source are removed. */
344 if (TAILQ_EMPTY(&src->callbacks)) {
345 TAILQ_REMOVE(&intr_sources, src, next);
346 free(src);
347 }
348 }
349 out:
350 rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
351 ret);
352 rte_spinlock_unlock(&intr_lock);
353
354 return ret;
355 }
356
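/* Spin until the interrupt thread releases the source, then unregister. */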
357 int
358 rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
359 rte_intr_callback_fn cb_fn, void *cb_arg)
360 {
361 int ret = 0;
362
363 while ((ret = rte_intr_callback_unregister(intr_handle, cb_fn, cb_arg)) == -EAGAIN)
364 rte_pause();
365
366 return ret;
367 }
368
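/*
 * Only RTE_INTR_HANDLE_VDEV handles can be "enabled" here (as a no-op);
 * every other handle type currently reports failure.
 */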
369 int
370 rte_intr_enable(const struct rte_intr_handle *intr_handle)
371 {
372 int rc = 0;
373
374 if (intr_handle == NULL)
375 return -1;
376
377 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
378 rc = 0;
379 goto out;
380 }
381
382 if (rte_intr_fd_get(intr_handle) < 0 ||
383 rte_intr_dev_fd_get(intr_handle) < 0) {
384 rc = -1;
385 goto out;
386 }
387
388 switch (rte_intr_type_get(intr_handle)) {
389 /* not used at this moment */
390 case RTE_INTR_HANDLE_ALARM:
391 rc = -1;
392 break;
393 /* not used at this moment */
394 case RTE_INTR_HANDLE_DEV_EVENT:
395 rc = -1;
396 break;
397 /* unknown handle type */
398 default:
399 EAL_LOG(ERR, "Unknown handle type of fd %d",
400 rte_intr_fd_get(intr_handle));
401 rc = -1;
402 break;
403 }
404
405 out:
406 rte_eal_trace_intr_enable(intr_handle, rc);
407 return rc;
408 }
409
410 int
411 rte_intr_disable(const struct rte_intr_handle *intr_handle)
412 {
413 int rc = 0;
414
415 if (intr_handle == NULL)
416 return -1;
417
418 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
419 rc = 0;
420 goto out;
421 }
422
423 if (rte_intr_fd_get(intr_handle) < 0 ||
424 rte_intr_dev_fd_get(intr_handle) < 0) {
425 rc = -1;
426 goto out;
427 }
428
429 switch (rte_intr_type_get(intr_handle)) {
430 /* not used at this moment */
431 case RTE_INTR_HANDLE_ALARM:
432 rc = -1;
433 break;
434 /* not used at this moment */
435 case RTE_INTR_HANDLE_DEV_EVENT:
436 rc = -1;
437 break;
438 /* unknown handle type */
439 default:
440 EAL_LOG(ERR, "Unknown handle type of fd %d",
441 rte_intr_fd_get(intr_handle));
442 rc = -1;
443 break;
444 }
445 out:
446 rte_eal_trace_intr_disable(intr_handle, rc);
447 return rc;
448 }
449
450 int
451 rte_intr_ack(const struct rte_intr_handle *intr_handle)
452 {
453 if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
454 return 0;
455
456 return -1;
457 }
458
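/*
 * Service the events returned by kevent(): look up the source for each
 * event, drain the fd where a read is required, invoke the callbacks with
 * the lock dropped, then apply any deferred (pending_delete) removals.
 */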
459 static void
460 eal_intr_process_interrupts(struct kevent *events, int nfds)
461 {
462 struct rte_intr_callback active_cb;
463 union rte_intr_read_buffer buf;
464 struct rte_intr_callback *cb, *next;
465 struct rte_intr_source *src;
466 bool call = false;
467 int n, bytes_read;
468 struct kevent ke;
469
470 for (n = 0; n < nfds; n++) {
471 int event_fd = events[n].ident;
472
473 rte_spinlock_lock(&intr_lock);
474 TAILQ_FOREACH(src, &intr_sources, next)
475 if (rte_intr_fd_get(src->intr_handle) == event_fd)
476 break;
477 if (src == NULL) {
478 rte_spinlock_unlock(&intr_lock);
479 continue;
480 }
481
482 /* mark this interrupt source as active and release the lock. */
483 src->active = 1;
484 rte_spinlock_unlock(&intr_lock);
485
486 /* set the length to be read for different handle types */
487 switch (rte_intr_type_get(src->intr_handle)) {
488 case RTE_INTR_HANDLE_ALARM:
489 bytes_read = 0;
490 call = true;
491 break;
492 case RTE_INTR_HANDLE_VDEV:
493 case RTE_INTR_HANDLE_EXT:
494 bytes_read = 0;
495 call = true;
496 break;
497 case RTE_INTR_HANDLE_DEV_EVENT:
498 bytes_read = 0;
499 call = true;
500 break;
501 default:
502 bytes_read = 1;
503 break;
504 }
505
506 if (bytes_read > 0) {
507 /**
508 * read out to clear the ready-to-be-read flag
509 * so the descriptor is not reported again by kevent.
510 */
511 bytes_read = read(event_fd, &buf, bytes_read);
512 if (bytes_read < 0) {
513 if (errno == EINTR || errno == EWOULDBLOCK)
514 continue;
515
516 EAL_LOG(ERR, "Error reading from file "
517 "descriptor %d: %s",
518 event_fd,
519 strerror(errno));
520 } else if (bytes_read == 0)
521 EAL_LOG(ERR, "Read nothing from file "
522 "descriptor %d", event_fd);
523 else
524 call = true;
525 }
526
527 /* grab the lock again to call callbacks and update status. */
528 rte_spinlock_lock(&intr_lock);
529
530 if (call) {
531 /* Finally, call all callbacks. */
532 TAILQ_FOREACH(cb, &src->callbacks, next) {
533
534 /* make a copy and unlock. */
535 active_cb = *cb;
536 rte_spinlock_unlock(&intr_lock);
537
538 /* call the actual callback */
539 active_cb.cb_fn(active_cb.cb_arg);
540
541 /* get the lock back. */
542 rte_spinlock_lock(&intr_lock);
543 }
544 }
545
546 /* we are done with this interrupt source, release it. */
547 src->active = 0;
548
549 /* check if any callbacks are supposed to be removed */
550 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
551 next = TAILQ_NEXT(cb, next);
552 if (cb->pending_delete) {
553 /* remove it from the kqueue */
554 memset(&ke, 0, sizeof(ke));
555 /* mark for deletion from the queue */
556 ke.flags = EV_DELETE;
557
558 if (intr_source_to_kevent(src->intr_handle, &ke) < 0) {
559 EAL_LOG(ERR, "Cannot convert to kevent");
560 rte_spinlock_unlock(&intr_lock);
561 return;
562 }
563
564 /**
565 * remove the intr file descriptor from the wait list.
566 */
567 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
568 EAL_LOG(ERR, "Error removing fd %d kevent, %s",
569 rte_intr_fd_get(src->intr_handle),
570 strerror(errno));
571 /* removing a non-existent event is an expected
572 * condition in some circumstances
573 * (e.g. oneshot events).
574 */
575 }
576
577 TAILQ_REMOVE(&src->callbacks, cb, next);
578 if (cb->ucb_fn)
579 cb->ucb_fn(src->intr_handle, cb->cb_arg);
580 free(cb);
581 }
582 }
583
584 /* all callbacks for that source are removed. */
585 if (TAILQ_EMPTY(&src->callbacks)) {
586 TAILQ_REMOVE(&intr_sources, src, next);
587 free(src);
588 }
589
590 rte_spinlock_unlock(&intr_lock);
591 }
592 }
593
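/* main loop of the interrupt thread: block in kevent() and dispatch ready events */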
594 static uint32_t
595 eal_intr_thread_main(void *arg __rte_unused)
596 {
597 struct kevent events[MAX_INTR_EVENTS];
598 int nfds;
599
600 /* host thread, never break out */
601 for (;;) {
602 /* do not change anything, just wait */
603 nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
604
605 /* kevent failed */
606 if (nfds < 0) {
607 if (errno == EINTR)
608 continue;
609 EAL_LOG(ERR,
610 "kevent wait failed");
611 break;
612 }
613 /* kevent timeout, will never happen here */
614 else if (nfds == 0)
615 continue;
616
617 /* kevent has at least one fd ready to read */
618 eal_intr_process_interrupts(events, nfds);
619 }
620 close(kq);
621 kq = -1;
622 return 0;
623 }
624
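/*
 * Create the kqueue and spawn the control thread that waits for and
 * dispatches interrupt events.
 */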
625 int
626 rte_eal_intr_init(void)
627 {
628 int ret = 0;
629
630 /* init the global interrupt source head */
631 TAILQ_INIT(&intr_sources);
632
633 kq = kqueue();
634 if (kq < 0) {
635 EAL_LOG(ERR, "Cannot create kqueue instance");
636 return -1;
637 }
638
639 /* create the host thread to wait/handle the interrupt */
640 ret = rte_thread_create_internal_control(&intr_thread, "intr",
641 eal_intr_thread_main, NULL);
642 if (ret != 0) {
643 rte_errno = -ret;
644 EAL_LOG(ERR,
645 "Failed to create thread for interrupt handling");
646 }
647
648 return ret;
649 }
650
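/*
 * FreeBSD has no epoll or eventfd support, so the Rx-interrupt and
 * epoll-related wrappers below are stubs: they either return -ENOTSUP or
 * report conservative default capabilities.
 */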
651 int
652 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
653 int epfd, int op, unsigned int vec, void *data)
654 {
655 RTE_SET_USED(intr_handle);
656 RTE_SET_USED(epfd);
657 RTE_SET_USED(op);
658 RTE_SET_USED(vec);
659 RTE_SET_USED(data);
660
661 return -ENOTSUP;
662 }
663
664 int
665 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
666 {
667 RTE_SET_USED(intr_handle);
668 RTE_SET_USED(nb_efd);
669
670 return 0;
671 }
672
673 void
674 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
675 {
676 RTE_SET_USED(intr_handle);
677 }
678
679 int
680 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
681 {
682 RTE_SET_USED(intr_handle);
683 return 0;
684 }
685
686 int
687 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
688 {
689 RTE_SET_USED(intr_handle);
690 return 1;
691 }
692
693 int
694 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
695 {
696 RTE_SET_USED(intr_handle);
697 return 0;
698 }
699
700 int
701 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
702 int maxevents, int timeout)
703 {
704 RTE_SET_USED(epfd);
705 RTE_SET_USED(events);
706 RTE_SET_USED(maxevents);
707 RTE_SET_USED(timeout);
708
709 return -ENOTSUP;
710 }
711
712 int
713 rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
714 int maxevents, int timeout)
715 {
716 RTE_SET_USED(epfd);
717 RTE_SET_USED(events);
718 RTE_SET_USED(maxevents);
719 RTE_SET_USED(timeout);
720
721 return -ENOTSUP;
722 }
723
724 int
725 rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
726 {
727 RTE_SET_USED(epfd);
728 RTE_SET_USED(op);
729 RTE_SET_USED(fd);
730 RTE_SET_USED(event);
731
732 return -ENOTSUP;
733 }
734
735 int
736 rte_intr_tls_epfd(void)
737 {
738 return -ENOTSUP;
739 }
740
741 void
742 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
743 {
744 RTE_SET_USED(intr_handle);
745 }
746
747 int rte_thread_is_intr(void)
748 {
749 return rte_thread_equal(intr_thread, rte_thread_self());
750 }
751