/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <rte_interrupts.h>

#include "eal_private.h"
#include "eal_windows.h"

#define IOCP_KEY_SHUTDOWN UINT32_MAX

static rte_thread_t intr_thread;

static HANDLE intr_iocp;
static HANDLE intr_thread_handle;

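/* Process one I/O completion event delivered to the interrupt thread.
 * Placeholder: no interrupt sources are dispatched from here yet.
 */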
static void
eal_intr_process(const OVERLAPPED_ENTRY *event)
{
	RTE_SET_USED(event);
}

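/* Open a real handle to the current (interrupt) thread so that other
 * threads can queue user APCs to it and wait for its termination.
 */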
static int
eal_intr_thread_handle_init(void)
{
	DWORD thread_id = GetCurrentThreadId();

	intr_thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id);
	if (intr_thread_handle == NULL) {
		RTE_LOG_WIN32_ERR("OpenThread(%lu)", thread_id);
		return -1;
	}
	return 0;
}

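/* Interrupt thread entry point: block on the I/O completion port in an
 * alertable wait (so queued APCs, e.g. alarms, also run), dispatch received
 * events, and exit when the shutdown completion key is posted.
 */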
static uint32_t
eal_intr_thread_main(LPVOID arg __rte_unused)
{
	bool finished = false;

	if (eal_intr_thread_handle_init() < 0) {
		EAL_LOG(ERR, "Cannot open interrupt thread handle");
		goto cleanup;
	}

	while (!finished) {
		OVERLAPPED_ENTRY events[16];
		ULONG event_count, i;
		BOOL result;

		result = GetQueuedCompletionStatusEx(
			intr_iocp, events, RTE_DIM(events), &event_count,
			INFINITE, /* no timeout */
			TRUE);    /* alertable wait for alarm APCs */

		if (!result) {
			DWORD error = GetLastError();
			if (error != WAIT_IO_COMPLETION) {
				RTE_LOG_WIN32_ERR("GetQueuedCompletionStatusEx()");
				EAL_LOG(ERR, "Failed waiting for interrupts");
				break;
			}

			/* No I/O events, all work is done in completed APCs. */
			continue;
		}

		for (i = 0; i < event_count; i++) {
			if (events[i].lpCompletionKey == IOCP_KEY_SHUTDOWN) {
				finished = true;
				break;
			}
			eal_intr_process(&events[i]);
		}
	}

	CloseHandle(intr_thread_handle);
	intr_thread_handle = NULL;

cleanup:
	intr_thread.opaque_id = 0;

	CloseHandle(intr_iocp);
	intr_iocp = NULL;

	return 0;
}

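/* Create the interrupt I/O completion port and start the interrupt
 * control thread.
 */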
int
rte_eal_intr_init(void)
{
	int ret = 0;

	intr_iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
	if (intr_iocp == NULL) {
		RTE_LOG_WIN32_ERR("CreateIoCompletionPort()");
		EAL_LOG(ERR, "Cannot create interrupt IOCP");
		return -1;
	}

	ret = rte_thread_create_internal_control(&intr_thread, "intr",
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		EAL_LOG(ERR, "Cannot create interrupt thread");
	}

	return ret;
}

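/* Return non-zero when called from the interrupt thread. */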
int
rte_thread_is_intr(void)
{
	return rte_thread_equal(intr_thread, rte_thread_self());
}

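/* Rx interrupt vector control is not supported on Windows. */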
int
rte_intr_rx_ctl(__rte_unused struct rte_intr_handle *intr_handle,
		__rte_unused int epfd, __rte_unused int op,
		__rte_unused unsigned int vec, __rte_unused void *data)
{
	return -ENOTSUP;
}

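/* Run func(arg) in the interrupt thread by queueing a user-mode APC;
 * it executes when the thread enters its alertable wait.
 */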
int
eal_intr_thread_schedule(void (*func)(void *arg), void *arg)
{
	if (!QueueUserAPC((PAPCFUNC)(ULONG_PTR)func,
			intr_thread_handle, (ULONG_PTR)arg)) {
		RTE_LOG_WIN32_ERR("QueueUserAPC()");
		return -EINVAL;
	}

	return 0;
}

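/* Request interrupt thread shutdown by posting the dedicated completion
 * key, then wait for the thread to terminate.
 */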
void
eal_intr_thread_cancel(void)
{
	if (!PostQueuedCompletionStatus(
			intr_iocp, 0, IOCP_KEY_SHUTDOWN, NULL)) {
		RTE_LOG_WIN32_ERR("PostQueuedCompletionStatus()");
		EAL_LOG(ERR, "Cannot cancel interrupt thread");
		return;
	}

	WaitForSingleObject(intr_thread_handle, INFINITE);
}

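/* Interrupt callback and control APIs below are stubs on Windows:
 * they either report -ENOTSUP or succeed as no-ops where callers
 * tolerate it.
 */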
int
rte_intr_callback_register(
	__rte_unused const struct rte_intr_handle *intr_handle,
	__rte_unused rte_intr_callback_fn cb, __rte_unused void *cb_arg)
{
	return -ENOTSUP;
}

int
rte_intr_callback_unregister_pending(
	__rte_unused const struct rte_intr_handle *intr_handle,
	__rte_unused rte_intr_callback_fn cb_fn, __rte_unused void *cb_arg,
	__rte_unused rte_intr_unregister_callback_fn ucb_fn)
{
	return -ENOTSUP;
}

int
rte_intr_callback_unregister(
	__rte_unused const struct rte_intr_handle *intr_handle,
	__rte_unused rte_intr_callback_fn cb_fn, __rte_unused void *cb_arg)
{
	return 0;
}

int
rte_intr_callback_unregister_sync(
	__rte_unused const struct rte_intr_handle *intr_handle,
	__rte_unused rte_intr_callback_fn cb_fn, __rte_unused void *cb_arg)
{
	return 0;
}

int
rte_intr_enable(__rte_unused const struct rte_intr_handle *intr_handle)
{
	return -ENOTSUP;
}

int
rte_intr_ack(__rte_unused const struct rte_intr_handle *intr_handle)
{
	return -ENOTSUP;
}

int
rte_intr_disable(__rte_unused const struct rte_intr_handle *intr_handle)
{
	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 0;
}

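/* epoll emulation is not available on Windows; the rte_epoll_* API
 * reports -ENOTSUP.
 */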
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}