1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include "test.h"
6 
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_mbuf.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 
13 #ifdef RTE_EXEC_ENV_WINDOWS
14 static int
15 test_eventdev_common(void)
16 {
17 	printf("eventdev_common not supported on Windows, skipping test\n");
18 	return TEST_SKIPPED;
19 }
20 
21 #else
22 
23 #include <rte_eventdev.h>
24 #include <rte_dev.h>
25 #include <rte_bus_vdev.h>
26 
27 #define TEST_DEV_ID   0
28 
29 static int
30 testsuite_setup(void)
31 {
32 	RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
33 	uint8_t count;
34 	count = rte_event_dev_count();
35 	if (!count) {
36 		int ret;
37 
38 		printf("Failed to find a valid event device,"
39 			" trying with event_skeleton device\n");
40 		ret = rte_vdev_init("event_skeleton", NULL);
41 		if (ret != 0) {
42 			printf("No event device, skipping\n");
43 			return TEST_SKIPPED;
44 		}
45 	}
46 	return TEST_SUCCESS;
47 }
48 
49 static void
50 testsuite_teardown(void)
51 {
52 }
53 
54 static int
55 test_eventdev_count(void)
56 {
57 	uint8_t count;
58 	count = rte_event_dev_count();
59 	TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
60 	return TEST_SUCCESS;
61 }
62 
63 static int
64 test_eventdev_get_dev_id(void)
65 {
66 	int ret;
67 	ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
68 	TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
69 	return TEST_SUCCESS;
70 }
71 
72 static int
73 test_eventdev_socket_id(void)
74 {
75 	int socket_id;
76 	socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
77 	TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
78 				socket_id);
79 	socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
80 	TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
81 
82 	return TEST_SUCCESS;
83 }
84 
85 static int
86 test_eventdev_info_get(void)
87 {
88 	int ret;
89 	struct rte_event_dev_info info;
90 	ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
91 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
92 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
93 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
94 	TEST_ASSERT(info.max_event_ports > 0,
95 			"Not enough event ports %d", info.max_event_ports);
96 	TEST_ASSERT(info.max_event_queues > 0,
97 			"Not enough event queues %d", info.max_event_queues);
98 	return TEST_SUCCESS;
99 }
100 
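/*
 * Fill the configuration with the limits advertised by the PMD: the minimum
 * dequeue timeout and the maximum number of ports, queues, queue flows, port
 * depths and in-flight events. The negative configure tests below then bump
 * one field at a time past its limit.
 */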
101 static inline void
102 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
103 			struct rte_event_dev_info *info)
104 {
105 	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
106 	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
107 	dev_conf->nb_event_ports = info->max_event_ports;
108 	dev_conf->nb_event_queues = info->max_event_queues;
109 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
110 	dev_conf->nb_event_port_dequeue_depth =
111 			info->max_event_port_dequeue_depth;
112 	dev_conf->nb_event_port_enqueue_depth =
113 			info->max_event_port_enqueue_depth;
116 	dev_conf->nb_events_limit =
117 			info->max_num_events;
118 }
119 
120 static int
121 test_eventdev_config_run(struct rte_event_dev_config *dev_conf,
122 		struct rte_event_dev_info *info,
123 		void (*fn)(struct rte_event_dev_config *dev_conf,
124 			struct rte_event_dev_info *info))
125 {
126 	devconf_set_default_sane_values(dev_conf, info);
127 	fn(dev_conf, info);
128 	return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
129 }
130 
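/*
 * Illustrative sketch added for clarity; it is not part of the original test
 * suite and the helper name is hypothetical. It shows what one negative
 * configure check expands to once a mutator callback is inlined: request one
 * more event queue than the device advertises and expect
 * rte_event_dev_configure() to reject it with -EINVAL. Marked __rte_unused
 * because nothing in the suite calls it.
 */
static __rte_unused int
example_configure_too_many_queues(void)
{
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	devconf_set_default_sane_values(&dev_conf, &info);
	dev_conf.nb_event_queues = info.max_event_queues + 1;

	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_EQUAL(-EINVAL, ret, "Out-of-range queue count was accepted");

	return TEST_SUCCESS;
}
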
131 static void
132 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
133 		  struct rte_event_dev_info *info)
134 {
135 	dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
136 }
137 
138 static void
139 max_events_limit(struct rte_event_dev_config *dev_conf,
140 		  struct rte_event_dev_info *info)
141 {
142 	dev_conf->nb_events_limit  = info->max_num_events + 1;
143 }
144 
145 static void
146 max_event_ports(struct rte_event_dev_config *dev_conf,
147 		  struct rte_event_dev_info *info)
148 {
149 	dev_conf->nb_event_ports = info->max_event_ports + 1;
150 }
151 
152 static void
153 max_event_queues(struct rte_event_dev_config *dev_conf,
154 		  struct rte_event_dev_info *info)
155 {
156 	dev_conf->nb_event_queues = info->max_event_queues + 1;
157 }
158 
159 static void
160 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
161 		  struct rte_event_dev_info *info)
162 {
163 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
164 }
165 
166 static void
167 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
168 		  struct rte_event_dev_info *info)
169 {
170 	dev_conf->nb_event_port_dequeue_depth =
171 		info->max_event_port_dequeue_depth + 1;
172 }
173 
174 static void
175 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
176 		  struct rte_event_dev_info *info)
177 {
178 	dev_conf->nb_event_port_enqueue_depth =
179 		info->max_event_port_enqueue_depth + 1;
180 }
181 
182 
183 static int
184 test_eventdev_configure(void)
185 {
186 	int ret;
187 	struct rte_event_dev_config dev_conf;
188 	struct rte_event_dev_info info;
189 	ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
190 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
191 
192 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
193 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
194 
195 	/* Check limits */
196 	TEST_ASSERT_EQUAL(-EINVAL,
197 		test_eventdev_config_run(&dev_conf, &info, max_dequeue_limit),
198 		 "Config negative test failed");
199 	TEST_ASSERT_EQUAL(-EINVAL,
200 		test_eventdev_config_run(&dev_conf, &info, max_events_limit),
201 		 "Config negative test failed");
202 	TEST_ASSERT_EQUAL(-EINVAL,
203 		test_eventdev_config_run(&dev_conf, &info, max_event_ports),
204 		 "Config negative test failed");
205 	TEST_ASSERT_EQUAL(-EINVAL,
206 		test_eventdev_config_run(&dev_conf, &info, max_event_queues),
207 		 "Config negative test failed");
208 	TEST_ASSERT_EQUAL(-EINVAL,
209 		test_eventdev_config_run(&dev_conf, &info, max_event_queue_flows),
210 		"Config negative test failed");
211 
212 	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
213 		TEST_ASSERT_EQUAL(-EINVAL,
214 				test_eventdev_config_run(&dev_conf, &info,
215 					max_event_port_dequeue_depth),
216 				"Config negative test failed");
217 		TEST_ASSERT_EQUAL(-EINVAL,
218 				test_eventdev_config_run(&dev_conf, &info,
219 					max_event_port_enqueue_depth),
220 				"Config negative test failed");
221 	}
222 
223 	/* Positive case */
224 	devconf_set_default_sane_values(&dev_conf, &info);
225 	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
226 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
227 
228 	/* re-configure */
229 	devconf_set_default_sane_values(&dev_conf, &info);
230 	dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
231 	dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
232 	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
233 	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
234 
235 	/* re-configure back to max_event_queues and max_event_ports */
236 	devconf_set_default_sane_values(&dev_conf, &info);
237 	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
238 	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
239 
240 	return TEST_SUCCESS;
241 
242 }
243 
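/*
 * Common setup used by most of the TEST_CASE_ST entries below: configure the
 * device with the default limits and leave queue, port and link setup to the
 * individual test cases.
 */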
244 static int
245 eventdev_configure_setup(void)
246 {
247 	int ret;
248 	struct rte_event_dev_config dev_conf;
249 	struct rte_event_dev_info info;
250 
251 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
252 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
253 	devconf_set_default_sane_values(&dev_conf, &info);
254 	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
255 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
256 
257 	return TEST_SUCCESS;
258 }
259 
260 static int
261 test_eventdev_queue_default_conf_get(void)
262 {
263 	int i, ret;
264 	struct rte_event_queue_conf qconf;
265 
266 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
267 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
268 
269 	uint32_t queue_count;
270 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
271 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
272 			    "Queue count get failed");
273 
274 	for (i = 0; i < (int)queue_count; i++) {
275 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
276 						 &qconf);
277 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
278 	}
279 
280 	return TEST_SUCCESS;
281 }
282 
283 static int
284 test_eventdev_queue_setup(void)
285 {
286 	int i, ret;
287 	struct rte_event_dev_info info;
288 	struct rte_event_queue_conf qconf;
289 
290 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
291 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
292 
293 	/* Negative cases */
294 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
295 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
296 	qconf.event_queue_cfg =	RTE_EVENT_QUEUE_CFG_ALL_TYPES;
297 	qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
298 	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
299 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
300 
301 	qconf.nb_atomic_flows = info.max_event_queue_flows;
302 	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
303 	qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
304 	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
305 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
306 
307 	ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
308 					&qconf);
309 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
310 
311 	/* Positive case */
312 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
313 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
314 	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
315 	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
316 
317 	uint32_t queue_count;
318 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
319 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
320 			    "Queue count get failed");
321 
322 	for (i = 1; i < (int)queue_count; i++) {
323 		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
324 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
325 	}
326 
327 	return TEST_SUCCESS;
328 }
329 
330 static int
331 test_eventdev_queue_count(void)
332 {
333 	int ret;
334 	struct rte_event_dev_info info;
335 
336 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
337 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
338 
339 	uint32_t queue_count;
340 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
341 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
342 			    "Queue count get failed");
343 	TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
344 			  "Wrong queue count");
345 
346 	return TEST_SUCCESS;
347 }
348 
349 static int
350 test_eventdev_queue_attr_priority(void)
351 {
352 	int i, ret;
353 	struct rte_event_dev_info info;
354 	struct rte_event_queue_conf qconf;
355 	uint8_t priority;
356 
357 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
358 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
359 
360 	uint32_t queue_count;
361 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
362 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
363 			    "Queue count get failed");
364 
365 	for (i = 0; i < (int)queue_count; i++) {
366 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
367 					&qconf);
368 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
369 		qconf.priority = i %  RTE_EVENT_DEV_PRIORITY_LOWEST;
370 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
371 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
372 	}
373 
374 	for (i = 0; i < (int)queue_count; i++) {
375 		uint32_t tmp;
376 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
377 				    RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
378 				    "Queue priority get failed");
379 		priority = tmp;
380 
381 		if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
382 			TEST_ASSERT_EQUAL(priority,
383 			 i %  RTE_EVENT_DEV_PRIORITY_LOWEST,
384 			 "Wrong priority value for queue%d", i);
385 		else
386 			TEST_ASSERT_EQUAL(priority,
387 			 RTE_EVENT_DEV_PRIORITY_NORMAL,
388 			 "Wrong priority value for queue%d", i);
389 	}
390 
391 	return TEST_SUCCESS;
392 }
393 
394 static int
395 test_eventdev_queue_attr_priority_runtime(void)
396 {
397 	uint32_t queue_count, queue_req, prio, deq_cnt;
398 	struct rte_event_queue_conf qconf;
399 	struct rte_event_port_conf pconf;
400 	struct rte_event_dev_info info;
401 	struct rte_event event = {
402 		.op = RTE_EVENT_OP_NEW,
403 		.event_type = RTE_EVENT_TYPE_CPU,
404 		.sched_type = RTE_SCHED_TYPE_ATOMIC,
405 		.u64 = 0xbadbadba,
406 	};
407 	int i, ret;
408 
409 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
410 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
411 
412 	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
413 		return TEST_SKIPPED;
414 
415 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
416 				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
417 				    &queue_count),
418 			    "Queue count get failed");
419 
420 	/* Need at least 2 queues to test LOW and HIGH priority. */
421 	TEST_ASSERT(queue_count > 1, "Not enough event queues, needed 2");
422 	queue_req = 2;
423 
424 	for (i = 0; i < (int)queue_count; i++) {
425 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
426 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
427 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
428 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
429 	}
430 
431 	ret = rte_event_queue_attr_set(TEST_DEV_ID, 0,
432 				       RTE_EVENT_QUEUE_ATTR_PRIORITY,
433 				       RTE_EVENT_DEV_PRIORITY_LOWEST);
434 	if (ret == -ENOTSUP)
435 		return TEST_SKIPPED;
436 	TEST_ASSERT_SUCCESS(ret, "Queue0 priority set failed");
437 
438 	ret = rte_event_queue_attr_set(TEST_DEV_ID, 1,
439 				       RTE_EVENT_QUEUE_ATTR_PRIORITY,
440 				       RTE_EVENT_DEV_PRIORITY_HIGHEST);
441 	if (ret == -ENOTSUP)
442 		return TEST_SKIPPED;
443 	TEST_ASSERT_SUCCESS(ret, "Queue1 priority set failed");
444 
445 	/* Setup event port 0 */
446 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
447 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
448 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
449 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
450 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
451 	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
452 		    TEST_DEV_ID);
453 
454 	ret = rte_event_dev_start(TEST_DEV_ID);
455 	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
456 
457 	for (i = 0; i < (int)queue_req; i++) {
458 		event.queue_id = i;
459 		while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1)
460 			rte_pause();
461 	}
462 
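	/*
	 * In eventdev, numerically smaller priority values mean higher
	 * priority (RTE_EVENT_DEV_PRIORITY_HIGHEST is the smallest value).
	 * The loop below therefore checks that the numeric priority of the
	 * dequeued events never decreases, i.e. that the high priority queue
	 * is drained before the low priority one.
	 */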
463 	prio = RTE_EVENT_DEV_PRIORITY_HIGHEST;
464 	deq_cnt = 0;
465 	while (deq_cnt < queue_req) {
466 		uint32_t queue_prio;
467 
468 		if (rte_event_dequeue_burst(TEST_DEV_ID, 0, &event, 1, 0) == 0)
469 			continue;
470 
471 		ret = rte_event_queue_attr_get(TEST_DEV_ID, event.queue_id,
472 					       RTE_EVENT_QUEUE_ATTR_PRIORITY,
473 					       &queue_prio);
474 		if (ret == -ENOTSUP)
475 			return TEST_SKIPPED;
476 
477 		TEST_ASSERT_SUCCESS(ret, "Queue priority get failed");
478 		TEST_ASSERT(queue_prio >= prio,
479 			    "Received event from a lower priority queue first");
480 		prio = queue_prio;
481 		deq_cnt++;
482 	}
483 
484 	return TEST_SUCCESS;
485 }
486 
487 static int
488 test_eventdev_queue_attr_weight_runtime(void)
489 {
490 	struct rte_event_queue_conf qconf;
491 	struct rte_event_dev_info info;
492 	uint32_t queue_count;
493 	int i, ret;
494 
495 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
496 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
497 
498 	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
499 		return TEST_SKIPPED;
500 
501 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
502 				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
503 				    &queue_count),
504 			    "Queue count get failed");
505 
506 	for (i = 0; i < (int)queue_count; i++) {
507 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
508 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
509 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
510 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
511 	}
512 
513 	for (i = 0; i < (int)queue_count; i++) {
514 		uint32_t get_val;
515 		uint64_t set_val;
516 
517 		set_val = i % RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
518 		ret = rte_event_queue_attr_set(
519 			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, set_val);
520 		if (ret == -ENOTSUP)
521 			return TEST_SKIPPED;
522 
523 		TEST_ASSERT_SUCCESS(ret, "Queue weight set failed");
524 
525 		ret = rte_event_queue_attr_get(
526 			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, &get_val);
527 		if (ret == -ENOTSUP)
528 			return TEST_SKIPPED;
529 
530 		TEST_ASSERT_SUCCESS(ret, "Queue weight get failed");
531 		TEST_ASSERT_EQUAL(get_val, set_val,
532 				  "Wrong weight value for queue%d", i);
533 	}
534 
535 	return TEST_SUCCESS;
536 }
537 
538 static int
539 test_eventdev_queue_attr_affinity_runtime(void)
540 {
541 	struct rte_event_queue_conf qconf;
542 	struct rte_event_dev_info info;
543 	uint32_t queue_count;
544 	int i, ret;
545 
546 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
547 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
548 
549 	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
550 		return TEST_SKIPPED;
551 
552 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
553 				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
554 				    &queue_count),
555 			    "Queue count get failed");
556 
557 	for (i = 0; i < (int)queue_count; i++) {
558 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
559 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
560 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
561 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
562 	}
563 
564 	for (i = 0; i < (int)queue_count; i++) {
565 		uint32_t get_val;
566 		uint64_t set_val;
567 
568 		set_val = i % RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
569 		ret = rte_event_queue_attr_set(
570 			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, set_val);
571 		if (ret == -ENOTSUP)
572 			return TEST_SKIPPED;
573 
574 		TEST_ASSERT_SUCCESS(ret, "Queue affinity set failed");
575 
576 		ret = rte_event_queue_attr_get(
577 			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, &get_val);
578 		if (ret == -ENOTSUP)
579 			return TEST_SKIPPED;
580 
581 		TEST_ASSERT_SUCCESS(ret, "Queue affinity get failed");
582 		TEST_ASSERT_EQUAL(get_val, set_val,
583 				  "Wrong affinity value for queue%d", i);
584 	}
585 
586 	return TEST_SUCCESS;
587 }
588 
589 static int
590 test_eventdev_queue_attr_nb_atomic_flows(void)
591 {
592 	int i, ret;
593 	struct rte_event_dev_info info;
594 	struct rte_event_queue_conf qconf;
595 	uint32_t nb_atomic_flows;
596 
597 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
598 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
599 
600 	uint32_t queue_count;
601 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
602 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
603 			    "Queue count get failed");
604 
605 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
606 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
607 
608 	if (qconf.nb_atomic_flows == 0)
609 		/* Assume PMD doesn't support atomic flows, return early */
610 		return -ENOTSUP;
611 
612 	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
613 
614 	for (i = 0; i < (int)queue_count; i++) {
615 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
616 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
617 	}
618 
619 	for (i = 0; i < (int)queue_count; i++) {
620 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
621 				    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
622 				    &nb_atomic_flows),
623 				    "Queue nb_atomic_flows get failed");
624 
625 		TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
626 				  "Wrong atomic flows value for queue%d", i);
627 	}
628 
629 	return TEST_SUCCESS;
630 }
631 
632 static int
633 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
634 {
635 	int i, ret;
636 	struct rte_event_dev_info info;
637 	struct rte_event_queue_conf qconf;
638 	uint32_t nb_atomic_order_sequences;
639 
640 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
641 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
642 
643 	uint32_t queue_count;
644 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
645 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
646 			    "Queue count get failed");
647 
648 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
649 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
650 
651 	if (qconf.nb_atomic_order_sequences == 0)
652 		/* Assume PMD doesn't support reordering */
653 		return -ENOTSUP;
654 
655 	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
656 
657 	for (i = 0; i < (int)queue_count; i++) {
658 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
659 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
660 	}
661 
662 	for (i = 0; i < (int)queue_count; i++) {
663 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
664 			    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
665 			    &nb_atomic_order_sequences),
666 			    "Queue nb_atomic_order_sequences get failed");
667 
668 		TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
669 				  qconf.nb_atomic_order_sequences,
670 				  "Wrong atomic order sequences value for queue%d",
671 				  i);
672 	}
673 
674 	return TEST_SUCCESS;
675 }
676 
677 static int
678 test_eventdev_queue_attr_event_queue_cfg(void)
679 {
680 	int i, ret;
681 	struct rte_event_dev_info info;
682 	struct rte_event_queue_conf qconf;
683 	uint32_t event_queue_cfg;
684 
685 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
686 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
687 
688 	uint32_t queue_count;
689 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
690 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
691 			    "Queue count get failed");
692 
693 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
694 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
695 
696 	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
697 
698 	for (i = 0; i < (int)queue_count; i++) {
699 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
700 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
701 	}
702 
703 	for (i = 0; i < (int)queue_count; i++) {
704 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
705 				    RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
706 				    &event_queue_cfg),
707 				    "Queue event_queue_cfg get failed");
708 
709 		TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
710 				  "Wrong event_queue_cfg value for queue%d",
711 				  i);
712 	}
713 
714 	return TEST_SUCCESS;
715 }
716 
717 static int
718 test_eventdev_port_default_conf_get(void)
719 {
720 	int i, ret;
721 	struct rte_event_port_conf pconf;
722 
723 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
724 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
725 
726 	uint32_t port_count;
727 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
728 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
729 				&port_count), "Port count get failed");
730 
731 	ret = rte_event_port_default_conf_get(TEST_DEV_ID,
732 			port_count + 1, NULL);
733 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
734 
735 	for (i = 0; i < (int)port_count; i++) {
736 		ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
737 							&pconf);
738 		TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
739 	}
740 
741 	return TEST_SUCCESS;
742 }
743 
744 static int
745 test_eventdev_port_setup(void)
746 {
747 	int i, ret;
748 	struct rte_event_dev_info info;
749 	struct rte_event_port_conf pconf;
750 
751 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
752 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
753 
754 	/* Negative cases */
755 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
756 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
757 	pconf.new_event_threshold = info.max_num_events + 1;
758 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
759 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
760 
761 	pconf.new_event_threshold = info.max_num_events;
762 	pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
763 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
764 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
765 
766 	pconf.dequeue_depth = info.max_event_port_dequeue_depth;
767 	pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
768 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
769 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
770 
771 	if (!(info.event_dev_cap &
772 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
773 		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
774 		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
775 		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
776 		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
777 		pconf.event_port_cfg = 0;
778 	}
779 
780 	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
781 					&pconf);
782 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
783 
784 	/* Positive case */
785 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
786 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
787 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
788 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
789 
790 	uint32_t port_count;
791 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
792 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
793 				&port_count), "Port count get failed");
794 
795 	for (i = 1; i < (int)port_count; i++) {
796 		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
797 		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
798 	}
799 
800 	return TEST_SUCCESS;
801 }
802 
803 static int
804 test_eventdev_port_attr_dequeue_depth(void)
805 {
806 	int ret;
807 	struct rte_event_dev_info info;
808 	struct rte_event_port_conf pconf;
809 
810 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
811 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
812 
813 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
814 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
815 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
816 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
817 
818 	uint32_t value;
819 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
820 			RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
821 			0, "Call to get port dequeue depth failed");
822 	TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
823 			"Wrong port dequeue depth");
824 
825 	return TEST_SUCCESS;
826 }
827 
828 static int
829 test_eventdev_port_attr_enqueue_depth(void)
830 {
831 	int ret;
832 	struct rte_event_dev_info info;
833 	struct rte_event_port_conf pconf;
834 
835 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
836 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
837 
838 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
839 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
840 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
841 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
842 
843 	uint32_t value;
844 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
845 			RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
846 			0, "Call to get port enqueue depth failed");
847 	TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
848 			"Wrong port enqueue depth");
849 
850 	return TEST_SUCCESS;
851 }
852 
853 static int
854 test_eventdev_port_attr_new_event_threshold(void)
855 {
856 	int ret;
857 	struct rte_event_dev_info info;
858 	struct rte_event_port_conf pconf;
859 
860 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
861 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
862 
863 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
864 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
865 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
866 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
867 
868 	uint32_t value;
869 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
870 			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
871 			0, "Call to get port new event threshold failed");
872 	TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
873 			"Wrong port new event threshold");
874 
875 	return TEST_SUCCESS;
876 }
877 
878 static int
879 test_eventdev_port_count(void)
880 {
881 	int ret;
882 	struct rte_event_dev_info info;
883 
884 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
885 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
886 
887 	uint32_t port_count;
888 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
889 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
890 				&port_count), "Port count get failed");
891 	TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
892 
893 	return TEST_SUCCESS;
894 }
895 
896 static int
897 test_eventdev_timeout_ticks(void)
898 {
899 	int ret;
900 	uint64_t timeout_ticks;
901 
902 	ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
903 	if (ret != -ENOTSUP)
904 		TEST_ASSERT_SUCCESS(ret, "Failed to get timeout_ticks");
905 
906 	return ret;
907 }
908 
909 
910 static int
911 test_eventdev_start_stop(void)
912 {
913 	int i, ret;
914 
915 	ret = eventdev_configure_setup();
916 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
917 
918 	uint32_t queue_count;
919 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
920 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
921 			    "Queue count get failed");
922 	for (i = 0; i < (int)queue_count; i++) {
923 		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
924 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
925 	}
926 
927 	uint32_t port_count;
928 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
929 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
930 				&port_count), "Port count get failed");
931 
932 	for (i = 0; i < (int)port_count; i++) {
933 		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
934 		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
935 	}
936 
937 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
938 	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
939 		    TEST_DEV_ID);
940 
941 	ret = rte_event_dev_start(TEST_DEV_ID);
942 	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
943 
944 	rte_event_dev_stop(TEST_DEV_ID);
945 	return TEST_SUCCESS;
946 }
947 
948 
949 static int
950 eventdev_setup_device(void)
951 {
952 	int i, ret;
953 
954 	ret = eventdev_configure_setup();
955 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
956 
957 	uint32_t queue_count;
958 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
959 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
960 			    "Queue count get failed");
961 	for (i = 0; i < (int)queue_count; i++) {
962 		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
963 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
964 	}
965 
966 	uint32_t port_count;
967 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
968 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
969 				&port_count), "Port count get failed");
970 
971 	for (i = 0; i < (int)port_count; i++) {
972 		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
973 		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
974 	}
975 
976 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
977 	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
978 		    TEST_DEV_ID);
979 
980 	ret = rte_event_dev_start(TEST_DEV_ID);
981 	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
982 
983 	return TEST_SUCCESS;
984 }
985 
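/*
 * Illustrative sketch added for clarity, not part of the original suite: the
 * smallest possible round trip on a device brought up by
 * eventdev_setup_device() above (configured, queues and ports set up, port 0
 * linked to all queues, device started). The helper name is hypothetical and
 * the function is left unused (__rte_unused); it enqueues one new event on
 * port 0 and spins until the same event is dequeued again.
 */
static __rte_unused int
example_single_event_round_trip(void)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.event_type = RTE_EVENT_TYPE_CPU,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.queue_id = 0,
		.u64 = 0xcafebabe,
	};

	while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1) != 1)
		rte_pause();

	while (rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0) == 0)
		rte_pause();

	TEST_ASSERT_EQUAL(ev.u64, 0xcafebabe, "Unexpected event payload");

	return TEST_SUCCESS;
}
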
986 static void
987 eventdev_stop_device(void)
988 {
989 	rte_event_dev_stop(TEST_DEV_ID);
990 }
991 
992 static int
993 test_eventdev_link(void)
994 {
995 	int ret, nb_queues, i;
996 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
997 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
998 
999 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1000 	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1001 				 TEST_DEV_ID);
1002 
1003 	uint32_t queue_count;
1004 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1005 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1006 			    "Queue count get failed");
1007 	nb_queues = queue_count;
1008 	for (i = 0; i < nb_queues; i++) {
1009 		queues[i] = i;
1010 		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1011 	}
1012 
1013 	ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
1014 					priorities, nb_queues);
1015 	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
1016 				 TEST_DEV_ID, ret);
1017 	return TEST_SUCCESS;
1018 }
1019 
1020 static int
1021 test_eventdev_unlink(void)
1022 {
1023 	int ret, nb_queues, i;
1024 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1025 
1026 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1027 	TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
1028 				 TEST_DEV_ID);
1029 
1030 	uint32_t queue_count;
1031 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1032 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1033 			    "Queue count get failed");
1034 	nb_queues = queue_count;
1035 	for (i = 0; i < nb_queues; i++)
1036 		queues[i] = i;
1037 
1038 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1039 	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1040 				 TEST_DEV_ID);
1041 
1042 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
1043 	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1044 				 TEST_DEV_ID, ret);
1045 	return TEST_SUCCESS;
1046 }
1047 
1048 static int
1049 test_eventdev_link_get(void)
1050 {
1051 	int ret, i;
1052 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1053 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1054 
1055 	/* link all queues */
1056 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1057 	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
1058 				 TEST_DEV_ID);
1059 
1060 	uint32_t queue_count;
1061 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
1062 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
1063 			    "Queue count get failed");
1064 	const int nb_queues = queue_count;
1065 	for (i = 0; i < nb_queues; i++)
1066 		queues[i] = i;
1067 
1068 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
1069 	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1070 				 TEST_DEV_ID, ret);
1071 
1072 	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1073 	TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
1074 
1075 	/* link all queues and get the links */
1076 	for (i = 0; i < nb_queues; i++) {
1077 		queues[i] = i;
1078 		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1079 	}
1080 	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
1081 					 nb_queues);
1082 	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
1083 				 TEST_DEV_ID, ret);
1084 	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1085 	TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
1086 				 TEST_DEV_ID, ret, nb_queues);
1087 	/* unlink all*/
1088 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1089 	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
1090 				 TEST_DEV_ID, ret);
1091 	/* link just one queue */
1092 	queues[0] = 0;
1093 	priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
1094 
1095 	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
1096 	TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
1097 				 TEST_DEV_ID, ret);
1098 	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
1099 	TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
1100 					TEST_DEV_ID, ret, 1);
1101 	/* unlink the queue */
1102 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
1103 	TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
1104 				 TEST_DEV_ID, ret);
1105 
1106 	/* 4 links and 2 unlinks */
1107 	if (nb_queues >= 4) {
1108 		for (i = 0; i < 4; i++) {
1109 			queues[i] = i;
1110 			priorities[i] = 0x40;
1111 		}
1112 		ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
1113 						4);
1114 		TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
1115 					 TEST_DEV_ID, ret);
1116 
1117 		for (i = 0; i < 2; i++)
1118 			queues[i] = i;
1119 
1120 		ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
1121 		TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
1122 					 TEST_DEV_ID, ret);
1123 		ret = rte_event_port_links_get(TEST_DEV_ID, 0,
1124 						queues, priorities);
1125 		TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
1126 						TEST_DEV_ID, ret, 2);
1127 		TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
1128 		TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
1129 							ret, 0x40);
1130 		TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
1131 		TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
1132 					ret, 0x40);
1133 	}
1134 
1135 	return TEST_SUCCESS;
1136 }
1137 
1138 static int
1139 test_eventdev_profile_switch(void)
1140 {
1141 #define MAX_RETRIES   4
1142 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1143 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1144 	struct rte_event_queue_conf qcfg;
1145 	struct rte_event_port_conf pcfg;
1146 	struct rte_event_dev_info info;
1147 	struct rte_event ev;
1148 	uint8_t q, re;
1149 	int rc;
1150 
1151 	rte_event_dev_info_get(TEST_DEV_ID, &info);
1152 
1153 	if (info.max_profiles_per_port <= 1)
1154 		return TEST_SKIPPED;
1155 
1156 	if (info.max_event_queues <= 1)
1157 		return TEST_SKIPPED;
1158 
1159 	rc = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pcfg);
1160 	TEST_ASSERT_SUCCESS(rc, "Failed to get port0 default config");
1161 	rc = rte_event_port_setup(TEST_DEV_ID, 0, &pcfg);
1162 	TEST_ASSERT_SUCCESS(rc, "Failed to setup port0");
1163 
1164 	rc = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qcfg);
1165 	TEST_ASSERT_SUCCESS(rc, "Failed to get queue0 default config");
1166 	rc = rte_event_queue_setup(TEST_DEV_ID, 0, &qcfg);
1167 	TEST_ASSERT_SUCCESS(rc, "Failed to setup queue0");
1168 
1169 	q = 0;
1170 	rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 0);
1171 	TEST_ASSERT(rc == 1, "Failed to link queue 0 to port 0 with profile 0");
1172 	q = 1;
1173 	rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 1);
1174 	TEST_ASSERT(rc == 1, "Failed to link queue 1 to port 0 with profile 1");
1175 
1176 	rc = rte_event_port_profile_links_get(TEST_DEV_ID, 0, queues, priorities, 0);
1177 	TEST_ASSERT(rc == 1, "Failed to get links for profile 0");
1178 	TEST_ASSERT(queues[0] == 0, "Invalid queue found in link");
1179 
1180 	rc = rte_event_port_profile_links_get(TEST_DEV_ID, 0, queues, priorities, 1);
1181 	TEST_ASSERT(rc == 1, "Failed to get links for profile 1");
1182 	TEST_ASSERT(queues[0] == 1, "Invalid queue found in link");
1183 
1184 	rc = rte_event_dev_start(TEST_DEV_ID);
1185 	TEST_ASSERT_SUCCESS(rc, "Failed to start event device");
1186 
1187 	ev.event_type = RTE_EVENT_TYPE_CPU;
1188 	ev.queue_id = 0;
1189 	ev.op = RTE_EVENT_OP_NEW;
1190 	ev.flow_id = 0;
1191 	ev.u64 = 0xBADF00D0;
1192 	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
1193 	rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1);
1194 	TEST_ASSERT(rc == 1, "Failed to enqueue event");
1195 	ev.queue_id = 1;
1196 	ev.flow_id = 1;
1197 	rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1);
1198 	TEST_ASSERT(rc == 1, "Failed to enqueue event");
1199 
1200 	ev.event = 0;
1201 	ev.u64 = 0;
1202 
1203 	rc = rte_event_port_profile_switch(TEST_DEV_ID, 0, 1);
1204 	TEST_ASSERT_SUCCESS(rc, "Failed to change profile");
1205 
1206 	re = MAX_RETRIES;
1207 	while (re--) {
1208 		rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
1210 		if (rc)
1211 			break;
1212 	}
1213 
1214 	TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 1");
1215 	TEST_ASSERT(ev.flow_id == 1, "Incorrect flow identifier from profile 1");
1216 	TEST_ASSERT(ev.queue_id == 1, "Incorrect queue identifier from profile 1");
1217 
1218 	re = MAX_RETRIES;
1219 	while (re--) {
1220 		rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
1221 		TEST_ASSERT(rc == 0, "Unexpected event dequeued from active profile");
1222 	}
1223 
1224 	rc = rte_event_port_profile_switch(TEST_DEV_ID, 0, 0);
1225 	TEST_ASSERT_SUCCESS(rc, "Failed to change profile");
1226 
1227 	re = MAX_RETRIES;
1228 	while (re--) {
1229 		rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
1230 		if (rc)
1231 			break;
1232 	}
1233 
1234 	TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 0");
1235 	TEST_ASSERT(ev.flow_id == 0, "Incorrect flow identifier from profile 0");
1236 	TEST_ASSERT(ev.queue_id == 0, "Incorrect queue identifier from profile 0");
1237 
1238 	re = MAX_RETRIES;
1239 	while (re--) {
1240 		rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
1241 		TEST_ASSERT(rc == 0, "Unexpected event dequeued from active profile");
1242 	}
1243 
1244 	q = 0;
1245 	rc = rte_event_port_profile_unlink(TEST_DEV_ID, 0, &q, 1, 0);
1246 	TEST_ASSERT(rc == 1, "Failed to unlink queue 0 from port 0 with profile 0");
1247 	q = 1;
1248 	rc = rte_event_port_profile_unlink(TEST_DEV_ID, 0, &q, 1, 1);
1249 	TEST_ASSERT(rc == 1, "Failed to unlink queue 1 from port 0 with profile 1");
1250 
1251 	return TEST_SUCCESS;
1252 }
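
/*
 * Illustrative sketch added for clarity, not part of the original suite: the
 * link-profile pattern exercised above reduced to its essential calls. It
 * assumes port 0 and queues 0 and 1 have already been set up on a configured
 * but not yet started device that exposes at least two link profiles per
 * port; the helper name is hypothetical and the function is left unused
 * (__rte_unused).
 */
static __rte_unused int
example_profile_switch_pattern(void)
{
	uint8_t q;
	int rc;

	q = 0;	/* queue 0 is served while profile 0 is active */
	rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 0);
	TEST_ASSERT(rc == 1, "Failed to link queue 0 on profile 0");

	q = 1;	/* queue 1 is served while profile 1 is active */
	rc = rte_event_port_profile_links_set(TEST_DEV_ID, 0, &q, NULL, 1, 1);
	TEST_ASSERT(rc == 1, "Failed to link queue 1 on profile 1");

	rc = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(rc, "Failed to start event device");

	/* A started port can flip between the two pre-built link sets
	 * without relinking individual queues.
	 */
	rc = rte_event_port_profile_switch(TEST_DEV_ID, 0, 1);
	TEST_ASSERT_SUCCESS(rc, "Failed to switch port 0 to profile 1");

	return TEST_SUCCESS;
}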
1253 
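/*
 * Enqueue NB_EVENTS new events, optionally change the port's preschedule type
 * at runtime, then drain the queue while measuring the average dequeue cost
 * in TSC cycles. The printed cycle count is informational only; the test just
 * checks that enqueue and, when requested, the preschedule modify succeed.
 */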
1254 static int
1255 preschedule_test(enum rte_event_dev_preschedule_type preschedule_type, const char *preschedule_name,
1256 		 uint8_t modify)
1257 {
1258 #define NB_EVENTS     1024
1259 	uint64_t start, total;
1260 	struct rte_event ev;
1261 	int rc, cnt;
1262 
1263 	ev.event_type = RTE_EVENT_TYPE_CPU;
1264 	ev.queue_id = 0;
1265 	ev.op = RTE_EVENT_OP_NEW;
1266 	ev.u64 = 0xBADF00D0;
1267 
1268 	for (cnt = 0; cnt < NB_EVENTS; cnt++) {
1269 		ev.flow_id = cnt;
1270 		rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1);
1271 		TEST_ASSERT(rc == 1, "Failed to enqueue event");
1272 	}
1273 
1274 	if (modify) {
1275 		rc = rte_event_port_preschedule_modify(TEST_DEV_ID, 0, preschedule_type);
1276 		TEST_ASSERT_SUCCESS(rc, "Failed to modify preschedule type");
1277 	}
1278 
1279 	total = 0;
1280 	while (cnt) {
1281 		start = rte_rdtsc_precise();
1282 		rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
1283 		if (rc) {
1284 			total += rte_rdtsc_precise() - start;
1285 			cnt--;
1286 		}
1287 	}
1288 	printf("Preschedule type : %s, avg cycles %" PRIu64 "\n", preschedule_name,
1289 	       total / NB_EVENTS);
1290 
1291 	return TEST_SUCCESS;
1292 }
1293 
1294 static int
1295 preschedule_configure(enum rte_event_dev_preschedule_type type, struct rte_event_dev_info *info)
1296 {
1297 	struct rte_event_dev_config dev_conf;
1298 	struct rte_event_queue_conf qcfg;
1299 	struct rte_event_port_conf pcfg;
1300 	int rc;
1301 
1302 	devconf_set_default_sane_values(&dev_conf, info);
1303 	dev_conf.nb_event_ports = 1;
1304 	dev_conf.nb_event_queues = 1;
1305 	dev_conf.preschedule_type = type;
1306 
1307 	rc = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
1308 	TEST_ASSERT_SUCCESS(rc, "Failed to configure eventdev");
1309 
1310 	rc = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pcfg);
1311 	TEST_ASSERT_SUCCESS(rc, "Failed to get port0 default config");
1312 	rc = rte_event_port_setup(TEST_DEV_ID, 0, &pcfg);
1313 	TEST_ASSERT_SUCCESS(rc, "Failed to setup port0");
1314 
1315 	rc = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qcfg);
1316 	TEST_ASSERT_SUCCESS(rc, "Failed to get queue0 default config");
1317 	rc = rte_event_queue_setup(TEST_DEV_ID, 0, &qcfg);
1318 	TEST_ASSERT_SUCCESS(rc, "Failed to setup queue0");
1319 
1320 	rc = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
1321 	TEST_ASSERT(rc == (int)dev_conf.nb_event_queues, "Failed to link port, device %d",
1322 		    TEST_DEV_ID);
1323 
1324 	rc = rte_event_dev_start(TEST_DEV_ID);
1325 	TEST_ASSERT_SUCCESS(rc, "Failed to start event device");
1326 
1327 	return 0;
1328 }
1329 
1330 static int
1331 test_eventdev_preschedule_configure(void)
1332 {
1333 	struct rte_event_dev_info info;
1334 	int rc;
1335 
1336 	rte_event_dev_info_get(TEST_DEV_ID, &info);
1337 
1338 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE) == 0)
1339 		return TEST_SKIPPED;
1340 
1341 	rc = preschedule_configure(RTE_EVENT_PRESCHEDULE_NONE, &info);
1342 	TEST_ASSERT_SUCCESS(rc, "Failed to configure eventdev");
1343 	rc = preschedule_test(RTE_EVENT_PRESCHEDULE_NONE, "RTE_EVENT_PRESCHEDULE_NONE", 0);
1344 	TEST_ASSERT_SUCCESS(rc, "Failed to test preschedule RTE_EVENT_PRESCHEDULE_NONE");
1345 
1346 	rte_event_dev_stop(TEST_DEV_ID);
1347 	rc = preschedule_configure(RTE_EVENT_PRESCHEDULE, &info);
1348 	TEST_ASSERT_SUCCESS(rc, "Failed to configure eventdev");
1349 	rc = preschedule_test(RTE_EVENT_PRESCHEDULE, "RTE_EVENT_PRESCHEDULE", 0);
1350 	TEST_ASSERT_SUCCESS(rc, "Failed to test preschedule RTE_EVENT_PRESCHEDULE");
1351 
1352 	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE) {
1353 		rte_event_dev_stop(TEST_DEV_ID);
1354 		rc = preschedule_configure(RTE_EVENT_PRESCHEDULE_ADAPTIVE, &info);
1355 		TEST_ASSERT_SUCCESS(rc, "Failed to configure eventdev");
1356 		rc = preschedule_test(RTE_EVENT_PRESCHEDULE_ADAPTIVE,
1357 				      "RTE_EVENT_PRESCHEDULE_ADAPTIVE", 0);
1358 		TEST_ASSERT_SUCCESS(rc,
1359 				    "Failed to test preschedule RTE_EVENT_PRESCHEDULE_ADAPTIVE");
1360 	}
1361 
1362 	return TEST_SUCCESS;
1363 }
1364 
1365 static int
1366 test_eventdev_preschedule_modify(void)
1367 {
1368 	struct rte_event_dev_info info;
1369 	int rc;
1370 
1371 	rte_event_dev_info_get(TEST_DEV_ID, &info);
1372 	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE) == 0)
1373 		return TEST_SKIPPED;
1374 
1375 	rc = preschedule_configure(RTE_EVENT_PRESCHEDULE_NONE, &info);
1376 	TEST_ASSERT_SUCCESS(rc, "Failed to configure eventdev");
1377 	rc = preschedule_test(RTE_EVENT_PRESCHEDULE_NONE, "RTE_EVENT_PRESCHEDULE_NONE", 1);
1378 	TEST_ASSERT_SUCCESS(rc, "Failed to test per port preschedule RTE_EVENT_PRESCHEDULE_NONE");
1379 
1380 	rc = preschedule_test(RTE_EVENT_PRESCHEDULE, "RTE_EVENT_PRESCHEDULE", 1);
1381 	TEST_ASSERT_SUCCESS(rc, "Failed to test per port preschedule RTE_EVENT_PRESCHEDULE");
1382 
1383 	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE) {
1384 		rc = preschedule_test(RTE_EVENT_PRESCHEDULE_ADAPTIVE,
1385 				      "RTE_EVENT_PRESCHEDULE_ADAPTIVE", 1);
1386 		TEST_ASSERT_SUCCESS(
1387 			rc, "Failed to test per port preschedule RTE_EVENT_PRESCHEDULE_ADAPTIVE");
1388 	}
1389 
1390 	return TEST_SUCCESS;
1391 }
1392 
1393 static int
1394 test_eventdev_close(void)
1395 {
1396 	rte_event_dev_stop(TEST_DEV_ID);
1397 	return rte_event_dev_close(TEST_DEV_ID);
1398 }
1399 
1400 static struct unit_test_suite eventdev_common_testsuite  = {
1401 	.suite_name = "eventdev common code unit test suite",
1402 	.setup = testsuite_setup,
1403 	.teardown = testsuite_teardown,
1404 	.unit_test_cases = {
1405 		TEST_CASE_ST(NULL, NULL,
1406 			test_eventdev_count),
1407 		TEST_CASE_ST(NULL, NULL,
1408 			test_eventdev_get_dev_id),
1409 		TEST_CASE_ST(NULL, NULL,
1410 			test_eventdev_socket_id),
1411 		TEST_CASE_ST(NULL, NULL,
1412 			test_eventdev_info_get),
1413 		TEST_CASE_ST(NULL, NULL,
1414 			test_eventdev_configure),
1415 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1416 			test_eventdev_queue_default_conf_get),
1417 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1418 			test_eventdev_queue_setup),
1419 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1420 			test_eventdev_queue_count),
1421 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1422 			test_eventdev_queue_attr_priority),
1423 		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
1424 			test_eventdev_queue_attr_priority_runtime),
1425 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1426 			test_eventdev_queue_attr_weight_runtime),
1427 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1428 			test_eventdev_queue_attr_affinity_runtime),
1429 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1430 			test_eventdev_queue_attr_nb_atomic_flows),
1431 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1432 			test_eventdev_queue_attr_nb_atomic_order_sequences),
1433 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1434 			test_eventdev_queue_attr_event_queue_cfg),
1435 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1436 			test_eventdev_port_default_conf_get),
1437 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1438 			test_eventdev_port_setup),
1439 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1440 			test_eventdev_port_attr_dequeue_depth),
1441 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1442 			test_eventdev_port_attr_enqueue_depth),
1443 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1444 			test_eventdev_port_attr_new_event_threshold),
1445 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1446 			test_eventdev_port_count),
1447 		TEST_CASE_ST(eventdev_configure_setup, NULL,
1448 			test_eventdev_timeout_ticks),
1449 		TEST_CASE_ST(NULL, NULL,
1450 			test_eventdev_start_stop),
1451 		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
1452 			test_eventdev_profile_switch),
1453 		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
1454 			test_eventdev_preschedule_configure),
1455 		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
1456 			test_eventdev_preschedule_modify),
1457 		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1458 			test_eventdev_link),
1459 		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1460 			test_eventdev_unlink),
1461 		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1462 			test_eventdev_link_get),
1463 		TEST_CASE_ST(eventdev_setup_device, NULL,
1464 			test_eventdev_close),
1465 		TEST_CASES_END() /**< NULL terminate unit test array */
1466 	}
1467 };
1468 
1469 static int
1470 test_eventdev_common(void)
1471 {
1472 	return unit_test_suite_runner(&eventdev_common_testsuite);
1473 }
1474 
1475 static int
1476 test_eventdev_selftest_impl(const char *pmd, const char *opts)
1477 {
1478 	int ret = 0;
1479 
1480 	if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
1481 		ret = rte_vdev_init(pmd, opts);
1482 	if (ret)
1483 		return TEST_SKIPPED;
1484 
1485 	return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
1486 }
1487 
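/*
 * Each wrapper below runs the selftest of one driver, creating the
 * corresponding vdev first if it has not been probed yet; the test is
 * reported as skipped when the driver is not available.
 */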
1488 static int
1489 test_eventdev_selftest_sw(void)
1490 {
1491 	return test_eventdev_selftest_impl("event_sw", "");
1492 }
1493 
1494 static int
1495 test_eventdev_selftest_octeontx(void)
1496 {
1497 	return test_eventdev_selftest_impl("event_octeontx", "");
1498 }
1499 
1500 static int
1501 test_eventdev_selftest_dpaa2(void)
1502 {
1503 	return test_eventdev_selftest_impl("event_dpaa2", "");
1504 }
1505 
1506 static int
1507 test_eventdev_selftest_dlb2(void)
1508 {
1509 	return test_eventdev_selftest_impl("dlb2_event", "");
1510 }
1511 
1512 static int
1513 test_eventdev_selftest_cn9k(void)
1514 {
1515 	return test_eventdev_selftest_impl("event_cn9k", "");
1516 }
1517 
1518 static int
1519 test_eventdev_selftest_cn10k(void)
1520 {
1521 	return test_eventdev_selftest_impl("event_cn10k", "");
1522 }
1523 
1524 static int
1525 test_eventdev_selftest_cn20k(void)
1526 {
1527 	return test_eventdev_selftest_impl("event_cn20k", "");
1528 }
1529 
1530 #endif /* !RTE_EXEC_ENV_WINDOWS */
1531 
1532 REGISTER_FAST_TEST(eventdev_common_autotest, true, true, test_eventdev_common);
1533 
1534 #ifndef RTE_EXEC_ENV_WINDOWS
1535 REGISTER_FAST_TEST(eventdev_selftest_sw, true, true, test_eventdev_selftest_sw);
1536 REGISTER_DRIVER_TEST(eventdev_selftest_octeontx, test_eventdev_selftest_octeontx);
1537 REGISTER_DRIVER_TEST(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
1538 REGISTER_DRIVER_TEST(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
1539 REGISTER_DRIVER_TEST(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
1540 REGISTER_DRIVER_TEST(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);
1541 REGISTER_DRIVER_TEST(eventdev_selftest_cn20k, test_eventdev_selftest_cn20k);
1542 
1543 #endif /* !RTE_EXEC_ENV_WINDOWS */
1544