xref: /dpdk/app/test/test_eventdev.c (revision 97b914f4e715565d53d38ac6e04815b9be5e58a9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include "test.h"
6 
7 #include <rte_common.h>
8 #include <rte_hexdump.h>
9 #include <rte_mbuf.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 
13 #ifdef RTE_EXEC_ENV_WINDOWS
14 static int
15 test_eventdev_common(void)
16 {
17 	printf("eventdev_common not supported on Windows, skipping test\n");
18 	return TEST_SKIPPED;
19 }
20 
21 #else
22 
23 #include <rte_eventdev.h>
24 #include <rte_dev.h>
25 #include <rte_bus_vdev.h>
26 
27 #define TEST_DEV_ID   0
28 
29 static int
30 testsuite_setup(void)
31 {
32 	RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
33 	uint8_t count;
34 	count = rte_event_dev_count();
35 	if (!count) {
36 		printf("Failed to find a valid event device,"
37 			" testing with event_skeleton device\n");
38 		return rte_vdev_init("event_skeleton", NULL);
39 	}
40 	return TEST_SUCCESS;
41 }
42 
/* Suite teardown: nothing to release at this level. */
static void
testsuite_teardown(void)
{
}
47 
48 static int
49 test_eventdev_count(void)
50 {
51 	uint8_t count;
52 	count = rte_event_dev_count();
53 	TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
54 	return TEST_SUCCESS;
55 }
56 
57 static int
58 test_eventdev_get_dev_id(void)
59 {
60 	int ret;
61 	ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
62 	TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
63 	return TEST_SUCCESS;
64 }
65 
66 static int
67 test_eventdev_socket_id(void)
68 {
69 	int socket_id;
70 	socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
71 	TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
72 				socket_id);
73 	socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
74 	TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
75 
76 	return TEST_SUCCESS;
77 }
78 
79 static int
80 test_eventdev_info_get(void)
81 {
82 	int ret;
83 	struct rte_event_dev_info info;
84 	ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
85 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
86 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
87 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
88 	TEST_ASSERT(info.max_event_ports > 0,
89 			"Not enough event ports %d", info.max_event_ports);
90 	TEST_ASSERT(info.max_event_queues > 0,
91 			"Not enough event queues %d", info.max_event_queues);
92 	return TEST_SUCCESS;
93 }
94 
95 static inline void
96 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
97 			struct rte_event_dev_info *info)
98 {
99 	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
100 	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
101 	dev_conf->nb_event_ports = info->max_event_ports;
102 	dev_conf->nb_event_queues = info->max_event_queues;
103 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
104 	dev_conf->nb_event_port_dequeue_depth =
105 			info->max_event_port_dequeue_depth;
106 	dev_conf->nb_event_port_enqueue_depth =
107 			info->max_event_port_enqueue_depth;
108 	dev_conf->nb_event_port_enqueue_depth =
109 			info->max_event_port_enqueue_depth;
110 	dev_conf->nb_events_limit =
111 			info->max_num_events;
112 }
113 
114 static int
115 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
116 		struct rte_event_dev_info *info,
117 		void (*fn)(struct rte_event_dev_config *dev_conf,
118 			struct rte_event_dev_info *info))
119 {
120 	devconf_set_default_sane_values(dev_conf, info);
121 	fn(dev_conf, info);
122 	return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
123 }
124 
125 static void
126 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
127 		  struct rte_event_dev_info *info)
128 {
129 	dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
130 }
131 
132 static void
133 max_events_limit(struct rte_event_dev_config *dev_conf,
134 		  struct rte_event_dev_info *info)
135 {
136 	dev_conf->nb_events_limit  = info->max_num_events + 1;
137 }
138 
139 static void
140 max_event_ports(struct rte_event_dev_config *dev_conf,
141 		  struct rte_event_dev_info *info)
142 {
143 	dev_conf->nb_event_ports = info->max_event_ports + 1;
144 }
145 
146 static void
147 max_event_queues(struct rte_event_dev_config *dev_conf,
148 		  struct rte_event_dev_info *info)
149 {
150 	dev_conf->nb_event_queues = info->max_event_queues + 1;
151 }
152 
153 static void
154 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
155 		  struct rte_event_dev_info *info)
156 {
157 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
158 }
159 
160 static void
161 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
162 		  struct rte_event_dev_info *info)
163 {
164 	dev_conf->nb_event_port_dequeue_depth =
165 		info->max_event_port_dequeue_depth + 1;
166 }
167 
168 static void
169 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
170 		  struct rte_event_dev_info *info)
171 {
172 	dev_conf->nb_event_port_enqueue_depth =
173 		info->max_event_port_enqueue_depth + 1;
174 }
175 
176 
/*
 * Exercise rte_event_dev_configure(): NULL config and each over-limit
 * field must fail with -EINVAL; a sane config, a smaller re-config and
 * a re-config back to the maximums must all succeed.
 */
static int
test_eventdev_configure(void)
{
	int ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	/* NULL config pointer must be rejected */
	ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Check limits */
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_events_limit),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_ports),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queues),
		 "Config negative test failed");
	TEST_ASSERT_EQUAL(-EINVAL,
		test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
		"Config negative test failed");

	/* Depth limits are only validated on burst-capable devices */
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
		TEST_ASSERT_EQUAL(-EINVAL,
				test_ethdev_config_run(&dev_conf, &info,
					max_event_port_dequeue_depth),
				"Config negative test failed");
		TEST_ASSERT_EQUAL(-EINVAL,
				test_ethdev_config_run(&dev_conf, &info,
					max_event_port_enqueue_depth),
				"Config negative test failed");
	}

	/* Positive case */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	/* re-configure */
	devconf_set_default_sane_values(&dev_conf, &info);
	dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
	dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");

	/* re-configure back to max_event_queues and max_event_ports */
	devconf_set_default_sane_values(&dev_conf, &info);
	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");

	return TEST_SUCCESS;

}
237 
238 static int
239 eventdev_configure_setup(void)
240 {
241 	int ret;
242 	struct rte_event_dev_config dev_conf;
243 	struct rte_event_dev_info info;
244 
245 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
246 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
247 	devconf_set_default_sane_values(&dev_conf, &info);
248 	ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
249 	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
250 
251 	return TEST_SUCCESS;
252 }
253 
254 static int
255 test_eventdev_queue_default_conf_get(void)
256 {
257 	int i, ret;
258 	struct rte_event_queue_conf qconf;
259 
260 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
261 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
262 
263 	uint32_t queue_count;
264 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
265 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
266 			    "Queue count get failed");
267 
268 	for (i = 0; i < (int)queue_count; i++) {
269 		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
270 						 &qconf);
271 		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
272 	}
273 
274 	return TEST_SUCCESS;
275 }
276 
/*
 * Exercise rte_event_queue_setup(): out-of-range flow counts, order
 * sequence counts and queue ids must fail with -EINVAL; the default
 * config and a NULL config (meaning "use defaults") must succeed.
 */
static int
test_eventdev_queue_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	/* nb_atomic_flows above the advertised maximum must be rejected */
	qconf.event_queue_cfg =	RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* over-limit nb_atomic_order_sequences must be rejected too */
	qconf.nb_atomic_flows = info.max_event_queue_flows;
	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* queue id past the last valid queue must be rejected */
	ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
					&qconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
	ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	/* NULL conf means "use the default configuration" */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	return TEST_SUCCESS;
}
323 
324 static int
325 test_eventdev_queue_count(void)
326 {
327 	int ret;
328 	struct rte_event_dev_info info;
329 
330 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
331 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
332 
333 	uint32_t queue_count;
334 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
335 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
336 			    "Queue count get failed");
337 	TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
338 			  "Wrong queue count");
339 
340 	return TEST_SUCCESS;
341 }
342 
/*
 * Configure every queue with a distinct priority and verify the PRIORITY
 * attribute reads back the configured value on QoS-capable devices, or
 * the normalized default priority otherwise.
 */
static int
test_eventdev_queue_attr_priority(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint8_t priority;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	/* give each queue a priority derived from its index */
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
					&qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
		qconf.priority = i %  RTE_EVENT_DEV_PRIORITY_LOWEST;
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	for (i = 0; i < (int)queue_count; i++) {
		uint32_t tmp;
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				    RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
				    "Queue priority get failed");
		priority = tmp;

		/* devices without QUEUE_QOS report the normal priority */
		if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			TEST_ASSERT_EQUAL(priority,
			 i %  RTE_EVENT_DEV_PRIORITY_LOWEST,
			 "Wrong priority value for queue%d", i);
		else
			TEST_ASSERT_EQUAL(priority,
			 RTE_EVENT_DEV_PRIORITY_NORMAL,
			 "Wrong priority value for queue%d", i);
	}

	return TEST_SUCCESS;
}
387 
/*
 * Configure all queues for atomic scheduling and verify the
 * NB_ATOMIC_FLOWS attribute reads back the configured value.
 * Returns -ENOTSUP when the PMD advertises no atomic flow resources.
 */
static int
test_eventdev_queue_attr_nb_atomic_flows(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	uint32_t nb_atomic_flows;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");

	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");

	if (qconf.nb_atomic_flows == 0)
		/* Assume PMD doesn't support atomic flows, return early */
		return -ENOTSUP;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;

	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	/* each queue must report the flow count it was configured with */
	for (i = 0; i < (int)queue_count; i++) {
		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
				    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
				    &nb_atomic_flows),
				    "Queue nb_atomic_flows get failed");

		TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
				  "Wrong atomic flows value for queue%d", i);
	}

	return TEST_SUCCESS;
}
430 
431 static int
432 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
433 {
434 	int i, ret;
435 	struct rte_event_dev_info info;
436 	struct rte_event_queue_conf qconf;
437 	uint32_t nb_atomic_order_sequences;
438 
439 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
440 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
441 
442 	uint32_t queue_count;
443 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
444 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
445 			    "Queue count get failed");
446 
447 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
448 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
449 
450 	if (qconf.nb_atomic_order_sequences == 0)
451 		/* Assume PMD doesn't support reordering */
452 		return -ENOTSUP;
453 
454 	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
455 
456 	for (i = 0; i < (int)queue_count; i++) {
457 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
458 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
459 	}
460 
461 	for (i = 0; i < (int)queue_count; i++) {
462 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
463 			    RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
464 			    &nb_atomic_order_sequences),
465 			    "Queue nb_atomic_order_sequencess get failed");
466 
467 		TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
468 				  qconf.nb_atomic_order_sequences,
469 				  "Wrong atomic order sequences value for queue%d",
470 				  i);
471 	}
472 
473 	return TEST_SUCCESS;
474 }
475 
476 static int
477 test_eventdev_queue_attr_event_queue_cfg(void)
478 {
479 	int i, ret;
480 	struct rte_event_dev_info info;
481 	struct rte_event_queue_conf qconf;
482 	uint32_t event_queue_cfg;
483 
484 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
485 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
486 
487 	uint32_t queue_count;
488 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
489 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
490 			    "Queue count get failed");
491 
492 	ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
493 	TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
494 
495 	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
496 
497 	for (i = 0; i < (int)queue_count; i++) {
498 		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
499 		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
500 	}
501 
502 	for (i = 0; i < (int)queue_count; i++) {
503 		TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
504 				    RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
505 				    &event_queue_cfg),
506 				    "Queue event_queue_cfg get failed");
507 
508 		TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
509 				  "Wrong event_queue_cfg value for queue%d",
510 				  i);
511 	}
512 
513 	return TEST_SUCCESS;
514 }
515 
516 static int
517 test_eventdev_port_default_conf_get(void)
518 {
519 	int i, ret;
520 	struct rte_event_port_conf pconf;
521 
522 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
523 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
524 
525 	uint32_t port_count;
526 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
527 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
528 				&port_count), "Port count get failed");
529 
530 	ret = rte_event_port_default_conf_get(TEST_DEV_ID,
531 			port_count + 1, NULL);
532 	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
533 
534 	for (i = 0; i < (int)port_count; i++) {
535 		ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
536 							&pconf);
537 		TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
538 	}
539 
540 	return TEST_SUCCESS;
541 }
542 
/*
 * Exercise rte_event_port_setup(): over-limit thresholds/depths, an
 * unsupported implicit-release flag and an out-of-range port id must all
 * fail with -EINVAL; the default and NULL configs must succeed.
 */
static int
test_eventdev_port_setup(void)
{
	int i, ret;
	struct rte_event_dev_info info;
	struct rte_event_port_conf pconf;

	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");

	/* Negative cases */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	/* new-event threshold above the device limit must be rejected */
	pconf.new_event_threshold = info.max_num_events + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	pconf.new_event_threshold = info.max_num_events;
	pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	pconf.dequeue_depth = info.max_event_port_dequeue_depth;
	pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* disabling implicit release must fail on devices without the cap */
	if (!(info.event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
		pconf.event_port_cfg = 0;
	}

	/* port id past the last valid port must be rejected */
	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
					&pconf);
	TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);

	/* Positive case */
	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	/* NULL conf means "use the default configuration" */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	return TEST_SUCCESS;
}
601 
602 static int
603 test_eventdev_port_attr_dequeue_depth(void)
604 {
605 	int ret;
606 	struct rte_event_dev_info info;
607 	struct rte_event_port_conf pconf;
608 
609 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
610 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
611 
612 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
613 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
614 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
615 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
616 
617 	uint32_t value;
618 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
619 			RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
620 			0, "Call to get port dequeue depth failed");
621 	TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
622 			"Wrong port dequeue depth");
623 
624 	return TEST_SUCCESS;
625 }
626 
627 static int
628 test_eventdev_port_attr_enqueue_depth(void)
629 {
630 	int ret;
631 	struct rte_event_dev_info info;
632 	struct rte_event_port_conf pconf;
633 
634 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
635 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
636 
637 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
638 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
639 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
640 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
641 
642 	uint32_t value;
643 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
644 			RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
645 			0, "Call to get port enqueue depth failed");
646 	TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
647 			"Wrong port enqueue depth");
648 
649 	return TEST_SUCCESS;
650 }
651 
652 static int
653 test_eventdev_port_attr_new_event_threshold(void)
654 {
655 	int ret;
656 	struct rte_event_dev_info info;
657 	struct rte_event_port_conf pconf;
658 
659 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
660 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
661 
662 	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
663 	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
664 	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
665 	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
666 
667 	uint32_t value;
668 	TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
669 			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
670 			0, "Call to get port new event threshold failed");
671 	TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
672 			"Wrong port new event threshold");
673 
674 	return TEST_SUCCESS;
675 }
676 
677 static int
678 test_eventdev_port_count(void)
679 {
680 	int ret;
681 	struct rte_event_dev_info info;
682 
683 	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
684 	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
685 
686 	uint32_t port_count;
687 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
688 				RTE_EVENT_DEV_ATTR_PORT_COUNT,
689 				&port_count), "Port count get failed");
690 	TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
691 
692 	return TEST_SUCCESS;
693 }
694 
695 static int
696 test_eventdev_timeout_ticks(void)
697 {
698 	int ret;
699 	uint64_t timeout_ticks;
700 
701 	ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
702 	if (ret != -ENOTSUP)
703 		TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
704 
705 	return ret;
706 }
707 
708 
/*
 * Full bring-up/tear-down cycle: configure the device, set up every
 * queue and port with defaults, link all queues to port 0, start the
 * device and stop it again.
 */
static int
test_eventdev_start_stop(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	/* NULL queue list links every configured queue to port 0 */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
		    TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	rte_event_dev_stop(TEST_DEV_ID);
	return TEST_SUCCESS;
}
746 
747 
/*
 * Test fixture: configure the device, set up all queues and ports with
 * defaults, link everything to port 0 and start the device. Paired with
 * eventdev_stop_device() as the test teardown.
 */
static int
eventdev_setup_device(void)
{
	int i, ret;

	ret = eventdev_configure_setup();
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	for (i = 0; i < (int)queue_count; i++) {
		ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
	}

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&port_count), "Port count get failed");

	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
	}

	/* NULL queue list links every configured queue to port 0 */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
		    TEST_DEV_ID);

	ret = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);

	return TEST_SUCCESS;
}
784 
785 static void
786 eventdev_stop_device(void)
787 {
788 	rte_event_dev_stop(TEST_DEV_ID);
789 }
790 
791 static int
792 test_eventdev_link(void)
793 {
794 	int ret, nb_queues, i;
795 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
796 	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
797 
798 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
799 	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
800 				 TEST_DEV_ID);
801 
802 	uint32_t queue_count;
803 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
804 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
805 			    "Queue count get failed");
806 	nb_queues = queue_count;
807 	for (i = 0; i < nb_queues; i++) {
808 		queues[i] = i;
809 		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
810 	}
811 
812 	ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
813 					priorities, nb_queues);
814 	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
815 				 TEST_DEV_ID, ret);
816 	return TEST_SUCCESS;
817 }
818 
819 static int
820 test_eventdev_unlink(void)
821 {
822 	int ret, nb_queues, i;
823 	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
824 
825 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
826 	TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
827 				 TEST_DEV_ID);
828 
829 	uint32_t queue_count;
830 	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
831 			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
832 			    "Queue count get failed");
833 	nb_queues = queue_count;
834 	for (i = 0; i < nb_queues; i++)
835 		queues[i] = i;
836 
837 	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
838 	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
839 				 TEST_DEV_ID);
840 
841 	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
842 	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
843 				 TEST_DEV_ID, ret);
844 	return TEST_SUCCESS;
845 }
846 
/*
 * Exercise rte_event_port_links_get() through a sequence of link/unlink
 * operations: zero links after a full unlink, all links after a full
 * link, a single link, and a partial unlink (4 linked, 2 removed) where
 * the surviving queues and their priorities are checked explicitly.
 */
static int
test_eventdev_link_get(void)
{
	int ret, i;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

	/* link all queues */
	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
	TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
				 TEST_DEV_ID);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
			    "Queue count get failed");
	const int nb_queues = queue_count;
	for (i = 0; i < nb_queues; i++)
		queues[i] = i;

	ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);

	/* with everything unlinked, links_get must report zero links */
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);

	/* link all queues and get the links */
	for (i = 0; i < nb_queues; i++) {
		queues[i] = i;
		priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
	}
	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
					 nb_queues);
	TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
				 TEST_DEV_ID, ret, nb_queues);
	/* unlink all*/
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	/* link just one queue */
	queues[0] = 0;
	priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;

	ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
	TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
				 TEST_DEV_ID, ret);
	ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
	TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
					TEST_DEV_ID, ret, 1);
	/* unlink the queue */
	ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
	TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
				 TEST_DEV_ID, ret);

	/* 4links and 2 unlinks */
	if (nb_queues >= 4) {
		for (i = 0; i < 4; i++) {
			queues[i] = i;
			priorities[i] = 0x40;
		}
		ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
						4);
		TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
					 TEST_DEV_ID, ret);

		for (i = 0; i < 2; i++)
			queues[i] = i;

		ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
		TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
					 TEST_DEV_ID, ret);
		/* queues 2 and 3 must remain linked at priority 0x40 */
		ret = rte_event_port_links_get(TEST_DEV_ID, 0,
						queues, priorities);
		TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
						TEST_DEV_ID, ret, 2);
		TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
		TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
							ret, 0x40);
		TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
		TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
					ret, 0x40);
	}

	return TEST_SUCCESS;
}
936 
937 static int
938 test_eventdev_close(void)
939 {
940 	rte_event_dev_stop(TEST_DEV_ID);
941 	return rte_event_dev_close(TEST_DEV_ID);
942 }
943 
/*
 * Common eventdev test suite. Cases with eventdev_configure_setup() as
 * setup run against a freshly configured device; cases with
 * eventdev_setup_device()/eventdev_stop_device() run against a started
 * device and stop it afterwards.
 */
static struct unit_test_suite eventdev_common_testsuite  = {
	.suite_name = "eventdev common code unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_count),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_get_dev_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_socket_id),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_info_get),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_configure),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_priority),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_flows),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_nb_atomic_order_sequences),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_queue_attr_event_queue_cfg),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_default_conf_get),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_setup),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_dequeue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_enqueue_depth),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_attr_new_event_threshold),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_port_count),
		TEST_CASE_ST(eventdev_configure_setup, NULL,
			test_eventdev_timeout_ticks),
		TEST_CASE_ST(NULL, NULL,
			test_eventdev_start_stop),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_unlink),
		TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
			test_eventdev_link_get),
		TEST_CASE_ST(eventdev_setup_device, NULL,
			test_eventdev_close),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
1000 
1001 static int
1002 test_eventdev_common(void)
1003 {
1004 	return unit_test_suite_runner(&eventdev_common_testsuite);
1005 }
1006 
/*
 * Driver selftest helper: create the named vdev if no device of that
 * name was probed, then run the PMD's built-in selftest.
 * Returns TEST_SKIPPED when the vdev cannot be created (PMD not built).
 */
static int
test_eventdev_selftest_impl(const char *pmd, const char *opts)
{
	int ret = 0;

	/* -ENODEV: no device of this name yet; try to instantiate one */
	if (rte_event_dev_get_dev_id(pmd) == -ENODEV)
		ret = rte_vdev_init(pmd, opts);
	if (ret)
		return TEST_SKIPPED;

	return rte_event_dev_selftest(rte_event_dev_get_dev_id(pmd));
}
1019 
/* Self-test for the software event PMD. */
static int
test_eventdev_selftest_sw(void)
{
	const char *pmd_name = "event_sw";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1025 
/* Self-test for the OCTEON TX event PMD. */
static int
test_eventdev_selftest_octeontx(void)
{
	const char *pmd_name = "event_octeontx";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1031 
/* Self-test for the DPAA2 event PMD. */
static int
test_eventdev_selftest_dpaa2(void)
{
	const char *pmd_name = "event_dpaa2";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1037 
/* Self-test for the Intel DLB2 event PMD. */
static int
test_eventdev_selftest_dlb2(void)
{
	const char *pmd_name = "dlb2_event";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1043 
/* Self-test for the Marvell CN9K event PMD. */
static int
test_eventdev_selftest_cn9k(void)
{
	const char *pmd_name = "event_cn9k";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1049 
/* Self-test for the Marvell CN10K event PMD. */
static int
test_eventdev_selftest_cn10k(void)
{
	const char *pmd_name = "event_cn10k";

	return test_eventdev_selftest_impl(pmd_name, "");
}
1055 
1056 #endif /* !RTE_EXEC_ENV_WINDOWS */
1057 
/* The common suite is registered on all platforms (Windows gets a stub). */
REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);

#ifndef RTE_EXEC_ENV_WINDOWS
/* Driver self-tests are only compiled on non-Windows builds. */
REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
		test_eventdev_selftest_octeontx);
REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);

#endif /* !RTE_EXEC_ENV_WINDOWS */
1070