/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <string.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>

#include <rte_event_eth_rx_adapter.h>

#include "test.h"

#define MAX_NUM_RX_QUEUE	64
#define NB_MBUFS		(8192 * num_ports * MAX_NUM_RX_QUEUE)
#define MBUF_CACHE_SIZE		512
#define MBUF_PRIV_SIZE		0
#define TEST_INST_ID		0
#define TEST_DEV_ID		0
#define TEST_ETHDEV_ID		0

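/* Test state shared across the test cases: the mbuf pool, the configured
 * Rx/Tx queue counts, the adapter capabilities and the port selected for
 * the Rx interrupt tests.
 */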
struct event_eth_rx_adapter_test_params {
	struct rte_mempool *mp;
	uint16_t rx_rings, tx_rings;
	uint32_t caps;
	int rx_intr_port_inited;
	uint16_t rx_intr_port;
};

static struct event_eth_rx_adapter_test_params default_params;
static bool event_dev_created;
static bool eth_dev_created;

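/* Common port setup: configure the device, set up the Rx/Tx queues,
 * start the port and enable promiscuous mode.
 */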
static inline int
port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_configure(port, 0, 0, port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0)
		return retval;

	default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
					MAX_NUM_RX_QUEUE);
	default_params.tx_rings = 1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
				default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval < 0)
		return retval;
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

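/* Initialize a port with Rx queue interrupts enabled (intr_conf.rxq = 1). */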
static inline int
port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_NONE,
		},
		.intr_conf = {
			.rxq = 1,
		},
	};

	return port_init_common(port, &port_conf_default, mp);
}

static inline int
port_init(uint16_t port, struct rte_mempool *mp)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_NONE,
		},
	};

	return port_init_common(port, &port_conf_default, mp);
}

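/* Create the packet mbuf pool and initialize ports in Rx interrupt mode.
 * The first port without the INTERNAL_PORT capability is recorded as the
 * interrupt test port; ports that have the capability are stopped again.
 */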
static int
init_port_rx_intr(int num_ports)
{
	int retval;
	uint16_t portid;
	int err;

	default_params.mp = rte_pktmbuf_pool_create("packet_pool",
						   NB_MBUFS,
						   MBUF_CACHE_SIZE,
						   MBUF_PRIV_SIZE,
						   RTE_MBUF_DEFAULT_BUF_SIZE,
						   rte_socket_id());
	if (!default_params.mp)
		return -ENOMEM;

	RTE_ETH_FOREACH_DEV(portid) {
		retval = port_init_rx_intr(portid, default_params.mp);
		if (retval)
			continue;
		err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
							&default_params.caps);
		if (err)
			continue;
		if (!(default_params.caps &
			RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			default_params.rx_intr_port_inited = 1;
			default_params.rx_intr_port = portid;
			return 0;
		}
		retval = rte_eth_dev_stop(portid);
		TEST_ASSERT(retval == 0, "Failed to stop port %u: %d\n",
					portid, retval);
	}
	return 0;
}

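/* Create (or reuse) the packet mbuf pool and initialize all ports in
 * poll mode.
 */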
static int
init_ports(int num_ports)
{
	uint16_t portid;
	int retval;

	struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");

	if (ptr == NULL)
		default_params.mp = rte_pktmbuf_pool_create("packet_pool",
						NB_MBUFS,
						MBUF_CACHE_SIZE,
						MBUF_PRIV_SIZE,
						RTE_MBUF_DEFAULT_BUF_SIZE,
						rte_socket_id());
	else
		default_params.mp = ptr;

	if (!default_params.mp)
		return -ENOMEM;

	RTE_ETH_FOREACH_DEV(portid) {
		retval = port_init(portid, default_params.mp);
		if (retval)
			return retval;
	}

	return 0;
}

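/* Suite setup: create an event_skeleton event device and a net_null
 * ethdev if none exist, configure the event device, initialize the
 * ports and read the adapter capabilities for TEST_ETHDEV_ID.
 */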
static int
testsuite_setup(void)
{
	int err;
	uint8_t count;
	struct rte_event_dev_info dev_info;

	count = rte_event_dev_count();
	if (!count) {
		printf("Failed to find a valid event device,"
			" testing with event_skeleton device\n");
		err = rte_vdev_init("event_skeleton", NULL);
		TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
			    err);
		event_dev_created = true;
	}

	struct rte_event_dev_config config = {
			.nb_event_queues = 1,
			.nb_event_ports = 1,
	};

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Failed to get event dev info err %d\n", err);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;
	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	count = rte_eth_dev_count_total();
	if (!count) {
		printf("Testing with net_null device\n");
		err = rte_vdev_init("net_null", NULL);
		TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
			    err);
		eth_dev_created = true;
	}

	/*
	 * Ethernet devices like octeontx use the event device to receive
	 * packets, so rte_eth_dev_start() invokes rte_event_dev_start()
	 * internally; hence call init_ports() after rte_event_dev_configure().
	 */
	err = init_ports(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
						&default_params.caps);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
			err);

	return err;
}

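/* Suite setup for the Rx interrupt tests: like testsuite_setup(), but the
 * ports are initialized with Rx interrupts enabled and the capabilities
 * are read for the selected interrupt test port.
 */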
static int
testsuite_setup_rx_intr(void)
{
	int err;
	uint8_t count;
	struct rte_event_dev_info dev_info;

	count = rte_event_dev_count();
	if (!count) {
		printf("Failed to find a valid event device,"
			" testing with event_skeleton device\n");
		err = rte_vdev_init("event_skeleton", NULL);
		TEST_ASSERT(err == 0, "Failed to create event_skeleton. err=%d",
			    err);
		event_dev_created = true;
	}

	struct rte_event_dev_config config = {
		.nb_event_queues = 1,
		.nb_event_ports = 1,
	};

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Failed to get event dev info err %d\n", err);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;

	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	count = rte_eth_dev_count_total();
	if (!count) {
		printf("Testing with net_null device\n");
		err = rte_vdev_init("net_null", NULL);
		TEST_ASSERT(err == 0, "Failed to create net_null. err=%d",
			    err);
		eth_dev_created = true;
	}

	/*
	 * Ethernet devices like octeontx use the event device to receive
	 * packets, so rte_eth_dev_start() invokes rte_event_dev_start()
	 * internally; hence call init_port_rx_intr() after
	 * rte_event_dev_configure().
	 */
	err = init_port_rx_intr(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	if (!default_params.rx_intr_port_inited)
		return 0;

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
						default_params.rx_intr_port,
						&default_params.caps);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);

	return err;
}

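/* Suite teardown: stop all ports, remove any vdevs created during setup
 * and free the mbuf pool.
 */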
static void
testsuite_teardown(void)
{
	int err;
	uint32_t i;
	RTE_ETH_FOREACH_DEV(i)
		rte_eth_dev_stop(i);

	if (eth_dev_created) {
		err = rte_vdev_uninit("net_null");
		if (err)
			printf("Failed to delete net_null. err=%d", err);
		eth_dev_created = false;
	}

	rte_mempool_free(default_params.mp);
	if (event_dev_created) {
		err = rte_vdev_uninit("event_skeleton");
		if (err)
			printf("Failed to delete event_skeleton. err=%d", err);
		event_dev_created = false;
	}

	memset(&default_params, 0, sizeof(default_params));
}

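/* Teardown for the Rx interrupt suite: only the interrupt test port is
 * stopped.
 */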
static void
testsuite_teardown_rx_intr(void)
{
	int err;
	if (!default_params.rx_intr_port_inited)
		return;

	rte_eth_dev_stop(default_params.rx_intr_port);
	if (eth_dev_created) {
		err = rte_vdev_uninit("net_null");
		if (err)
			printf("Failed to delete net_null. err=%d", err);
		eth_dev_created = false;
	}
	rte_mempool_free(default_params.mp);
	if (event_dev_created) {
		err = rte_vdev_uninit("event_skeleton");
		if (err)
			printf("Failed to delete event_skeleton. err=%d", err);
		event_dev_created = false;
	}

	memset(&default_params, 0, sizeof(default_params));
}

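/* Create Rx adapter instance TEST_INST_ID on TEST_DEV_ID with an event
 * port configuration derived from the event device limits.
 */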
static int
adapter_create(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf rx_p_conf;

	memset(&rx_p_conf, 0, sizeof(rx_p_conf));

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	rx_p_conf.new_event_threshold = dev_info.max_num_events;
	rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&rx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return err;
}

static void
adapter_free(void)
{
	rte_event_eth_rx_adapter_free(TEST_INST_ID);
}

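/* Exercise the create/free error paths: NULL port configuration,
 * duplicate create, double free and an invalid adapter id.
 */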
static int
adapter_create_free(void)
{
	int err;

	struct rte_event_port_conf rx_p_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 8,
			.new_event_threshold = 1200,
	};

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&rx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
					TEST_DEV_ID, &rx_p_conf);
	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);

	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	err = rte_event_eth_rx_adapter_free(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	return TEST_SUCCESS;
}

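/* Add and delete Rx queues: a single queue id is only accepted when the
 * adapter reports the MULTI_EVENTQ capability, otherwise only -1 (all
 * queues) is valid; also check invalid port and adapter ids.
 */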
static int
adapter_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
					 &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
		ev.flow_id = 1;
		queue_config.rx_queue_flags =
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	}
	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						rte_eth_dev_count_total(),
						-1, &queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID, 0,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID,
							-1,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID,
							-1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	} else {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID,
							0,
							&queue_config);
		TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID, -1,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
						&queue_config);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

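/* Fill the remaining port ids up to RTE_MAX_ETHPORTS with net_null vdevs
 * and verify that Rx queues can be added to and deleted from every port;
 * the extra vdevs are removed afterwards.
 */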
static int
adapter_multi_eth_add_del(void)
{
	int err;
	struct rte_event ev;

	uint16_t port_index, port_index_base, drv_id = 0;
	char driver_name[50];

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	/* stop existing eth devices so they can be reconfigured later */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_eth_dev_stop(port_index);
		TEST_ASSERT(err == 0, "Failed to stop port %u: %d\n",
					port_index, err);
	}

	/* create net_null vdevs to fill ports up to RTE_MAX_ETHPORTS */
	port_index = rte_eth_dev_count_total();
	port_index_base = port_index;
	for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
		snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
				drv_id);
		err = rte_vdev_init(driver_name, NULL);
		TEST_ASSERT(err == 0, "Failed driver %s got %d",
			    driver_name, err);
		drv_id += 1;
	}

	err = init_ports(rte_eth_dev_count_total());
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	/* eth_rx_adapter_queue_add for all ports */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
				port_index, -1,
				&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* eth_rx_adapter_queue_del for all ports */
	port_index = 0;
	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
				port_index, -1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* delete vdev ports */
	for (drv_id = 0, port_index = port_index_base;
	     port_index < RTE_MAX_ETHPORTS;
	     drv_id += 1, port_index += 1) {
		snprintf(driver_name, sizeof(driver_name), "%s%u", "net_null",
				drv_id);
		err = rte_vdev_uninit(driver_name);
		TEST_ASSERT(err == 0, "Failed driver %s got %d",
			    driver_name, err);
	}

	return TEST_SUCCESS;
}

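/* Add and delete queues in interrupt mode (servicing_weight == 0) and
 * switch queues from interrupt to poll mode; runs only if an interrupt
 * capable test port was found during setup.
 */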
static int
adapter_intr_queue_add_del(void)
{
	int err;
	struct rte_event ev;
	uint32_t cap;
	uint16_t eth_port;
	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	if (!default_params.rx_intr_port_inited)
		return 0;

	eth_port = default_params.rx_intr_port;
	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	queue_config.rx_queue_flags = 0;
	queue_config.ev = ev;

	/* weight = 0 => interrupt mode */
	queue_config.servicing_weight = 0;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		/* add queue 0 */
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID, 0,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		/* del queue 0 */
		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
							TEST_ETHDEV_ID,
							0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	/* del remaining queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* add all queues */
	queue_config.servicing_weight = 0;
	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* intr -> poll mode queue */
	queue_config.servicing_weight = 1;

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
							TEST_ETHDEV_ID,
							0,
							&queue_config);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1,
						&queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* del queues */
	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return TEST_SUCCESS;
}

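/* Start and stop the adapter with and without queues added; invalid
 * adapter ids must return -EINVAL.
 */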
static int
adapter_start_stop(void)
{
	int err;
	struct rte_event ev;

	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.priority = 0;

	struct rte_event_eth_rx_adapter_queue_conf queue_config;

	queue_config.rx_queue_flags = 0;
	if (default_params.caps &
		RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
		ev.flow_id = 1;
		queue_config.rx_queue_flags =
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	}

	queue_config.ev = ev;
	queue_config.servicing_weight = 1;

	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
					-1, &queue_config);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_start(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_stop(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

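/* Retrieve adapter statistics and check the NULL pointer and invalid
 * adapter id error paths.
 */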
static int
adapter_stats(void)
{
	int err;
	struct rte_event_eth_rx_adapter_stats stats;

	err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_rx_adapter_stats_get(1, &stats);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

static struct unit_test_suite event_eth_rx_tests = {
	.suite_name = "rx event eth adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, adapter_create_free),
		TEST_CASE_ST(adapter_create, adapter_free,
					adapter_queue_add_del),
		TEST_CASE_ST(adapter_create, adapter_free,
					adapter_multi_eth_add_del),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
		TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static struct unit_test_suite event_eth_rx_intr_tests = {
	.suite_name = "rx event eth adapter test suite",
	.setup = testsuite_setup_rx_intr,
	.teardown = testsuite_teardown_rx_intr,
	.unit_test_cases = {
		TEST_CASE_ST(adapter_create, adapter_free,
			adapter_intr_queue_add_del),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_eth_rx_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_rx_tests);
}

static int
test_event_eth_rx_intr_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_rx_intr_tests);
}

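/* Register the suites with the test framework as
 * "event_eth_rx_adapter_autotest" and "event_eth_rx_intr_adapter_autotest".
 */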
REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
		test_event_eth_rx_adapter_common);
REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
		test_event_eth_rx_intr_adapter_common);