/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ring.h>
#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_service.h>

#include "test.h"

#define MAX_NUM_QUEUE		RTE_PMD_RING_MAX_RX_RINGS
#define TEST_INST_ID		0
#define TEST_DEV_ID		0
#define SOCKET0			0
#define RING_SIZE		256
#define ETH_NAME_LEN		32
#define NUM_ETH_PAIR		1
#define NUM_ETH_DEV		(2 * NUM_ETH_PAIR)
#define NB_MBUF			512
#define PAIR_PORT_INDEX(p)	((p) + NUM_ETH_PAIR)
#define PORT(p)			default_params.port[(p)]
#define TEST_ETHDEV_ID		PORT(0)
#define TEST_ETHDEV_PAIR_ID	PORT(PAIR_PORT_INDEX(0))

#define EDEV_RETRY		0xffff

struct event_eth_tx_adapter_test_params {
	struct rte_mempool *mp;
	uint16_t rx_rings, tx_rings;
	struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE];
	int port[NUM_ETH_DEV];
};

static int event_dev_delete;
static struct event_eth_tx_adapter_test_params default_params;
static uint64_t eid = ~0ULL;
static uint32_t tid;

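/*
 * Initialize an ethdev with MAX_NUM_QUEUE Rx and Tx queues, start the
 * port and enable promiscuous mode; used for the ring based ethdevs
 * created in init_ports().
 */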
static inline int
port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
	int retval;
	uint16_t q;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	default_params.rx_rings = MAX_NUM_QUEUE;
	default_params.tx_rings = MAX_NUM_QUEUE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
				default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval < 0)
		return retval;
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

static inline int
port_init(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	return port_init_common(port, &conf, mp);
}

#define RING_NAME_LEN	20
#define DEV_NAME_LEN	20

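/*
 * Create the mbuf pool, the backing rte_rings and NUM_ETH_PAIR pairs of
 * ring based ethdevs. The Tx rings of one port are the Rx rings of its
 * peer, so mbufs transmitted on PORT(i) can be read back from
 * PORT(PAIR_PORT_INDEX(i)).
 */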
static int
init_ports(void)
{
	char ring_name[ETH_NAME_LEN];
	unsigned int i, j;
	struct rte_ring * const *c1;
	struct rte_ring * const *c2;
	int err;

	if (!default_params.mp)
		default_params.mp = rte_pktmbuf_pool_create("mbuf_pool",
			NB_MBUF, 32,
			0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	if (!default_params.mp)
		return -ENOMEM;

	for (i = 0; i < NUM_ETH_DEV; i++) {
		for (j = 0; j < MAX_NUM_QUEUE; j++) {
			snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j);
			default_params.r[i][j] = rte_ring_create(ring_name,
						RING_SIZE,
						SOCKET0,
						RING_F_SP_ENQ | RING_F_SC_DEQ);
			TEST_ASSERT((default_params.r[i][j] != NULL),
				"Failed to allocate ring");
		}
	}

	/*
	 * Create pairs of pseudo-Ethernet ports with the traffic looped
	 * between them, i.e. traffic sent on the first port of a pair is
	 * read back on the second port and vice-versa.
	 */
	for (i = 0; i < NUM_ETH_PAIR; i++) {
		char dev_name[DEV_NAME_LEN];
		int p;

		c1 = default_params.r[i];
		c2 = default_params.r[PAIR_PORT_INDEX(i)];

		snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR);
		p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE,
				 c2, MAX_NUM_QUEUE, SOCKET0);
		TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
		err = port_init(p, default_params.mp);
		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
		default_params.port[i] = p;

		snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i + NUM_ETH_PAIR, i);
		p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE,
				c1, MAX_NUM_QUEUE, SOCKET0);
		TEST_ASSERT(p > 0, "Port creation failed %s", dev_name);
		err = port_init(p, default_params.mp);
		TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
		default_params.port[PAIR_PORT_INDEX(i)] = p;
	}

	return 0;
}

static void
deinit_ports(void)
{
	uint16_t i, j;
	char name[ETH_NAME_LEN];

	for (i = 0; i < RTE_DIM(default_params.port); i++) {
		rte_eth_dev_stop(default_params.port[i]);
		rte_eth_dev_get_name_by_port(default_params.port[i], name);
		rte_vdev_uninit(name);
		for (j = 0; j < RTE_DIM(default_params.r[i]); j++)
			rte_ring_free(default_params.r[i][j]);
	}
}

static int
testsuite_setup(void)
{
	const char *vdev_name = "event_sw0";

	int err = init_ports();
	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);

	if (rte_event_dev_count() == 0) {
		printf("No event device found, creating %s device"
			" for the test\n", vdev_name);
		err = rte_vdev_init(vdev_name, NULL);
		TEST_ASSERT(err == 0, "vdev %s creation failed %d\n",
			vdev_name, err);
		event_dev_delete = 1;
	}
	return err;
}

#define DEVICE_ID_SIZE 64

static void
testsuite_teardown(void)
{
	deinit_ports();
	rte_mempool_free(default_params.mp);
	default_params.mp = NULL;
	if (event_dev_delete)
		rte_vdev_uninit("event_sw0");
}

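/*
 * Configure the event device with a single queue and port, link them
 * and create a Tx adapter instance with id TEST_INST_ID; used as the
 * setup step for the adapter test cases below.
 */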
static int
tx_adapter_create(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf tx_p_conf;
	uint8_t priority;
	uint8_t queue_id;

	struct rte_event_dev_config config = {
			.nb_event_queues = 1,
			.nb_event_ports = 1,
	};

	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	memset(&tx_p_conf, 0, sizeof(tx_p_conf));
	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	config.nb_events_limit =
			dev_info.max_num_events;

	err = rte_event_dev_configure(TEST_DEV_ID, &config);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	queue_id = 0;
	err = rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf);
	TEST_ASSERT(err == 0, "Event queue setup failed %d\n", err);

	err = rte_event_port_setup(TEST_DEV_ID, 0, NULL);
	TEST_ASSERT(err == 0, "Event port setup failed %d\n", err);

	priority = RTE_EVENT_DEV_PRIORITY_LOWEST;
	err = rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1);
	TEST_ASSERT(err == 1, "Error linking port %s\n",
		rte_strerror(rte_errno));
	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	tx_p_conf.new_event_threshold = dev_info.max_num_events;
	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&tx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	return err;
}

static void
tx_adapter_free(void)
{
	rte_event_eth_tx_adapter_free(TEST_INST_ID);
}

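/* Exercise the adapter create/free error paths: NULL port config,
 * duplicate create, double free and free of an invalid instance id.
 */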
static int
tx_adapter_create_free(void)
{
	int err;
	struct rte_event_dev_info dev_info;
	struct rte_event_port_conf tx_p_conf;

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	memset(&tx_p_conf, 0, sizeof(tx_p_conf));
	tx_p_conf.new_event_threshold = dev_info.max_num_events;
	tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
	tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
					&tx_p_conf);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_create(TEST_INST_ID,
					TEST_DEV_ID, &tx_p_conf);
	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	err = rte_event_eth_tx_adapter_free(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);

	return TEST_SUCCESS;
}

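/* Add and delete adapter Tx queues, covering a single queue id, all
 * queues (-1), an invalid ethdev id and an invalid adapter instance id.
 */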
static int
tx_adapter_queue_add_del(void)
{
	int err;
	uint32_t cap;

	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
					 &cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						rte_eth_dev_count_total(),
						-1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

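/* Start and stop the adapter with and without queues added, then check
 * that start/stop of an invalid instance id return -EINVAL.
 */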
static int
tx_adapter_start_stop(void)
{
	int err;

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_start(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_stop(1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}

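/*
 * Send a single mbuf through the Tx adapter: enqueue it to the event
 * device as a NEW event with the Tx queue set in the mbuf, run the
 * event device scheduler (if required) and adapter service functions,
 * and poll the paired ethdev until the mbuf is received back or
 * EDEV_RETRY iterations elapse.
 */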
static int
tx_adapter_single(uint16_t port, uint16_t tx_queue_id,
		struct rte_mbuf *m, uint8_t qid,
		uint8_t sched_type)
{
	struct rte_event event;
	struct rte_mbuf *r;
	int ret;
	unsigned int l;

	event.queue_id = qid;
	event.op = RTE_EVENT_OP_NEW;
	event.event_type = RTE_EVENT_TYPE_CPU;
	event.sched_type = sched_type;
	event.mbuf = m;

	m->port = port;
	rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);

	l = 0;
	while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) {
		l++;
		if (l > EDEV_RETRY)
			break;
	}

	TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev");
	l = 0;
	while (l++ < EDEV_RETRY) {

		if (eid != ~0ULL) {
			ret = rte_service_run_iter_on_app_lcore(eid, 0);
			TEST_ASSERT(ret == 0, "failed to run service %d", ret);
		}

		ret = rte_service_run_iter_on_app_lcore(tid, 0);
		TEST_ASSERT(ret == 0, "failed to run service %d", ret);

		if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, tx_queue_id,
				&r, 1)) {
			TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed"
					" expected %p received %p", m, r);
			return 0;
		}
	}

	TEST_ASSERT(0, "Failed to receive packet");
	return -1;
}

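/*
 * Data path test for adapters that use the service function (skipped
 * when the INTERNAL_PORT capability is reported): add an event queue
 * linked to the adapter event port, send RING_SIZE mbufs per Tx queue
 * through the adapter and verify the adapter statistics.
 */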
static int
tx_adapter_service(void)
{
	struct rte_event_eth_tx_adapter_stats stats;
	uint32_t i;
	int err;
	uint8_t ev_port, ev_qid;
	struct rte_mbuf  bufs[RING_SIZE];
	struct rte_mbuf *pbufs[RING_SIZE];
	struct rte_event_dev_info dev_info;
	struct rte_event_dev_config dev_conf;
	struct rte_event_queue_conf qconf;
	uint32_t qcnt, pcnt;
	uint16_t q;
	int internal_port;
	uint32_t cap;

	memset(&dev_conf, 0, sizeof(dev_conf));
	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
						&cap);
	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);

	internal_port = !!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	if (internal_port)
		return TEST_SUCCESS;

	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID,
						&ev_port);
	TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err);

	err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT,
					&pcnt);
	TEST_ASSERT_SUCCESS(err, "Port count get failed");

	err = rte_event_dev_attr_get(TEST_DEV_ID,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt);
	TEST_ASSERT_SUCCESS(err, "Queue count get failed");

	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
	TEST_ASSERT_SUCCESS(err, "Dev info failed");

	dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows;
	dev_conf.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	dev_conf.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;
	dev_conf.nb_events_limit =
			dev_info.max_num_events;
	dev_conf.nb_event_queues = qcnt + 1;
	dev_conf.nb_event_ports = pcnt;
	err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
			err);

	ev_qid = qcnt;
	qconf.nb_atomic_flows = dev_info.max_event_queue_flows;
	qconf.nb_atomic_order_sequences = 32;
	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf);
	TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid);

	/*
	 * Setup ports again so that the newly added queue is visible
	 * to them
	 */
	for (i = 0; i < pcnt; i++) {

		int n_links;
		uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
		uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];

		if (i == ev_port)
			continue;

		n_links = rte_event_port_links_get(TEST_DEV_ID, i, queues,
						priorities);
		TEST_ASSERT(n_links > 0, "Failed to get port links %d\n",
			n_links);
		err = rte_event_port_setup(TEST_DEV_ID, i, NULL);
		TEST_ASSERT(err == 0, "Failed to setup port err %d\n", err);
		err = rte_event_port_link(TEST_DEV_ID, i, queues, priorities,
					n_links);
		TEST_ASSERT(n_links == err, "Failed to link all queues"
			" err %s\n", rte_strerror(rte_errno));
	}

	err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1);
	TEST_ASSERT(err == 1, "Failed to link queue port %u",
		    ev_port);

	err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		err = rte_event_dev_service_id_get(0, (uint32_t *)&eid);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_service_runstate_set(eid, 1);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);

		err = rte_service_set_runstate_mapped_check(eid, 0);
		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	}

	err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_service_runstate_set(tid, 1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_service_set_runstate_mapped_check(tid, 0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_dev_start(TEST_DEV_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	for (q = 0; q < MAX_NUM_QUEUE; q++) {
		for (i = 0; i < RING_SIZE; i++)
			pbufs[i] = &bufs[i];
		for (i = 0; i < RING_SIZE; i++) {
			pbufs[i] = &bufs[i];
			err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i],
						ev_qid,
						RTE_SCHED_TYPE_ORDERED);
			TEST_ASSERT(err == 0, "Expected 0 got %d", err);
		}
		for (i = 0; i < RING_SIZE; i++) {
			TEST_ASSERT_EQUAL(pbufs[i], &bufs[i],
				"Error: received data does not match"
				" that transmitted");
		}
	}

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE,
			"stats.tx_packets expected %u got %"PRIu64,
			MAX_NUM_QUEUE * RING_SIZE,
			stats.tx_packets);

	err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
	TEST_ASSERT_EQUAL(stats.tx_packets, 0,
			"stats.tx_packets expected %u got %"PRIu64,
			0,
			stats.tx_packets);

	err = rte_event_eth_tx_adapter_stats_get(1, &stats);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	rte_event_dev_stop(TEST_DEV_ID);

	return TEST_SUCCESS;
}

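/*
 * Check that ethdevs created after the adapter (eth_null vdevs here)
 * can have their Tx queues added to and removed from the adapter.
 */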
static int
tx_adapter_dynamic_device(void)
{
	uint16_t port_id = rte_eth_dev_count_avail();
	const char *null_dev[2] = { "eth_null0", "eth_null1" };
	struct rte_eth_conf dev_conf;
	int ret;
	size_t i;

	memset(&dev_conf, 0, sizeof(dev_conf));
	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_vdev_init(null_dev[i], NULL);
		TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d",
				null_dev[i], ret);

		if (i == 0) {
			ret = tx_adapter_create();
			TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d",
					ret);
		}

		ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE,
					MAX_NUM_QUEUE, &dev_conf);
		TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret);

		ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
							port_id + i, 0);
		TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret);
	}

	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
							port_id + i, -1);
		TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret);
	}

	tx_adapter_free();

	for (i = 0; i < RTE_DIM(null_dev); i++)
		rte_vdev_uninit(null_dev[i]);

	return TEST_SUCCESS;
}

static struct unit_test_suite event_eth_tx_tests = {
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.suite_name = "tx event eth adapter test suite",
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, tx_adapter_create_free),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
					tx_adapter_queue_add_del),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
					tx_adapter_start_stop),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
					tx_adapter_service),
		TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_eth_tx_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_tx_tests);
}

REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest,
		test_event_eth_tx_adapter_common);