/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_pause.h>
#include <rte_service.h>
#include <rte_service_component.h>
#include <rte_bus_vdev.h>

#include "sw_evdev.h"

#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
#define DEQUEUE_DEPTH 128

static int evdev;

struct test {
	struct rte_mempool *mbuf_pool;
	uint8_t port[MAX_PORTS];
	uint8_t qid[MAX_QIDS];
	int nb_qids;
	uint32_t service_id;
};

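/* Zero-initialized here; the op field is set to RTE_EVENT_OP_RELEASE by the
 * test harness later in this file, before any test enqueues this event.
 */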
static struct rte_event release_ev;

static inline struct rte_mbuf *
rte_gen_arp(int portid, struct rte_mempool *mp)
{
	/*
	 * len = 14 + 46
	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
	 */
	static const uint8_t arp_request[] = {
		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	struct rte_mbuf *m;
	int pkt_len = sizeof(arp_request) - 1;

	m = rte_pktmbuf_alloc(mp);
	if (!m)
		return NULL;

	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
		arp_request, pkt_len);
	rte_pktmbuf_pkt_len(m) = pkt_len;
	rte_pktmbuf_data_len(m) = pkt_len;

	RTE_SET_USED(portid);

	return m;
}

static void
xstats_print(void)
{
	const uint32_t XSTATS_MAX = 1024;
	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	/* Device names / values */
	int ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
					xstats_names, ids, XSTATS_MAX);
	if (ret < 0) {
		printf("%d: xstats names get() returned error\n",
			__LINE__);
		return;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}

	/* Port names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 1,
					ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}

	/* Queue names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					1, ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}
}

/* initialization and config */
static inline int
init(struct test *t, int nb_queues, int nb_ports)
{
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_event_queue_flows = 1024,
			.nb_events_limit = 4096,
			.nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
			.nb_event_port_enqueue_depth = 128,
	};
	int ret;

	void *temp = t->mbuf_pool; /* save and restore mbuf pool */

	memset(t, 0, sizeof(*t));
	t->mbuf_pool = temp;

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0)
		printf("%d: Error configuring device\n", __LINE__);
	return ret;
}

static inline int
create_ports(struct test *t, int num_ports)
{
	int i;
	static const struct rte_event_port_conf conf = {
			.new_event_threshold = 1024,
			.dequeue_depth = 32,
			.enqueue_depth = 64,
			.disable_implicit_release = 0,
	};
	if (num_ports > MAX_PORTS)
		return -1;

	for (i = 0; i < num_ports; i++) {
		if (rte_event_port_setup(evdev, i, &conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}
		t->port[i] = i;
	}

	return 0;
}

static inline int
create_lb_qids(struct test *t, int num_qids, uint32_t flags)
{
	int i;

	/* Q creation */
	const struct rte_event_queue_conf conf = {
			.schedule_type = flags,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	/* bounds-check before writing into t->qid[] */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}
	t->nb_qids += num_qids;

	return 0;
}

static inline int
create_atomic_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
}

static inline int
create_ordered_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
}


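/* "Unordered" queues are load-balanced queues created with
 * RTE_SCHED_TYPE_PARALLEL, which makes no atomicity or ordering guarantees.
 */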
static inline int
create_unordered_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
}

static inline int
create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
{
	int i;

	/* Q creation */
	static const struct rte_event_queue_conf conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	/* bounds-check before writing into t->qid[] */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;

		if (rte_event_port_link(evdev, ports[i - t->nb_qids],
				&t->qid[i], NULL, 1) != 1) {
			printf("%d: error creating link for qid %d\n",
					__LINE__, i);
			return -1;
		}
	}
	t->nb_qids += num_qids;

	return 0;
}

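/*
 * Typical use of the helpers above (a minimal sketch, not itself a test):
 *
 *	struct test t = { .mbuf_pool = pool };
 *	if (init(&t, 1, 1) < 0 ||
 *			create_ports(&t, 1) < 0 ||
 *			create_atomic_qids(&t, 1) < 0)
 *		return -1;
 *	rte_event_port_link(evdev, t.port[0], &t.qid[0], NULL, 1);
 *	rte_event_dev_start(evdev);
 *	... enqueue events, run the scheduler service, dequeue, verify ...
 *	cleanup(&t);
 */
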
/* destruction */
static inline int
cleanup(struct test *t __rte_unused)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
	return 0;
}

struct test_event_dev_stats {
	uint64_t rx_pkts;       /**< Total packets received */
	uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
	uint64_t tx_pkts;       /**< Total packets transmitted */

	/** Packets received on this port */
	uint64_t port_rx_pkts[MAX_PORTS];
	/** Packets dropped on this port */
	uint64_t port_rx_dropped[MAX_PORTS];
	/** Packets inflight on this port */
	uint64_t port_inflight[MAX_PORTS];
	/** Packets transmitted on this port */
	uint64_t port_tx_pkts[MAX_PORTS];
	/** Packets received on this qid */
	uint64_t qid_rx_pkts[MAX_QIDS];
	/** Packets dropped on this qid */
	uint64_t qid_rx_dropped[MAX_QIDS];
	/** Packets transmitted on this qid */
	uint64_t qid_tx_pkts[MAX_QIDS];
};

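/* Collect the sw PMD counters into one struct by looking each statistic up
 * via its xstat name ("dev_rx", "port_N_rx", "qid_N_tx", ...). The xstat ids
 * are stored in the static arrays but are otherwise unused by the tests.
 */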
static inline int
test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
{
	static uint32_t i;
	static uint32_t total_ids[3]; /* rx, drop and tx */
	static uint32_t port_rx_pkts_ids[MAX_PORTS];
	static uint32_t port_rx_dropped_ids[MAX_PORTS];
	static uint32_t port_inflight_ids[MAX_PORTS];
	static uint32_t port_tx_pkts_ids[MAX_PORTS];
	static uint32_t qid_rx_pkts_ids[MAX_QIDS];
	static uint32_t qid_rx_dropped_ids[MAX_QIDS];
	static uint32_t qid_tx_pkts_ids[MAX_QIDS];


	stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_rx", &total_ids[0]);
	stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_drop", &total_ids[1]);
	stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_tx", &total_ids[2]);
	for (i = 0; i < MAX_PORTS; i++) {
		char name[32];
		snprintf(name, sizeof(name), "port_%u_rx", i);
		stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_rx_pkts_ids[i]);
		snprintf(name, sizeof(name), "port_%u_drop", i);
		stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_rx_dropped_ids[i]);
		snprintf(name, sizeof(name), "port_%u_inflight", i);
		stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_inflight_ids[i]);
		snprintf(name, sizeof(name), "port_%u_tx", i);
		stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_tx_pkts_ids[i]);
	}
	for (i = 0; i < MAX_QIDS; i++) {
		char name[32];
		snprintf(name, sizeof(name), "qid_%u_rx", i);
		stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_rx_pkts_ids[i]);
		snprintf(name, sizeof(name), "qid_%u_drop", i);
		stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_rx_dropped_ids[i]);
		snprintf(name, sizeof(name), "qid_%u_tx", i);
		stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_tx_pkts_ids[i]);
	}

	return 0;
}

/* run_prio_packet_test
 * This performs a basic packet priority check on the test instance passed in.
 * It is factored out of the main priority tests as the same tests must be
 * performed to ensure prioritization of each type of QID.
 *
 * Requirements:
 *  - An initialized test structure, including mempool
 *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
 *  - t->qid[0] is the QID to be tested
 *  - if LB QID, the CQ must be mapped to the QID.
 */
static int
run_prio_packet_test(struct test *t)
{
	int err;
	const uint32_t MAGIC_SEQN[] = {4711, 1234};
	const uint32_t PRIORITY[] = {
		RTE_EVENT_DEV_PRIORITY_NORMAL,
		RTE_EVENT_DEV_PRIORITY_HIGHEST
	};
	unsigned int i;
	for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
		/* generate pkt and enqueue */
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		arp->seqn = MAGIC_SEQN[i];

		ev = (struct rte_event){
			.priority = PRIORITY[i],
			.op = RTE_EVENT_OP_NEW,
			.queue_id = t->qid[0],
			.mbuf = arp
		};
		err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: error failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;
	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: error failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.port_rx_pkts[t->port[0]] != 2) {
		printf("%d: error stats incorrect for port 0\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	struct rte_event ev, ev2;
	uint32_t deq_pkts;
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
		printf("%d: first packet out not highest priority\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	rte_pktmbuf_free(ev.mbuf);

	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
		printf("%d: second packet out not lower priority\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	rte_pktmbuf_free(ev2.mbuf);

	cleanup(t);
	return 0;
}

static int
test_single_directed_packet(struct test *t)
{
	const int rx_enq = 0;
	const int wrk_enq = 2;
	int err;

	/* Create instance with 3 directed QIDs going to 3 ports */
	if (init(t, 3, 3) < 0 ||
			create_ports(t, 3) < 0 ||
			create_directed_qids(t, 3, t->port) < 0)
		return -1;

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
	struct rte_event ev = {
			.op = RTE_EVENT_OP_NEW,
			.queue_id = wrk_enq,
			.mbuf = arp,
	};

	if (!arp) {
		printf("%d: gen of pkt failed\n", __LINE__);
		return -1;
	}

	const uint32_t MAGIC_SEQN = 4711;
	arp->seqn = MAGIC_SEQN;

	/* generate pkt and enqueue */
	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
	if (err != 1) {
		printf("%d: error failed to enqueue\n", __LINE__);
		return -1;
	}

	/* Run schedule() as dir packets may need to be re-ordered */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;
	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: error failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.port_rx_pkts[rx_enq] != 1) {
		printf("%d: error stats incorrect for directed port\n",
				__LINE__);
		return -1;
	}

	uint32_t deq_pkts;
	deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		return -1;
	}

	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_rx_pkts[wrk_enq] != 0 &&
			stats.port_rx_pkts[wrk_enq] != 1) {
		printf("%d: error directed stats post-dequeue\n", __LINE__);
		return -1;
	}

	if (ev.mbuf->seqn != MAGIC_SEQN) {
		printf("%d: error magic sequence number not dequeued\n",
				__LINE__);
		return -1;
	}

	rte_pktmbuf_free(ev.mbuf);
	cleanup(t);
	return 0;
}

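/* Loop one event through a directed queue 1000 times: the first enqueue is
 * an OP_NEW, all later ones are OP_FORWARD. If forwards wrongly consumed
 * new-event credits, the port's new_event_threshold would eventually be
 * exhausted and an enqueue would fail.
 */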
static int
test_directed_forward_credits(struct test *t)
{
	uint32_t i;
	int32_t err;

	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_directed_qids(t, 1, t->port) < 0)
		return -1;

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	struct rte_event ev = {
			.op = RTE_EVENT_OP_NEW,
			.queue_id = 0,
	};

	for (i = 0; i < 1000; i++) {
		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
		if (err != 1) {
			printf("%d: error failed to enqueue\n", __LINE__);
			return -1;
		}
		rte_service_run_iter_on_app_lcore(t->service_id, 1);

		uint32_t deq_pkts;
		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (deq_pkts != 1) {
			printf("%d: error failed to deq\n", __LINE__);
			return -1;
		}

		/* re-write event to be a forward, and continue looping it */
		ev.op = RTE_EVENT_OP_FORWARD;
	}

	cleanup(t);
	return 0;
}


static int
test_priority_directed(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_directed_qids(t, 1, t->port) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_atomic(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_ordered(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_ordered_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_unordered(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_unordered_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
burst_packets(struct test *t)
{
	/************** CONFIG ****************/
	uint32_t i;
	int err;
	int ret;

	/* Create instance with 2 ports and 2 queues */
	if (init(t, 2, 2) < 0 ||
			create_ports(t, 2) < 0 ||
			create_atomic_qids(t, 2) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
	if (ret != 1) {
		printf("%d: error mapping lb qid0\n", __LINE__);
		return -1;
	}
	ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
	if (ret != 1) {
		printf("%d: error mapping lb qid1\n", __LINE__);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
	const uint32_t rx_port = 0;
	const uint32_t NUM_PKTS = 2;

	for (i = 0; i < NUM_PKTS; i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: error generating pkt\n", __LINE__);
			return -1;
		}

		struct rte_event ev = {
				.op = RTE_EVENT_OP_NEW,
				.queue_id = i % 2,
				.flow_id = i % 3,
				.mbuf = arp,
		};
		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* Check stats for all NUM_PKTS arrived to sched core */
	struct test_event_dev_stats stats;

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}
	if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
		printf("%d: Sched core didn't receive all %d pkts\n",
				__LINE__, NUM_PKTS);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	uint32_t deq_pkts;
	int p;

	deq_pkts = 0;
	/******** DEQ PORT 0 (mapped to QID 0) *******/
	do {
		struct rte_event ev;
		p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
		deq_pkts += p;
		if (p)
			rte_pktmbuf_free(ev.mbuf);
	} while (p);

	if (deq_pkts != NUM_PKTS/2) {
		printf("%d: Half of NUM_PKTS didn't arrive at port 0\n",
				__LINE__);
		return -1;
	}

	/******** DEQ PORT 1 (mapped to QID 1) *******/
	deq_pkts = 0;
	do {
		struct rte_event ev;
		p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
		deq_pkts += p;
		if (p)
			rte_pktmbuf_free(ev.mbuf);
	} while (p);
	if (deq_pkts != NUM_PKTS/2) {
		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
				__LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
}

static int
abuse_inflights(struct test *t)
{
	const int rx_enq = 0;
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue op only */
	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
	if (err != 1) {
		printf("%d: Failed to enqueue\n", __LINE__);
		return -1;
	}

	/* schedule */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.rx_pkts != 0 ||
			stats.tx_pkts != 0 ||
			stats.port_inflight[wrk_enq] != 0) {
		printf("%d: Sched core didn't handle pkt as expected\n",
				__LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
}

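/* Sanity-check the xstats APIs against the counts the sw PMD exposes for
 * this config: 6 device stats, 21 stats per port, and 16 stats for one
 * atomic queue (4 iqs) on a 4-port device; see the expected[] arrays below.
 */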
static int
xstats_tests(struct test *t)
{
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	const uint32_t XSTATS_MAX = 1024;

	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	/* Device names / values */
	int ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, xstats_names, ids, XSTATS_MAX);
	if (ret != 6) {
		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
		return -1;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, ret);
	if (ret != 6) {
		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
		return -1;
	}

	/* Port names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	if (ret != 21) {
		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
		return -1;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					ids, values, ret);
	if (ret != 21) {
		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
		return -1;
	}

	/* Queue names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					0, xstats_names, ids, XSTATS_MAX);
	if (ret != 16) {
		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
		return -1;
	}

	/* NEGATIVE TEST: with a non-existent queue passed, -EINVAL should be
	 * returned
	 */
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					1, ids, values, ret);
	if (ret != -EINVAL) {
		printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
		return -1;
	}

	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					0, ids, values, XSTATS_MAX);
	if (ret != 16) {
		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
		return -1;
	}

	/* enqueue packets to check values */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		ev.flow_id = 7;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* Device names / values */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats < 0)
		goto fail;
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
	for (i = 0; (signed int)i < ret; i++) {
		if (expected[i] != values[i]) {
			printf(
				"%d Error xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], expected[i]);
			goto fail;
		}
	}

	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
					0, NULL, 0);

	/* ensure reset statistics are zero-ed */
	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
	for (i = 0; (signed int)i < ret; i++) {
		if (expected_zero[i] != values[i]) {
			printf(
				"%d Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], expected_zero[i]);
			goto fail;
		}
	}

	/* port reset checks */
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats < 0)
		goto fail;
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
					0, ids, values, num_stats);

	static const uint64_t port_expected[] = {
		3 /* rx */,
		0 /* tx */,
		0 /* drop */,
		0 /* inflights */,
		0 /* avg pkt cycles */,
		29 /* credits */,
		0 /* rx ring used */,
		4096 /* rx ring free */,
		0 /* cq ring used */,
		32 /* cq ring free */,
		0 /* dequeue calls */,
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	if (ret != RTE_DIM(port_expected)) {
		printf(
			"%s %d: wrong number of port stats (%d), expected %zu\n",
			__func__, __LINE__, ret, RTE_DIM(port_expected));
	}

	for (i = 0; (signed int)i < ret; i++) {
		if (port_expected[i] != values[i]) {
			printf(
				"%s : %d: Error stat %s is %"PRIu64
				", expected %"PRIu64"\n",
				__func__, __LINE__, xstats_names[i].name,
				values[i], port_expected[i]);
			goto fail;
		}
	}

	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
					0, NULL, 0);

	/* ensure reset statistics are zero-ed */
	static const uint64_t port_expected_zero[] = {
		0 /* rx */,
		0 /* tx */,
		0 /* drop */,
		0 /* inflights */,
		0 /* avg pkt cycles */,
		29 /* credits */,
		0 /* rx ring used */,
		4096 /* rx ring free */,
		0 /* cq ring used */,
		32 /* cq ring free */,
		0 /* dequeue calls */,
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT,
					0, ids, values, num_stats);
	for (i = 0; (signed int)i < ret; i++) {
		if (port_expected_zero[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], port_expected_zero[i]);
			goto fail;
		}
	}

	/* QUEUE STATS TESTS */
	num_stats = rte_event_dev_xstats_names_get(evdev,
						RTE_EVENT_DEV_XSTATS_QUEUE, 0,
						xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
					0, ids, values, num_stats);
	if (ret < 0) {
		printf("xstats get returned %d\n", ret);
		goto fail;
	}
	if ((unsigned int)ret > XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);

	static const uint64_t queue_expected[] = {
		3 /* rx */,
		3 /* tx */,
		0 /* drop */,
		3 /* inflights */,
		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 3,
		0, 0,
	};
	for (i = 0; (signed int)i < ret; i++) {
		if (queue_expected[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], queue_expected[i]);
			goto fail;
		}
	}

	/* Reset the queue stats here */
	ret = rte_event_dev_xstats_reset(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					NULL,
					0);

	/* Verify that the resettable stats are reset, and others are not */
	static const uint64_t queue_expected_zero[] = {
		0 /* rx */,
		0 /* tx */,
		0 /* drop */,
		3 /* inflight */,
		0, 0, 0, 0, /* 4 iq used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 0,
		0, 0,
	};

	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					ids, values, num_stats);
	int fails = 0;
	for (i = 0; (signed int)i < ret; i++) {
		if (queue_expected_zero[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], queue_expected_zero[i]);
			fails++;
		}
	}
	if (fails) {
		printf("%d : %d of values were not as expected above\n",
				__LINE__, fails);
		goto fail;
	}

	cleanup(t);
	return 0;

fail:
	rte_event_dev_dump(evdev, stdout);
	cleanup(t);
	return -1;
}


static int
xstats_id_abuse_tests(struct test *t)
{
	int err;
	const uint32_t XSTATS_MAX = 1024;
	const uint32_t link_port = 2;

	uint32_t ids[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		goto fail;
	}

	err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	/* no test for device, as it ignores the port/q number */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT,
					UINT8_MAX-1, xstats_names, ids,
					XSTATS_MAX);
	if (num_stats != 0) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				0, num_stats);
		goto fail;
	}

	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					UINT8_MAX-1, xstats_names, ids,
					XSTATS_MAX);
	if (num_stats != 0) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				0, num_stats);
		goto fail;
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}

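/* Stop and reconfigure queue 0 and port 0 thirty-two times, moving one
 * packet through per iteration: a credit leak across reconfigurations would
 * eventually cause an enqueue to fail.
 */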
static int
port_reconfig_credits(struct test *t)
{
	if (init(t, 1, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	uint32_t i;
	const uint32_t NUM_ITERS = 32;
	for (i = 0; i < NUM_ITERS; i++) {
		const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
		};
		if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
			printf("%d: error creating qid\n", __LINE__);
			return -1;
		}
		t->qid[0] = 0;

		static const struct rte_event_port_conf port_conf = {
				.new_event_threshold = 128,
				.dequeue_depth = 32,
				.enqueue_depth = 64,
				.disable_implicit_release = 0,
		};
		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
			printf("%d Error setting up port\n", __LINE__);
			return -1;
		}

		int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
		if (links != 1) {
			printf("%d: error mapping lb qid\n", __LINE__);
			goto fail;
		}

		if (rte_event_dev_start(evdev) < 0) {
			printf("%d: Error with start call\n", __LINE__);
			goto fail;
		}

		const uint32_t NPKTS = 1;
		uint32_t j;
		for (j = 0; j < NPKTS; j++) {
			struct rte_event ev;
			struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
			if (!arp) {
				printf("%d: gen of pkt failed\n", __LINE__);
				goto fail;
			}
			ev.queue_id = t->qid[0];
			ev.op = RTE_EVENT_OP_NEW;
			ev.mbuf = arp;
			int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
			if (err != 1) {
				printf("%d: Failed to enqueue\n", __LINE__);
				rte_event_dev_dump(evdev, stdout);
				goto fail;
			}
		}

		rte_service_run_iter_on_app_lcore(t->service_id, 1);

		struct rte_event ev[NPKTS];
		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
							NPKTS, 0);
		if (deq != 1)
			printf("%d error; no packet dequeued\n", __LINE__);

		/* let cleanup below stop the device on last iter */
		if (i != NUM_ITERS-1)
			rte_event_dev_stop(evdev);
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}

static int
port_single_lb_reconfig(struct test *t)
{
	if (init(t, 2, 2) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		goto fail;
	}

	static const struct rte_event_queue_conf conf_lb_atomic = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};
	if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto fail;
	}

	static const struct rte_event_queue_conf conf_single_link = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};
	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto fail;
	}

	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 128,
		.dequeue_depth = 32,
		.enqueue_depth = 64,
		.disable_implicit_release = 0,
	};
	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
		printf("%d Error setting up port\n", __LINE__);
		goto fail;
	}
	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
		printf("%d Error setting up port\n", __LINE__);
		goto fail;
	}

	/* link port to lb queue */
	uint8_t queue_id = 0;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: error creating link for qid\n", __LINE__);
		goto fail;
	}

	int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
	if (ret != 1) {
		printf("%d: Error unlinking lb port\n", __LINE__);
		goto fail;
	}

	queue_id = 1;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: error creating link for qid\n", __LINE__);
		goto fail;
	}

	queue_id = 0;
	int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}

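/* Hammer the xstats get APIs with every mode and every possible port/queue
 * id, valid or not; the calls must fail gracefully rather than crash.
 */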
static int
xstats_brute_force(struct test *t)
{
	uint32_t i;
	const uint32_t XSTATS_MAX = 1024;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];


	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	for (i = 0; i < 3; i++) {
		uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
		uint32_t j;
		for (j = 0; j < UINT8_MAX; j++) {
			rte_event_dev_xstats_names_get(evdev, mode,
				j, xstats_names, ids, XSTATS_MAX);

			rte_event_dev_xstats_get(evdev, mode, j, ids,
						 values, XSTATS_MAX);
		}
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}

static int
xstats_id_reset_tests(struct test *t)
{
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

#define XSTATS_MAX 1024
	int ret;
	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

#define NUM_DEV_STATS 6
	/* Device names / values */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_DEV_STATS) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				NUM_DEV_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
	if (ret != NUM_DEV_STATS) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				NUM_DEV_STATS, ret);
		goto fail;
	}

#define NPKTS 7
	for (i = 0; i < NPKTS; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			goto fail;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			goto fail;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	static const char * const dev_names[] = {
		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
	};
	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								dev_names[i],
								&id);
		if (id != i) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, dev_names[i], i, id);
			goto fail;
		}
		if (val != dev_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, dev_names[i],
				dev_expected[i], val);
			goto fail;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_DEVICE, 0,
						&id,
						1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			goto fail;
		}
		dev_expected[i] = 0;
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i],
							NULL);
		if (val != dev_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, dev_names[i],
				dev_expected[i], val);
			goto fail;
		}
	}

/* 48 is stat offset from start of the devices whole xstats.
 * This WILL break every time we add a statistic to a port
 * or the device, but there is no other way to test
 */
#define PORT_OFF 48
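/* with this config: NUM_DEV_STATS (6) + 2 lower ports x 21 stats = 48 */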
/* num stats for the tested port. CQ size adds more stats to a port */
#define NUM_PORT_STATS 21
/* the port to test. */
#define PORT 2
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, PORT,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_PORT_STATS) {
		printf("%d: expected %d stats, got return %d\n",
			__LINE__, NUM_PORT_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
					ids, values, num_stats);

	if (ret != NUM_PORT_STATS) {
		printf("%d: expected %d stats, got return %d\n",
				__LINE__, NUM_PORT_STATS, ret);
		goto fail;
	}
	static const char * const port_names[] = {
		"port_2_rx",
		"port_2_tx",
		"port_2_drop",
		"port_2_inflight",
		"port_2_avg_pkt_cycles",
		"port_2_credits",
		"port_2_rx_ring_used",
		"port_2_rx_ring_free",
		"port_2_cq_ring_used",
		"port_2_cq_ring_free",
		"port_2_dequeue_calls",
		"port_2_dequeues_returning_0",
		"port_2_dequeues_returning_1-4",
		"port_2_dequeues_returning_5-8",
		"port_2_dequeues_returning_9-12",
		"port_2_dequeues_returning_13-16",
		"port_2_dequeues_returning_17-20",
		"port_2_dequeues_returning_21-24",
		"port_2_dequeues_returning_25-28",
		"port_2_dequeues_returning_29-32",
		"port_2_dequeues_returning_33-36",
	};
	uint64_t port_expected[] = {
		0, /* rx */
		NPKTS, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, /* avg pkt cycles */
		0, /* credits */
		0, /* rx ring used */
		4096, /* rx ring free */
		NPKTS,  /* cq ring used */
		25, /* cq ring free */
		0, /* dequeue zero calls */
		0, 0, 0, 0, 0, /* 10 dequeue buckets */
		0, 0, 0, 0, 0,
	};
	uint64_t port_expected_zero[] = {
		0, /* rx */
		0, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, /* avg pkt cycles */
		0, /* credits */
		0, /* rx ring used */
		4096, /* rx ring free */
		NPKTS,  /* cq ring used */
		25, /* cq ring free */
		0, /* dequeue zero calls */
		0, 0, 0, 0, 0, /* 10 dequeue buckets */
		0, 0, 0, 0, 0,
	};
	if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
			RTE_DIM(port_expected_zero) != NUM_PORT_STATS ||
			RTE_DIM(port_names) != NUM_PORT_STATS) {
		printf("%d: port array of wrong size\n", __LINE__);
		goto fail;
	}

	int failed = 0;
	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								port_names[i],
								&id);
		if (id != i + PORT_OFF) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, port_names[i], i+PORT_OFF,
					id);
			failed = 1;
		}
		if (val != port_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, port_names[i],
				port_expected[i], val);
			failed = 1;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_PORT, PORT,
						&id,
						1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			failed = 1;
		}
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, port_names[i],
							NULL);
		if (val != port_expected_zero[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, port_names[i],
				port_expected_zero[i], val);
			failed = 1;
		}
	}
	if (failed)
		goto fail;

/* num queue stats */
#define NUM_Q_STATS 16
/* queue offset from start of the devices whole xstats.
 * This will break every time we add a statistic to a device/port/queue
 */
#define QUEUE_OFF 90
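/* with this config: NUM_DEV_STATS (6) + 4 ports x 21 stats = 90 */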
	const uint32_t queue = 0;
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_Q_STATS) {
		printf("%d: expected %d stats, got return %d\n",
			__LINE__, NUM_Q_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
					queue, ids, values, num_stats);
	if (ret != NUM_Q_STATS) {
		printf("%d: expected %d stats, got return %d\n",
				__LINE__, NUM_Q_STATS, ret);
		goto fail;
	}
	static const char * const queue_names[] = {
		"qid_0_rx",
		"qid_0_tx",
		"qid_0_drop",
		"qid_0_inflight",
		"qid_0_iq_0_used",
		"qid_0_iq_1_used",
		"qid_0_iq_2_used",
		"qid_0_iq_3_used",
		"qid_0_port_0_pinned_flows",
		"qid_0_port_0_packets",
		"qid_0_port_1_pinned_flows",
		"qid_0_port_1_packets",
		"qid_0_port_2_pinned_flows",
		"qid_0_port_2_packets",
		"qid_0_port_3_pinned_flows",
		"qid_0_port_3_packets",
	};
	uint64_t queue_expected[] = {
		7, /* rx */
		7, /* tx */
		0, /* drop */
		7, /* inflight */
		0, /* iq 0 used */
		0, /* iq 1 used */
		0, /* iq 2 used */
		0, /* iq 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 7,
		0, 0,
	};
	uint64_t queue_expected_zero[] = {
		0, /* rx */
		0, /* tx */
		0, /* drop */
		7, /* inflight */
		0, /* iq 0 used */
		0, /* iq 1 used */
		0, /* iq 2 used */
		0, /* iq 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 0,
		0, 0,
	};
	if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
			RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
			RTE_DIM(queue_names) != NUM_Q_STATS) {
		printf("%d : queue array of wrong size\n", __LINE__);
		goto fail;
	}

	failed = 0;
	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								queue_names[i],
								&id);
		if (id != i + QUEUE_OFF) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, queue_names[i], i+QUEUE_OFF,
					id);
			failed = 1;
		}
		if (val != queue_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, queue_names[i],
				queue_expected[i], val);
			failed = 1;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_QUEUE,
						queue, &id, 1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			failed = 1;
		}
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
							NULL);
		if (val != queue_expected_zero[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, queue_names[i],
				queue_expected_zero[i], val);
			failed = 1;
		}
	}

	if (failed)
		goto fail;

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}

static int
ordered_reconfigure(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto failed;
	}

	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
		printf("%d: error creating qid, for 2nd time\n", __LINE__);
		goto failed;
	}

	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
failed:
	cleanup(t);
	return -1;
}

static int
qid_priorities(struct test *t)
{
	/* Test works by having a CQ with enough empty space for all packets,
	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
	 * priority of the QID, not the ingress order, to pass the test
	 */
	unsigned int i;
	/* Create instance with 1 port, and 3 qids */
	if (init(t, 3, 1) < 0 ||
			create_ports(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	for (i = 0; i < 3; i++) {
		/* Create QID */
		const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			/* increase priority (0 == highest), as we go */
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
		};

		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}
	t->nb_qids = i;
	/* map all QIDs to port */
	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* enqueue 3 packets, setting seqn and QID to check priority */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* dequeue packets, verify priority was upheld */
	struct rte_event ev[32];
	uint32_t deq_pkts =
		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
	if (deq_pkts != 3) {
		printf("%d: failed to deq packets\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	for (i = 0; i < 3; i++) {
		if (ev[i].mbuf->seqn != 2-i) {
			printf(
				"%d: qid priority test: seqn %d incorrectly prioritized\n",
					__LINE__, i);
		}
	}

	cleanup(t);
	return 0;
}

1906 static int
1907 unlink_in_progress(struct test *t)
1908 {
1909 	/* Test unlinking API, in particular that when an unlink request has
1910 	 * not yet been seen by the scheduler thread, that the
1911 	 * unlink_in_progress() function returns the number of unlinks.
1912 	 */
1913 	unsigned int i;
1914 	/* Create instance with 1 ports, and 3 qids */
1915 	if (init(t, 3, 1) < 0 ||
1916 			create_ports(t, 1) < 0) {
1917 		printf("%d: Error initializing device\n", __LINE__);
1918 		return -1;
1919 	}
1920 
1921 	for (i = 0; i < 3; i++) {
1922 		/* Create QID */
1923 		const struct rte_event_queue_conf conf = {
1924 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1925 			/* increase priority (0 == highest), as we go */
1926 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1927 			.nb_atomic_flows = 1024,
1928 			.nb_atomic_order_sequences = 1024,
1929 		};
1930 
1931 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1932 			printf("%d: error creating qid %d\n", __LINE__, i);
1933 			return -1;
1934 		}
1935 		t->qid[i] = i;
1936 	}
1937 	t->nb_qids = i;
1938 	/* map all QIDs to port */
1939 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1940 
1941 	if (rte_event_dev_start(evdev) < 0) {
1942 		printf("%d: Error with start call\n", __LINE__);
1943 		return -1;
1944 	}
1945 
1946 	/* unlink all ports to have outstanding unlink requests */
1947 	int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1948 	if (ret < 0) {
1949 		printf("%d: Failed to unlink queues\n", __LINE__);
1950 		return -1;
1951 	}
1952 
1953 	/* get active unlinks here, expect 3 */
1954 	int unlinks_in_progress =
1955 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1956 	if (unlinks_in_progress != 3) {
1957 		printf("%d: Expected num unlinks in progress == 3, got %d\n",
1958 				__LINE__, unlinks_in_progress);
1959 		return -1;
1960 	}
1961 
1962 	/* run scheduler service on this thread to ack the unlinks */
1963 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1964 
1965 	/* active unlinks expected to be 0 once the scheduler has acked */
1966 	unlinks_in_progress =
1967 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1968 	if (unlinks_in_progress != 0) {
1969 		printf("%d: Expected num unlinks in progress == 0, got %d\n", __LINE__, unlinks_in_progress);
1970 		return -1;
1971 	}
1972 
1973 	cleanup(t);
1974 	return 0;
1975 }
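
/*
 * Illustrative sketch (not part of the test): an application draining a
 * port before reconfiguring it would typically pair the two calls used
 * above, waiting for the scheduler to acknowledge the unlinks:
 *
 *	if (rte_event_port_unlink(dev_id, port_id, NULL, 0) < 0)
 *		rte_panic("unlink failed\n");
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 *
 * The selftest drives the scheduler directly, so a single call to
 * rte_service_run_iter_on_app_lcore() stands in for that wait loop.
 */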
1976 
1977 static int
1978 load_balancing(struct test *t)
1979 {
1980 	const int rx_enq = 0;
1981 	int err;
1982 	uint32_t i;
1983 
1984 	if (init(t, 1, 4) < 0 ||
1985 			create_ports(t, 4) < 0 ||
1986 			create_atomic_qids(t, 1) < 0) {
1987 		printf("%d: Error initializing device\n", __LINE__);
1988 		return -1;
1989 	}
1990 
1991 	for (i = 0; i < 3; i++) {
1992 		/* map port 1 - 3 inclusive */
1993 		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1994 				NULL, 1) != 1) {
1995 			printf("%d: error mapping qid to port %d\n",
1996 					__LINE__, i);
1997 			return -1;
1998 		}
1999 	}
2000 
2001 	if (rte_event_dev_start(evdev) < 0) {
2002 		printf("%d: Error with start call\n", __LINE__);
2003 		return -1;
2004 	}
2005 
2006 	/************** FORWARD ****************/
2007 	/*
2008 	 * Create a set of flows that test the load-balancing operation of the
2009 	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2010 	 * with a new flow, which should be sent to the 3rd mapped CQ
2011 	 */
2012 	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
2013 
2014 	for (i = 0; i < RTE_DIM(flows); i++) {
2015 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2016 		if (!arp) {
2017 			printf("%d: gen of pkt failed\n", __LINE__);
2018 			return -1;
2019 		}
2020 
2021 		struct rte_event ev = {
2022 				.op = RTE_EVENT_OP_NEW,
2023 				.queue_id = t->qid[0],
2024 				.flow_id = flows[i],
2025 				.mbuf = arp,
2026 		};
2027 		/* generate pkt and enqueue */
2028 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2029 		if (err != 1) {
2030 			printf("%d: Failed to enqueue\n", __LINE__);
2031 			return -1;
2032 		}
2033 	}
2034 
2035 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2036 
2037 	struct test_event_dev_stats stats;
2038 	err = test_event_dev_stats_get(evdev, &stats);
2039 	if (err) {
2040 		printf("%d: failed to get stats\n", __LINE__);
2041 		return -1;
2042 	}
2043 
2044 	if (stats.port_inflight[1] != 4) {
2045 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2046 				__func__);
2047 		return -1;
2048 	}
2049 	if (stats.port_inflight[2] != 2) {
2050 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2051 				__func__);
2052 		return -1;
2053 	}
2054 	if (stats.port_inflight[3] != 3) {
2055 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2056 				__func__);
2057 		return -1;
2058 	}
2059 
2060 	cleanup(t);
2061 	return 0;
2062 }
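
/*
 * The inflight counts checked above follow from the flows[] array: flow
 * 0 occurs four times, flow 1 twice and flow 2 three times. Atomic
 * scheduling pins a flow to the CQ it first lands on, so with ports 1-3
 * all linked to qid 0 the expected split is:
 *
 *	flow 0 -> port 1: 4 inflight
 *	flow 1 -> port 2: 2 inflight
 *	flow 2 -> port 3: 3 inflight
 */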
2063 
2064 static int
2065 load_balancing_history(struct test *t)
2066 {
2067 	struct test_event_dev_stats stats = {0};
2068 	const int rx_enq = 0;
2069 	int err;
2070 	uint32_t i;
2071 
2072 	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2073 	if (init(t, 1, 4) < 0 ||
2074 			create_ports(t, 4) < 0 ||
2075 			create_atomic_qids(t, 1) < 0)
2076 		return -1;
2077 
2078 	/* CQ mapping to QID */
2079 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2080 		printf("%d: error mapping port 1 qid\n", __LINE__);
2081 		return -1;
2082 	}
2083 	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2084 		printf("%d: error mapping port 2 qid\n", __LINE__);
2085 		return -1;
2086 	}
2087 	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2088 		printf("%d: error mapping port 3 qid\n", __LINE__);
2089 		return -1;
2090 	}
2091 	if (rte_event_dev_start(evdev) < 0) {
2092 		printf("%d: Error with start call\n", __LINE__);
2093 		return -1;
2094 	}
2095 
2096 	/*
2097 	 * Create a set of flows that test the load-balancing operation of the
2098 	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2099 	 * the packet from CQ 0, send in a new set of flows. Ensure that:
2100 	 *  1. The new flow 3 gets into the empty CQ0
2101 	 *  2. Packets for the existing flow 1 get added to CQ1
2102 	 *  3. The next flow 0 pkt goes to CQ2, since CQ0 and CQ1 now hold
2103 	 *     more outstanding pkts
2104 	 *
2105 	 *  This test makes sure that when a flow ends (i.e. all packets
2106 	 *  have been completed for that flow), the flow can be moved
2107 	 *  to a different CQ when new packets come in for that flow.
2108 	 */
2109 	static uint32_t flows1[] = {0, 1, 1, 2};
2110 
2111 	for (i = 0; i < RTE_DIM(flows1); i++) {
2112 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2113 		struct rte_event ev = {
2114 				.flow_id = flows1[i],
2115 				.op = RTE_EVENT_OP_NEW,
2116 				.queue_id = t->qid[0],
2117 				.event_type = RTE_EVENT_TYPE_CPU,
2118 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2119 				.mbuf = arp
2120 		};
2121 
2122 		if (!arp) {
2123 			printf("%d: gen of pkt failed\n", __LINE__);
2124 			return -1;
2125 		}
2126 		arp->hash.rss = flows1[i];
2127 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2128 		if (err != 1) {
2129 			printf("%d: Failed to enqueue\n", __LINE__);
2130 			return -1;
2131 		}
2132 	}
2133 
2134 	/* call the scheduler */
2135 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2136 
2137 	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
2138 	struct rte_event ev;
2139 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2140 		printf("%d: failed to dequeue\n", __LINE__);
2141 		return -1;
2142 	}
2143 	if (ev.mbuf->hash.rss != flows1[0]) {
2144 		printf("%d: unexpected flow received\n", __LINE__);
2145 		return -1;
2146 	}
2147 
2148 	/* drop the flow 0 packet from port 1 */
2149 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2150 
2151 	/* call the scheduler */
2152 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2153 
2154 	/*
2155 	 * Set up the next set of flows, first a new flow to fill up
2156 	 * CQ 0, so that the next flow 0 packet should go to CQ2
2157 	 */
2158 	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2159 
2160 	for (i = 0; i < RTE_DIM(flows2); i++) {
2161 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2162 		struct rte_event ev = {
2163 				.flow_id = flows2[i],
2164 				.op = RTE_EVENT_OP_NEW,
2165 				.queue_id = t->qid[0],
2166 				.event_type = RTE_EVENT_TYPE_CPU,
2167 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2168 				.mbuf = arp
2169 		};
2170 
2171 		if (!arp) {
2172 			printf("%d: gen of pkt failed\n", __LINE__);
2173 			return -1;
2174 		}
2175 		arp->hash.rss = flows2[i];
2176 
2177 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2178 		if (err != 1) {
2179 			printf("%d: Failed to enqueue\n", __LINE__);
2180 			return -1;
2181 		}
2182 	}
2183 
2184 	/* schedule */
2185 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2186 
2187 	err = test_event_dev_stats_get(evdev, &stats);
2188 	if (err) {
2189 		printf("%d: failed to get stats\n", __LINE__);
2190 		return -1;
2191 	}
2192 
2193 	/*
2194 	 * Now check the resulting inflights on each port.
2195 	 */
2196 	if (stats.port_inflight[1] != 3) {
2197 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2198 				__func__);
2199 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2200 				(unsigned int)stats.port_inflight[1],
2201 				(unsigned int)stats.port_inflight[2],
2202 				(unsigned int)stats.port_inflight[3]);
2203 		return -1;
2204 	}
2205 	if (stats.port_inflight[2] != 4) {
2206 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2207 				__func__);
2208 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2209 				(unsigned int)stats.port_inflight[1],
2210 				(unsigned int)stats.port_inflight[2],
2211 				(unsigned int)stats.port_inflight[3]);
2212 		return -1;
2213 	}
2214 	if (stats.port_inflight[3] != 2) {
2215 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2216 				__func__);
2217 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2218 				(unsigned int)stats.port_inflight[1],
2219 				(unsigned int)stats.port_inflight[2],
2220 				(unsigned int)stats.port_inflight[3]);
2221 		return -1;
2222 	}
2223 
2224 	for (i = 1; i <= 3; i++) {
2225 		struct rte_event ev;
2226 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2227 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2228 	}
2229 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2230 
2231 	cleanup(t);
2232 	return 0;
2233 }
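
/*
 * Illustrative sketch (not part of the test): the "drop" used above is
 * the usual atomic-context release pattern - dequeue an event, then
 * enqueue an RTE_EVENT_OP_RELEASE event from the same port to give up
 * the atomic slot without forwarding anything:
 *
 *	struct rte_event ev;
 *	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
 *		struct rte_event rel = { .op = RTE_EVENT_OP_RELEASE };
 *		rte_event_enqueue_burst(dev_id, port_id, &rel, 1);
 *	}
 *
 * Once the scheduler processes the release, the flow's history entry is
 * retired and new packets for that flow may migrate to a different CQ.
 */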
2234 
2235 static int
2236 invalid_qid(struct test *t)
2237 {
2238 	struct test_event_dev_stats stats;
2239 	const int rx_enq = 0;
2240 	int err;
2241 	uint32_t i;
2242 
2243 	if (init(t, 1, 4) < 0 ||
2244 			create_ports(t, 4) < 0 ||
2245 			create_atomic_qids(t, 1) < 0) {
2246 		printf("%d: Error initializing device\n", __LINE__);
2247 		return -1;
2248 	}
2249 
2250 	/* CQ mapping to QID */
2251 	for (i = 0; i < 4; i++) {
2252 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2253 				NULL, 1);
2254 		if (err != 1) {
2255 			printf("%d: error mapping port 1 qid\n", __LINE__);
2256 			return -1;
2257 		}
2258 	}
2259 
2260 	if (rte_event_dev_start(evdev) < 0) {
2261 		printf("%d: Error with start call\n", __LINE__);
2262 		return -1;
2263 	}
2264 
2265 	/*
2266 	 * Send in a packet with an invalid qid to the scheduler.
2267 	 * We should see the packet enqueued OK, but the inflights for
2268 	 * that packet should not be incremented, and the rx_dropped
2269 	 * should be incremented.
2270 	 */
2271 	static uint32_t flows1[] = {20};
2272 
2273 	for (i = 0; i < RTE_DIM(flows1); i++) {
2274 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2275 		if (!arp) {
2276 			printf("%d: gen of pkt failed\n", __LINE__);
2277 			return -1;
2278 		}
2279 
2280 		struct rte_event ev = {
2281 				.op = RTE_EVENT_OP_NEW,
2282 				.queue_id = t->qid[0] + flows1[i],
2283 				.flow_id = i,
2284 				.mbuf = arp,
2285 		};
2286 		/* generate pkt and enqueue */
2287 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2288 		if (err != 1) {
2289 			printf("%d: Failed to enqueue\n", __LINE__);
2290 			return -1;
2291 		}
2292 	}
2293 
2294 	/* call the scheduler */
2295 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2296 
2297 	err = test_event_dev_stats_get(evdev, &stats);
2298 	if (err) {
2299 		printf("%d: failed to get stats\n", __LINE__);
2300 		return -1;
2301 	}
2302 
2303 	/*
2304 	 * Now check the resulting inflights on the port, and the rx_dropped.
2305 	 */
2306 	if (stats.port_inflight[0] != 0) {
2307 		printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2308 				__func__);
2309 		rte_event_dev_dump(evdev, stdout);
2310 		return -1;
2311 	}
2312 	if (stats.port_rx_dropped[0] != 1) {
2313 		printf("%d:%s: port 0 rx_dropped not correct\n", __LINE__, __func__);
2314 		rte_event_dev_dump(evdev, stdout);
2315 		return -1;
2316 	}
2317 	/* each packet drop should only be counted in one place - port or dev */
2318 	if (stats.rx_dropped != 0) {
2319 		printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2320 				__func__);
2321 		rte_event_dev_dump(evdev, stdout);
2322 		return -1;
2323 	}
2324 
2325 	cleanup(t);
2326 	return 0;
2327 }
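
/*
 * The accounting rule exercised above, in short: an event with a bogus
 * queue_id is accepted by the enqueue call (the burst API only reports
 * back-pressure), then dropped by the scheduler and counted exactly
 * once - against the enqueuing port's rx_dropped, never against both
 * the port and the device-wide counter.
 */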
2328 
2329 static int
2330 single_packet(struct test *t)
2331 {
2332 	const uint32_t MAGIC_SEQN = 7321;
2333 	struct rte_event ev;
2334 	struct test_event_dev_stats stats;
2335 	const int rx_enq = 0;
2336 	const int wrk_enq = 2;
2337 	int err;
2338 
2339 	/* Create instance with 4 ports */
2340 	if (init(t, 1, 4) < 0 ||
2341 			create_ports(t, 4) < 0 ||
2342 			create_atomic_qids(t, 1) < 0) {
2343 		printf("%d: Error initializing device\n", __LINE__);
2344 		return -1;
2345 	}
2346 
2347 	/* CQ mapping to QID */
2348 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2349 	if (err != 1) {
2350 		printf("%d: error mapping lb qid\n", __LINE__);
2351 		cleanup(t);
2352 		return -1;
2353 	}
2354 
2355 	if (rte_event_dev_start(evdev) < 0) {
2356 		printf("%d: Error with start call\n", __LINE__);
2357 		return -1;
2358 	}
2359 
2360 	/************** Gen pkt and enqueue ****************/
2361 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2362 	if (!arp) {
2363 		printf("%d: gen of pkt failed\n", __LINE__);
2364 		return -1;
2365 	}
2366 
2367 	ev.op = RTE_EVENT_OP_NEW;
2368 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2369 	ev.mbuf = arp;
2370 	ev.queue_id = 0;
2371 	ev.flow_id = 3;
2372 	arp->seqn = MAGIC_SEQN;
2373 
2374 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2375 	if (err != 1) {
2376 		printf("%d: Failed to enqueue\n", __LINE__);
2377 		return -1;
2378 	}
2379 
2380 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2381 
2382 	err = test_event_dev_stats_get(evdev, &stats);
2383 	if (err) {
2384 		printf("%d: failed to get stats\n", __LINE__);
2385 		return -1;
2386 	}
2387 
2388 	if (stats.rx_pkts != 1 ||
2389 			stats.tx_pkts != 1 ||
2390 			stats.port_inflight[wrk_enq] != 1) {
2391 		printf("%d: Sched core didn't handle pkt as expected\n",
2392 				__LINE__);
2393 		rte_event_dev_dump(evdev, stdout);
2394 		return -1;
2395 	}
2396 
2397 	uint32_t deq_pkts;
2398 
2399 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2400 	if (deq_pkts < 1) {
2401 		printf("%d: Failed to deq\n", __LINE__);
2402 		return -1;
2403 	}
2404 
2405 	err = test_event_dev_stats_get(evdev, &stats);
2406 	if (err) {
2407 		printf("%d: failed to get stats\n", __LINE__);
2408 		return -1;
2409 	}
2410 
2412 	if (ev.mbuf->seqn != MAGIC_SEQN) {
2413 		printf("%d: magic sequence number not dequeued\n", __LINE__);
2414 		return -1;
2415 	}
2416 
2417 	rte_pktmbuf_free(ev.mbuf);
2418 	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2419 	if (err != 1) {
2420 		printf("%d: Failed to enqueue\n", __LINE__);
2421 		return -1;
2422 	}
2423 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2424 
2425 	err = test_event_dev_stats_get(evdev, &stats);
2426 	if (err || stats.port_inflight[wrk_enq] != 0) {
2427 		printf("%d: port inflight not correct\n", __LINE__);
2428 		return -1;
2429 	}
2430 
2431 	cleanup(t);
2432 	return 0;
2433 }
2434 
2435 static int
2436 inflight_counts(struct test *t)
2437 {
2438 	struct rte_event ev;
2439 	struct test_event_dev_stats stats;
2440 	const int rx_enq = 0;
2441 	const int p1 = 1;
2442 	const int p2 = 2;
2443 	int err;
2444 	int i;
2445 
2446 	/* Create instance with 3 ports and 2 QIDs */
2447 	if (init(t, 2, 3) < 0 ||
2448 			create_ports(t, 3) < 0 ||
2449 			create_atomic_qids(t, 2) < 0) {
2450 		printf("%d: Error initializing device\n", __LINE__);
2451 		return -1;
2452 	}
2453 
2454 	/* CQ mapping to QID */
2455 	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2456 	if (err != 1) {
2457 		printf("%d: error mapping lb qid\n", __LINE__);
2458 		cleanup(t);
2459 		return -1;
2460 	}
2461 	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2462 	if (err != 1) {
2463 		printf("%d: error mapping lb qid\n", __LINE__);
2464 		cleanup(t);
2465 		return -1;
2466 	}
2467 
2468 	if (rte_event_dev_start(evdev) < 0) {
2469 		printf("%d: Error with start call\n", __LINE__);
2470 		return -1;
2471 	}
2472 
2473 	/************** FORWARD ****************/
2474 #define QID1_NUM 5
2475 	for (i = 0; i < QID1_NUM; i++) {
2476 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2477 
2478 		if (!arp) {
2479 			printf("%d: gen of pkt failed\n", __LINE__);
2480 			goto err;
2481 		}
2482 
2483 		ev.queue_id =  t->qid[0];
2484 		ev.op = RTE_EVENT_OP_NEW;
2485 		ev.mbuf = arp;
2486 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2487 		if (err != 1) {
2488 			printf("%d: Failed to enqueue\n", __LINE__);
2489 			goto err;
2490 		}
2491 	}
2492 #define QID2_NUM 3
2493 	for (i = 0; i < QID2_NUM; i++) {
2494 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2495 
2496 		if (!arp) {
2497 			printf("%d: gen of pkt failed\n", __LINE__);
2498 			goto err;
2499 		}
2500 		ev.queue_id =  t->qid[1];
2501 		ev.op = RTE_EVENT_OP_NEW;
2502 		ev.mbuf = arp;
2503 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2504 		if (err != 1) {
2505 			printf("%d: Failed to enqueue\n", __LINE__);
2506 			goto err;
2507 		}
2508 	}
2509 
2510 	/* schedule */
2511 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2512 
2513 	err = test_event_dev_stats_get(evdev, &stats);
2514 	if (err) {
2515 		printf("%d: failed to get stats\n", __LINE__);
2516 		goto err;
2517 	}
2518 
2519 	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2520 			stats.tx_pkts != QID1_NUM + QID2_NUM) {
2521 		printf("%d: Sched core didn't handle pkt as expected\n",
2522 				__LINE__);
2523 		goto err;
2524 	}
2525 
2526 	if (stats.port_inflight[p1] != QID1_NUM) {
2527 		printf("%d: %s port 1 inflight not correct\n", __LINE__,
2528 				__func__);
2529 		goto err;
2530 	}
2531 	if (stats.port_inflight[p2] != QID2_NUM) {
2532 		printf("%d: %s port 2 inflight not correct\n", __LINE__,
2533 				__func__);
2534 		goto err;
2535 	}
2536 
2537 	/************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2538 	/* port 1 */
2539 	struct rte_event events[QID1_NUM + QID2_NUM];
2540 	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2541 			RTE_DIM(events), 0);
2542 
2543 	if (deq_pkts != QID1_NUM) {
2544 		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2545 		goto err;
2546 	}
2547 	err = test_event_dev_stats_get(evdev, &stats);
2548 	if (err || stats.port_inflight[p1] != QID1_NUM) {
2549 		printf("%d: port 1 inflight changed after DEQ\n",
2550 				__LINE__);
2551 		goto err;
2552 	}
2553 	for (i = 0; i < QID1_NUM; i++) {
2554 		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2555 				1);
2556 		if (err != 1) {
2557 			printf("%d: %s rte enqueue of inf release failed\n",
2558 				__LINE__, __func__);
2559 			goto err;
2560 		}
2561 	}
2562 
2563 	/*
2564 	 * The scheduler core is what decrements inflights, so it must run
2565 	 * again to process the release (drop) messages just enqueued
2566 	 */
2567 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2568 
2569 	err = test_event_dev_stats_get(evdev, &stats);
2570 	if (err || stats.port_inflight[p1] != 0) {
2571 		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2572 		goto err;
2573 	}
2574 
2575 	/* port2 */
2576 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2577 			RTE_DIM(events), 0);
2578 	if (deq_pkts != QID2_NUM) {
2579 		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2580 		goto err;
2581 	}
2582 	err = test_event_dev_stats_get(evdev, &stats);
2583 	if (err || stats.port_inflight[p2] != QID2_NUM) {
2584 		printf("%d: port 2 inflight changed after DEQ\n",
2585 				__LINE__);
2586 		goto err;
2587 	}
2588 	for (i = 0; i < QID2_NUM; i++) {
2589 		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2590 				1);
2591 		if (err != 1) {
2592 			printf("%d: %s rte enqueue of inf release failed\n",
2593 				__LINE__, __func__);
2594 			goto err;
2595 		}
2596 	}
2597 
2598 	/*
2599 	 * The scheduler core is what decrements inflights, so it must run
2600 	 * again to process the release (drop) messages just enqueued
2601 	 */
2602 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2603 
2604 	err = test_event_dev_stats_get(evdev, &stats);
2605 	if (err || stats.port_inflight[p2] != 0) {
2606 		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2607 		goto err;
2608 	}
2609 	cleanup(t);
2610 	return 0;
2611 
2612 err:
2613 	rte_event_dev_dump(evdev, stdout);
2614 	cleanup(t);
2615 	return -1;
2616 }
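
/*
 * Inflight accounting rules exercised above, in sketch form (for a port
 * linked to an atomic queue):
 *
 *	enqueue RTE_EVENT_OP_NEW     -> no change on the producer port
 *	scheduler moves event to CQ  -> destination port_inflight++
 *	rte_event_dequeue_burst()    -> no change (event still owned)
 *	enqueue RTE_EVENT_OP_RELEASE -> port_inflight-- once the
 *	                                scheduler runs again
 *
 * This is why the test only re-reads the stats after an extra
 * rte_service_run_iter_on_app_lcore() call.
 */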
2617 
2618 static int
2619 parallel_basic(struct test *t, int check_order)
2620 {
2621 	const uint8_t rx_port = 0;
2622 	const uint8_t w1_port = 1;
2623 	const uint8_t w3_port = 3;
2624 	const uint8_t tx_port = 4;
2625 	int err;
2626 	int i;
2627 	uint32_t deq_pkts, j;
2628 	struct rte_mbuf *mbufs[3];
2630 	const uint32_t MAGIC_SEQN = 1234;
2631 
2632 	/* Create instance with 5 ports and 2 QIDs */
2633 	if (init(t, 2, tx_port + 1) < 0 ||
2634 			create_ports(t, tx_port + 1) < 0 ||
2635 			(check_order ?  create_ordered_qids(t, 1) :
2636 				create_unordered_qids(t, 1)) < 0 ||
2637 			create_directed_qids(t, 1, &tx_port)) {
2638 		printf("%d: Error initializing device\n", __LINE__);
2639 		return -1;
2640 	}
2641 
2642 	/*
2643 	 * CQ mapping to QID
2644 	 * We need three ports, all mapped to the same ordered qid0. Then we'll
2645 	 * take a packet out to each port, re-enqueue in reverse order,
2646 	 * then make sure the reordering has taken place properly when we
2647 	 * dequeue from the tx_port.
2648 	 *
2649 	 * Simplified test setup diagram:
2650 	 *
2651 	 * rx_port        w1_port
2652 	 *        \     /         \
2653 	 *         qid0 - w2_port - qid1
2654 	 *              \         /     \
2655 	 *                w3_port        tx_port
2656 	 */
2657 	/* CQ mapping to QID for LB ports (directed mapped on create) */
2658 	for (i = w1_port; i <= w3_port; i++) {
2659 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2660 				1);
2661 		if (err != 1) {
2662 			printf("%d: error mapping lb qid\n", __LINE__);
2663 			cleanup(t);
2664 			return -1;
2665 		}
2666 	}
2667 
2668 	if (rte_event_dev_start(evdev) < 0) {
2669 		printf("%d: Error with start call\n", __LINE__);
2670 		return -1;
2671 	}
2672 
2673 	/* Enqueue 3 packets to the rx port */
2674 	for (i = 0; i < 3; i++) {
2675 		struct rte_event ev;
2676 		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2677 		if (!mbufs[i]) {
2678 			printf("%d: gen of pkt failed\n", __LINE__);
2679 			return -1;
2680 		}
2681 
2682 		ev.queue_id = t->qid[0];
2683 		ev.op = RTE_EVENT_OP_NEW;
2684 		ev.mbuf = mbufs[i];
2685 		mbufs[i]->seqn = MAGIC_SEQN + i;
2686 
2687 		/* generate pkt and enqueue */
2688 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2689 		if (err != 1) {
2690 			printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2691 					__LINE__, i, err);
2692 			return -1;
2693 		}
2694 	}
2695 
2696 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2697 
2698 	/* use extra slot to make logic in loops easier */
2699 	struct rte_event deq_ev[w3_port + 1];
2700 
2701 	/* Dequeue the 3 packets, one from each worker port */
2702 	for (i = w1_port; i <= w3_port; i++) {
2703 		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2704 				&deq_ev[i], 1, 0);
2705 		if (deq_pkts != 1) {
2706 			printf("%d: Failed to deq\n", __LINE__);
2707 			rte_event_dev_dump(evdev, stdout);
2708 			return -1;
2709 		}
2710 	}
2711 
2712 	/* Enqueue each packet in reverse order, flushing after each one */
2713 	for (i = w3_port; i >= w1_port; i--) {
2714 
2715 		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2716 		deq_ev[i].queue_id = t->qid[1];
2717 		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2718 		if (err != 1) {
2719 			printf("%d: Failed to enqueue\n", __LINE__);
2720 			return -1;
2721 		}
2722 	}
2723 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2724 
2725 	/* dequeue from the tx ports, we should get 3 packets */
2726 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2727 			3, 0);
2728 
2729 	/* Check to see if we've got all 3 packets */
2730 	if (deq_pkts != 3) {
2731 		printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2732 			__LINE__, deq_pkts, tx_port);
2733 		rte_event_dev_dump(evdev, stdout);
2734 		return -1;
2735 	}
2736 
2737 	/* Check to see if the sequence numbers are in expected order */
2738 	if (check_order) {
2739 		for (j = 0 ; j < deq_pkts ; j++) {
2740 			if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2741 				printf(
2742 					"%d: Incorrect sequence number(%d) from port %d\n",
2743 					__LINE__, deq_ev[j].mbuf->seqn, tx_port);
2744 				return -1;
2745 			}
2746 		}
2747 	}
2748 
2749 	/* Destroy the instance */
2750 	cleanup(t);
2751 	return 0;
2752 }
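
/*
 * Illustrative sketch (not part of the test): the reordering relied on
 * above is the normal ordered-queue worker pattern - workers may finish
 * events in any order, provided each event is sent on with
 * RTE_EVENT_OP_FORWARD from the port it was dequeued on:
 *
 *	struct rte_event ev;
 *	if (rte_event_dequeue_burst(dev_id, worker_port, &ev, 1, 0)) {
 *		ev.op = RTE_EVENT_OP_FORWARD;
 *		ev.queue_id = next_qid;
 *		rte_event_enqueue_burst(dev_id, worker_port, &ev, 1);
 *	}
 *
 * The device restores the original dequeue order at the next queue, so
 * the tx port sees MAGIC_SEQN, MAGIC_SEQN + 1, ... even though the
 * workers re-enqueued in reverse.
 */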
2753 
2754 static int
2755 ordered_basic(struct test *t)
2756 {
2757 	return parallel_basic(t, 1);
2758 }
2759 
2760 static int
2761 unordered_basic(struct test *t)
2762 {
2763 	return parallel_basic(t, 0);
2764 }
2765 
2766 static int
2767 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2768 {
2769 	const struct rte_event new_ev = {
2770 			.op = RTE_EVENT_OP_NEW
2771 			/* all other fields zero */
2772 	};
2773 	struct rte_event ev = new_ev;
2774 	unsigned int rx_port = 0; /* port we get the first flow on */
2775 	char rx_port_used_stat[64];
2776 	char rx_port_free_stat[64];
2777 	char other_port_used_stat[64];
2778 
2779 	if (init(t, 1, 2) < 0 ||
2780 			create_ports(t, 2) < 0 ||
2781 			create_atomic_qids(t, 1) < 0) {
2782 		printf("%d: Error initializing device\n", __LINE__);
2783 		return -1;
2784 	}
2785 	int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2786 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2787 			nb_links != 1) {
2788 		printf("%d: Error linking queue to ports\n", __LINE__);
2789 		goto err;
2790 	}
2791 	if (rte_event_dev_start(evdev) < 0) {
2792 		printf("%d: Error with start call\n", __LINE__);
2793 		goto err;
2794 	}
2795 
2796 	/* send one packet and see where it goes, port 0 or 1 */
2797 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2798 		printf("%d: Error doing first enqueue\n", __LINE__);
2799 		goto err;
2800 	}
2801 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2802 
2803 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2804 			!= 1)
2805 		rx_port = 1;
2806 
2807 	snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2808 			"port_%u_cq_ring_used", rx_port);
2809 	snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2810 			"port_%u_cq_ring_free", rx_port);
2811 	snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2812 			"port_%u_cq_ring_used", rx_port ^ 1);
2813 	if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2814 			!= 1) {
2815 		printf("%d: Error, first event not scheduled\n", __LINE__);
2816 		goto err;
2817 	}
2818 
2819 	/* now fill up the rx port's queue with one flow to cause HOLB */
2820 	do {
2821 		ev = new_ev;
2822 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2823 			printf("%d: Error with enqueue\n", __LINE__);
2824 			goto err;
2825 		}
2826 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
2827 	} while (rte_event_dev_xstats_by_name_get(evdev,
2828 				rx_port_free_stat, NULL) != 0);
2829 
2830 	/* one more packet, which needs to stay in IQ - i.e. HOLB */
2831 	ev = new_ev;
2832 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2833 		printf("%d: Error with enqueue\n", __LINE__);
2834 		goto err;
2835 	}
2836 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2837 
2838 	/* check that the other port still has an empty CQ */
2839 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2840 			!= 0) {
2841 		printf("%d: Error, second port CQ is not empty\n", __LINE__);
2842 		goto err;
2843 	}
2844 	/* check IQ now has one packet */
2845 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2846 			!= 1) {
2847 		printf("%d: Error, QID does not have exactly 1 packet\n",
2848 			__LINE__);
2849 		goto err;
2850 	}
2851 
2852 	/* send another flow, which should pass the other IQ entry */
2853 	ev = new_ev;
2854 	ev.flow_id = 1;
2855 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2856 		printf("%d: Error with enqueue\n", __LINE__);
2857 		goto err;
2858 	}
2859 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2860 
2861 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2862 			!= 1) {
2863 		printf("%d: Error, second flow did not pass out first\n",
2864 			__LINE__);
2865 		goto err;
2866 	}
2867 
2868 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2869 			!= 1) {
2870 		printf("%d: Error, QID does not have exactly 1 packet\n",
2871 			__LINE__);
2872 		goto err;
2873 	}
2874 	cleanup(t);
2875 	return 0;
2876 err:
2877 	rte_event_dev_dump(evdev, stdout);
2878 	cleanup(t);
2879 	return -1;
2880 }
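
/*
 * The blocking scenario above in brief: atomic scheduling pins flow 0
 * to the CQ it first landed on, so once that CQ fills, further flow 0
 * events wait in the IQ even though the other port is idle. A different
 * flow_id is not pinned and may overtake the waiting event - the "pass
 * out first" check. Applications avoid real head-of-line blocking by
 * spreading work across many flow_ids.
 */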
2881 
2882 static void
2883 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2884 {
2885 	*((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2886 }
2887 
2888 static int
2889 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2890 {
2891 	const struct rte_event new_ev = {
2892 		.op = RTE_EVENT_OP_NEW,
2893 		.u64 = 0xCA11BACC,
2894 		.queue_id = 0
2895 	};
2896 	struct rte_event ev = new_ev;
2897 	uint8_t count = 0;
2898 	int i;
2899 
2900 	if (init(t, 1, 1) < 0 ||
2901 	    create_ports(t, 1) < 0 ||
2902 	    create_atomic_qids(t, 1) < 0) {
2903 		printf("%d: Error initializing device\n", __LINE__);
2904 		return -1;
2905 	}
2906 
2907 	/* Link the queue so *_start() doesn't error out */
2908 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2909 		printf("%d: Error linking queue to port\n", __LINE__);
2910 		goto err;
2911 	}
2912 
2913 	if (rte_event_dev_start(evdev) < 0) {
2914 		printf("%d: Error with start call\n", __LINE__);
2915 		goto err;
2916 	}
2917 
2918 	for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2919 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2920 			printf("%d: Error enqueuing events\n", __LINE__);
2921 			goto err;
2922 		}
2923 	}
2924 
2925 	/* Schedule the events from the port to the IQ. At least one event
2926 	 * should be remaining in the queue.
2927 	 */
2928 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2929 
2930 	if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2931 		printf("%d: Error installing the flush callback\n", __LINE__);
2932 		goto err;
2933 	}
2934 
2935 	cleanup(t);
2936 
2937 	if (count == 0) {
2938 		printf("%d: Error executing the flush callback\n", __LINE__);
2939 		return -1;
2940 	}
2941 
2942 	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2943 		printf("%d: Error uninstalling the flush callback\n", __LINE__);
2944 		return -1;
2945 	}
2946 
2947 	return 0;
2948 err:
2949 	rte_event_dev_dump(evdev, stdout);
2950 	cleanup(t);
2951 	return -1;
2952 }
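
/*
 * Illustrative sketch (not part of the test): the callback registered
 * above uses the standard stop-flush signature and is invoked once for
 * each event still inside the device when rte_event_dev_stop() runs.
 * An application reclaiming mbufs might do (my_flush is hypothetical):
 *
 *	static void
 *	my_flush(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, my_flush, NULL);
 *
 * Here the callback only counts magic-marked events so the test can
 * verify that it ran.
 */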
2953 
2954 static int
2955 worker_loopback_worker_fn(void *arg)
2956 {
2957 	struct test *t = arg;
2958 	uint8_t port = t->port[1];
2959 	int count = 0;
2960 	int enqd;
2961 
2962 	/*
2963 	 * Takes packets from the input port and then loops them back through
2964 	 * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
2965 	 * so each packet is scheduled 8 * 16 = 128 times in total.
2966 	 */
2967 	printf("%d: \tWorker function started\n", __LINE__);
2968 	while (count < NUM_PACKETS) {
2969 #define BURST_SIZE 32
2970 		struct rte_event ev[BURST_SIZE];
2971 		uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2972 				BURST_SIZE, 0);
2973 		if (nb_rx == 0) {
2974 			rte_pause();
2975 			continue;
2976 		}
2977 
2978 		for (i = 0; i < nb_rx; i++) {
2979 			ev[i].queue_id++;
2980 			if (ev[i].queue_id != 8) {
2981 				ev[i].op = RTE_EVENT_OP_FORWARD;
2982 				enqd = rte_event_enqueue_burst(evdev, port,
2983 						&ev[i], 1);
2984 				if (enqd != 1) {
2985 					printf("%d: Can't enqueue FWD!!\n",
2986 							__LINE__);
2987 					return -1;
2988 				}
2989 				continue;
2990 			}
2991 
2992 			ev[i].queue_id = 0;
2993 			ev[i].mbuf->udata64++;
2994 			if (ev[i].mbuf->udata64 != 16) {
2995 				ev[i].op = RTE_EVENT_OP_FORWARD;
2996 				enqd = rte_event_enqueue_burst(evdev, port,
2997 						&ev[i], 1);
2998 				if (enqd != 1) {
2999 					printf("%d: Can't enqueue FWD!!\n",
3000 							__LINE__);
3001 					return -1;
3002 				}
3003 				continue;
3004 			}
3005 			/* we have hit 16 iterations through system - drop */
3006 			rte_pktmbuf_free(ev[i].mbuf);
3007 			count++;
3008 			ev[i].op = RTE_EVENT_OP_RELEASE;
3009 			enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3010 			if (enqd != 1) {
3011 				printf("%d drop enqueue failed\n", __LINE__);
3012 				return -1;
3013 			}
3014 		}
3015 	}
3016 
3017 	return 0;
3018 }
3019 
3020 static int
3021 worker_loopback_producer_fn(void *arg)
3022 {
3023 	struct test *t = arg;
3024 	uint8_t port = t->port[0];
3025 	uint64_t count = 0;
3026 
3027 	printf("%d: \tProducer function started\n", __LINE__);
3028 	while (count < NUM_PACKETS) {
3029 		struct rte_mbuf *m = 0;
3030 		do {
3031 			m = rte_pktmbuf_alloc(t->mbuf_pool);
3032 		} while (m == NULL);
3033 
3034 		m->udata64 = 0;
3035 
3036 		struct rte_event ev = {
3037 				.op = RTE_EVENT_OP_NEW,
3038 				.queue_id = t->qid[0],
3039 				.flow_id = (uintptr_t)m & 0xFFFF,
3040 				.mbuf = m,
3041 		};
3042 
3043 		if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3044 			while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
3045 					1)
3046 				rte_pause();
3047 		}
3048 
3049 		count++;
3050 	}
3051 
3052 	return 0;
3053 }
3054 
3055 static int
3056 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3057 {
3058 	/* use a single producer core, and a worker core to see what happens
3059 	 * if the worker loops packets back multiple times
3060 	 */
3061 	struct test_event_dev_stats stats;
3062 	uint64_t print_cycles = 0, cycles = 0;
3063 	uint64_t tx_pkts = 0;
3064 	int err;
3065 	int w_lcore, p_lcore;
3066 
3067 	if (init(t, 8, 2) < 0 ||
3068 			create_atomic_qids(t, 8) < 0) {
3069 		printf("%d: Error initializing device\n", __LINE__);
3070 		return -1;
3071 	}
3072 
3073 	/* RX with low max events */
3074 	static struct rte_event_port_conf conf = {
3075 			.dequeue_depth = 32,
3076 			.enqueue_depth = 64,
3077 	};
3078 	/* beware: this cannot be initialized in the static above as it would
3079 	 * only be initialized once - and this needs to be set for multiple runs
3080 	 */
3081 	conf.new_event_threshold = 512;
3082 	conf.disable_implicit_release = disable_implicit_release;
3083 
3084 	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3085 		printf("Error setting up RX port\n");
3086 		return -1;
3087 	}
3088 	t->port[0] = 0;
3089 	/* TX with higher max events */
3090 	conf.new_event_threshold = 4096;
3091 	if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3092 		printf("Error setting up TX port\n");
3093 		return -1;
3094 	}
3095 	t->port[1] = 1;
3096 
3097 	/* CQ mapping to QID */
3098 	err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3099 	if (err != 8) { /* should have mapped all 8 queues */
3100 		printf("%d: error mapping port 2 to all qids\n", __LINE__);
3101 		return -1;
3102 	}
3103 
3104 	if (rte_event_dev_start(evdev) < 0) {
3105 		printf("%d: Error with start call\n", __LINE__);
3106 		return -1;
3107 	}
3108 
3109 	p_lcore = rte_get_next_lcore(
3110 			/* start core */ -1,
3111 			/* skip master */ 1,
3112 			/* wrap */ 0);
3113 	w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3114 
3115 	rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3116 	rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3117 
3118 	print_cycles = cycles = rte_get_timer_cycles();
3119 	while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3120 			rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3121 
3122 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
3123 
3124 		uint64_t new_cycles = rte_get_timer_cycles();
3125 
3126 		if (new_cycles - print_cycles > rte_get_timer_hz()) {
3127 			test_event_dev_stats_get(evdev, &stats);
3128 			printf(
3129 				"%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3130 				__LINE__, stats.rx_pkts, stats.tx_pkts);
3131 
3132 			print_cycles = new_cycles;
3133 		}
3134 		if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3135 			test_event_dev_stats_get(evdev, &stats);
3136 			if (stats.tx_pkts == tx_pkts) {
3137 				rte_event_dev_dump(evdev, stdout);
3138 				printf("Dumping xstats:\n");
3139 				xstats_print();
3140 				printf(
3141 					"%d: No schedules for 3 seconds, suspected deadlock\n",
3142 					__LINE__);
3143 				return -1;
3144 			}
3145 			tx_pkts = stats.tx_pkts;
3146 			cycles = new_cycles;
3147 		}
3148 	}
3149 	/* ensure all completions are flushed */
3150 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3151 
3152 	rte_eal_mp_wait_lcore();
3153 
3154 	cleanup(t);
3155 	return 0;
3156 }
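
/*
 * Illustrative sketch (not part of the test): the polling loop above
 * doubles as the scheduler driver. In a deployment, the sw PMD's
 * service would instead be mapped to a dedicated service lcore, e.g.:
 *
 *	uint32_t service_id;
 *	rte_event_dev_service_id_get(dev_id, &service_id);
 *	rte_service_lcore_add(service_lcore_id);
 *	rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *	rte_service_lcore_start(service_lcore_id);
 *
 * The selftest instead drives the service inline with
 * rte_service_run_iter_on_app_lcore() so it can interleave scheduling
 * with its own liveness checks.
 */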
3157 
3158 static struct rte_mempool *eventdev_func_mempool;
3159 
3160 int
3161 test_sw_eventdev(void)
3162 {
3163 	struct test *t;
3164 	int ret;
3165 
3166 	t = malloc(sizeof(struct test));
3167 	if (t == NULL)
3168 		return -1;
3169 	/* manually initialize the op, as older GCC versions complain about
3170 	 * static initialization of struct members that are bitfields.
3171 	 */
3172 	release_ev.op = RTE_EVENT_OP_RELEASE;
3173 
3174 	const char *eventdev_name = "event_sw";
3175 	evdev = rte_event_dev_get_dev_id(eventdev_name);
3176 	if (evdev < 0) {
3177 		printf("%d: Eventdev %s not found - creating.\n",
3178 				__LINE__, eventdev_name);
3179 		if (rte_vdev_init(eventdev_name, NULL) < 0) {
3180 			printf("Error creating eventdev\n");
3181 			goto test_fail;
3182 		}
3183 		evdev = rte_event_dev_get_dev_id(eventdev_name);
3184 		if (evdev < 0) {
3185 			printf("Error finding newly created eventdev\n");
3186 			goto test_fail;
3187 		}
3188 	}
3189 
3190 	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3191 		printf("Failed to get service ID for software event dev\n");
3192 		goto test_fail;
3193 	}
3194 
3195 	rte_service_runstate_set(t->service_id, 1);
3196 	rte_service_set_runstate_mapped_check(t->service_id, 0);
3197 
3198 	/* Only create mbuf pool once, reuse for each test run */
3199 	if (!eventdev_func_mempool) {
3200 		eventdev_func_mempool = rte_pktmbuf_pool_create(
3201 				"EVENTDEV_SW_SA_MBUF_POOL",
3202 				(1<<12), /* 4k buffers */
3203 				32 /*MBUF_CACHE_SIZE*/,
3204 				0,
3205 				512, /* use very small mbufs */
3206 				rte_socket_id());
3207 		if (!eventdev_func_mempool) {
3208 			printf("ERROR creating mempool\n");
3209 			goto test_fail;
3210 		}
3211 	}
3212 	t->mbuf_pool = eventdev_func_mempool;
3213 	printf("*** Running Single Directed Packet test...\n");
3214 	ret = test_single_directed_packet(t);
3215 	if (ret != 0) {
3216 		printf("ERROR - Single Directed Packet test FAILED.\n");
3217 		goto test_fail;
3218 	}
3219 	printf("*** Running Directed Forward Credit test...\n");
3220 	ret = test_directed_forward_credits(t);
3221 	if (ret != 0) {
3222 		printf("ERROR - Directed Forward Credit test FAILED.\n");
3223 		goto test_fail;
3224 	}
3225 	printf("*** Running Single Load Balanced Packet test...\n");
3226 	ret = single_packet(t);
3227 	if (ret != 0) {
3228 		printf("ERROR - Single Packet test FAILED.\n");
3229 		goto test_fail;
3230 	}
3231 	printf("*** Running Unordered Basic test...\n");
3232 	ret = unordered_basic(t);
3233 	if (ret != 0) {
3234 		printf("ERROR -  Unordered Basic test FAILED.\n");
3235 		goto test_fail;
3236 	}
3237 	printf("*** Running Ordered Basic test...\n");
3238 	ret = ordered_basic(t);
3239 	if (ret != 0) {
3240 		printf("ERROR -  Ordered Basic test FAILED.\n");
3241 		goto test_fail;
3242 	}
3243 	printf("*** Running Burst Packets test...\n");
3244 	ret = burst_packets(t);
3245 	if (ret != 0) {
3246 		printf("ERROR - Burst Packets test FAILED.\n");
3247 		goto test_fail;
3248 	}
3249 	printf("*** Running Load Balancing test...\n");
3250 	ret = load_balancing(t);
3251 	if (ret != 0) {
3252 		printf("ERROR - Load Balancing test FAILED.\n");
3253 		goto test_fail;
3254 	}
3255 	printf("*** Running Prioritized Directed test...\n");
3256 	ret = test_priority_directed(t);
3257 	if (ret != 0) {
3258 		printf("ERROR - Prioritized Directed test FAILED.\n");
3259 		goto test_fail;
3260 	}
3261 	printf("*** Running Prioritized Atomic test...\n");
3262 	ret = test_priority_atomic(t);
3263 	if (ret != 0) {
3264 		printf("ERROR - Prioritized Atomic test FAILED.\n");
3265 		goto test_fail;
3266 	}
3267 
3268 	printf("*** Running Prioritized Ordered test...\n");
3269 	ret = test_priority_ordered(t);
3270 	if (ret != 0) {
3271 		printf("ERROR - Prioritized Ordered test FAILED.\n");
3272 		goto test_fail;
3273 	}
3274 	printf("*** Running Prioritized Unordered test...\n");
3275 	ret = test_priority_unordered(t);
3276 	if (ret != 0) {
3277 		printf("ERROR - Prioritized Unordered test FAILED.\n");
3278 		goto test_fail;
3279 	}
3280 	printf("*** Running Invalid QID test...\n");
3281 	ret = invalid_qid(t);
3282 	if (ret != 0) {
3283 		printf("ERROR - Invalid QID test FAILED.\n");
3284 		goto test_fail;
3285 	}
3286 	printf("*** Running Load Balancing History test...\n");
3287 	ret = load_balancing_history(t);
3288 	if (ret != 0) {
3289 		printf("ERROR - Load Balancing History test FAILED.\n");
3290 		goto test_fail;
3291 	}
3292 	printf("*** Running Inflight Count test...\n");
3293 	ret = inflight_counts(t);
3294 	if (ret != 0) {
3295 		printf("ERROR - Inflight Count test FAILED.\n");
3296 		goto test_fail;
3297 	}
3298 	printf("*** Running Abuse Inflights test...\n");
3299 	ret = abuse_inflights(t);
3300 	if (ret != 0) {
3301 		printf("ERROR - Abuse Inflights test FAILED.\n");
3302 		goto test_fail;
3303 	}
3304 	printf("*** Running XStats test...\n");
3305 	ret = xstats_tests(t);
3306 	if (ret != 0) {
3307 		printf("ERROR - XStats test FAILED.\n");
3308 		goto test_fail;
3309 	}
3310 	printf("*** Running XStats ID Reset test...\n");
3311 	ret = xstats_id_reset_tests(t);
3312 	if (ret != 0) {
3313 		printf("ERROR - XStats ID Reset test FAILED.\n");
3314 		goto test_fail;
3315 	}
3316 	printf("*** Running XStats Brute Force test...\n");
3317 	ret = xstats_brute_force(t);
3318 	if (ret != 0) {
3319 		printf("ERROR - XStats Brute Force test FAILED.\n");
3320 		goto test_fail;
3321 	}
3322 	printf("*** Running XStats ID Abuse test...\n");
3323 	ret = xstats_id_abuse_tests(t);
3324 	if (ret != 0) {
3325 		printf("ERROR - XStats ID Abuse test FAILED.\n");
3326 		goto test_fail;
3327 	}
3328 	printf("*** Running QID Priority test...\n");
3329 	ret = qid_priorities(t);
3330 	if (ret != 0) {
3331 		printf("ERROR - QID Priority test FAILED.\n");
3332 		goto test_fail;
3333 	}
3334 	printf("*** Running Unlink-in-progress test...\n");
3335 	ret = unlink_in_progress(t);
3336 	if (ret != 0) {
3337 		printf("ERROR - Unlink in progress test FAILED.\n");
3338 		goto test_fail;
3339 	}
3340 	printf("*** Running Ordered Reconfigure test...\n");
3341 	ret = ordered_reconfigure(t);
3342 	if (ret != 0) {
3343 		printf("ERROR - Ordered Reconfigure test FAILED.\n");
3344 		goto test_fail;
3345 	}
3346 	printf("*** Running Port LB Single Reconfig test...\n");
3347 	ret = port_single_lb_reconfig(t);
3348 	if (ret != 0) {
3349 		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3350 		goto test_fail;
3351 	}
3352 	printf("*** Running Port Reconfig Credits test...\n");
3353 	ret = port_reconfig_credits(t);
3354 	if (ret != 0) {
3355 		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3356 		goto test_fail;
3357 	}
3358 	printf("*** Running Head-of-line-blocking test...\n");
3359 	ret = holb(t);
3360 	if (ret != 0) {
3361 		printf("ERROR - Head-of-line-blocking test FAILED.\n");
3362 		goto test_fail;
3363 	}
3364 	printf("*** Running Stop Flush test...\n");
3365 	ret = dev_stop_flush(t);
3366 	if (ret != 0) {
3367 		printf("ERROR - Stop Flush test FAILED.\n");
3368 		goto test_fail;
3369 	}
3370 	if (rte_lcore_count() >= 3) {
3371 		printf("*** Running Worker loopback test...\n");
3372 		ret = worker_loopback(t, 0);
3373 		if (ret != 0) {
3374 			printf("ERROR - Worker loopback test FAILED.\n");
3375 			goto test_fail;
3376 		}
3377 
3378 		printf("*** Running Worker loopback test (implicit release disabled)...\n");
3379 		ret = worker_loopback(t, 1);
3380 		if (ret != 0) {
3381 			printf("ERROR - Worker loopback test FAILED.\n");
3382 			goto test_fail;
3383 		}
3384 	} else {
3385 		printf("### Not enough cores for worker loopback tests.\n");
3386 		printf("### Need at least 3 cores for the tests.\n");
3387 	}
3388 
3389 	/*
3390 	 * Free test instance, leaving mempool initialized, and a pointer to it
3391 	 * in static eventdev_func_mempool, as it is re-used on re-runs
3392 	 */
3393 	free(t);
3394 
3395 	printf("SW Eventdev Selftest Successful.\n");
3396 	return 0;
3397 test_fail:
3398 	free(t);
3399 	printf("SW Eventdev Selftest Failed.\n");
3400 	return -1;
3401 }
3402