1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <string.h>
7 #include <stdint.h>
8 #include <stdlib.h>
9 #include <errno.h>
10 #include <unistd.h>
11 #include <sys/queue.h>
12 
13 #include <rte_memory.h>
14 #include <rte_launch.h>
15 #include <rte_eal.h>
16 #include <rte_per_lcore.h>
17 #include <rte_lcore.h>
18 #include <rte_debug.h>
19 #include <rte_ethdev.h>
20 #include <rte_cycles.h>
21 #include <rte_eventdev.h>
22 #include <rte_pause.h>
23 #include <rte_service.h>
24 #include <rte_service_component.h>
25 #include <bus_vdev_driver.h>
26 
27 #include "sw_evdev.h"
28 
29 #define MAX_PORTS 16
30 #define MAX_QIDS 16
31 #define NUM_PACKETS (1 << 17)
32 #define DEQUEUE_DEPTH 128
33 
34 static int evdev;
35 
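/* Per-test context: the mbuf pool plus the port and queue ids created by
 * the init()/create_*() helpers below.
 */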
36 struct test {
37 	struct rte_mempool *mbuf_pool;
38 	uint8_t port[MAX_PORTS];
39 	uint8_t qid[MAX_QIDS];
40 	int nb_qids;
41 	uint32_t service_id;
42 };
43 
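/* Per-mbuf counter carried in an mbuf dynamic field. The offset is
 * registered with rte_mbuf_dynfield_register() during test setup, before
 * any test dereferences counter_field().
 */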
44 typedef uint8_t counter_dynfield_t;
45 static int counter_dynfield_offset = -1;
46 
47 static inline counter_dynfield_t *
48 counter_field(struct rte_mbuf *mbuf)
49 {
50 	return RTE_MBUF_DYNFIELD(mbuf,
51 			counter_dynfield_offset, counter_dynfield_t *);
52 }
53 
54 static struct rte_event release_ev;
55 
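/* Allocate an mbuf from mp and fill it with a canned 60-byte ARP request.
 * The tests use it purely as a payload to schedule, so portid is unused.
 */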
56 static inline struct rte_mbuf *
57 rte_gen_arp(int portid, struct rte_mempool *mp)
58 {
59 	/*
60 	 * len = 14 + 46
61 	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
62 	 */
63 	static const uint8_t arp_request[] = {
64 		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
65 		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
66 		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
67 		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
68 		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
69 		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
70 		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
71 		0x00, 0x00, 0x00, 0x00
72 	};
73 	struct rte_mbuf *m;
74 	int pkt_len = sizeof(arp_request); /* full 60-byte frame, per above */
75 
76 	m = rte_pktmbuf_alloc(mp);
77 	if (!m)
78 		return NULL;
79 
80 	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
81 		arp_request, pkt_len);
82 	rte_pktmbuf_pkt_len(m) = pkt_len;
83 	rte_pktmbuf_data_len(m) = pkt_len;
84 
85 	RTE_SET_USED(portid);
86 
87 	return m;
88 }
89 
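/* Debug helper: dump the device, port and queue xstats to stdout. */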
90 static void
91 xstats_print(void)
92 {
93 	const uint32_t XSTATS_MAX = 1024;
94 	uint32_t i;
95 	uint64_t ids[XSTATS_MAX];
96 	uint64_t values[XSTATS_MAX];
97 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
98 
99 	for (i = 0; i < XSTATS_MAX; i++)
100 		ids[i] = i;
101 
102 	/* Device names / values */
103 	int ret = rte_event_dev_xstats_names_get(evdev,
104 					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
105 					xstats_names, ids, XSTATS_MAX);
106 	if (ret < 0) {
107 		printf("%d: xstats names get() returned error\n",
108 			__LINE__);
109 		return;
110 	}
111 	ret = rte_event_dev_xstats_get(evdev,
112 					RTE_EVENT_DEV_XSTATS_DEVICE,
113 					0, ids, values, ret);
114 	if (ret > (signed int)XSTATS_MAX)
115 		printf("%s %d: more xstats available than space\n",
116 				__func__, __LINE__);
117 	for (i = 0; (signed int)i < ret; i++) {
118 		printf("%d : %s : %"PRIu64"\n",
119 				i, xstats_names[i].name, values[i]);
120 	}
121 
122 	/* Port names / values */
123 	ret = rte_event_dev_xstats_names_get(evdev,
124 					RTE_EVENT_DEV_XSTATS_PORT, 0,
125 					xstats_names, ids, XSTATS_MAX);
126 	ret = rte_event_dev_xstats_get(evdev,
127 					RTE_EVENT_DEV_XSTATS_PORT, 0,
128 					ids, values, ret);
129 	if (ret > (signed int)XSTATS_MAX)
130 		printf("%s %d: more xstats available than space\n",
131 				__func__, __LINE__);
132 	for (i = 0; (signed int)i < ret; i++) {
133 		printf("%d : %s : %"PRIu64"\n",
134 				i, xstats_names[i].name, values[i]);
135 	}
136 
137 	/* Queue names / values */
138 	ret = rte_event_dev_xstats_names_get(evdev,
139 					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
140 					xstats_names, ids, XSTATS_MAX);
141 	ret = rte_event_dev_xstats_get(evdev,
142 					RTE_EVENT_DEV_XSTATS_QUEUE,
143 					0, ids, values, ret);
144 	if (ret > (signed int)XSTATS_MAX)
145 		printf("%s %d: more xstats available than space\n",
146 				__func__, __LINE__);
147 	for (i = 0; (signed int)i < ret; i++) {
148 		printf("%d : %s : %"PRIu64"\n",
149 				i, xstats_names[i].name, values[i]);
150 	}
151 }
152 
153 /* initialization and config */
154 static inline int
155 init(struct test *t, int nb_queues, int nb_ports)
156 {
157 	struct rte_event_dev_config config = {
158 			.nb_event_queues = nb_queues,
159 			.nb_event_ports = nb_ports,
160 			.nb_event_queue_flows = 1024,
161 			.nb_events_limit = 4096,
162 			.nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
163 			.nb_event_port_enqueue_depth = 128,
164 	};
165 	int ret;
166 
167 	void *temp = t->mbuf_pool; /* save and restore mbuf pool */
168 
169 	memset(t, 0, sizeof(*t));
170 	t->mbuf_pool = temp;
171 
172 	ret = rte_event_dev_configure(evdev, &config);
173 	if (ret < 0)
174 		printf("%d: Error configuring device\n", __LINE__);
175 	return ret;
176 }
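/* A minimal sketch of the bring-up sequence the tests below use (the pool
 * argument stands in for the mempool created by the test runner):
 *
 *	struct test t = { .mbuf_pool = pool };
 *	if (init(&t, 1, 1) < 0 ||
 *			create_ports(&t, 1) < 0 ||
 *			create_atomic_qids(&t, 1) < 0)
 *		return -1;
 *	rte_event_port_link(evdev, t.port[0], &t.qid[0], NULL, 1);
 *	rte_event_dev_start(evdev);
 */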
177 
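/* Set up num_ports event ports with a common config and record their ids
 * in t->port[].
 */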
178 static inline int
179 create_ports(struct test *t, int num_ports)
180 {
181 	int i;
182 	static const struct rte_event_port_conf conf = {
183 			.new_event_threshold = 1024,
184 			.dequeue_depth = 32,
185 			.enqueue_depth = 64,
186 	};
187 	if (num_ports > MAX_PORTS)
188 		return -1;
189 
190 	for (i = 0; i < num_ports; i++) {
191 		if (rte_event_port_setup(evdev, i, &conf) < 0) {
192 			printf("Error setting up port %d\n", i);
193 			return -1;
194 		}
195 		t->port[i] = i;
196 	}
197 
198 	return 0;
199 }
200 
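/* Create num_qids load-balanced queues of the given schedule type and
 * record their ids in t->qid[].
 */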
201 static inline int
202 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
203 {
204 	int i;
205 
206 	/* Q creation */
207 	const struct rte_event_queue_conf conf = {
208 			.schedule_type = flags,
209 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
210 			.nb_atomic_flows = 1024,
211 			.nb_atomic_order_sequences = 1024,
212 	};
213 
	/* bounds-check first, before t->qid[] is written below */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

214 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
215 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
216 			printf("%d: error creating qid %d\n", __LINE__, i);
217 			return -1;
218 		}
219 		t->qid[i] = i;
220 	}
221 	t->nb_qids += num_qids;
224 
225 	return 0;
226 }
227 
228 static inline int
229 create_atomic_qids(struct test *t, int num_qids)
230 {
231 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
232 }
233 
234 static inline int
235 create_ordered_qids(struct test *t, int num_qids)
236 {
237 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
238 }
239 
240 
241 static inline int
242 create_unordered_qids(struct test *t, int num_qids)
243 {
244 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
245 }
246 
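/* Create num_qids single-link queues and link each one to the
 * corresponding entry of ports[].
 */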
247 static inline int
248 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
249 {
250 	int i;
251 
252 	/* Q creation */
253 	static const struct rte_event_queue_conf conf = {
254 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
255 			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
256 	};
257 
	/* bounds-check first, before t->qid[] is written below */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

258 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
259 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
260 			printf("%d: error creating qid %d\n", __LINE__, i);
261 			return -1;
262 		}
263 		t->qid[i] = i;
264 
265 		if (rte_event_port_link(evdev, ports[i - t->nb_qids],
266 				&t->qid[i], NULL, 1) != 1) {
267 			printf("%d: error creating link for qid %d\n",
268 					__LINE__, i);
269 			return -1;
270 		}
271 	}
272 	t->nb_qids += num_qids;
275 
276 	return 0;
277 }
278 
279 /* destruction */
280 static inline int
281 cleanup(struct test *t __rte_unused)
282 {
283 	rte_event_dev_stop(evdev);
284 	rte_event_dev_close(evdev);
285 	return 0;
286 }
287 
288 struct test_event_dev_stats {
289 	uint64_t rx_pkts;       /**< Total packets received */
290 	uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
291 	uint64_t tx_pkts;       /**< Total packets transmitted */
292 
293 	/** Packets received on this port */
294 	uint64_t port_rx_pkts[MAX_PORTS];
295 	/** Packets dropped on this port */
296 	uint64_t port_rx_dropped[MAX_PORTS];
297 	/** Packets inflight on this port */
298 	uint64_t port_inflight[MAX_PORTS];
299 	/** Packets transmitted on this port */
300 	uint64_t port_tx_pkts[MAX_PORTS];
301 	/** Packets received on this qid */
302 	uint64_t qid_rx_pkts[MAX_QIDS];
303 	/** Packets dropped on this qid */
304 	uint64_t qid_rx_dropped[MAX_QIDS];
305 	/** Packets transmitted on this qid */
306 	uint64_t qid_tx_pkts[MAX_QIDS];
307 };
308 
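/* Gather the sw PMD's device, per-port and per-queue xstats by name into
 * a flat struct so tests can check them directly.
 */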
309 static inline int
310 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
311 {
312 	static uint32_t i;
313 	static uint64_t total_ids[3]; /* rx, tx and drop */
314 	static uint64_t port_rx_pkts_ids[MAX_PORTS];
315 	static uint64_t port_rx_dropped_ids[MAX_PORTS];
316 	static uint64_t port_inflight_ids[MAX_PORTS];
317 	static uint64_t port_tx_pkts_ids[MAX_PORTS];
318 	static uint64_t qid_rx_pkts_ids[MAX_QIDS];
319 	static uint64_t qid_rx_dropped_ids[MAX_QIDS];
320 	static uint64_t qid_tx_pkts_ids[MAX_QIDS];
321 
322 	stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
323 			"dev_rx", &total_ids[0]);
324 	stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
325 			"dev_drop", &total_ids[1]);
326 	stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
327 			"dev_tx", &total_ids[2]);
328 	for (i = 0; i < MAX_PORTS; i++) {
329 		char name[32];
330 		snprintf(name, sizeof(name), "port_%u_rx", i);
331 		stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
332 				dev_id, name, &port_rx_pkts_ids[i]);
333 		snprintf(name, sizeof(name), "port_%u_drop", i);
334 		stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
335 				dev_id, name, &port_rx_dropped_ids[i]);
336 		snprintf(name, sizeof(name), "port_%u_inflight", i);
337 		stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
338 				dev_id, name, &port_inflight_ids[i]);
339 		snprintf(name, sizeof(name), "port_%u_tx", i);
340 		stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
341 				dev_id, name, &port_tx_pkts_ids[i]);
342 	}
343 	for (i = 0; i < MAX_QIDS; i++) {
344 		char name[32];
345 		snprintf(name, sizeof(name), "qid_%u_rx", i);
346 		stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
347 				dev_id, name, &qid_rx_pkts_ids[i]);
348 		snprintf(name, sizeof(name), "qid_%u_drop", i);
349 		stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
350 				dev_id, name, &qid_rx_dropped_ids[i]);
351 		snprintf(name, sizeof(name), "qid_%u_tx", i);
352 		stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
353 				dev_id, name, &qid_tx_pkts_ids[i]);
354 	}
355 
356 	return 0;
357 }
358 
359 /* run_prio_packet_test
360  * This performs a basic packet priority check on the test instance passed in.
361  * It is factored out of the main priority tests as the same tests must be
362  * performed to ensure prioritization of each type of QID.
363  *
364  * Requirements:
365  *  - An initialized test structure, including mempool
366  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
367  *  - t->qid[0] is the QID to be tested
368  *  - if LB QID, the CQ must be mapped to the QID.
369  */
370 static int
371 run_prio_packet_test(struct test *t)
372 {
373 	int err;
374 	const uint32_t MAGIC_SEQN[] = {4711, 1234};
375 	const uint32_t PRIORITY[] = {
376 		RTE_EVENT_DEV_PRIORITY_NORMAL,
377 		RTE_EVENT_DEV_PRIORITY_HIGHEST
378 	};
379 	unsigned int i;
380 	for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
381 		/* generate pkt and enqueue */
382 		struct rte_event ev;
383 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
384 		if (!arp) {
385 			printf("%d: gen of pkt failed\n", __LINE__);
386 			return -1;
387 		}
388 		*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN[i];
389 
390 		ev = (struct rte_event){
391 			.priority = PRIORITY[i],
392 			.op = RTE_EVENT_OP_NEW,
393 			.queue_id = t->qid[0],
394 			.mbuf = arp
395 		};
396 		err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
397 		if (err != 1) {
398 			printf("%d: error failed to enqueue\n", __LINE__);
399 			return -1;
400 		}
401 	}
402 
403 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
404 
405 	struct test_event_dev_stats stats;
406 	err = test_event_dev_stats_get(evdev, &stats);
407 	if (err) {
408 		printf("%d: error failed to get stats\n", __LINE__);
409 		return -1;
410 	}
411 
412 	if (stats.port_rx_pkts[t->port[0]] != 2) {
413 		printf("%d: error stats incorrect for directed port\n",
414 				__LINE__);
415 		rte_event_dev_dump(evdev, stdout);
416 		return -1;
417 	}
418 
419 	struct rte_event ev, ev2;
420 	uint32_t deq_pkts;
421 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
422 	if (deq_pkts != 1) {
423 		printf("%d: error failed to deq\n", __LINE__);
424 		rte_event_dev_dump(evdev, stdout);
425 		return -1;
426 	}
427 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
428 		printf("%d: first packet out not highest priority\n",
429 				__LINE__);
430 		rte_event_dev_dump(evdev, stdout);
431 		return -1;
432 	}
433 	rte_pktmbuf_free(ev.mbuf);
434 
435 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
436 	if (deq_pkts != 1) {
437 		printf("%d: error failed to deq\n", __LINE__);
438 		rte_event_dev_dump(evdev, stdout);
439 		return -1;
440 	}
441 	if (*rte_event_pmd_selftest_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
442 		printf("%d: second packet out not lower priority\n",
443 				__LINE__);
444 		rte_event_dev_dump(evdev, stdout);
445 		return -1;
446 	}
447 	rte_pktmbuf_free(ev2.mbuf);
448 
449 	cleanup(t);
450 	return 0;
451 }
452 
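/* Enqueue a single packet to a directed queue and verify it is dequeued
 * from the linked port with the expected seqn and per-port stats.
 */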
453 static int
454 test_single_directed_packet(struct test *t)
455 {
456 	const int rx_enq = 0;
457 	const int wrk_enq = 2;
458 	int err;
459 
460 	/* Create instance with 3 directed QIDs going to 3 ports */
461 	if (init(t, 3, 3) < 0 ||
462 			create_ports(t, 3) < 0 ||
463 			create_directed_qids(t, 3, t->port) < 0)
464 		return -1;
465 
466 	if (rte_event_dev_start(evdev) < 0) {
467 		printf("%d: Error with start call\n", __LINE__);
468 		return -1;
469 	}
470 
471 	/************** FORWARD ****************/
472 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
473 	struct rte_event ev = {
474 			.op = RTE_EVENT_OP_NEW,
475 			.queue_id = wrk_enq,
476 			.mbuf = arp,
477 	};
478 
479 	if (!arp) {
480 		printf("%d: gen of pkt failed\n", __LINE__);
481 		return -1;
482 	}
483 
484 	const uint32_t MAGIC_SEQN = 4711;
485 	*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
486 
487 	/* generate pkt and enqueue */
488 	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
489 	if (err != 1) {
490 		printf("%d: error failed to enqueue\n", __LINE__);
491 		return -1;
492 	}
493 
494 	/* Run a scheduler iteration, as directed packets may need re-ordering */
495 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
496 
497 	struct test_event_dev_stats stats;
498 	err = test_event_dev_stats_get(evdev, &stats);
499 	if (err) {
500 		printf("%d: error failed to get stats\n", __LINE__);
501 		return -1;
502 	}
503 
504 	if (stats.port_rx_pkts[rx_enq] != 1) {
505 		printf("%d: error stats incorrect for directed port\n",
506 				__LINE__);
507 		return -1;
508 	}
509 
510 	uint32_t deq_pkts;
511 	deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
512 	if (deq_pkts != 1) {
513 		printf("%d: error failed to deq\n", __LINE__);
514 		return -1;
515 	}
516 
517 	err = test_event_dev_stats_get(evdev, &stats);
518 	if (err || (stats.port_rx_pkts[wrk_enq] != 0 &&
519 			stats.port_rx_pkts[wrk_enq] != 1)) {
520 		printf("%d: error directed stats post-dequeue\n", __LINE__);
521 		return -1;
522 	}
523 
524 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
525 		printf("%d: error magic sequence number not dequeued\n",
526 				__LINE__);
527 		return -1;
528 	}
529 
530 	rte_pktmbuf_free(ev.mbuf);
531 	cleanup(t);
532 	return 0;
533 }
534 
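/* Loop one event through a directed queue 1000 times via FORWARD ops,
 * which only completes if forwarding returns the port's credit each time.
 */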
535 static int
536 test_directed_forward_credits(struct test *t)
537 {
538 	uint32_t i;
539 	int32_t err;
540 
541 	if (init(t, 1, 1) < 0 ||
542 			create_ports(t, 1) < 0 ||
543 			create_directed_qids(t, 1, t->port) < 0)
544 		return -1;
545 
546 	if (rte_event_dev_start(evdev) < 0) {
547 		printf("%d: Error with start call\n", __LINE__);
548 		return -1;
549 	}
550 
551 	struct rte_event ev = {
552 			.op = RTE_EVENT_OP_NEW,
553 			.queue_id = 0,
554 	};
555 
556 	for (i = 0; i < 1000; i++) {
557 		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
558 		if (err != 1) {
559 			printf("%d: error failed to enqueue\n", __LINE__);
560 			return -1;
561 		}
562 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
563 
564 		uint32_t deq_pkts;
565 		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
566 		if (deq_pkts != 1) {
567 			printf("%d: error failed to deq\n", __LINE__);
568 			return -1;
569 		}
570 
571 		/* re-write event to be a forward, and continue looping it */
572 		ev.op = RTE_EVENT_OP_FORWARD;
573 	}
574 
575 	cleanup(t);
576 	return 0;
577 }
578 
579 
580 static int
581 test_priority_directed(struct test *t)
582 {
583 	if (init(t, 1, 1) < 0 ||
584 			create_ports(t, 1) < 0 ||
585 			create_directed_qids(t, 1, t->port) < 0) {
586 		printf("%d: Error initializing device\n", __LINE__);
587 		return -1;
588 	}
589 
590 	if (rte_event_dev_start(evdev) < 0) {
591 		printf("%d: Error with start call\n", __LINE__);
592 		return -1;
593 	}
594 
595 	return run_prio_packet_test(t);
596 }
597 
598 static int
599 test_priority_atomic(struct test *t)
600 {
601 	if (init(t, 1, 1) < 0 ||
602 			create_ports(t, 1) < 0 ||
603 			create_atomic_qids(t, 1) < 0) {
604 		printf("%d: Error initializing device\n", __LINE__);
605 		return -1;
606 	}
607 
608 	/* map the QID */
609 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
610 		printf("%d: error mapping qid to port\n", __LINE__);
611 		return -1;
612 	}
613 	if (rte_event_dev_start(evdev) < 0) {
614 		printf("%d: Error with start call\n", __LINE__);
615 		return -1;
616 	}
617 
618 	return run_prio_packet_test(t);
619 }
620 
621 static int
622 test_priority_ordered(struct test *t)
623 {
624 	if (init(t, 1, 1) < 0 ||
625 			create_ports(t, 1) < 0 ||
626 			create_ordered_qids(t, 1) < 0) {
627 		printf("%d: Error initializing device\n", __LINE__);
628 		return -1;
629 	}
630 
631 	/* map the QID */
632 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
633 		printf("%d: error mapping qid to port\n", __LINE__);
634 		return -1;
635 	}
636 	if (rte_event_dev_start(evdev) < 0) {
637 		printf("%d: Error with start call\n", __LINE__);
638 		return -1;
639 	}
640 
641 	return run_prio_packet_test(t);
642 }
643 
644 static int
645 test_priority_unordered(struct test *t)
646 {
647 	if (init(t, 1, 1) < 0 ||
648 			create_ports(t, 1) < 0 ||
649 			create_unordered_qids(t, 1) < 0) {
650 		printf("%d: Error initializing device\n", __LINE__);
651 		return -1;
652 	}
653 
654 	/* map the QID */
655 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
656 		printf("%d: error mapping qid to port\n", __LINE__);
657 		return -1;
658 	}
659 	if (rte_event_dev_start(evdev) < 0) {
660 		printf("%d: Error with start call\n", __LINE__);
661 		return -1;
662 	}
663 
664 	return run_prio_packet_test(t);
665 }
666 
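/* Enqueue packets alternating between two atomic queues and verify that
 * each linked port dequeues its half of them.
 */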
667 static int
668 burst_packets(struct test *t)
669 {
670 	/************** CONFIG ****************/
671 	uint32_t i;
672 	int err;
673 	int ret;
674 
675 	/* Create instance with 2 ports and 2 queues */
676 	if (init(t, 2, 2) < 0 ||
677 			create_ports(t, 2) < 0 ||
678 			create_atomic_qids(t, 2) < 0) {
679 		printf("%d: Error initializing device\n", __LINE__);
680 		return -1;
681 	}
682 
683 	/* CQ mapping to QID */
684 	ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
685 	if (ret != 1) {
686 		printf("%d: error mapping lb qid0\n", __LINE__);
687 		return -1;
688 	}
689 	ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
690 	if (ret != 1) {
691 		printf("%d: error mapping lb qid1\n", __LINE__);
692 		return -1;
693 	}
694 
695 	if (rte_event_dev_start(evdev) < 0) {
696 		printf("%d: Error with start call\n", __LINE__);
697 		return -1;
698 	}
699 
700 	/************** FORWARD ****************/
701 	const uint32_t rx_port = 0;
702 	const uint32_t NUM_PKTS = 2;
703 
704 	for (i = 0; i < NUM_PKTS; i++) {
705 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
706 		if (!arp) {
707 			printf("%d: error generating pkt\n", __LINE__);
708 			return -1;
709 		}
710 
711 		struct rte_event ev = {
712 				.op = RTE_EVENT_OP_NEW,
713 				.queue_id = i % 2,
714 				.flow_id = i % 3,
715 				.mbuf = arp,
716 		};
717 		/* generate pkt and enqueue */
718 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
719 		if (err != 1) {
720 			printf("%d: Failed to enqueue\n", __LINE__);
721 			return -1;
722 		}
723 	}
724 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
725 
726 	/* Check that all NUM_PKTS arrived at the sched core */
727 	struct test_event_dev_stats stats;
728 
729 	err = test_event_dev_stats_get(evdev, &stats);
730 	if (err) {
731 		printf("%d: failed to get stats\n", __LINE__);
732 		return -1;
733 	}
734 	if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
735 		printf("%d: Sched core didn't receive all %d pkts\n",
736 				__LINE__, NUM_PKTS);
737 		rte_event_dev_dump(evdev, stdout);
738 		return -1;
739 	}
740 
741 	uint32_t deq_pkts;
742 	int p;
743 
744 	deq_pkts = 0;
745 	/******** DEQ QID 0 (port 0) *******/
746 	do {
747 		struct rte_event ev;
748 		p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
749 		deq_pkts += p;
		/* free only what was dequeued; on the final, empty iteration
		 * ev is uninitialized
		 */
		if (p)
750 			rte_pktmbuf_free(ev.mbuf);
751 	} while (p);
752 
753 	if (deq_pkts != NUM_PKTS/2) {
754 		printf("%d: Half of NUM_PKTS didn't arrive at port 0\n",
755 				__LINE__);
756 		return -1;
757 	}
758 
759 	/******** DEQ QID 1 (port 1) *******/
760 	deq_pkts = 0;
761 	do {
762 		struct rte_event ev;
763 		p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
764 		deq_pkts += p;
		if (p) /* as above, free only what was dequeued */
765 			rte_pktmbuf_free(ev.mbuf);
766 	} while (p);
767 	if (deq_pkts != NUM_PKTS/2) {
768 		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
769 				__LINE__);
770 		return -1;
771 	}
772 
773 	cleanup(t);
774 	return 0;
775 }
776 
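/* Enqueue a release op with no prior new event and verify the scheduler
 * neither counts it as traffic nor corrupts the inflight counts.
 */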
777 static int
778 abuse_inflights(struct test *t)
779 {
780 	const int rx_enq = 0;
781 	const int wrk_enq = 2;
782 	int err;
783 
784 	/* Create instance with 4 ports */
785 	if (init(t, 1, 4) < 0 ||
786 			create_ports(t, 4) < 0 ||
787 			create_atomic_qids(t, 1) < 0) {
788 		printf("%d: Error initializing device\n", __LINE__);
789 		return -1;
790 	}
791 
792 	/* CQ mapping to QID */
793 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
794 	if (err != 1) {
795 		printf("%d: error mapping lb qid\n", __LINE__);
796 		cleanup(t);
797 		return -1;
798 	}
799 
800 	if (rte_event_dev_start(evdev) < 0) {
801 		printf("%d: Error with start call\n", __LINE__);
802 		return -1;
803 	}
804 
805 	/* Enqueue op only */
806 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
807 	if (err != 1) {
808 		printf("%d: Failed to enqueue\n", __LINE__);
809 		return -1;
810 	}
811 
812 	/* schedule */
813 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
814 
815 	struct test_event_dev_stats stats;
816 
817 	err = test_event_dev_stats_get(evdev, &stats);
818 	if (err) {
819 		printf("%d: failed to get stats\n", __LINE__);
820 		return -1;
821 	}
822 
823 	if (stats.rx_pkts != 0 ||
824 			stats.tx_pkts != 0 ||
825 			stats.port_inflight[wrk_enq] != 0) {
826 		printf("%d: Sched core didn't handle pkt as expected\n",
827 				__LINE__);
828 		return -1;
829 	}
830 
831 	cleanup(t);
832 	return 0;
833 }
834 
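/* Exercise the xstats API: stat counts per mode, expected values after
 * enqueuing traffic, and that resettable stats return to zero.
 */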
835 static int
836 xstats_tests(struct test *t)
837 {
838 	const int wrk_enq = 2;
839 	int err;
840 
841 	/* Create instance with 4 ports */
842 	if (init(t, 1, 4) < 0 ||
843 			create_ports(t, 4) < 0 ||
844 			create_atomic_qids(t, 1) < 0) {
845 		printf("%d: Error initializing device\n", __LINE__);
846 		return -1;
847 	}
848 
849 	/* CQ mapping to QID */
850 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
851 	if (err != 1) {
852 		printf("%d: error mapping lb qid\n", __LINE__);
853 		cleanup(t);
854 		return -1;
855 	}
856 
857 	if (rte_event_dev_start(evdev) < 0) {
858 		printf("%d: Error with start call\n", __LINE__);
859 		return -1;
860 	}
861 
862 	const uint32_t XSTATS_MAX = 1024;
863 
864 	uint32_t i;
865 	uint64_t ids[XSTATS_MAX];
866 	uint64_t values[XSTATS_MAX];
867 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
868 
869 	for (i = 0; i < XSTATS_MAX; i++)
870 		ids[i] = i;
871 
872 	/* Device names / values */
873 	int ret = rte_event_dev_xstats_names_get(evdev,
874 					RTE_EVENT_DEV_XSTATS_DEVICE,
875 					0, xstats_names, ids, XSTATS_MAX);
876 	if (ret != 8) {
877 		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
878 		return -1;
879 	}
880 	ret = rte_event_dev_xstats_get(evdev,
881 					RTE_EVENT_DEV_XSTATS_DEVICE,
882 					0, ids, values, ret);
883 	if (ret != 8) {
884 		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
885 		return -1;
886 	}
887 
888 	/* Port names / values */
889 	ret = rte_event_dev_xstats_names_get(evdev,
890 					RTE_EVENT_DEV_XSTATS_PORT, 0,
891 					xstats_names, ids, XSTATS_MAX);
892 	if (ret != 21) {
893 		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
894 		return -1;
895 	}
896 	ret = rte_event_dev_xstats_get(evdev,
897 					RTE_EVENT_DEV_XSTATS_PORT, 0,
898 					ids, values, ret);
899 	if (ret != 21) {
900 		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
901 		return -1;
902 	}
903 
904 	/* Queue names / values */
905 	ret = rte_event_dev_xstats_names_get(evdev,
906 					RTE_EVENT_DEV_XSTATS_QUEUE,
907 					0, xstats_names, ids, XSTATS_MAX);
908 	if (ret != 16) {
909 		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
910 		return -1;
911 	}
912 
913 	/* NEGATIVE TEST: with an invalid queue id passed, -EINVAL is returned */
914 	ret = rte_event_dev_xstats_get(evdev,
915 					RTE_EVENT_DEV_XSTATS_QUEUE,
916 					1, ids, values, ret);
917 	if (ret != -EINVAL) {
918 		printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
919 		return -1;
920 	}
921 
		/* ret is now -EINVAL, so pass the array size as n instead */
922 	ret = rte_event_dev_xstats_get(evdev,
923 					RTE_EVENT_DEV_XSTATS_QUEUE,
924 					0, ids, values, XSTATS_MAX);
925 	if (ret != 16) {
926 		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
927 		return -1;
928 	}
929 
930 	/* enqueue packets to check values */
931 	for (i = 0; i < 3; i++) {
932 		struct rte_event ev;
933 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
934 		if (!arp) {
935 			printf("%d: gen of pkt failed\n", __LINE__);
936 			return -1;
937 		}
938 		ev.queue_id = t->qid[i];
939 		ev.op = RTE_EVENT_OP_NEW;
940 		ev.mbuf = arp;
941 		ev.flow_id = 7;
942 		*rte_event_pmd_selftest_seqn(arp) = i;
943 
944 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
945 		if (err != 1) {
946 			printf("%d: Failed to enqueue\n", __LINE__);
947 			return -1;
948 		}
949 	}
950 
951 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
952 
953 	/* Device names / values */
954 	int num_stats = rte_event_dev_xstats_names_get(evdev,
955 					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
956 					xstats_names, ids, XSTATS_MAX);
957 	if (num_stats < 0)
958 		goto fail;
959 	ret = rte_event_dev_xstats_get(evdev,
960 					RTE_EVENT_DEV_XSTATS_DEVICE,
961 					0, ids, values, num_stats);
962 	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
963 	for (i = 0; (signed int)i < ret; i++) {
964 		if (expected[i] != values[i]) {
965 			printf("%d Error xstat %d (id %" PRIu64
966 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
967 			       __LINE__, i, ids[i], xstats_names[i].name,
968 			       values[i], expected[i]);
969 			goto fail;
970 		}
971 	}
972 
973 	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
974 					0, NULL, 0);
975 
976 	/* ensure reset statistics are zeroed */
977 	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
978 	ret = rte_event_dev_xstats_get(evdev,
979 					RTE_EVENT_DEV_XSTATS_DEVICE,
980 					0, ids, values, num_stats);
981 	for (i = 0; (signed int)i < ret; i++) {
982 		if (expected_zero[i] != values[i]) {
983 			printf("%d Error, xstat %d (id %" PRIu64
984 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
985 			       __LINE__, i, ids[i], xstats_names[i].name,
986 			       values[i], expected_zero[i]);
987 			goto fail;
988 		}
989 	}
990 
991 	/* port reset checks */
992 	num_stats = rte_event_dev_xstats_names_get(evdev,
993 					RTE_EVENT_DEV_XSTATS_PORT, 0,
994 					xstats_names, ids, XSTATS_MAX);
995 	if (num_stats < 0)
996 		goto fail;
997 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
998 					0, ids, values, num_stats);
999 
1000 	static const uint64_t port_expected[] = {
1001 		3 /* rx */,
1002 		0 /* tx */,
1003 		0 /* drop */,
1004 		0 /* inflights */,
1005 		0 /* avg pkt cycles */,
1006 		29 /* credits */,
1007 		0 /* rx ring used */,
1008 		4096 /* rx ring free */,
1009 		0 /* cq ring used */,
1010 		32 /* cq ring free */,
1011 		0 /* dequeue calls */,
1012 		/* 10 dequeue burst buckets */
1013 		0, 0, 0, 0, 0,
1014 		0, 0, 0, 0, 0,
1015 	};
1016 	if (ret != (int)RTE_DIM(port_expected)) {
1017 		printf(
1018 			"%s %d: wrong number of port stats (%d), expected %zu\n",
1019 			__func__, __LINE__, ret, RTE_DIM(port_expected));
		goto fail; /* the value checks below index port_expected[] */
1020 	}
1021 
1022 	for (i = 0; (signed int)i < ret; i++) {
1023 		if (port_expected[i] != values[i]) {
1024 			printf(
1025 				"%s : %d: Error stat %s is %"PRIu64
1026 				", expected %"PRIu64"\n",
1027 				__func__, __LINE__, xstats_names[i].name,
1028 				values[i], port_expected[i]);
1029 			goto fail;
1030 		}
1031 	}
1032 
1033 	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1034 					0, NULL, 0);
1035 
1036 	/* ensure reset statistics are zeroed */
1037 	static const uint64_t port_expected_zero[] = {
1038 		0 /* rx */,
1039 		0 /* tx */,
1040 		0 /* drop */,
1041 		0 /* inflights */,
1042 		0 /* avg pkt cycles */,
1043 		29 /* credits */,
1044 		0 /* rx ring used */,
1045 		4096 /* rx ring free */,
1046 		0 /* cq ring used */,
1047 		32 /* cq ring free */,
1048 		0 /* dequeue calls */,
1049 		/* 10 dequeue burst buckets */
1050 		0, 0, 0, 0, 0,
1051 		0, 0, 0, 0, 0,
1052 	};
1053 	ret = rte_event_dev_xstats_get(evdev,
1054 					RTE_EVENT_DEV_XSTATS_PORT,
1055 					0, ids, values, num_stats);
1056 	for (i = 0; (signed int)i < ret; i++) {
1057 		if (port_expected_zero[i] != values[i]) {
1058 			printf("%d, Error, xstat %d (id %" PRIu64
1059 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1060 			       __LINE__, i, ids[i], xstats_names[i].name,
1061 			       values[i], port_expected_zero[i]);
1062 			goto fail;
1063 		}
1064 	}
1065 
1066 	/* QUEUE STATS TESTS */
1067 	num_stats = rte_event_dev_xstats_names_get(evdev,
1068 						RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1069 						xstats_names, ids, XSTATS_MAX);
1070 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1071 					0, ids, values, num_stats);
1072 	if (ret < 0) {
1073 		printf("xstats get returned %d\n", ret);
1074 		goto fail;
1075 	}
1076 	if ((unsigned int)ret > XSTATS_MAX)
1077 		printf("%s %d: more xstats available than space\n",
1078 				__func__, __LINE__);
1079 
1080 	static const uint64_t queue_expected[] = {
1081 		3 /* rx */,
1082 		3 /* tx */,
1083 		0 /* drop */,
1084 		3 /* inflights */,
1085 		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1086 		/* QID-to-Port: pinned_flows, packets */
1087 		0, 0,
1088 		0, 0,
1089 		1, 3,
1090 		0, 0,
1091 	};
1092 	for (i = 0; (signed int)i < ret; i++) {
1093 		if (queue_expected[i] != values[i]) {
1094 			printf("%d, Error, xstat %d (id %" PRIu64
1095 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1096 			       __LINE__, i, ids[i], xstats_names[i].name,
1097 			       values[i], queue_expected[i]);
1098 			goto fail;
1099 		}
1100 	}
1101 
1102 	/* Reset the queue stats here */
1103 	ret = rte_event_dev_xstats_reset(evdev,
1104 					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1105 					NULL,
1106 					0);
1107 
1108 	/* Verify that the resettable stats are reset, and others are not */
1109 	static const uint64_t queue_expected_zero[] = {
1110 		0 /* rx */,
1111 		0 /* tx */,
1112 		0 /* drop */,
1113 		3 /* inflight */,
1114 		0, 0, 0, 0, /* 4 iq used */
1115 		/* QID-to-Port: pinned_flows, packets */
1116 		0, 0,
1117 		0, 0,
1118 		1, 0,
1119 		0, 0,
1120 	};
1121 
1122 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1123 					ids, values, num_stats);
1124 	int fails = 0;
1125 	for (i = 0; (signed int)i < ret; i++) {
1126 		if (queue_expected_zero[i] != values[i]) {
1127 			printf("%d, Error, xstat %d (id %" PRIu64
1128 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1129 			       __LINE__, i, ids[i], xstats_names[i].name,
1130 			       values[i], queue_expected_zero[i]);
1131 			fails++;
1132 		}
1133 	}
1134 	if (fails) {
1135 		printf("%d : %d of values were not as expected above\n",
1136 				__LINE__, fails);
1137 		goto fail;
1138 	}
1139 
1140 	cleanup(t);
1141 	return 0;
1142 
1143 fail:
1144 	rte_event_dev_dump(0, stdout);
1145 	cleanup(t);
1146 	return -1;
1147 }
1148 
1149 
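/* Pass out-of-range port and queue ids to the xstats names calls and
 * expect zero stats back rather than a crash.
 */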
1150 static int
1151 xstats_id_abuse_tests(struct test *t)
1152 {
1153 	int err;
1154 	const uint32_t XSTATS_MAX = 1024;
1155 	const uint32_t link_port = 2;
1156 
1157 	uint64_t ids[XSTATS_MAX];
1158 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1159 
1160 	/* Create instance with 4 ports */
1161 	if (init(t, 1, 4) < 0 ||
1162 			create_ports(t, 4) < 0 ||
1163 			create_atomic_qids(t, 1) < 0) {
1164 		printf("%d: Error initializing device\n", __LINE__);
1165 		goto fail;
1166 	}
1167 
1168 	err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1169 	if (err != 1) {
1170 		printf("%d: error mapping lb qid\n", __LINE__);
1171 		goto fail;
1172 	}
1173 
1174 	if (rte_event_dev_start(evdev) < 0) {
1175 		printf("%d: Error with start call\n", __LINE__);
1176 		goto fail;
1177 	}
1178 
1179 	/* no test for device, as it ignores the port/q number */
1180 	int num_stats = rte_event_dev_xstats_names_get(evdev,
1181 					RTE_EVENT_DEV_XSTATS_PORT,
1182 					UINT8_MAX-1, xstats_names, ids,
1183 					XSTATS_MAX);
1184 	if (num_stats != 0) {
1185 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1186 				0, num_stats);
1187 		goto fail;
1188 	}
1189 
1190 	num_stats = rte_event_dev_xstats_names_get(evdev,
1191 					RTE_EVENT_DEV_XSTATS_QUEUE,
1192 					UINT8_MAX-1, xstats_names, ids,
1193 					XSTATS_MAX);
1194 	if (num_stats != 0) {
1195 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1196 				0, num_stats);
1197 		goto fail;
1198 	}
1199 
1200 	cleanup(t);
1201 	return 0;
1202 fail:
1203 	cleanup(t);
1204 	return -1;
1205 }
1206 
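/* Repeatedly reconfigure and restart a queue/port pair, verifying a
 * packet still flows each iteration (i.e. port credits survive the
 * reconfiguration).
 */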
1207 static int
1208 port_reconfig_credits(struct test *t)
1209 {
1210 	if (init(t, 1, 1) < 0) {
1211 		printf("%d: Error initializing device\n", __LINE__);
1212 		return -1;
1213 	}
1214 
1215 	uint32_t i;
1216 	const uint32_t NUM_ITERS = 32;
1217 	for (i = 0; i < NUM_ITERS; i++) {
1218 		const struct rte_event_queue_conf conf = {
1219 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1220 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1221 			.nb_atomic_flows = 1024,
1222 			.nb_atomic_order_sequences = 1024,
1223 		};
1224 		if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1225 			printf("%d: error creating qid\n", __LINE__);
1226 			return -1;
1227 		}
1228 		t->qid[0] = 0;
1229 
1230 		static const struct rte_event_port_conf port_conf = {
1231 				.new_event_threshold = 128,
1232 				.dequeue_depth = 32,
1233 				.enqueue_depth = 64,
1234 		};
1235 		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1236 			printf("%d Error setting up port\n", __LINE__);
1237 			return -1;
1238 		}
1239 
1240 		int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1241 		if (links != 1) {
1242 			printf("%d: error mapping lb qid\n", __LINE__);
1243 			goto fail;
1244 		}
1245 
1246 		if (rte_event_dev_start(evdev) < 0) {
1247 			printf("%d: Error with start call\n", __LINE__);
1248 			goto fail;
1249 		}
1250 
1251 		const uint32_t NPKTS = 1;
1252 		uint32_t j;
1253 		for (j = 0; j < NPKTS; j++) {
1254 			struct rte_event ev;
1255 			struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1256 			if (!arp) {
1257 				printf("%d: gen of pkt failed\n", __LINE__);
1258 				goto fail;
1259 			}
1260 			ev.queue_id = t->qid[0];
1261 			ev.op = RTE_EVENT_OP_NEW;
1262 			ev.mbuf = arp;
1263 			int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1264 			if (err != 1) {
1265 				printf("%d: Failed to enqueue\n", __LINE__);
1266 				rte_event_dev_dump(0, stdout);
1267 				goto fail;
1268 			}
1269 		}
1270 
1271 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
1272 
1273 		struct rte_event ev[NPKTS];
1274 		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1275 							NPKTS, 0);
1276 		if (deq != 1)
1277 			printf("%d: error, no packet dequeued\n", __LINE__);
1278 
1279 		/* let cleanup below stop the device on last iter */
1280 		if (i != NUM_ITERS-1)
1281 			rte_event_dev_stop(evdev);
1282 	}
1283 
1284 	cleanup(t);
1285 	return 0;
1286 fail:
1287 	cleanup(t);
1288 	return -1;
1289 }
1290 
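/* Move a port's link from a load-balanced queue to a single-link queue,
 * exercising link/unlink bookkeeping across the two queue types.
 */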
1291 static int
1292 port_single_lb_reconfig(struct test *t)
1293 {
1294 	if (init(t, 2, 2) < 0) {
1295 		printf("%d: Error initializing device\n", __LINE__);
1296 		goto fail;
1297 	}
1298 
1299 	static const struct rte_event_queue_conf conf_lb_atomic = {
1300 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1301 		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1302 		.nb_atomic_flows = 1024,
1303 		.nb_atomic_order_sequences = 1024,
1304 	};
1305 	if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1306 		printf("%d: error creating qid\n", __LINE__);
1307 		goto fail;
1308 	}
1309 
1310 	static const struct rte_event_queue_conf conf_single_link = {
1311 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1312 		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1313 	};
1314 	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1315 		printf("%d: error creating qid\n", __LINE__);
1316 		goto fail;
1317 	}
1318 
1319 	struct rte_event_port_conf port_conf = {
1320 		.new_event_threshold = 128,
1321 		.dequeue_depth = 32,
1322 		.enqueue_depth = 64,
1323 	};
1324 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1325 		printf("%d Error setting up port\n", __LINE__);
1326 		goto fail;
1327 	}
1328 	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1329 		printf("%d Error setting up port\n", __LINE__);
1330 		goto fail;
1331 	}
1332 
1333 	/* link port to lb queue */
1334 	uint8_t queue_id = 0;
1335 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1336 		printf("%d: error creating link for qid\n", __LINE__);
1337 		goto fail;
1338 	}
1339 
1340 	int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1341 	if (ret != 1) {
1342 		printf("%d: Error unlinking lb port\n", __LINE__);
1343 		goto fail;
1344 	}
1345 
1346 	queue_id = 1;
1347 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1348 		printf("%d: error creating link for qid\n", __LINE__);
1349 		goto fail;
1350 	}
1351 
1352 	queue_id = 0;
1353 	int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1354 	if (err != 1) {
1355 		printf("%d: error mapping lb qid\n", __LINE__);
1356 		goto fail;
1357 	}
1358 
1359 	if (rte_event_dev_start(evdev) < 0) {
1360 		printf("%d: Error with start call\n", __LINE__);
1361 		goto fail;
1362 	}
1363 
1364 	cleanup(t);
1365 	return 0;
1366 fail:
1367 	cleanup(t);
1368 	return -1;
1369 }
1370 
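/* Hammer the xstats calls with every mode and id 0..UINT8_MAX-1; the
 * calls may fail, but must not crash.
 */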
1371 static int
1372 xstats_brute_force(struct test *t)
1373 {
1374 	uint32_t i;
1375 	const uint32_t XSTATS_MAX = 1024;
1376 	uint64_t ids[XSTATS_MAX];
1377 	uint64_t values[XSTATS_MAX];
1378 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1379 
1380 
1381 	/* Create instance with 4 ports */
1382 	if (init(t, 1, 4) < 0 ||
1383 			create_ports(t, 4) < 0 ||
1384 			create_atomic_qids(t, 1) < 0) {
1385 		printf("%d: Error initializing device\n", __LINE__);
1386 		return -1;
1387 	}
1388 
1389 	int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1390 	if (err != 1) {
1391 		printf("%d: error mapping lb qid\n", __LINE__);
1392 		goto fail;
1393 	}
1394 
1395 	if (rte_event_dev_start(evdev) < 0) {
1396 		printf("%d: Error with start call\n", __LINE__);
1397 		goto fail;
1398 	}
1399 
1400 	for (i = 0; i < XSTATS_MAX; i++)
1401 		ids[i] = i;
1402 
1403 	for (i = 0; i < 3; i++) {
1404 		uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1405 		uint32_t j;
1406 		for (j = 0; j < UINT8_MAX; j++) {
1407 			rte_event_dev_xstats_names_get(evdev, mode,
1408 				j, xstats_names, ids, XSTATS_MAX);
1409 
1410 			rte_event_dev_xstats_get(evdev, mode, j, ids,
1411 						 values, XSTATS_MAX);
1412 		}
1413 	}
1414 
1415 	cleanup(t);
1416 	return 0;
1417 fail:
1418 	cleanup(t);
1419 	return -1;
1420 }
1421 
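/* Check each device, port and queue xstat by name: its id, its value
 * after NPKTS packets, and its value after a per-id reset.
 */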
1422 static int
1423 xstats_id_reset_tests(struct test *t)
1424 {
1425 	const int wrk_enq = 2;
1426 	int err;
1427 
1428 	/* Create instance with 4 ports */
1429 	if (init(t, 1, 4) < 0 ||
1430 			create_ports(t, 4) < 0 ||
1431 			create_atomic_qids(t, 1) < 0) {
1432 		printf("%d: Error initializing device\n", __LINE__);
1433 		return -1;
1434 	}
1435 
1436 	/* CQ mapping to QID */
1437 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1438 	if (err != 1) {
1439 		printf("%d: error mapping lb qid\n", __LINE__);
1440 		goto fail;
1441 	}
1442 
1443 	if (rte_event_dev_start(evdev) < 0) {
1444 		printf("%d: Error with start call\n", __LINE__);
1445 		goto fail;
1446 	}
1447 
1448 #define XSTATS_MAX 1024
1449 	int ret;
1450 	uint32_t i;
1451 	uint64_t ids[XSTATS_MAX];
1452 	uint64_t values[XSTATS_MAX];
1453 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1454 
1455 	for (i = 0; i < XSTATS_MAX; i++)
1456 		ids[i] = i;
1457 
1458 #define NUM_DEV_STATS 8
1459 	/* Device names / values */
1460 	int num_stats = rte_event_dev_xstats_names_get(evdev,
1461 					RTE_EVENT_DEV_XSTATS_DEVICE,
1462 					0, xstats_names, ids, XSTATS_MAX);
1463 	if (num_stats != NUM_DEV_STATS) {
1464 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1465 				NUM_DEV_STATS, num_stats);
1466 		goto fail;
1467 	}
1468 	ret = rte_event_dev_xstats_get(evdev,
1469 					RTE_EVENT_DEV_XSTATS_DEVICE,
1470 					0, ids, values, num_stats);
1471 	if (ret != NUM_DEV_STATS) {
1472 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1473 				NUM_DEV_STATS, ret);
1474 		goto fail;
1475 	}
1476 
1477 #define NPKTS 7
1478 	for (i = 0; i < NPKTS; i++) {
1479 		struct rte_event ev;
1480 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1481 		if (!arp) {
1482 			printf("%d: gen of pkt failed\n", __LINE__);
1483 			goto fail;
1484 		}
1485 		ev.queue_id = t->qid[i];
1486 		ev.flow_id = 0;
1487 		ev.op = RTE_EVENT_OP_NEW;
1488 		ev.mbuf = arp;
1489 		*rte_event_pmd_selftest_seqn(arp) = i;
1490 
1491 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1492 		if (err != 1) {
1493 			printf("%d: Failed to enqueue\n", __LINE__);
1494 			goto fail;
1495 		}
1496 	}
1497 
1498 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1499 
1500 	static const char * const dev_names[] = {
1501 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1502 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1503 		"dev_sched_last_iter_bitmask",
1504 		"dev_sched_progress_last_iter"
1505 	};
1506 	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
1507 	for (i = 0; (int)i < ret; i++) {
1508 		uint64_t id;
1509 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1510 								dev_names[i],
1511 								&id);
1512 		if (id != i) {
1513 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1514 			       "\n",
1515 			       __LINE__, dev_names[i], i, id);
1516 			goto fail;
1517 		}
1518 		if (val != dev_expected[i]) {
1519 			printf("%d: %s value incorrect, expected %"
1520 				PRIu64" got %"PRIu64"\n", __LINE__,
1521 				dev_names[i], dev_expected[i], val);
1522 			goto fail;
1523 		}
1524 		/* reset to zero */
1525 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1526 						RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1527 						&id,
1528 						1);
1529 		if (reset_ret) {
1530 			printf("%d: failed to reset successfully\n", __LINE__);
1531 			goto fail;
1532 		}
1533 		dev_expected[i] = 0;
1534 		/* check value again */
1535 		val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1536 		if (val != dev_expected[i]) {
1537 			printf("%d: %s value incorrect, expected %"PRIu64
1538 				" got %"PRIu64"\n", __LINE__, dev_names[i],
1539 				dev_expected[i], val);
1540 			goto fail;
1541 		}
1542 	}
1543 
1544 /* 50 is the port stat offset from the start of the device's whole xstats.
1545  * This WILL break every time we add a statistic to a port
1546  * or the device, but there is no other way to test.
1547  */
1548 #define PORT_OFF 50
1549 /* num stats for the tested port. CQ size adds more stats to a port */
1550 #define NUM_PORT_STATS 21
1551 /* the port to test. */
1552 #define PORT 2
1553 	num_stats = rte_event_dev_xstats_names_get(evdev,
1554 					RTE_EVENT_DEV_XSTATS_PORT, PORT,
1555 					xstats_names, ids, XSTATS_MAX);
1556 	if (num_stats != NUM_PORT_STATS) {
1557 		printf("%d: expected %d stats, got return %d\n",
1558 			__LINE__, NUM_PORT_STATS, num_stats);
1559 		goto fail;
1560 	}
1561 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1562 					ids, values, num_stats);
1563 
1564 	if (ret != NUM_PORT_STATS) {
1565 		printf("%d: expected %d stats, got return %d\n",
1566 				__LINE__, NUM_PORT_STATS, ret);
1567 		goto fail;
1568 	}
1569 	static const char * const port_names[] = {
1570 		"port_2_rx",
1571 		"port_2_tx",
1572 		"port_2_drop",
1573 		"port_2_inflight",
1574 		"port_2_avg_pkt_cycles",
1575 		"port_2_credits",
1576 		"port_2_rx_ring_used",
1577 		"port_2_rx_ring_free",
1578 		"port_2_cq_ring_used",
1579 		"port_2_cq_ring_free",
1580 		"port_2_dequeue_calls",
1581 		"port_2_dequeues_returning_0",
1582 		"port_2_dequeues_returning_1-4",
1583 		"port_2_dequeues_returning_5-8",
1584 		"port_2_dequeues_returning_9-12",
1585 		"port_2_dequeues_returning_13-16",
1586 		"port_2_dequeues_returning_17-20",
1587 		"port_2_dequeues_returning_21-24",
1588 		"port_2_dequeues_returning_25-28",
1589 		"port_2_dequeues_returning_29-32",
1590 		"port_2_dequeues_returning_33-36",
1591 	};
1592 	uint64_t port_expected[] = {
1593 		0, /* rx */
1594 		NPKTS, /* tx */
1595 		0, /* drop */
1596 		NPKTS, /* inflight */
1597 		0, /* avg pkt cycles */
1598 		0, /* credits */
1599 		0, /* rx ring used */
1600 		4096, /* rx ring free */
1601 		NPKTS,  /* cq ring used */
1602 		25, /* cq ring free */
1603 		0, /* dequeue zero calls */
1604 		0, 0, 0, 0, 0, /* 10 dequeue buckets */
1605 		0, 0, 0, 0, 0,
1606 	};
1607 	uint64_t port_expected_zero[] = {
1608 		0, /* rx */
1609 		0, /* tx */
1610 		0, /* drop */
1611 		NPKTS, /* inflight */
1612 		0, /* avg pkt cycles */
1613 		0, /* credits */
1614 		0, /* rx ring used */
1615 		4096, /* rx ring free */
1616 		NPKTS,  /* cq ring used */
1617 		25, /* cq ring free */
1618 		0, /* dequeue zero calls */
1619 		0, 0, 0, 0, 0, /* 10 dequeue buckets */
1620 		0, 0, 0, 0, 0,
1621 	};
1622 	if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1623 			RTE_DIM(port_names) != NUM_PORT_STATS) {
1624 		printf("%d: port array of wrong size\n", __LINE__);
1625 		goto fail;
1626 	}
1627 
1628 	int failed = 0;
1629 	for (i = 0; (int)i < ret; i++) {
1630 		uint64_t id;
1631 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1632 								port_names[i],
1633 								&id);
1634 		if (id != i + PORT_OFF) {
1635 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1636 			       "\n",
1637 			       __LINE__, port_names[i], i + PORT_OFF, id);
1638 			failed = 1;
1639 		}
1640 		if (val != port_expected[i]) {
1641 			printf("%d: %s value incorrect, expected %" PRIu64
1642 			       " got %" PRIu64 "\n",
1643 			       __LINE__, port_names[i], port_expected[i], val);
1644 			failed = 1;
1645 		}
1646 		/* reset to zero */
1647 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1648 						RTE_EVENT_DEV_XSTATS_PORT, PORT,
1649 						&id,
1650 						1);
1651 		if (reset_ret) {
1652 			printf("%d: failed to reset successfully\n", __LINE__);
1653 			failed = 1;
1654 		}
1655 		/* check value again */
1656 		val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1657 		if (val != port_expected_zero[i]) {
1658 			printf("%d: %s value incorrect, expected %"PRIu64
1659 				" got %"PRIu64"\n", __LINE__, port_names[i],
1660 				port_expected_zero[i], val);
1661 			failed = 1;
1662 		}
1663 	}
1664 	if (failed)
1665 		goto fail;
1666 
1667 /* num queue stats */
1668 #define NUM_Q_STATS 16
1669 /* queue offset from start of the device's whole xstats.
1670  * This will break every time we add a statistic to a device/port/queue
1671  */
1672 #define QUEUE_OFF 92
1673 	const uint32_t queue = 0;
1674 	num_stats = rte_event_dev_xstats_names_get(evdev,
1675 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1676 					xstats_names, ids, XSTATS_MAX);
1677 	if (num_stats != NUM_Q_STATS) {
1678 		printf("%d: expected %d stats, got return %d\n",
1679 			__LINE__, NUM_Q_STATS, num_stats);
1680 		goto fail;
1681 	}
1682 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1683 					queue, ids, values, num_stats);
1684 	if (ret != NUM_Q_STATS) {
1685 		printf("%d: expected %d stats, got return %d\n",
				__LINE__, NUM_Q_STATS, ret);
1686 		goto fail;
1687 	}
1688 	static const char * const queue_names[] = {
1689 		"qid_0_rx",
1690 		"qid_0_tx",
1691 		"qid_0_drop",
1692 		"qid_0_inflight",
1693 		"qid_0_iq_0_used",
1694 		"qid_0_iq_1_used",
1695 		"qid_0_iq_2_used",
1696 		"qid_0_iq_3_used",
1697 		"qid_0_port_0_pinned_flows",
1698 		"qid_0_port_0_packets",
1699 		"qid_0_port_1_pinned_flows",
1700 		"qid_0_port_1_packets",
1701 		"qid_0_port_2_pinned_flows",
1702 		"qid_0_port_2_packets",
1703 		"qid_0_port_3_pinned_flows",
1704 		"qid_0_port_3_packets",
1705 	};
1706 	uint64_t queue_expected[] = {
1707 		7, /* rx */
1708 		7, /* tx */
1709 		0, /* drop */
1710 		7, /* inflight */
1711 		0, /* iq 0 used */
1712 		0, /* iq 1 used */
1713 		0, /* iq 2 used */
1714 		0, /* iq 3 used */
1715 		/* QID-to-Port: pinned_flows, packets */
1716 		0, 0,
1717 		0, 0,
1718 		1, 7,
1719 		0, 0,
1720 	};
1721 	uint64_t queue_expected_zero[] = {
1722 		0, /* rx */
1723 		0, /* tx */
1724 		0, /* drop */
1725 		7, /* inflight */
1726 		0, /* iq 0 used */
1727 		0, /* iq 1 used */
1728 		0, /* iq 2 used */
1729 		0, /* iq 3 used */
1730 		/* QID-to-Port: pinned_flows, packets */
1731 		0, 0,
1732 		0, 0,
1733 		1, 0,
1734 		0, 0,
1735 	};
1736 	if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1737 			RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1738 			RTE_DIM(queue_names) != NUM_Q_STATS) {
1739 		printf("%d : queue array of wrong size\n", __LINE__);
1740 		goto fail;
1741 	}
1742 
1743 	failed = 0;
1744 	for (i = 0; (int)i < ret; i++) {
1745 		uint64_t id;
1746 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1747 								queue_names[i],
1748 								&id);
1749 		if (id != i + QUEUE_OFF) {
1750 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1751 			       "\n",
1752 			       __LINE__, queue_names[i], i + QUEUE_OFF, id);
1753 			failed = 1;
1754 		}
1755 		if (val != queue_expected[i]) {
1756 			printf("%d: %s value incorrect, expected %"PRIu64
1757 				" got %"PRIu64"\n", __LINE__,
1758 				queue_names[i], queue_expected[i], val);
1759 			failed = 1;
1760 		}
1761 		/* reset to zero */
1762 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1763 						RTE_EVENT_DEV_XSTATS_QUEUE,
1764 						queue, &id, 1);
1765 		if (reset_ret) {
1766 			printf("%d: failed to reset successfully\n", __LINE__);
1767 			failed = 1;
1768 		}
1769 		/* check value again */
1770 		val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1771 							0);
1772 		if (val != queue_expected_zero[i]) {
1773 			printf("%d: %s value incorrect, expected %"PRIu64
1774 				" got %"PRIu64"\n", __LINE__, queue_names[i],
1775 				queue_expected_zero[i], val);
1776 			failed = 1;
1777 		}
1778 	}
1779 
1780 	if (failed)
1781 		goto fail;
1782 
1783 	cleanup(t);
1784 	return 0;
1785 fail:
1786 	cleanup(t);
1787 	return -1;
1788 }
1789 
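/* Set up the same ordered queue twice before starting the device, to
 * exercise queue reconfiguration.
 */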
1790 static int
1791 ordered_reconfigure(struct test *t)
1792 {
1793 	if (init(t, 1, 1) < 0 ||
1794 			create_ports(t, 1) < 0) {
1795 		printf("%d: Error initializing device\n", __LINE__);
1796 		return -1;
1797 	}
1798 
1799 	const struct rte_event_queue_conf conf = {
1800 			.schedule_type = RTE_SCHED_TYPE_ORDERED,
1801 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1802 			.nb_atomic_flows = 1024,
1803 			.nb_atomic_order_sequences = 1024,
1804 	};
1805 
1806 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1807 		printf("%d: error creating qid\n", __LINE__);
1808 		goto failed;
1809 	}
1810 
1811 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1812 		printf("%d: error creating qid, for 2nd time\n", __LINE__);
1813 		goto failed;
1814 	}
1815 
1816 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1817 	if (rte_event_dev_start(evdev) < 0) {
1818 		printf("%d: Error with start call\n", __LINE__);
1819 		return -1;
1820 	}
1821 
1822 	cleanup(t);
1823 	return 0;
1824 failed:
1825 	cleanup(t);
1826 	return -1;
1827 }
1828 
1829 static int
1830 qid_priorities(struct test *t)
1831 {
1832 	/* Test works by having a CQ with enough empty space for all packets,
1833 	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1834 	 * priority of the QID, not the ingress order, to pass the test
1835 	 */
1836 	unsigned int i;
1837 	/* Create instance with 1 ports, and 3 qids */
1838 	if (init(t, 3, 1) < 0 ||
1839 			create_ports(t, 1) < 0) {
1840 		printf("%d: Error initializing device\n", __LINE__);
1841 		return -1;
1842 	}
1843 
1844 	for (i = 0; i < 3; i++) {
1845 		/* Create QID */
1846 		const struct rte_event_queue_conf conf = {
1847 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1848 			/* increase priority (0 == highest), as we go */
1849 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1850 			.nb_atomic_flows = 1024,
1851 			.nb_atomic_order_sequences = 1024,
1852 		};
1853 
1854 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1855 			printf("%d: error creating qid %d\n", __LINE__, i);
1856 			return -1;
1857 		}
1858 		t->qid[i] = i;
1859 	}
1860 	t->nb_qids = i;
1861 	/* map all QIDs to port */
1862 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1863 
1864 	if (rte_event_dev_start(evdev) < 0) {
1865 		printf("%d: Error with start call\n", __LINE__);
1866 		return -1;
1867 	}
1868 
1869 	/* enqueue 3 packets, setting seqn and QID to check priority */
1870 	for (i = 0; i < 3; i++) {
1871 		struct rte_event ev;
1872 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1873 		if (!arp) {
1874 			printf("%d: gen of pkt failed\n", __LINE__);
1875 			return -1;
1876 		}
1877 		ev.queue_id = t->qid[i];
1878 		ev.op = RTE_EVENT_OP_NEW;
1879 		ev.mbuf = arp;
1880 		*rte_event_pmd_selftest_seqn(arp) = i;
1881 
1882 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1883 		if (err != 1) {
1884 			printf("%d: Failed to enqueue\n", __LINE__);
1885 			return -1;
1886 		}
1887 	}
1888 
1889 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1890 
1891 	/* dequeue packets, verify priority was upheld */
1892 	struct rte_event ev[32];
1893 	uint32_t deq_pkts =
1894 		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1895 	if (deq_pkts != 3) {
1896 		printf("%d: failed to deq packets\n", __LINE__);
1897 		rte_event_dev_dump(evdev, stdout);
1898 		return -1;
1899 	}
1900 	for (i = 0; i < 3; i++) {
1901 		if (*rte_event_pmd_selftest_seqn(ev[i].mbuf) != 2-i) {
1902 			printf(
1903 				"%d: qid priority test: seqn %d incorrectly prioritized\n",
1904 					__LINE__, i);
			return -1; /* fail the test, not just print */
1905 		}
1906 	}
1907 
1908 	cleanup(t);
1909 	return 0;
1910 }
1911 
1912 static int
1913 unlink_in_progress(struct test *t)
1914 {
1915 	/* Test the unlink API: while an unlink request has not yet been
1916 	 * seen by the scheduler thread,
1917 	 * rte_event_port_unlinks_in_progress() reports the outstanding count.
1918 	 */
1919 	unsigned int i;
1920 	/* Create instance with 1 ports, and 3 qids */
1921 	if (init(t, 3, 1) < 0 ||
1922 			create_ports(t, 1) < 0) {
1923 		printf("%d: Error initializing device\n", __LINE__);
1924 		return -1;
1925 	}
1926 
1927 	for (i = 0; i < 3; i++) {
1928 		/* Create QID */
1929 		const struct rte_event_queue_conf conf = {
1930 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1931 			/* increase priority (0 == highest), as we go */
1932 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1933 			.nb_atomic_flows = 1024,
1934 			.nb_atomic_order_sequences = 1024,
1935 		};
1936 
1937 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1938 			printf("%d: error creating qid %d\n", __LINE__, i);
1939 			return -1;
1940 		}
1941 		t->qid[i] = i;
1942 	}
1943 	t->nb_qids = i;
1944 	/* map all QIDs to port */
1945 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1946 
1947 	if (rte_event_dev_start(evdev) < 0) {
1948 		printf("%d: Error with start call\n", __LINE__);
1949 		return -1;
1950 	}
1951 
1952 	/* unlink all ports to have outstanding unlink requests */
1953 	int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1954 	if (ret < 0) {
1955 		printf("%d: Failed to unlink queues\n", __LINE__);
1956 		return -1;
1957 	}
1958 
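	/* Note: in this PMD, unlink requests only take effect once the
	 * scheduler service runs, so all three unlinks issued above should
	 * still be reported as outstanding at this point.
	 */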
1959 	/* get active unlinks here, expect 3 */
1960 	int unlinks_in_progress =
1961 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1962 	if (unlinks_in_progress != 3) {
1963 		printf("%d: Expected num unlinks in progress == 3, got %d\n",
1964 				__LINE__, unlinks_in_progress);
1965 		return -1;
1966 	}
1967 
1968 	/* run scheduler service on this thread to ack the unlinks */
1969 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1970 
1971 	/* active unlinks expected to be 0 now the scheduler thread has acked */
1972 	unlinks_in_progress =
1973 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1974 	if (unlinks_in_progress != 0) {
1975 		printf("%d: Expected num unlinks in progress == 0, got %d\n",
1976 				__LINE__, unlinks_in_progress);
		return -1;
1977 	}
1978 
1979 	cleanup(t);
1980 	return 0;
1981 }
1982 
1983 static int
1984 load_balancing(struct test *t)
1985 {
1986 	const int rx_enq = 0;
1987 	int err;
1988 	uint32_t i;
1989 
1990 	if (init(t, 1, 4) < 0 ||
1991 			create_ports(t, 4) < 0 ||
1992 			create_atomic_qids(t, 1) < 0) {
1993 		printf("%d: Error initializing device\n", __LINE__);
1994 		return -1;
1995 	}
1996 
1997 	for (i = 0; i < 3; i++) {
1998 		/* map port 1 - 3 inclusive */
1999 		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
2000 				NULL, 1) != 1) {
2001 			printf("%d: error mapping qid to port %d\n",
2002 					__LINE__, i + 1);
2003 			return -1;
2004 		}
2005 	}
2006 
2007 	if (rte_event_dev_start(evdev) < 0) {
2008 		printf("%d: Error with start call\n", __LINE__);
2009 		return -1;
2010 	}
2011 
2012 	/************** FORWARD ****************/
2013 	/*
2014 	 * Create a set of flows that test the load-balancing operation of the
2015 	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2016 	 * with a new flow, which should be sent to the 3rd mapped CQ
2017 	 */
2018 	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
2019 
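	/* Flow counts in the array above: flow 0 x4, flow 1 x2, flow 2 x3.
	 * Each flow should stick to one CQ, giving the per-port inflight
	 * counts verified below (4, 2 and 3 on ports 1, 2 and 3).
	 */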
2020 	for (i = 0; i < RTE_DIM(flows); i++) {
2021 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2022 		if (!arp) {
2023 			printf("%d: gen of pkt failed\n", __LINE__);
2024 			return -1;
2025 		}
2026 
2027 		struct rte_event ev = {
2028 				.op = RTE_EVENT_OP_NEW,
2029 				.queue_id = t->qid[0],
2030 				.flow_id = flows[i],
2031 				.mbuf = arp,
2032 		};
2033 		/* generate pkt and enqueue */
2034 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2035 		if (err != 1) {
2036 			printf("%d: Failed to enqueue\n", __LINE__);
2037 			return -1;
2038 		}
2039 	}
2040 
2041 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2042 
2043 	struct test_event_dev_stats stats;
2044 	err = test_event_dev_stats_get(evdev, &stats);
2045 	if (err) {
2046 		printf("%d: failed to get stats\n", __LINE__);
2047 		return -1;
2048 	}
2049 
2050 	if (stats.port_inflight[1] != 4) {
2051 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2052 				__func__);
2053 		return -1;
2054 	}
2055 	if (stats.port_inflight[2] != 2) {
2056 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2057 				__func__);
2058 		return -1;
2059 	}
2060 	if (stats.port_inflight[3] != 3) {
2061 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2062 				__func__);
2063 		return -1;
2064 	}
2065 
2066 	cleanup(t);
2067 	return 0;
2068 }
2069 
2070 static int
2071 load_balancing_history(struct test *t)
2072 {
2073 	struct test_event_dev_stats stats = {0};
2074 	const int rx_enq = 0;
2075 	int err;
2076 	uint32_t i;
2077 
2078 	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2079 	if (init(t, 1, 4) < 0 ||
2080 			create_ports(t, 4) < 0 ||
2081 			create_atomic_qids(t, 1) < 0)
2082 		return -1;
2083 
2084 	/* CQ mapping to QID */
2085 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2086 		printf("%d: error mapping port 1 qid\n", __LINE__);
2087 		return -1;
2088 	}
2089 	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2090 		printf("%d: error mapping port 2 qid\n", __LINE__);
2091 		return -1;
2092 	}
2093 	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2094 		printf("%d: error mapping port 3 qid\n", __LINE__);
2095 		return -1;
2096 	}
2097 	if (rte_event_dev_start(evdev) < 0) {
2098 		printf("%d: Error with start call\n", __LINE__);
2099 		return -1;
2100 	}
2101 
2102 	/*
2103 	 * Create a set of flows that test the load-balancing operation of the
2104 	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2105 	 * the packet from CQ 0, send in a new set of flows. Ensure that:
2106 	 *  1. The new flow 3 gets into the empty CQ0
2107 	 *  2. packets for the existing flow get added into CQ1
2108 	 *  3. The next flow 0 pkt now lands on CQ2, since CQ0 and CQ1 hold
2109 	 *     more outstanding pkts
2110 	 *
2111 	 *  This test makes sure that when a flow ends (i.e. all packets
2112 	 *  have been completed for that flow), the flow can be moved
2113 	 *  to a different CQ when new packets come in for that flow.
2114 	 */
2115 	static uint32_t flows1[] = {0, 1, 1, 2};
2116 
2117 	for (i = 0; i < RTE_DIM(flows1); i++) {
2118 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2119 		struct rte_event ev = {
2120 				.flow_id = flows1[i],
2121 				.op = RTE_EVENT_OP_NEW,
2122 				.queue_id = t->qid[0],
2123 				.event_type = RTE_EVENT_TYPE_CPU,
2124 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2125 				.mbuf = arp
2126 		};
2127 
2128 		if (!arp) {
2129 			printf("%d: gen of pkt failed\n", __LINE__);
2130 			return -1;
2131 		}
2132 		arp->hash.rss = flows1[i];
2133 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2134 		if (err != 1) {
2135 			printf("%d: Failed to enqueue\n", __LINE__);
2136 			return -1;
2137 		}
2138 	}
2139 
2140 	/* call the scheduler */
2141 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2142 
2143 	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
2144 	struct rte_event ev;
2145 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2146 		printf("%d: failed to dequeue\n", __LINE__);
2147 		return -1;
2148 	}
2149 	if (ev.mbuf->hash.rss != flows1[0]) {
2150 		printf("%d: unexpected flow received\n", __LINE__);
2151 		return -1;
2152 	}
2153 
2154 	/* drop the flow 0 packet from port 1 */
2155 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2156 
2157 	/* call the scheduler */
2158 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2159 
2160 	/*
2161 	 * Set up the next set of flows, first a new flow to fill up
2162 	 * CQ 0, so that the next flow 0 packet should go to CQ2
2163 	 */
2164 	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
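	/* Expected placement: the new flow 3 fills the now-empty CQ0 (port 1,
	 * 3 pkts), flow 1 stays pinned to port 2 (2 + 2 pkts), and flow 0,
	 * whose earlier packet completed, should be re-balanced onto the
	 * least-loaded CQ, port 3 (1 + 1 pkts) - as the stats checks below
	 * assume.
	 */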
2165 
2166 	for (i = 0; i < RTE_DIM(flows2); i++) {
2167 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2168 		struct rte_event ev = {
2169 				.flow_id = flows2[i],
2170 				.op = RTE_EVENT_OP_NEW,
2171 				.queue_id = t->qid[0],
2172 				.event_type = RTE_EVENT_TYPE_CPU,
2173 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2174 				.mbuf = arp
2175 		};
2176 
2177 		if (!arp) {
2178 			printf("%d: gen of pkt failed\n", __LINE__);
2179 			return -1;
2180 		}
2181 		arp->hash.rss = flows2[i];
2182 
2183 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2184 		if (err != 1) {
2185 			printf("%d: Failed to enqueue\n", __LINE__);
2186 			return -1;
2187 		}
2188 	}
2189 
2190 	/* schedule */
2191 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2192 
2193 	err = test_event_dev_stats_get(evdev, &stats);
2194 	if (err) {
2195 		printf("%d: failed to get stats\n", __LINE__);
2196 		return -1;
2197 	}
2198 
2199 	/*
2200 	 * Now check the resulting inflights on each port.
2201 	 */
2202 	if (stats.port_inflight[1] != 3) {
2203 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2204 				__func__);
2205 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2206 				(unsigned int)stats.port_inflight[1],
2207 				(unsigned int)stats.port_inflight[2],
2208 				(unsigned int)stats.port_inflight[3]);
2209 		return -1;
2210 	}
2211 	if (stats.port_inflight[2] != 4) {
2212 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2213 				__func__);
2214 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2215 				(unsigned int)stats.port_inflight[1],
2216 				(unsigned int)stats.port_inflight[2],
2217 				(unsigned int)stats.port_inflight[3]);
2218 		return -1;
2219 	}
2220 	if (stats.port_inflight[3] != 2) {
2221 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2222 				__func__);
2223 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2224 				(unsigned int)stats.port_inflight[1],
2225 				(unsigned int)stats.port_inflight[2],
2226 				(unsigned int)stats.port_inflight[3]);
2227 		return -1;
2228 	}
2229 
2230 	for (i = 1; i <= 3; i++) {
2231 		struct rte_event ev;
2232 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2233 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2234 	}
2235 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2236 
2237 	cleanup(t);
2238 	return 0;
2239 }
2240 
2241 static int
2242 invalid_qid(struct test *t)
2243 {
2244 	struct test_event_dev_stats stats;
2245 	const int rx_enq = 0;
2246 	int err;
2247 	uint32_t i;
2248 
2249 	if (init(t, 1, 4) < 0 ||
2250 			create_ports(t, 4) < 0 ||
2251 			create_atomic_qids(t, 1) < 0) {
2252 		printf("%d: Error initializing device\n", __LINE__);
2253 		return -1;
2254 	}
2255 
2256 	/* CQ mapping to QID */
2257 	for (i = 0; i < 4; i++) {
2258 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2259 				NULL, 1);
2260 		if (err != 1) {
2261 			printf("%d: error mapping port %d qid\n", __LINE__, i);
2262 			return -1;
2263 		}
2264 	}
2265 
2266 	if (rte_event_dev_start(evdev) < 0) {
2267 		printf("%d: Error with start call\n", __LINE__);
2268 		return -1;
2269 	}
2270 
2271 	/*
2272 	 * Send in a packet with an invalid qid to the scheduler.
2273 	 * We should see the packet enqueued OK, but the inflights for
2274 	 * that packet should not be incremented, and the rx_dropped
2275 	 * should be incremented.
2276 	 */
2277 	static uint32_t flows1[] = {20};
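	/* qid 0 is the only configured queue, so queue_id 0 + 20 below is out
	 * of range and the event should be dropped at the port.
	 */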
2278 
2279 	for (i = 0; i < RTE_DIM(flows1); i++) {
2280 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2281 		if (!arp) {
2282 			printf("%d: gen of pkt failed\n", __LINE__);
2283 			return -1;
2284 		}
2285 
2286 		struct rte_event ev = {
2287 				.op = RTE_EVENT_OP_NEW,
2288 				.queue_id = t->qid[0] + flows1[i],
2289 				.flow_id = i,
2290 				.mbuf = arp,
2291 		};
2292 		/* generate pkt and enqueue */
2293 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2294 		if (err != 1) {
2295 			printf("%d: Failed to enqueue\n", __LINE__);
2296 			return -1;
2297 		}
2298 	}
2299 
2300 	/* call the scheduler */
2301 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2302 
2303 	err = test_event_dev_stats_get(evdev, &stats);
2304 	if (err) {
2305 		printf("%d: failed to get stats\n", __LINE__);
2306 		return -1;
2307 	}
2308 
2309 	/*
2310 	 * Now check the resulting inflights on the port, and the rx_dropped.
2311 	 */
2312 	if (stats.port_inflight[0] != 0) {
2313 		printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
2314 				__func__);
2315 		rte_event_dev_dump(evdev, stdout);
2316 		return -1;
2317 	}
2318 	if (stats.port_rx_dropped[0] != 1) {
2319 		printf("%d:%s: port 0 drop count not correct\n", __LINE__, __func__);
2320 		rte_event_dev_dump(evdev, stdout);
2321 		return -1;
2322 	}
2323 	/* each packet drop should only be counted in one place - port or dev */
2324 	if (stats.rx_dropped != 0) {
2325 		printf("%d:%s: device rx_dropped count not correct\n", __LINE__,
2326 				__func__);
2327 		rte_event_dev_dump(evdev, stdout);
2328 		return -1;
2329 	}
2330 
2331 	cleanup(t);
2332 	return 0;
2333 }
2334 
2335 static int
2336 single_packet(struct test *t)
2337 {
2338 	const uint32_t MAGIC_SEQN = 7321;
2339 	struct rte_event ev;
2340 	struct test_event_dev_stats stats;
2341 	const int rx_enq = 0;
2342 	const int wrk_enq = 2;
2343 	int err;
2344 
2345 	/* Create instance with 4 ports and 1 atomic QID */
2346 	if (init(t, 1, 4) < 0 ||
2347 			create_ports(t, 4) < 0 ||
2348 			create_atomic_qids(t, 1) < 0) {
2349 		printf("%d: Error initializing device\n", __LINE__);
2350 		return -1;
2351 	}
2352 
2353 	/* CQ mapping to QID */
2354 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2355 	if (err != 1) {
2356 		printf("%d: error mapping lb qid\n", __LINE__);
2357 		cleanup(t);
2358 		return -1;
2359 	}
2360 
2361 	if (rte_event_dev_start(evdev) < 0) {
2362 		printf("%d: Error with start call\n", __LINE__);
2363 		return -1;
2364 	}
2365 
2366 	/************** Gen pkt and enqueue ****************/
2367 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2368 	if (!arp) {
2369 		printf("%d: gen of pkt failed\n", __LINE__);
2370 		return -1;
2371 	}
2372 
2373 	ev.op = RTE_EVENT_OP_NEW;
2374 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2375 	ev.mbuf = arp;
2376 	ev.queue_id = 0;
2377 	ev.flow_id = 3;
2378 	*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
2379 
2380 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2381 	if (err != 1) {
2382 		printf("%d: Failed to enqueue\n", __LINE__);
2383 		return -1;
2384 	}
2385 
2386 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2387 
2388 	err = test_event_dev_stats_get(evdev, &stats);
2389 	if (err) {
2390 		printf("%d: failed to get stats\n", __LINE__);
2391 		return -1;
2392 	}
2393 
2394 	if (stats.rx_pkts != 1 ||
2395 			stats.tx_pkts != 1 ||
2396 			stats.port_inflight[wrk_enq] != 1) {
2397 		printf("%d: Sched core didn't handle pkt as expected\n",
2398 				__LINE__);
2399 		rte_event_dev_dump(evdev, stdout);
2400 		return -1;
2401 	}
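	/* The event now sits in the worker port's CQ; being atomic, it stays
	 * inflight until explicitly released further below.
	 */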
2402 
2403 	uint32_t deq_pkts;
2404 
2405 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2406 	if (deq_pkts < 1) {
2407 		printf("%d: Failed to deq\n", __LINE__);
2408 		return -1;
2409 	}
2410 
2411 	err = test_event_dev_stats_get(evdev, &stats);
2412 	if (err) {
2413 		printf("%d: failed to get stats\n", __LINE__);
2414 		return -1;
2415 	}
2416 
2418 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
2419 		printf("%d: magic sequence number not dequeued\n", __LINE__);
2420 		return -1;
2421 	}
2422 
2423 	rte_pktmbuf_free(ev.mbuf);
2424 	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2425 	if (err != 1) {
2426 		printf("%d: Failed to enqueue\n", __LINE__);
2427 		return -1;
2428 	}
2429 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2430 
2431 	err = test_event_dev_stats_get(evdev, &stats);
2432 	if (stats.port_inflight[wrk_enq] != 0) {
2433 		printf("%d: port inflight not correct\n", __LINE__);
2434 		return -1;
2435 	}
2436 
2437 	cleanup(t);
2438 	return 0;
2439 }
2440 
2441 static int
2442 inflight_counts(struct test *t)
2443 {
2444 	struct rte_event ev;
2445 	struct test_event_dev_stats stats;
2446 	const int rx_enq = 0;
2447 	const int p1 = 1;
2448 	const int p2 = 2;
2449 	int err;
2450 	int i;
2451 
2452 	/* Create instance with 4 ports */
2453 	/* Create instance with 3 ports and 2 atomic QIDs */
2454 			create_ports(t, 3) < 0 ||
2455 			create_atomic_qids(t, 2) < 0) {
2456 		printf("%d: Error initializing device\n", __LINE__);
2457 		return -1;
2458 	}
2459 
2460 	/* CQ mapping to QID */
2461 	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2462 	if (err != 1) {
2463 		printf("%d: error mapping lb qid\n", __LINE__);
2464 		cleanup(t);
2465 		return -1;
2466 	}
2467 	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2468 	if (err != 1) {
2469 		printf("%d: error mapping lb qid\n", __LINE__);
2470 		cleanup(t);
2471 		return -1;
2472 	}
2473 
2474 	if (rte_event_dev_start(evdev) < 0) {
2475 		printf("%d: Error with start call\n", __LINE__);
2476 		return -1;
2477 	}
2478 
2479 	/************** FORWARD ****************/
2480 #define QID1_NUM 5
2481 	for (i = 0; i < QID1_NUM; i++) {
2482 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2483 
2484 		if (!arp) {
2485 			printf("%d: gen of pkt failed\n", __LINE__);
2486 			goto err;
2487 		}
2488 
2489 		ev.queue_id = t->qid[0];
2490 		ev.op = RTE_EVENT_OP_NEW;
2491 		ev.mbuf = arp;
2492 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2493 		if (err != 1) {
2494 			printf("%d: Failed to enqueue\n", __LINE__);
2495 			goto err;
2496 		}
2497 	}
2498 #define QID2_NUM 3
2499 	for (i = 0; i < QID2_NUM; i++) {
2500 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2501 
2502 		if (!arp) {
2503 			printf("%d: gen of pkt failed\n", __LINE__);
2504 			goto err;
2505 		}
2506 		ev.queue_id = t->qid[1];
2507 		ev.op = RTE_EVENT_OP_NEW;
2508 		ev.mbuf = arp;
2509 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2510 		if (err != 1) {
2511 			printf("%d: Failed to enqueue\n", __LINE__);
2512 			goto err;
2513 		}
2514 	}
2515 
2516 	/* schedule */
2517 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2518 
2519 	err = test_event_dev_stats_get(evdev, &stats);
2520 	if (err) {
2521 		printf("%d: failed to get stats\n", __LINE__);
2522 		goto err;
2523 	}
2524 
2525 	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2526 			stats.tx_pkts != QID1_NUM + QID2_NUM) {
2527 		printf("%d: Sched core didn't handle pkt as expected\n",
2528 				__LINE__);
2529 		goto err;
2530 	}
2531 
2532 	if (stats.port_inflight[p1] != QID1_NUM) {
2533 		printf("%d: %s port 1 inflight not correct\n", __LINE__,
2534 				__func__);
2535 		goto err;
2536 	}
2537 	if (stats.port_inflight[p2] != QID2_NUM) {
2538 		printf("%d: %s port 2 inflight not correct\n", __LINE__,
2539 				__func__);
2540 		goto err;
2541 	}
2542 
2543 	/************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2544 	/* port 1 */
2545 	struct rte_event events[QID1_NUM + QID2_NUM];
2546 	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2547 			RTE_DIM(events), 0);
2548 
2549 	if (deq_pkts != QID1_NUM) {
2550 		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2551 		goto err;
2552 	}
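	/* Dequeueing alone must not change the inflight count; only an
	 * OP_RELEASE processed by the scheduler returns the credits.
	 */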
2553 	err = test_event_dev_stats_get(evdev, &stats);
2554 	if (stats.port_inflight[p1] != QID1_NUM) {
2555 		printf("%d: port 1 inflight changed after DEQ\n",
2556 				__LINE__);
2557 		goto err;
2558 	}
2559 	for (i = 0; i < QID1_NUM; i++) {
2560 		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2561 				1);
2562 		if (err != 1) {
2563 			printf("%d: %s rte enqueue of inf release failed\n",
2564 				__LINE__, __func__);
2565 			goto err;
2566 		}
2567 	}
2568 
2569 	/*
2570 	 * As the scheduler core decrements inflights, it needs to run to
2571 	 * process packets to act on the drop messages
2572 	 */
2573 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2574 
2575 	err = test_event_dev_stats_get(evdev, &stats);
2576 	if (stats.port_inflight[p1] != 0) {
2577 		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2578 		goto err;
2579 	}
2580 
2581 	/* port2 */
2582 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2583 			RTE_DIM(events), 0);
2584 	if (deq_pkts != QID2_NUM) {
2585 		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2586 		goto err;
2587 	}
2588 	err = test_event_dev_stats_get(evdev, &stats);
2589 	if (stats.port_inflight[p2] != QID2_NUM) {
2590 		printf("%d: port 2 inflight changed after DEQ\n",
2591 				__LINE__);
2592 		goto err;
2593 	}
2594 	for (i = 0; i < QID2_NUM; i++) {
2595 		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2596 				1);
2597 		if (err != 1) {
2598 			printf("%d: %s rte enqueue of inf release failed\n",
2599 				__LINE__, __func__);
2600 			goto err;
2601 		}
2602 	}
2603 
2604 	/*
2605 	 * As the scheduler core decrements inflights, it needs to run to
2606 	 * process packets to act on the drop messages
2607 	 */
2608 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2609 
2610 	err = test_event_dev_stats_get(evdev, &stats);
2611 	if (stats.port_inflight[p2] != 0) {
2612 		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2613 		goto err;
2614 	}
2615 	cleanup(t);
2616 	return 0;
2617 
2618 err:
2619 	rte_event_dev_dump(evdev, stdout);
2620 	cleanup(t);
2621 	return -1;
2622 }
2623 
2624 static int
2625 parallel_basic(struct test *t, int check_order)
2626 {
2627 	const uint8_t rx_port = 0;
2628 	const uint8_t w1_port = 1;
2629 	const uint8_t w3_port = 3;
2630 	const uint8_t tx_port = 4;
2631 	int err;
2632 	int i;
2633 	uint32_t deq_pkts, j;
2634 	struct rte_mbuf *mbufs[3];
2636 	const uint32_t MAGIC_SEQN = 1234;
2637 
2638 	/* Create instance with 5 ports and 2 QIDs */
2639 	if (init(t, 2, tx_port + 1) < 0 ||
2640 			create_ports(t, tx_port + 1) < 0 ||
2641 			(check_order ? create_ordered_qids(t, 1) :
2642 				create_unordered_qids(t, 1)) < 0 ||
2643 			create_directed_qids(t, 1, &tx_port)) {
2644 		printf("%d: Error initializing device\n", __LINE__);
2645 		return -1;
2646 	}
2647 
2648 	/*
2649 	 * CQ mapping to QID
2650 	 * We need three ports, all mapped to the same ordered qid0. Then we'll
2651 	 * take a packet out to each port, re-enqueue in reverse order,
2652 	 * then make sure the reordering has taken place properly when we
2653 	 * dequeue from the tx_port.
2654 	 *
2655 	 * Simplified test setup diagram:
2656 	 *
2657 	 * rx_port        w1_port
2658 	 *        \     /         \
2659 	 *         qid0 - w2_port - qid1
2660 	 *              \         /     \
2661 	 *                w3_port        tx_port
2662 	 */
2663 	/* CQ mapping to QID for LB ports (directed mapped on create) */
2664 	for (i = w1_port; i <= w3_port; i++) {
2665 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2666 				1);
2667 		if (err != 1) {
2668 			printf("%d: error mapping lb qid\n", __LINE__);
2669 			cleanup(t);
2670 			return -1;
2671 		}
2672 	}
2673 
2674 	if (rte_event_dev_start(evdev) < 0) {
2675 		printf("%d: Error with start call\n", __LINE__);
2676 		return -1;
2677 	}
2678 
2679 	/* Enqueue 3 packets to the rx port */
2680 	for (i = 0; i < 3; i++) {
2681 		struct rte_event ev;
2682 		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2683 		if (!mbufs[i]) {
2684 			printf("%d: gen of pkt failed\n", __LINE__);
2685 			return -1;
2686 		}
2687 
2688 		ev.queue_id = t->qid[0];
2689 		ev.op = RTE_EVENT_OP_NEW;
2690 		ev.mbuf = mbufs[i];
2691 		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
2692 
2693 		/* generate pkt and enqueue */
2694 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2695 		if (err != 1) {
2696 			printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2697 					__LINE__, i, err);
2698 			return -1;
2699 		}
2700 	}
2701 
2702 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2703 
2704 	/* use extra slot to make logic in loops easier */
2705 	struct rte_event deq_ev[w3_port + 1];
2706 
2707 	/* Dequeue the 3 packets, one from each worker port */
2708 	for (i = w1_port; i <= w3_port; i++) {
2709 		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2710 				&deq_ev[i], 1, 0);
2711 		if (deq_pkts != 1) {
2712 			printf("%d: Failed to deq\n", __LINE__);
2713 			rte_event_dev_dump(evdev, stdout);
2714 			return -1;
2715 		}
2716 	}
2717 
2718 	/* Enqueue each packet in reverse order, flushing after each one */
2719 	for (i = w3_port; i >= w1_port; i--) {
2720 
2721 		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2722 		deq_ev[i].queue_id = t->qid[1];
2723 		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2724 		if (err != 1) {
2725 			printf("%d: Failed to enqueue\n", __LINE__);
2726 			return -1;
2727 		}
2728 	}
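	/* With an ordered QID the reorder logic should restore the original
	 * sequence despite the reversed re-enqueue above; the unordered
	 * (parallel) variant enforces no order, so none is checked.
	 */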
2729 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2730 
2731 	/* dequeue from the tx ports, we should get 3 packets */
2732 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2733 			3, 0);
2734 
2735 	/* Check to see if we've got all 3 packets */
2736 	if (deq_pkts != 3) {
2737 		printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2738 			__LINE__, deq_pkts, tx_port);
2739 		rte_event_dev_dump(evdev, stdout);
2740 		return -1;
2741 	}
2742 
2743 	/* Check to see if the sequence numbers are in expected order */
2744 	if (check_order) {
2745 		for (j = 0 ; j < deq_pkts ; j++) {
2746 			if (*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf) !=
2747 					MAGIC_SEQN + j) {
2748 				printf("%d: Incorrect sequence number(%d) from port %d\n",
2749 					__LINE__,
2750 					*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf),
2751 					tx_port);
2752 				return -1;
2753 			}
2754 		}
2755 	}
2756 
2757 	/* Destroy the instance */
2758 	cleanup(t);
2759 	return 0;
2760 }
2761 
2762 static int
2763 ordered_basic(struct test *t)
2764 {
2765 	return parallel_basic(t, 1);
2766 }
2767 
2768 static int
2769 unordered_basic(struct test *t)
2770 {
2771 	return parallel_basic(t, 0);
2772 }
2773 
2774 static int
2775 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2776 {
2777 	const struct rte_event new_ev = {
2778 			.op = RTE_EVENT_OP_NEW
2779 			/* all other fields zero */
2780 	};
2781 	struct rte_event ev = new_ev;
2782 	unsigned int rx_port = 0; /* port we get the first flow on */
2783 	char rx_port_used_stat[64];
2784 	char rx_port_free_stat[64];
2785 	char other_port_used_stat[64];
2786 
2787 	if (init(t, 1, 2) < 0 ||
2788 			create_ports(t, 2) < 0 ||
2789 			create_atomic_qids(t, 1) < 0) {
2790 		printf("%d: Error initializing device\n", __LINE__);
2791 		return -1;
2792 	}
2793 	int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2794 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2795 			nb_links != 1) {
2796 		printf("%d: Error linking queue to ports\n", __LINE__);
2797 		goto err;
2798 	}
2799 	if (rte_event_dev_start(evdev) < 0) {
2800 		printf("%d: Error with start call\n", __LINE__);
2801 		goto err;
2802 	}
2803 
2804 	/* send one packet and see where it goes, port 0 or 1 */
2805 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2806 		printf("%d: Error doing first enqueue\n", __LINE__);
2807 		goto err;
2808 	}
2809 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2810 
2811 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2812 			!= 1)
2813 		rx_port = 1;
2814 
2815 	snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2816 			"port_%u_cq_ring_used", rx_port);
2817 	snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2818 			"port_%u_cq_ring_free", rx_port);
2819 	snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2820 			"port_%u_cq_ring_used", rx_port ^ 1);
2821 	if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2822 			!= 1) {
2823 		printf("%d: Error, first event not scheduled\n", __LINE__);
2824 		goto err;
2825 	}
2826 
2827 	/* now fill up the rx port's queue with one flow to cause HOLB */
2828 	do {
2829 		ev = new_ev;
2830 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2831 			printf("%d: Error with enqueue\n", __LINE__);
2832 			goto err;
2833 		}
2834 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
2835 	} while (rte_event_dev_xstats_by_name_get(evdev,
2836 				rx_port_free_stat, NULL) != 0);
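	/* The rx port's CQ is now full of a single flow. Atomic flows stay
	 * pinned to their CQ, so the next packet of this flow cannot go to
	 * the other port and must wait in the IQ - the HOLB case under test.
	 */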
2837 
2838 	/* one more packet, which needs to stay in IQ - i.e. HOLB */
2839 	ev = new_ev;
2840 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2841 		printf("%d: Error with enqueue\n", __LINE__);
2842 		goto err;
2843 	}
2844 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2845 
2846 	/* check that the other port still has an empty CQ */
2847 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2848 			!= 0) {
2849 		printf("%d: Error, second port CQ is not empty\n", __LINE__);
2850 		goto err;
2851 	}
2852 	/* check IQ now has one packet */
2853 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2854 			!= 1) {
2855 		printf("%d: Error, QID does not have exactly 1 packet\n",
2856 			__LINE__);
2857 		goto err;
2858 	}
2859 
2860 	/* send another flow, which should pass the other IQ entry */
2861 	ev = new_ev;
2862 	ev.flow_id = 1;
2863 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2864 		printf("%d: Error with enqueue\n", __LINE__);
2865 		goto err;
2866 	}
2867 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2868 
2869 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2870 			!= 1) {
2871 		printf("%d: Error, second flow did not pass out first\n",
2872 			__LINE__);
2873 		goto err;
2874 	}
2875 
2876 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2877 			!= 1) {
2878 		printf("%d: Error, QID does not have exactly 1 packet\n",
2879 			__LINE__);
2880 		goto err;
2881 	}
2882 	cleanup(t);
2883 	return 0;
2884 err:
2885 	rte_event_dev_dump(evdev, stdout);
2886 	cleanup(t);
2887 	return -1;
2888 }
2889 
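/* Stop-flush callback: counts each flushed event carrying the magic
 * 0xCA11BACC marker enqueued by dev_stop_flush() below.
 */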
2890 static void
2891 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2892 {
2893 	*((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2894 }
2895 
2896 static int
2897 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2898 {
2899 	const struct rte_event new_ev = {
2900 		.op = RTE_EVENT_OP_NEW,
2901 		.u64 = 0xCA11BACC,
2902 		.queue_id = 0
2903 	};
2904 	struct rte_event ev = new_ev;
2905 	uint8_t count = 0;
2906 	int i;
2907 
2908 	if (init(t, 1, 1) < 0 ||
2909 	    create_ports(t, 1) < 0 ||
2910 	    create_atomic_qids(t, 1) < 0) {
2911 		printf("%d: Error initializing device\n", __LINE__);
2912 		return -1;
2913 	}
2914 
2915 	/* Link the queue so *_start() doesn't error out */
2916 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2917 		printf("%d: Error linking queue to port\n", __LINE__);
2918 		goto err;
2919 	}
2920 
2921 	if (rte_event_dev_start(evdev) < 0) {
2922 		printf("%d: Error with start call\n", __LINE__);
2923 		goto err;
2924 	}
2925 
2926 	for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2927 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2928 			printf("%d: Error enqueuing events\n", __LINE__);
2929 			goto err;
2930 		}
2931 	}
2932 
2933 	/* Schedule the events. The port's CQ holds at most DEQUEUE_DEPTH
2934 	 * events, so at least one remains buffered in the device for flushing.
2935 	 */
2936 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2937 
2938 	if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2939 		printf("%d: Error installing the flush callback\n", __LINE__);
2940 		goto err;
2941 	}
2942 
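	/* cleanup() stops the device; stopping should invoke the registered
	 * flush callback once per event still buffered inside the device.
	 */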
2943 	cleanup(t);
2944 
2945 	if (count == 0) {
2946 		printf("%d: Error executing the flush callback\n", __LINE__);
2947 		goto err;
2948 	}
2949 
2950 	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2951 		printf("%d: Error uninstalling the flush callback\n", __LINE__);
2952 		goto err;
2953 	}
2954 
2955 	return 0;
2956 err:
2957 	rte_event_dev_dump(evdev, stdout);
2958 	cleanup(t);
2959 	return -1;
2960 }
2961 
2962 static int
2963 ordered_atomic_hist_completion(struct test *t)
2964 {
2965 	const int rx_enq = 0;
2966 	int err;
2967 
2968 	/* Create instance with 2 ports, 1 ordered QID and 1 atomic QID */
2969 	if (init(t, 2, 2) < 0 ||
2970 			create_ports(t, 2) < 0 ||
2971 			create_ordered_qids(t, 1) < 0 ||
2972 			create_atomic_qids(t, 1) < 0)
2973 		return -1;
2974 
2975 	/* Helpers to identify queues */
2976 	const uint8_t qid_ordered = t->qid[0];
2977 	const uint8_t qid_atomic = t->qid[1];
2978 
2979 	/* CQ mapping to QID */
2980 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2981 		printf("%d: error mapping port 1 to ordered qid\n", __LINE__);
2982 		return -1;
2983 	}
2984 	if (rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1) != 1) {
2985 		printf("%d: error mapping port 1 to atomic qid\n", __LINE__);
2986 		return -1;
2987 	}
2988 	if (rte_event_dev_start(evdev) < 0) {
2989 		printf("%d: Error with start call\n", __LINE__);
2990 		return -1;
2991 	}
2992 
2993 	/* Enqueue 1x ordered event, to be RELEASE-ed by the worker
2994 	 * CPU, which may cause hist-list corruption (by not completing)
2995 	 */
2996 	struct rte_event ord_ev = {
2997 		.op = RTE_EVENT_OP_NEW,
2998 		.queue_id = qid_ordered,
2999 		.event_type = RTE_EVENT_TYPE_CPU,
3000 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
3001 	};
3002 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ord_ev, 1);
3003 	if (err != 1) {
3004 		printf("%d: Failed to enqueue\n", __LINE__);
3005 		return -1;
3006 	}
3007 
3008 	/* call the scheduler. This schedules the above event as a single
3009 	 * event in an ORDERED queue, to the worker.
3010 	 */
3011 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3012 
3013 	/* Dequeue ORDERED event 0 from port 1, so that we can then drop */
3014 	struct rte_event ev;
3015 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
3016 		printf("%d: failed to dequeue\n", __LINE__);
3017 		return -1;
3018 	}
3019 
3020 	/* drop the ORDERED event. Here the history list should be completed,
3021 	 * but might not be if the hist-list bug exists. Call scheduler to make
3022 	 * it act on the RELEASE that was enqueued.
3023 	 */
3024 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
3025 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3026 
3027 	/* Enqueue 1x atomic event, to then FORWARD to trigger atomic hist-list
3028 	 * completion. If the bug exists, the ORDERED entry may be completed in
3029 	 * error (aka, using the ORDERED-ROB for the ATOMIC event). This is the
3030 	 * main focus of this unit test.
3031 	 */
3032 	{
3033 		struct rte_event ev = {
3034 			.op = RTE_EVENT_OP_NEW,
3035 			.queue_id = qid_atomic,
3036 			.event_type = RTE_EVENT_TYPE_CPU,
3037 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
3038 			.flow_id = 123,
3039 		};
3040 
3041 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
3042 		if (err != 1) {
3043 			printf("%d: Failed to enqueue\n", __LINE__);
3044 			return -1;
3045 		}
3046 	}
3047 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3048 
3049 	/* Deq ATM event, then forward it for more than HIST_LIST_SIZE times,
3050 	 * to re-use the history list entry that may have been corrupted above.
3051 	 */
3052 	for (int i = 0; i < SW_PORT_HIST_LIST + 2; i++) {
3053 		if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
3054 			printf("%d: failed to dequeue, did corrupt ORD hist "
3055 				"list steal this ATM event?\n", __LINE__);
3056 			return -1;
3057 		}
3058 
3059 		/* Re-enqueue the ATM event as FWD, trigger hist-list. */
3060 		ev.op = RTE_EVENT_OP_FORWARD;
3061 		err = rte_event_enqueue_burst(evdev, t->port[1], &ev, 1);
3062 		if (err != 1) {
3063 			printf("%d: Failed to enqueue\n", __LINE__);
3064 			return -1;
3065 		}
3066 
3067 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
3068 	}
3069 
3070 	/* If HIST-LIST + N count of dequeues succeed above, the hist list
3071 	 * has not been corrupted. If it is corrupted, the ATM event is pushed
3072 	 * into the ORDERED-ROB and will not dequeue.
3073 	 */
3074 
3075 	/* release the ATM event that's been forwarded HIST_LIST times */
3076 	err = rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
3077 	if (err != 1) {
3078 		printf("%d: Failed to enqueue\n", __LINE__);
3079 		return -1;
3080 	}
3081 
3082 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3083 
3084 	cleanup(t);
3085 	return 0;
3086 }
3087 
3088 static int
3089 worker_loopback_worker_fn(void *arg)
3090 {
3091 	struct test *t = arg;
3092 	uint8_t port = t->port[1];
3093 	int count = 0;
3094 	int enqd;
3095 
3096 	/*
3097 	 * Takes packets from the input port and then loops them back through
3098 	 * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
3099 	 * so each packet is scheduled 8*16 = 128 times.
3100 	 */
3101 	printf("%d: \tWorker function started\n", __LINE__);
3102 	while (count < NUM_PACKETS) {
3103 #define BURST_SIZE 32
3104 		struct rte_event ev[BURST_SIZE];
3105 		uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
3106 				BURST_SIZE, 0);
3107 		if (nb_rx == 0) {
3108 			rte_pause();
3109 			continue;
3110 		}
3111 
3112 		for (i = 0; i < nb_rx; i++) {
3113 			ev[i].queue_id++;
3114 			if (ev[i].queue_id != 8) {
3115 				ev[i].op = RTE_EVENT_OP_FORWARD;
3116 				enqd = rte_event_enqueue_burst(evdev, port,
3117 						&ev[i], 1);
3118 				if (enqd != 1) {
3119 					printf("%d: Can't enqueue FWD!!\n",
3120 							__LINE__);
3121 					return -1;
3122 				}
3123 				continue;
3124 			}
3125 
3126 			ev[i].queue_id = 0;
3127 			(*counter_field(ev[i].mbuf))++;
3128 			if (*counter_field(ev[i].mbuf) != 16) {
3129 				ev[i].op = RTE_EVENT_OP_FORWARD;
3130 				enqd = rte_event_enqueue_burst(evdev, port,
3131 						&ev[i], 1);
3132 				if (enqd != 1) {
3133 					printf("%d: Can't enqueue FWD!!\n",
3134 							__LINE__);
3135 					return -1;
3136 				}
3137 				continue;
3138 			}
3139 			/* we have hit 16 iterations through system - drop */
3140 			rte_pktmbuf_free(ev[i].mbuf);
3141 			count++;
3142 			ev[i].op = RTE_EVENT_OP_RELEASE;
3143 			enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3144 			if (enqd != 1) {
3145 				printf("%d drop enqueue failed\n", __LINE__);
3146 				return -1;
3147 			}
3148 		}
3149 	}
3150 
3151 	return 0;
3152 }
3153 
3154 static int
3155 worker_loopback_producer_fn(void *arg)
3156 {
3157 	struct test *t = arg;
3158 	uint8_t port = t->port[0];
3159 	uint64_t count = 0;
3160 
3161 	printf("%d: \tProducer function started\n", __LINE__);
3162 	while (count < NUM_PACKETS) {
3163 		struct rte_mbuf *m = 0;
3164 		do {
3165 			m = rte_pktmbuf_alloc(t->mbuf_pool);
3166 		} while (m == NULL);
3167 
3168 		*counter_field(m) = 0;
3169 
3170 		struct rte_event ev = {
3171 				.op = RTE_EVENT_OP_NEW,
3172 				.queue_id = t->qid[0],
3173 				.flow_id = (uintptr_t)m & 0xFFFF,
3174 				.mbuf = m,
3175 		};
3176 
3177 		if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3178 			while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
3179 					1)
3180 				rte_pause();
3181 		}
3182 
3183 		count++;
3184 	}
3185 
3186 	return 0;
3187 }
3188 
3189 static int
3190 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3191 {
3192 	/* use a single producer core, and a worker core to see what happens
3193 	 * if the worker loops packets back multiple times
3194 	 */
3195 	struct test_event_dev_stats stats;
3196 	uint64_t print_cycles = 0, cycles = 0;
3197 	uint64_t tx_pkts = 0;
3198 	int err;
3199 	int w_lcore, p_lcore;
3200 
3201 	static const struct rte_mbuf_dynfield counter_dynfield_desc = {
3202 		.name = "rte_event_sw_dynfield_selftest_counter",
3203 		.size = sizeof(counter_dynfield_t),
3204 		.align = __alignof__(counter_dynfield_t),
3205 	};
3206 	counter_dynfield_offset =
3207 		rte_mbuf_dynfield_register(&counter_dynfield_desc);
3208 	if (counter_dynfield_offset < 0) {
3209 		printf("Error registering mbuf field\n");
3210 		return -rte_errno;
3211 	}
3212 
3213 	if (init(t, 8, 2) < 0 ||
3214 			create_atomic_qids(t, 8) < 0) {
3215 		printf("%d: Error initializing device\n", __LINE__);
3216 		return -1;
3217 	}
3218 
3219 	/* RX with low max events */
3220 	static struct rte_event_port_conf conf = {
3221 			.dequeue_depth = 32,
3222 			.enqueue_depth = 64,
3223 	};
3224 	/* beware: this cannot be initialized in the static above as it would
3225 	 * only be initialized once - and this needs to be set for multiple runs
3226 	 */
3227 	conf.new_event_threshold = 512;
3228 	conf.event_port_cfg = disable_implicit_release ?
3229 		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
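	/* With implicit release disabled, dequeues no longer auto-release
	 * prior events; this worker is unaffected either way, as it always
	 * enqueues an explicit FORWARD or RELEASE for every event.
	 */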
3230 
3231 	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3232 		printf("Error setting up RX port\n");
3233 		return -1;
3234 	}
3235 	t->port[0] = 0;
3236 	/* TX with higher max events */
3237 	conf.new_event_threshold = 4096;
3238 	if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3239 		printf("Error setting up TX port\n");
3240 		return -1;
3241 	}
3242 	t->port[1] = 1;
3243 
3244 	/* CQ mapping to QID */
3245 	err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3246 	if (err != 8) { /* should have mapped all queues */
3247 		printf("%d: error mapping port 1 to all qids\n", __LINE__);
3248 		return -1;
3249 	}
3250 
3251 	if (rte_event_dev_start(evdev) < 0) {
3252 		printf("%d: Error with start call\n", __LINE__);
3253 		return -1;
3254 	}
3255 
3256 	p_lcore = rte_get_next_lcore(
3257 			/* start core */ -1,
3258 			/* skip main */ 1,
3259 			/* wrap */ 0);
3260 	w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3261 
3262 	rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3263 	rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3264 
3265 	print_cycles = cycles = rte_get_timer_cycles();
3266 	while (rte_eal_get_lcore_state(p_lcore) != WAIT ||
3267 			rte_eal_get_lcore_state(w_lcore) != WAIT) {
3268 
3269 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
3270 
3271 		uint64_t new_cycles = rte_get_timer_cycles();
3272 
3273 		if (new_cycles - print_cycles > rte_get_timer_hz()) {
3274 			test_event_dev_stats_get(evdev, &stats);
3275 			printf(
3276 				"%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3277 				__LINE__, stats.rx_pkts, stats.tx_pkts);
3278 
3279 			print_cycles = new_cycles;
3280 		}
3281 		if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3282 			test_event_dev_stats_get(evdev, &stats);
3283 			if (stats.tx_pkts == tx_pkts) {
3284 				rte_event_dev_dump(evdev, stdout);
3285 				printf("Dumping xstats:\n");
3286 				xstats_print();
3287 				printf(
3288 				"%d: No schedules for 3 seconds, deadlock suspected\n",
3289 					__LINE__);
3290 				return -1;
3291 			}
3292 			tx_pkts = stats.tx_pkts;
3293 			cycles = new_cycles;
3294 		}
3295 	}
3296 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3297 	/* ensure all completions are flushed */
3298 
3299 	rte_eal_mp_wait_lcore();
3300 
3301 	cleanup(t);
3302 	return 0;
3303 }
3304 
3305 static struct rte_mempool *eventdev_func_mempool;
3306 
3307 int
3308 test_sw_eventdev(void)
3309 {
3310 	struct test *t;
3311 	int ret;
3312 
3313 	t = malloc(sizeof(struct test));
3314 	if (t == NULL)
3315 		return -1;
3316 	/* manually initialize the op; older gcc versions complain about
3317 	 * static initialization of struct members that are bitfields.
3318 	 */
3319 	release_ev.op = RTE_EVENT_OP_RELEASE;
3320 
3321 	const char *eventdev_name = "event_sw";
3322 	evdev = rte_event_dev_get_dev_id(eventdev_name);
3323 	if (evdev < 0) {
3324 		printf("%d: Eventdev %s not found - creating.\n",
3325 				__LINE__, eventdev_name);
3326 		if (rte_vdev_init(eventdev_name, NULL) < 0) {
3327 			printf("Error creating eventdev\n");
3328 			goto test_fail;
3329 		}
3330 		evdev = rte_event_dev_get_dev_id(eventdev_name);
3331 		if (evdev < 0) {
3332 			printf("Error finding newly created eventdev\n");
3333 			goto test_fail;
3334 		}
3335 	}
3336 
3337 	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3338 		printf("Failed to get service ID for software event dev\n");
3339 		goto test_fail;
3340 	}
3341 
3342 	rte_service_runstate_set(t->service_id, 1);
3343 	rte_service_set_runstate_mapped_check(t->service_id, 0);
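	/* With the mapped-lcore check disabled, the tests can drive the
	 * scheduler themselves via rte_service_run_iter_on_app_lcore().
	 */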
3344 
3345 	/* Only create mbuf pool once, reuse for each test run */
3346 	if (!eventdev_func_mempool) {
3347 		eventdev_func_mempool = rte_pktmbuf_pool_create(
3348 				"EVENTDEV_SW_SA_MBUF_POOL",
3349 				(1<<12), /* 4k buffers */
3350 				32 /*MBUF_CACHE_SIZE*/,
3351 				0,
3352 				512, /* use very small mbufs */
3353 				rte_socket_id());
3354 		if (!eventdev_func_mempool) {
3355 			printf("ERROR creating mempool\n");
3356 			goto test_fail;
3357 		}
3358 	}
3359 	t->mbuf_pool = eventdev_func_mempool;
3360 	printf("*** Running Single Directed Packet test...\n");
3361 	ret = test_single_directed_packet(t);
3362 	if (ret != 0) {
3363 		printf("ERROR - Single Directed Packet test FAILED.\n");
3364 		goto test_fail;
3365 	}
3366 	printf("*** Running Directed Forward Credit test...\n");
3367 	ret = test_directed_forward_credits(t);
3368 	if (ret != 0) {
3369 		printf("ERROR - Directed Forward Credit test FAILED.\n");
3370 		goto test_fail;
3371 	}
3372 	printf("*** Running Single Load Balanced Packet test...\n");
3373 	ret = single_packet(t);
3374 	if (ret != 0) {
3375 		printf("ERROR - Single Packet test FAILED.\n");
3376 		goto test_fail;
3377 	}
3378 	printf("*** Running Unordered Basic test...\n");
3379 	ret = unordered_basic(t);
3380 	if (ret != 0) {
3381 		printf("ERROR - Unordered Basic test FAILED.\n");
3382 		goto test_fail;
3383 	}
3384 	printf("*** Running Ordered Basic test...\n");
3385 	ret = ordered_basic(t);
3386 	if (ret != 0) {
3387 		printf("ERROR - Ordered Basic test FAILED.\n");
3388 		goto test_fail;
3389 	}
3390 	printf("*** Running Burst Packets test...\n");
3391 	ret = burst_packets(t);
3392 	if (ret != 0) {
3393 		printf("ERROR - Burst Packets test FAILED.\n");
3394 		goto test_fail;
3395 	}
3396 	printf("*** Running Load Balancing test...\n");
3397 	ret = load_balancing(t);
3398 	if (ret != 0) {
3399 		printf("ERROR - Load Balancing test FAILED.\n");
3400 		goto test_fail;
3401 	}
3402 	printf("*** Running Prioritized Directed test...\n");
3403 	ret = test_priority_directed(t);
3404 	if (ret != 0) {
3405 		printf("ERROR - Prioritized Directed test FAILED.\n");
3406 		goto test_fail;
3407 	}
3408 	printf("*** Running Prioritized Atomic test...\n");
3409 	ret = test_priority_atomic(t);
3410 	if (ret != 0) {
3411 		printf("ERROR - Prioritized Atomic test FAILED.\n");
3412 		goto test_fail;
3413 	}
3414 
3415 	printf("*** Running Prioritized Ordered test...\n");
3416 	ret = test_priority_ordered(t);
3417 	if (ret != 0) {
3418 		printf("ERROR - Prioritized Ordered test FAILED.\n");
3419 		goto test_fail;
3420 	}
3421 	printf("*** Running Prioritized Unordered test...\n");
3422 	ret = test_priority_unordered(t);
3423 	if (ret != 0) {
3424 		printf("ERROR - Prioritized Unordered test FAILED.\n");
3425 		goto test_fail;
3426 	}
3427 	printf("*** Running Invalid QID test...\n");
3428 	ret = invalid_qid(t);
3429 	if (ret != 0) {
3430 		printf("ERROR - Invalid QID test FAILED.\n");
3431 		goto test_fail;
3432 	}
3433 	printf("*** Running Load Balancing History test...\n");
3434 	ret = load_balancing_history(t);
3435 	if (ret != 0) {
3436 		printf("ERROR - Load Balancing History test FAILED.\n");
3437 		goto test_fail;
3438 	}
3439 	printf("*** Running Inflight Count test...\n");
3440 	ret = inflight_counts(t);
3441 	if (ret != 0) {
3442 		printf("ERROR - Inflight Count test FAILED.\n");
3443 		goto test_fail;
3444 	}
3445 	printf("*** Running Abuse Inflights test...\n");
3446 	ret = abuse_inflights(t);
3447 	if (ret != 0) {
3448 		printf("ERROR - Abuse Inflights test FAILED.\n");
3449 		goto test_fail;
3450 	}
3451 	printf("*** Running XStats test...\n");
3452 	ret = xstats_tests(t);
3453 	if (ret != 0) {
3454 		printf("ERROR - XStats test FAILED.\n");
3455 		goto test_fail;
3456 	}
3457 	printf("*** Running XStats ID Reset test...\n");
3458 	ret = xstats_id_reset_tests(t);
3459 	if (ret != 0) {
3460 		printf("ERROR - XStats ID Reset test FAILED.\n");
3461 		goto test_fail;
3462 	}
3463 	printf("*** Running XStats Brute Force test...\n");
3464 	ret = xstats_brute_force(t);
3465 	if (ret != 0) {
3466 		printf("ERROR - XStats Brute Force test FAILED.\n");
3467 		goto test_fail;
3468 	}
3469 	printf("*** Running XStats ID Abuse test...\n");
3470 	ret = xstats_id_abuse_tests(t);
3471 	if (ret != 0) {
3472 		printf("ERROR - XStats ID Abuse test FAILED.\n");
3473 		goto test_fail;
3474 	}
3475 	printf("*** Running QID Priority test...\n");
3476 	ret = qid_priorities(t);
3477 	if (ret != 0) {
3478 		printf("ERROR - QID Priority test FAILED.\n");
3479 		goto test_fail;
3480 	}
3481 	printf("*** Running Unlink-in-progress test...\n");
3482 	ret = unlink_in_progress(t);
3483 	if (ret != 0) {
3484 		printf("ERROR - Unlink in progress test FAILED.\n");
3485 		goto test_fail;
3486 	}
3487 	printf("*** Running Ordered Reconfigure test...\n");
3488 	ret = ordered_reconfigure(t);
3489 	if (ret != 0) {
3490 		printf("ERROR - Ordered Reconfigure test FAILED.\n");
3491 		goto test_fail;
3492 	}
3493 	printf("*** Running Port LB Single Reconfig test...\n");
3494 	ret = port_single_lb_reconfig(t);
3495 	if (ret != 0) {
3496 		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3497 		goto test_fail;
3498 	}
3499 	printf("*** Running Port Reconfig Credits test...\n");
3500 	ret = port_reconfig_credits(t);
3501 	if (ret != 0) {
3502 		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3503 		goto test_fail;
3504 	}
3505 	printf("*** Running Head-of-line-blocking test...\n");
3506 	ret = holb(t);
3507 	if (ret != 0) {
3508 		printf("ERROR - Head-of-line-blocking test FAILED.\n");
3509 		goto test_fail;
3510 	}
3511 	printf("*** Running Stop Flush test...\n");
3512 	ret = dev_stop_flush(t);
3513 	if (ret != 0) {
3514 		printf("ERROR - Stop Flush test FAILED.\n");
3515 		goto test_fail;
3516 	}
3517 	printf("*** Running Ordered & Atomic hist-list completion test...\n");
3518 	ret = ordered_atomic_hist_completion(t);
3519 	if (ret != 0) {
3520 		printf("ERROR - Ordered & Atomic hist-list test FAILED.\n");
3521 		goto test_fail;
3522 	}
3523 	if (rte_lcore_count() >= 3) {
3524 		printf("*** Running Worker loopback test...\n");
3525 		ret = worker_loopback(t, 0);
3526 		if (ret != 0) {
3527 			printf("ERROR - Worker loopback test FAILED.\n");
3528 			goto test_fail;
3529 		}
3530 
3531 		printf("*** Running Worker loopback test (implicit release disabled)...\n");
3532 		ret = worker_loopback(t, 1);
3533 		if (ret != 0) {
3534 			printf("ERROR - Worker loopback test FAILED.\n");
3535 			goto test_fail;
3536 		}
3537 	} else {
3538 		printf("### Not enough cores for worker loopback tests.\n");
3539 		printf("### Need at least 3 cores for the tests.\n");
3540 	}
3541 
3542 	/*
3543 	 * Free test instance, leaving mempool initialized, and a pointer to it
3544 	 * in static eventdev_func_mempool, as it is re-used on re-runs
3545 	 */
3546 	free(t);
3547 
3548 	printf("SW Eventdev Selftest Successful.\n");
3549 	return 0;
3550 test_fail:
3551 	free(t);
3552 	printf("SW Eventdev Selftest Failed.\n");
3553 	return -1;
3554 }
3555