1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <stdalign.h>
6 #include <stdio.h>
7 #include <string.h>
8 #include <stdint.h>
9 #include <stdlib.h>
10 #include <errno.h>
11 #include <unistd.h>
12 #include <sys/queue.h>
13 
14 #include <rte_memory.h>
15 #include <rte_launch.h>
16 #include <rte_eal.h>
17 #include <rte_per_lcore.h>
18 #include <rte_lcore.h>
19 #include <rte_debug.h>
20 #include <rte_ethdev.h>
21 #include <rte_cycles.h>
22 #include <rte_eventdev.h>
23 #include <rte_pause.h>
24 #include <rte_service.h>
25 #include <rte_service_component.h>
26 #include <bus_vdev_driver.h>
27 
28 #include "sw_evdev.h"
29 
30 #define MAX_PORTS 16
31 #define MAX_QIDS 16
32 #define NUM_PACKETS (1 << 17)
33 #define DEQUEUE_DEPTH 128
34 
35 static int evdev;
36 
37 struct test {
38 	struct rte_mempool *mbuf_pool;
39 	uint8_t port[MAX_PORTS];
40 	uint8_t qid[MAX_QIDS];
41 	int nb_qids;
42 	uint32_t service_id;
43 };
44 
45 typedef uint8_t counter_dynfield_t;
46 static int counter_dynfield_offset = -1;
47 
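/*
 * Accessor for the per-mbuf counter dynfield used by the self-test. The
 * offset is presumably registered via rte_mbuf_dynfield_register() during
 * overall test setup (elsewhere in this file); it stays -1 until then.
 */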
48 static inline counter_dynfield_t *
49 counter_field(struct rte_mbuf *mbuf)
50 {
51 	return RTE_MBUF_DYNFIELD(mbuf,
52 			counter_dynfield_offset, counter_dynfield_t *);
53 }
54 
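/*
 * Template release event; its op field is expected to be initialized to
 * RTE_EVENT_OP_RELEASE during test setup elsewhere in this file.
 */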
55 static struct rte_event release_ev;
56 
57 static inline struct rte_mbuf *
58 rte_gen_arp(int portid, struct rte_mempool *mp)
59 {
60 	/*
61 	 * len = 14 + 46
62 	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
63 	 */
64 	static const uint8_t arp_request[] = {
65 		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
66 		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
67 		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
68 		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
69 		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
70 		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
71 		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
72 		0x00, 0x00, 0x00, 0x00
73 	};
74 	struct rte_mbuf *m;
75 	int pkt_len = sizeof(arp_request) - 1;
76 
77 	m = rte_pktmbuf_alloc(mp);
78 	if (!m)
79 		return 0;
80 
81 	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
82 		arp_request, pkt_len);
83 	rte_pktmbuf_pkt_len(m) = pkt_len;
84 	rte_pktmbuf_data_len(m) = pkt_len;
85 
86 	RTE_SET_USED(portid);
87 
88 	return m;
89 }
90 
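/* Debug helper: dump a sample of device, port and queue xstats to stdout. */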
91 static void
92 xstats_print(void)
93 {
94 	const uint32_t XSTATS_MAX = 1024;
95 	uint32_t i;
96 	uint64_t ids[XSTATS_MAX];
97 	uint64_t values[XSTATS_MAX];
98 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
99 
100 	for (i = 0; i < XSTATS_MAX; i++)
101 		ids[i] = i;
102 
103 	/* Device names / values */
104 	int ret = rte_event_dev_xstats_names_get(evdev,
105 					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
106 					xstats_names, ids, XSTATS_MAX);
107 	if (ret < 0) {
108 		printf("%d: xstats names get() returned error\n",
109 			__LINE__);
110 		return;
111 	}
112 	ret = rte_event_dev_xstats_get(evdev,
113 					RTE_EVENT_DEV_XSTATS_DEVICE,
114 					0, ids, values, ret);
115 	if (ret > (signed int)XSTATS_MAX)
116 		printf("%s %d: more xstats available than space\n",
117 				__func__, __LINE__);
118 	for (i = 0; (signed int)i < ret; i++) {
119 		printf("%d : %s : %"PRIu64"\n",
120 				i, xstats_names[i].name, values[i]);
121 	}
122 
123 	/* Port names / values */
124 	ret = rte_event_dev_xstats_names_get(evdev,
125 					RTE_EVENT_DEV_XSTATS_PORT, 0,
126 					xstats_names, ids, XSTATS_MAX);
127 	ret = rte_event_dev_xstats_get(evdev,
128 					RTE_EVENT_DEV_XSTATS_PORT, 1,
129 					ids, values, ret);
130 	if (ret > (signed int)XSTATS_MAX)
131 		printf("%s %d: more xstats available than space\n",
132 				__func__, __LINE__);
133 	for (i = 0; (signed int)i < ret; i++) {
134 		printf("%d : %s : %"PRIu64"\n",
135 				i, xstats_names[i].name, values[i]);
136 	}
137 
138 	/* Queue names / values */
139 	ret = rte_event_dev_xstats_names_get(evdev,
140 					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
141 					xstats_names, ids, XSTATS_MAX);
142 	ret = rte_event_dev_xstats_get(evdev,
143 					RTE_EVENT_DEV_XSTATS_QUEUE,
144 					1, ids, values, ret);
145 	if (ret > (signed int)XSTATS_MAX)
146 		printf("%s %d: more xstats available than space\n",
147 				__func__, __LINE__);
148 	for (i = 0; (signed int)i < ret; i++) {
149 		printf("%d : %s : %"PRIu64"\n",
150 				i, xstats_names[i].name, values[i]);
151 	}
152 }
153 
154 /* initialization and config */
155 static inline int
156 init(struct test *t, int nb_queues, int nb_ports)
157 {
158 	struct rte_event_dev_config config = {
159 			.nb_event_queues = nb_queues,
160 			.nb_event_ports = nb_ports,
161 			.nb_event_queue_flows = 1024,
162 			.nb_events_limit = 4096,
163 			.nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
164 			.nb_event_port_enqueue_depth = 128,
165 	};
166 	int ret;
167 
168 	void *temp = t->mbuf_pool; /* save and restore mbuf pool */
169 
170 	memset(t, 0, sizeof(*t));
171 	t->mbuf_pool = temp;
172 
173 	ret = rte_event_dev_configure(evdev, &config);
174 	if (ret < 0)
175 		printf("%d: Error configuring device\n", __LINE__);
176 	return ret;
177 }
178 
179 static inline int
180 create_ports(struct test *t, int num_ports)
181 {
182 	int i;
183 	static const struct rte_event_port_conf conf = {
184 			.new_event_threshold = 1024,
185 			.dequeue_depth = 32,
186 			.enqueue_depth = 64,
187 	};
188 	if (num_ports > MAX_PORTS)
189 		return -1;
190 
191 	for (i = 0; i < num_ports; i++) {
192 		if (rte_event_port_setup(evdev, i, &conf) < 0) {
193 			printf("Error setting up port %d\n", i);
194 			return -1;
195 		}
196 		t->port[i] = i;
197 	}
198 
199 	return 0;
200 }
201 
202 static inline int
203 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
204 {
205 	int i;
206 
207 	/* Q creation */
208 	const struct rte_event_queue_conf conf = {
209 			.schedule_type = flags,
210 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
211 			.nb_atomic_flows = 1024,
212 			.nb_atomic_order_sequences = 1024,
213 	};
214 
215 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
216 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
217 			printf("%d: error creating qid %d\n", __LINE__, i);
218 			return -1;
219 		}
220 		t->qid[i] = i;
221 	}
222 	t->nb_qids += num_qids;
223 	if (t->nb_qids > MAX_QIDS)
224 		return -1;
225 
226 	return 0;
227 }
228 
229 static inline int
230 create_atomic_qids(struct test *t, int num_qids)
231 {
232 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
233 }
234 
235 static inline int
236 create_ordered_qids(struct test *t, int num_qids)
237 {
238 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
239 }
240 
241 
242 static inline int
243 create_unordered_qids(struct test *t, int num_qids)
244 {
245 	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
246 }
247 
248 static inline int
249 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
250 {
251 	int i;
252 
253 	/* Q creation */
254 	static const struct rte_event_queue_conf conf = {
255 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
256 			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
257 	};
258 
259 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
260 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
261 			printf("%d: error creating qid %d\n", __LINE__, i);
262 			return -1;
263 		}
264 		t->qid[i] = i;
265 
266 		if (rte_event_port_link(evdev, ports[i - t->nb_qids],
267 				&t->qid[i], NULL, 1) != 1) {
268 			printf("%d: error creating link for qid %d\n",
269 					__LINE__, i);
270 			return -1;
271 		}
272 	}
273 	t->nb_qids += num_qids;
274 	if (t->nb_qids > MAX_QIDS)
275 		return -1;
276 
277 	return 0;
278 }
279 
280 /* destruction */
281 static inline int
282 cleanup(struct test *t __rte_unused)
283 {
284 	rte_event_dev_stop(evdev);
285 	rte_event_dev_close(evdev);
286 	return 0;
287 }
288 
289 struct test_event_dev_stats {
290 	uint64_t rx_pkts;       /**< Total packets received */
291 	uint64_t rx_dropped;    /**< Total packets dropped (e.g. invalid QID) */
292 	uint64_t tx_pkts;       /**< Total packets transmitted */
293 
294 	/** Packets received on this port */
295 	uint64_t port_rx_pkts[MAX_PORTS];
296 	/** Packets dropped on this port */
297 	uint64_t port_rx_dropped[MAX_PORTS];
298 	/** Packets inflight on this port */
299 	uint64_t port_inflight[MAX_PORTS];
300 	/** Packets transmitted on this port */
301 	uint64_t port_tx_pkts[MAX_PORTS];
302 	/** Packets received on this qid */
303 	uint64_t qid_rx_pkts[MAX_QIDS];
304 	/** Packets dropped on this qid */
305 	uint64_t qid_rx_dropped[MAX_QIDS];
306 	/** Packets transmitted on this qid */
307 	uint64_t qid_tx_pkts[MAX_QIDS];
308 };
309 
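/*
 * Snapshot the sw PMD's xstats into the struct above by looking each
 * counter up by its registered name ("dev_rx", "port_N_rx", "qid_N_tx",
 * ...). Name-based lookup keeps the test independent of stat ordering.
 */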
310 static inline int
311 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
312 {
313 	static uint32_t i;
314 	static uint64_t total_ids[3]; /* rx, tx and drop */
315 	static uint64_t port_rx_pkts_ids[MAX_PORTS];
316 	static uint64_t port_rx_dropped_ids[MAX_PORTS];
317 	static uint64_t port_inflight_ids[MAX_PORTS];
318 	static uint64_t port_tx_pkts_ids[MAX_PORTS];
319 	static uint64_t qid_rx_pkts_ids[MAX_QIDS];
320 	static uint64_t qid_rx_dropped_ids[MAX_QIDS];
321 	static uint64_t qid_tx_pkts_ids[MAX_QIDS];
322 
323 	stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
324 			"dev_rx", &total_ids[0]);
325 	stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
326 			"dev_drop", &total_ids[1]);
327 	stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
328 			"dev_tx", &total_ids[2]);
329 	for (i = 0; i < MAX_PORTS; i++) {
330 		char name[32];
331 		snprintf(name, sizeof(name), "port_%u_rx", i);
332 		stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
333 				dev_id, name, &port_rx_pkts_ids[i]);
334 		snprintf(name, sizeof(name), "port_%u_drop", i);
335 		stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
336 				dev_id, name, &port_rx_dropped_ids[i]);
337 		snprintf(name, sizeof(name), "port_%u_inflight", i);
338 		stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
339 				dev_id, name, &port_inflight_ids[i]);
340 		snprintf(name, sizeof(name), "port_%u_tx", i);
341 		stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
342 				dev_id, name, &port_tx_pkts_ids[i]);
343 	}
344 	for (i = 0; i < MAX_QIDS; i++) {
345 		char name[32];
346 		snprintf(name, sizeof(name), "qid_%u_rx", i);
347 		stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
348 				dev_id, name, &qid_rx_pkts_ids[i]);
349 		snprintf(name, sizeof(name), "qid_%u_drop", i);
350 		stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
351 				dev_id, name, &qid_rx_dropped_ids[i]);
352 		snprintf(name, sizeof(name), "qid_%u_tx", i);
353 		stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
354 				dev_id, name, &qid_tx_pkts_ids[i]);
355 	}
356 
357 	return 0;
358 }
359 
360 /* run_prio_packet_test
361  * This performs a basic packet priority check on the test instance passed in.
362  * It is factored out of the main priority tests as the same tests must be
363  * performed to ensure prioritization of each type of QID.
364  *
365  * Requirements:
366  *  - An initialized test structure, including mempool
367  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
368  *  - t->qid[0] is the QID to be tested
369  *  - if LB QID, the CQ must be mapped to the QID.
370  */
371 static int
372 run_prio_packet_test(struct test *t)
373 {
374 	int err;
375 	const uint32_t MAGIC_SEQN[] = {4711, 1234};
376 	const uint32_t PRIORITY[] = {
377 		RTE_EVENT_DEV_PRIORITY_NORMAL,
378 		RTE_EVENT_DEV_PRIORITY_HIGHEST
379 	};
380 	unsigned int i;
381 	for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
382 		/* generate pkt and enqueue */
383 		struct rte_event ev;
384 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
385 		if (!arp) {
386 			printf("%d: gen of pkt failed\n", __LINE__);
387 			return -1;
388 		}
389 		*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN[i];
390 
391 		ev = (struct rte_event){
392 			.priority = PRIORITY[i],
393 			.op = RTE_EVENT_OP_NEW,
394 			.queue_id = t->qid[0],
395 			.mbuf = arp
396 		};
397 		err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
398 		if (err != 1) {
399 			printf("%d: error failed to enqueue\n", __LINE__);
400 			return -1;
401 		}
402 	}
403 
404 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
405 
406 	struct test_event_dev_stats stats;
407 	err = test_event_dev_stats_get(evdev, &stats);
408 	if (err) {
409 		printf("%d: error failed to get stats\n", __LINE__);
410 		return -1;
411 	}
412 
413 	if (stats.port_rx_pkts[t->port[0]] != 2) {
414 		printf("%d: error stats incorrect for directed port\n",
415 				__LINE__);
416 		rte_event_dev_dump(evdev, stdout);
417 		return -1;
418 	}
419 
420 	struct rte_event ev, ev2;
421 	uint32_t deq_pkts;
422 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
423 	if (deq_pkts != 1) {
424 		printf("%d: error failed to deq\n", __LINE__);
425 		rte_event_dev_dump(evdev, stdout);
426 		return -1;
427 	}
428 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
429 		printf("%d: first packet out not highest priority\n",
430 				__LINE__);
431 		rte_event_dev_dump(evdev, stdout);
432 		return -1;
433 	}
434 	rte_pktmbuf_free(ev.mbuf);
435 
436 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
437 	if (deq_pkts != 1) {
438 		printf("%d: error failed to deq\n", __LINE__);
439 		rte_event_dev_dump(evdev, stdout);
440 		return -1;
441 	}
442 	if (*rte_event_pmd_selftest_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
443 		printf("%d: second packet out not lower priority\n",
444 				__LINE__);
445 		rte_event_dev_dump(evdev, stdout);
446 		return -1;
447 	}
448 	rte_pktmbuf_free(ev2.mbuf);
449 
450 	cleanup(t);
451 	return 0;
452 }
453 
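/*
 * Enqueue one tagged packet to a directed (single-link) queue and verify
 * it is delivered on the linked port with its sequence number intact.
 */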
454 static int
455 test_single_directed_packet(struct test *t)
456 {
457 	const int rx_enq = 0;
458 	const int wrk_enq = 2;
459 	int err;
460 
461 	/* Create instance with 3 directed QIDs going to 3 ports */
462 	if (init(t, 3, 3) < 0 ||
463 			create_ports(t, 3) < 0 ||
464 			create_directed_qids(t, 3, t->port) < 0)
465 		return -1;
466 
467 	if (rte_event_dev_start(evdev) < 0) {
468 		printf("%d: Error with start call\n", __LINE__);
469 		return -1;
470 	}
471 
472 	/************** FORWARD ****************/
473 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
474 	struct rte_event ev = {
475 			.op = RTE_EVENT_OP_NEW,
476 			.queue_id = wrk_enq,
477 			.mbuf = arp,
478 	};
479 
480 	if (!arp) {
481 		printf("%d: gen of pkt failed\n", __LINE__);
482 		return -1;
483 	}
484 
485 	const uint32_t MAGIC_SEQN = 4711;
486 	*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
487 
488 	/* generate pkt and enqueue */
489 	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
490 	if (err != 1) {
491 		printf("%d: error failed to enqueue\n", __LINE__);
492 		return -1;
493 	}
494 
495 	/* Run schedule() as dir packets may need to be re-ordered */
496 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
497 
498 	struct test_event_dev_stats stats;
499 	err = test_event_dev_stats_get(evdev, &stats);
500 	if (err) {
501 		printf("%d: error failed to get stats\n", __LINE__);
502 		return -1;
503 	}
504 
505 	if (stats.port_rx_pkts[rx_enq] != 1) {
506 		printf("%d: error stats incorrect for directed port\n",
507 				__LINE__);
508 		return -1;
509 	}
510 
511 	uint32_t deq_pkts;
512 	deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
513 	if (deq_pkts != 1) {
514 		printf("%d: error failed to deq\n", __LINE__);
515 		return -1;
516 	}
517 
518 	err = test_event_dev_stats_get(evdev, &stats);
519 	if (stats.port_rx_pkts[wrk_enq] != 0 &&
520 			stats.port_rx_pkts[wrk_enq] != 1) {
521 		printf("%d: error directed stats post-dequeue\n", __LINE__);
522 		return -1;
523 	}
524 
525 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
526 		printf("%d: error magic sequence number not dequeued\n",
527 				__LINE__);
528 		return -1;
529 	}
530 
531 	rte_pktmbuf_free(ev.mbuf);
532 	cleanup(t);
533 	return 0;
534 }
535 
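/*
 * Loop one event through a directed port 1000 times, converting it to
 * RTE_EVENT_OP_FORWARD after the first enqueue; per the test name this
 * exercises credit handling on the directed forward path.
 */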
536 static int
537 test_directed_forward_credits(struct test *t)
538 {
539 	uint32_t i;
540 	int32_t err;
541 
542 	if (init(t, 1, 1) < 0 ||
543 			create_ports(t, 1) < 0 ||
544 			create_directed_qids(t, 1, t->port) < 0)
545 		return -1;
546 
547 	if (rte_event_dev_start(evdev) < 0) {
548 		printf("%d: Error with start call\n", __LINE__);
549 		return -1;
550 	}
551 
552 	struct rte_event ev = {
553 			.op = RTE_EVENT_OP_NEW,
554 			.queue_id = 0,
555 	};
556 
557 	for (i = 0; i < 1000; i++) {
558 		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
559 		if (err != 1) {
560 			printf("%d: error failed to enqueue\n", __LINE__);
561 			return -1;
562 		}
563 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
564 
565 		uint32_t deq_pkts;
566 		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
567 		if (deq_pkts != 1) {
568 			printf("%d: error failed to deq\n", __LINE__);
569 			return -1;
570 		}
571 
572 		/* re-write event to be a forward, and continue looping it */
573 		ev.op = RTE_EVENT_OP_FORWARD;
574 	}
575 
576 	cleanup(t);
577 	return 0;
578 }
579 
580 
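/*
 * The four test_priority_* wrappers below create a queue of each schedule
 * type and then run the shared run_prio_packet_test() body against it.
 */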
581 static int
582 test_priority_directed(struct test *t)
583 {
584 	if (init(t, 1, 1) < 0 ||
585 			create_ports(t, 1) < 0 ||
586 			create_directed_qids(t, 1, t->port) < 0) {
587 		printf("%d: Error initializing device\n", __LINE__);
588 		return -1;
589 	}
590 
591 	if (rte_event_dev_start(evdev) < 0) {
592 		printf("%d: Error with start call\n", __LINE__);
593 		return -1;
594 	}
595 
596 	return run_prio_packet_test(t);
597 }
598 
599 static int
600 test_priority_atomic(struct test *t)
601 {
602 	if (init(t, 1, 1) < 0 ||
603 			create_ports(t, 1) < 0 ||
604 			create_atomic_qids(t, 1) < 0) {
605 		printf("%d: Error initializing device\n", __LINE__);
606 		return -1;
607 	}
608 
609 	/* map the QID */
610 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
611 		printf("%d: error mapping qid to port\n", __LINE__);
612 		return -1;
613 	}
614 	if (rte_event_dev_start(evdev) < 0) {
615 		printf("%d: Error with start call\n", __LINE__);
616 		return -1;
617 	}
618 
619 	return run_prio_packet_test(t);
620 }
621 
622 static int
623 test_priority_ordered(struct test *t)
624 {
625 	if (init(t, 1, 1) < 0 ||
626 			create_ports(t, 1) < 0 ||
627 			create_ordered_qids(t, 1) < 0) {
628 		printf("%d: Error initializing device\n", __LINE__);
629 		return -1;
630 	}
631 
632 	/* map the QID */
633 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
634 		printf("%d: error mapping qid to port\n", __LINE__);
635 		return -1;
636 	}
637 	if (rte_event_dev_start(evdev) < 0) {
638 		printf("%d: Error with start call\n", __LINE__);
639 		return -1;
640 	}
641 
642 	return run_prio_packet_test(t);
643 }
644 
645 static int
646 test_priority_unordered(struct test *t)
647 {
648 	if (init(t, 1, 1) < 0 ||
649 			create_ports(t, 1) < 0 ||
650 			create_unordered_qids(t, 1) < 0) {
651 		printf("%d: Error initializing device\n", __LINE__);
652 		return -1;
653 	}
654 
655 	/* map the QID */
656 	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
657 		printf("%d: error mapping qid to port\n", __LINE__);
658 		return -1;
659 	}
660 	if (rte_event_dev_start(evdev) < 0) {
661 		printf("%d: Error with start call\n", __LINE__);
662 		return -1;
663 	}
664 
665 	return run_prio_packet_test(t);
666 }
667 
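/*
 * Spread new events across two atomic queues/ports and verify the
 * scheduler delivers exactly half of them to each linked port.
 */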
668 static int
669 burst_packets(struct test *t)
670 {
671 	/************** CONFIG ****************/
672 	uint32_t i;
673 	int err;
674 	int ret;
675 
676 	/* Create instance with 2 ports and 2 queues */
677 	if (init(t, 2, 2) < 0 ||
678 			create_ports(t, 2) < 0 ||
679 			create_atomic_qids(t, 2) < 0) {
680 		printf("%d: Error initializing device\n", __LINE__);
681 		return -1;
682 	}
683 
684 	/* CQ mapping to QID */
685 	ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
686 	if (ret != 1) {
687 		printf("%d: error mapping lb qid0\n", __LINE__);
688 		return -1;
689 	}
690 	ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
691 	if (ret != 1) {
692 		printf("%d: error mapping lb qid1\n", __LINE__);
693 		return -1;
694 	}
695 
696 	if (rte_event_dev_start(evdev) < 0) {
697 		printf("%d: Error with start call\n", __LINE__);
698 		return -1;
699 	}
700 
701 	/************** FORWARD ****************/
702 	const uint32_t rx_port = 0;
703 	const uint32_t NUM_PKTS = 2;
704 
705 	for (i = 0; i < NUM_PKTS; i++) {
706 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
707 		if (!arp) {
708 			printf("%d: error generating pkt\n", __LINE__);
709 			return -1;
710 		}
711 
712 		struct rte_event ev = {
713 				.op = RTE_EVENT_OP_NEW,
714 				.queue_id = i % 2,
715 				.flow_id = i % 3,
716 				.mbuf = arp,
717 		};
718 		/* generate pkt and enqueue */
719 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
720 		if (err != 1) {
721 			printf("%d: Failed to enqueue\n", __LINE__);
722 			return -1;
723 		}
724 	}
725 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
726 
727 	/* Check stats show that all NUM_PKTS arrived at the sched core */
728 	struct test_event_dev_stats stats;
729 
730 	err = test_event_dev_stats_get(evdev, &stats);
731 	if (err) {
732 		printf("%d: failed to get stats\n", __LINE__);
733 		return -1;
734 	}
735 	if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
736 		printf("%d: Sched core didn't receive all %d pkts\n",
737 				__LINE__, NUM_PKTS);
738 		rte_event_dev_dump(evdev, stdout);
739 		return -1;
740 	}
741 
742 	uint32_t deq_pkts;
743 	int p;
744 
745 	deq_pkts = 0;
746 	/******** DEQ QID 1 *******/
747 	do {
748 		struct rte_event ev;
749 		p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
750 		deq_pkts += p;
751 		if (p) rte_pktmbuf_free(ev.mbuf);
752 	} while (p);
753 
754 	if (deq_pkts != NUM_PKTS/2) {
755 		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
756 				__LINE__);
757 		return -1;
758 	}
759 
760 	/******** DEQ QID 2 *******/
761 	deq_pkts = 0;
762 	do {
763 		struct rte_event ev;
764 		p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
765 		deq_pkts += p;
766 		if (p) rte_pktmbuf_free(ev.mbuf);
767 	} while (p);
768 	if (deq_pkts != NUM_PKTS/2) {
769 		printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
770 				__LINE__);
771 		return -1;
772 	}
773 
774 	cleanup(t);
775 	return 0;
776 }
777 
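/*
 * "Abuse" case: enqueue a RELEASE with no preceding dequeue; the scheduler
 * must ignore it, leaving rx, tx and inflight counts at zero.
 */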
778 static int
779 abuse_inflights(struct test *t)
780 {
781 	const int rx_enq = 0;
782 	const int wrk_enq = 2;
783 	int err;
784 
785 	/* Create instance with 4 ports */
786 	if (init(t, 1, 4) < 0 ||
787 			create_ports(t, 4) < 0 ||
788 			create_atomic_qids(t, 1) < 0) {
789 		printf("%d: Error initializing device\n", __LINE__);
790 		return -1;
791 	}
792 
793 	/* CQ mapping to QID */
794 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
795 	if (err != 1) {
796 		printf("%d: error mapping lb qid\n", __LINE__);
797 		cleanup(t);
798 		return -1;
799 	}
800 
801 	if (rte_event_dev_start(evdev) < 0) {
802 		printf("%d: Error with start call\n", __LINE__);
803 		return -1;
804 	}
805 
806 	/* Enqueue op only */
807 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
808 	if (err != 1) {
809 		printf("%d: Failed to enqueue\n", __LINE__);
810 		return -1;
811 	}
812 
813 	/* schedule */
814 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
815 
816 	struct test_event_dev_stats stats;
817 
818 	err = test_event_dev_stats_get(evdev, &stats);
819 	if (err) {
820 		printf("%d: failed to get stats\n", __LINE__);
821 		return -1;
822 	}
823 
824 	if (stats.rx_pkts != 0 ||
825 			stats.tx_pkts != 0 ||
826 			stats.port_inflight[wrk_enq] != 0) {
827 		printf("%d: Sched core didn't handle pkt as expected\n",
828 				__LINE__);
829 		return -1;
830 	}
831 
832 	cleanup(t);
833 	return 0;
834 }
835 
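/*
 * Exhaustive xstats check. The expected counts (8 device, 21 port and 16
 * queue stats) mirror the sw PMD's current stat list and must be updated
 * whenever statistics are added or removed.
 */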
836 static int
837 xstats_tests(struct test *t)
838 {
839 	const int wrk_enq = 2;
840 	int err;
841 
842 	/* Create instance with 4 ports */
843 	if (init(t, 1, 4) < 0 ||
844 			create_ports(t, 4) < 0 ||
845 			create_atomic_qids(t, 1) < 0) {
846 		printf("%d: Error initializing device\n", __LINE__);
847 		return -1;
848 	}
849 
850 	/* CQ mapping to QID */
851 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
852 	if (err != 1) {
853 		printf("%d: error mapping lb qid\n", __LINE__);
854 		cleanup(t);
855 		return -1;
856 	}
857 
858 	if (rte_event_dev_start(evdev) < 0) {
859 		printf("%d: Error with start call\n", __LINE__);
860 		return -1;
861 	}
862 
863 	const uint32_t XSTATS_MAX = 1024;
864 
865 	uint32_t i;
866 	uint64_t ids[XSTATS_MAX];
867 	uint64_t values[XSTATS_MAX];
868 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
869 
870 	for (i = 0; i < XSTATS_MAX; i++)
871 		ids[i] = i;
872 
873 	/* Device names / values */
874 	int ret = rte_event_dev_xstats_names_get(evdev,
875 					RTE_EVENT_DEV_XSTATS_DEVICE,
876 					0, xstats_names, ids, XSTATS_MAX);
877 	if (ret != 8) {
878 		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
879 		return -1;
880 	}
881 	ret = rte_event_dev_xstats_get(evdev,
882 					RTE_EVENT_DEV_XSTATS_DEVICE,
883 					0, ids, values, ret);
884 	if (ret != 8) {
885 		printf("%d: expected 8 stats, got return %d\n", __LINE__, ret);
886 		return -1;
887 	}
888 
889 	/* Port names / values */
890 	ret = rte_event_dev_xstats_names_get(evdev,
891 					RTE_EVENT_DEV_XSTATS_PORT, 0,
892 					xstats_names, ids, XSTATS_MAX);
893 	if (ret != 21) {
894 		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
895 		return -1;
896 	}
897 	ret = rte_event_dev_xstats_get(evdev,
898 					RTE_EVENT_DEV_XSTATS_PORT, 0,
899 					ids, values, ret);
900 	if (ret != 21) {
901 		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
902 		return -1;
903 	}
904 
905 	/* Queue names / values */
906 	ret = rte_event_dev_xstats_names_get(evdev,
907 					RTE_EVENT_DEV_XSTATS_QUEUE,
908 					0, xstats_names, ids, XSTATS_MAX);
909 	if (ret != 16) {
910 		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
911 		return -1;
912 	}
913 
914 	/* NEGATIVE TEST: with an invalid queue id, -EINVAL should be returned */
915 	ret = rte_event_dev_xstats_get(evdev,
916 					RTE_EVENT_DEV_XSTATS_QUEUE,
917 					1, ids, values, ret);
918 	if (ret != -EINVAL) {
919 		printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
920 		return -1;
921 	}
922 
923 	ret = rte_event_dev_xstats_get(evdev,
924 					RTE_EVENT_DEV_XSTATS_QUEUE,
925 					0, ids, values, ret);
926 	if (ret != 16) {
927 		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
928 		return -1;
929 	}
930 
931 	/* enqueue packets to check values */
932 	for (i = 0; i < 3; i++) {
933 		struct rte_event ev;
934 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
935 		if (!arp) {
936 			printf("%d: gen of pkt failed\n", __LINE__);
937 			return -1;
938 		}
939 		ev.queue_id = t->qid[i];
940 		ev.op = RTE_EVENT_OP_NEW;
941 		ev.mbuf = arp;
942 		ev.flow_id = 7;
943 		*rte_event_pmd_selftest_seqn(arp) = i;
944 
945 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
946 		if (err != 1) {
947 			printf("%d: Failed to enqueue\n", __LINE__);
948 			return -1;
949 		}
950 	}
951 
952 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
953 
954 	/* Device names / values */
955 	int num_stats = rte_event_dev_xstats_names_get(evdev,
956 					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
957 					xstats_names, ids, XSTATS_MAX);
958 	if (num_stats < 0)
959 		goto fail;
960 	ret = rte_event_dev_xstats_get(evdev,
961 					RTE_EVENT_DEV_XSTATS_DEVICE,
962 					0, ids, values, num_stats);
963 	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0, 4, 1};
964 	for (i = 0; (signed int)i < ret; i++) {
965 		if (expected[i] != values[i]) {
966 			printf("%d Error xstat %d (id %" PRIu64
967 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
968 			       __LINE__, i, ids[i], xstats_names[i].name,
969 			       values[i], expected[i]);
970 			goto fail;
971 		}
972 	}
973 
974 	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
975 					0, NULL, 0);
976 
977 	/* ensure reset statistics are zero-ed */
978 	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0, 0, 0};
979 	ret = rte_event_dev_xstats_get(evdev,
980 					RTE_EVENT_DEV_XSTATS_DEVICE,
981 					0, ids, values, num_stats);
982 	for (i = 0; (signed int)i < ret; i++) {
983 		if (expected_zero[i] != values[i]) {
984 			printf("%d Error, xstat %d (id %" PRIu64
985 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
986 			       __LINE__, i, ids[i], xstats_names[i].name,
987 			       values[i], expected_zero[i]);
988 			goto fail;
989 		}
990 	}
991 
992 	/* port reset checks */
993 	num_stats = rte_event_dev_xstats_names_get(evdev,
994 					RTE_EVENT_DEV_XSTATS_PORT, 0,
995 					xstats_names, ids, XSTATS_MAX);
996 	if (num_stats < 0)
997 		goto fail;
998 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
999 					0, ids, values, num_stats);
1000 
1001 	static const uint64_t port_expected[] = {
1002 		3 /* rx */,
1003 		0 /* tx */,
1004 		0 /* drop */,
1005 		0 /* inflights */,
1006 		0 /* avg pkt cycles */,
1007 		29 /* credits */,
1008 		0 /* rx ring used */,
1009 		4096 /* rx ring free */,
1010 		0 /* cq ring used */,
1011 		32 /* cq ring free */,
1012 		0 /* dequeue calls */,
1013 		/* 10 dequeue burst buckets */
1014 		0, 0, 0, 0, 0,
1015 		0, 0, 0, 0, 0,
1016 	};
1017 	if (ret != RTE_DIM(port_expected)) {
1018 		printf(
1019 			"%s %d: wrong number of port stats (%d), expected %zu\n",
1020 			__func__, __LINE__, ret, RTE_DIM(port_expected));
1021 	}
1022 
1023 	for (i = 0; (signed int)i < ret; i++) {
1024 		if (port_expected[i] != values[i]) {
1025 			printf(
1026 				"%s : %d: Error stat %s is %"PRIu64
1027 				", expected %"PRIu64"\n",
1028 				__func__, __LINE__, xstats_names[i].name,
1029 				values[i], port_expected[i]);
1030 			goto fail;
1031 		}
1032 	}
1033 
1034 	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1035 					0, NULL, 0);
1036 
1037 	/* ensure reset statistics are zero-ed */
1038 	static const uint64_t port_expected_zero[] = {
1039 		0 /* rx */,
1040 		0 /* tx */,
1041 		0 /* drop */,
1042 		0 /* inflights */,
1043 		0 /* avg pkt cycles */,
1044 		29 /* credits */,
1045 		0 /* rx ring used */,
1046 		4096 /* rx ring free */,
1047 		0 /* cq ring used */,
1048 		32 /* cq ring free */,
1049 		0 /* dequeue calls */,
1050 		/* 10 dequeue burst buckets */
1051 		0, 0, 0, 0, 0,
1052 		0, 0, 0, 0, 0,
1053 	};
1054 	ret = rte_event_dev_xstats_get(evdev,
1055 					RTE_EVENT_DEV_XSTATS_PORT,
1056 					0, ids, values, num_stats);
1057 	for (i = 0; (signed int)i < ret; i++) {
1058 		if (port_expected_zero[i] != values[i]) {
1059 			printf("%d, Error, xstat %d (id %" PRIu64
1060 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1061 			       __LINE__, i, ids[i], xstats_names[i].name,
1062 			       values[i], port_expected_zero[i]);
1063 			goto fail;
1064 		}
1065 	}
1066 
1067 	/* QUEUE STATS TESTS */
1068 	num_stats = rte_event_dev_xstats_names_get(evdev,
1069 						RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1070 						xstats_names, ids, XSTATS_MAX);
1071 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1072 					0, ids, values, num_stats);
1073 	if (ret < 0) {
1074 		printf("xstats get returned %d\n", ret);
1075 		goto fail;
1076 	}
1077 	if ((unsigned int)ret > XSTATS_MAX)
1078 		printf("%s %d: more xstats available than space\n",
1079 				__func__, __LINE__);
1080 
1081 	static const uint64_t queue_expected[] = {
1082 		3 /* rx */,
1083 		3 /* tx */,
1084 		0 /* drop */,
1085 		3 /* inflights */,
1086 		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1087 		/* QID-to-Port: pinned_flows, packets */
1088 		0, 0,
1089 		0, 0,
1090 		1, 3,
1091 		0, 0,
1092 	};
1093 	for (i = 0; (signed int)i < ret; i++) {
1094 		if (queue_expected[i] != values[i]) {
1095 			printf("%d, Error, xstat %d (id %" PRIu64
1096 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1097 			       __LINE__, i, ids[i], xstats_names[i].name,
1098 			       values[i], queue_expected[i]);
1099 			goto fail;
1100 		}
1101 	}
1102 
1103 	/* Reset the queue stats here */
1104 	ret = rte_event_dev_xstats_reset(evdev,
1105 					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1106 					NULL,
1107 					0);
1108 
1109 	/* Verify that the resettable stats are reset, and others are not */
1110 	static const uint64_t queue_expected_zero[] = {
1111 		0 /* rx */,
1112 		0 /* tx */,
1113 		0 /* drop */,
1114 		3 /* inflight */,
1115 		0, 0, 0, 0, /* 4 iq used */
1116 		/* QID-to-Port: pinned_flows, packets */
1117 		0, 0,
1118 		0, 0,
1119 		1, 0,
1120 		0, 0,
1121 	};
1122 
1123 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1124 					ids, values, num_stats);
1125 	int fails = 0;
1126 	for (i = 0; (signed int)i < ret; i++) {
1127 		if (queue_expected_zero[i] != values[i]) {
1128 			printf("%d, Error, xstat %d (id %" PRIu64
1129 			       ") %s : %" PRIu64 ", expect %" PRIu64 "\n",
1130 			       __LINE__, i, ids[i], xstats_names[i].name,
1131 			       values[i], queue_expected_zero[i]);
1132 			fails++;
1133 		}
1134 	}
1135 	if (fails) {
1136 		printf("%d : %d of values were not as expected above\n",
1137 				__LINE__, fails);
1138 		goto fail;
1139 	}
1140 
1141 	cleanup(t);
1142 	return 0;
1143 
1144 fail:
1145 	rte_event_dev_dump(0, stdout);
1146 	cleanup(t);
1147 	return -1;
1148 }
1149 
1150 
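/*
 * Pass deliberately out-of-range port/queue ids to the xstats names API
 * and check that zero stats are returned rather than garbage or a crash.
 */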
1151 static int
1152 xstats_id_abuse_tests(struct test *t)
1153 {
1154 	int err;
1155 	const uint32_t XSTATS_MAX = 1024;
1156 	const uint32_t link_port = 2;
1157 
1158 	uint64_t ids[XSTATS_MAX];
1159 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1160 
1161 	/* Create instance with 4 ports */
1162 	if (init(t, 1, 4) < 0 ||
1163 			create_ports(t, 4) < 0 ||
1164 			create_atomic_qids(t, 1) < 0) {
1165 		printf("%d: Error initializing device\n", __LINE__);
1166 		goto fail;
1167 	}
1168 
1169 	err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1170 	if (err != 1) {
1171 		printf("%d: error mapping lb qid\n", __LINE__);
1172 		goto fail;
1173 	}
1174 
1175 	if (rte_event_dev_start(evdev) < 0) {
1176 		printf("%d: Error with start call\n", __LINE__);
1177 		goto fail;
1178 	}
1179 
1180 	/* no test for device, as it ignores the port/q number */
1181 	int num_stats = rte_event_dev_xstats_names_get(evdev,
1182 					RTE_EVENT_DEV_XSTATS_PORT,
1183 					UINT8_MAX-1, xstats_names, ids,
1184 					XSTATS_MAX);
1185 	if (num_stats != 0) {
1186 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1187 				0, num_stats);
1188 		goto fail;
1189 	}
1190 
1191 	num_stats = rte_event_dev_xstats_names_get(evdev,
1192 					RTE_EVENT_DEV_XSTATS_QUEUE,
1193 					UINT8_MAX-1, xstats_names, ids,
1194 					XSTATS_MAX);
1195 	if (num_stats != 0) {
1196 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1197 				0, num_stats);
1198 		goto fail;
1199 	}
1200 
1201 	cleanup(t);
1202 	return 0;
1203 fail:
1204 	cleanup(t);
1205 	return -1;
1206 }
1207 
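/*
 * Repeatedly stop and reconfigure the same queue and port, pushing one
 * packet through per iteration; per the test name this checks that port
 * credits survive reconfiguration cycles.
 */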
1208 static int
1209 port_reconfig_credits(struct test *t)
1210 {
1211 	if (init(t, 1, 1) < 0) {
1212 		printf("%d: Error initializing device\n", __LINE__);
1213 		return -1;
1214 	}
1215 
1216 	uint32_t i;
1217 	const uint32_t NUM_ITERS = 32;
1218 	for (i = 0; i < NUM_ITERS; i++) {
1219 		const struct rte_event_queue_conf conf = {
1220 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1221 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1222 			.nb_atomic_flows = 1024,
1223 			.nb_atomic_order_sequences = 1024,
1224 		};
1225 		if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1226 			printf("%d: error creating qid\n", __LINE__);
1227 			return -1;
1228 		}
1229 		t->qid[0] = 0;
1230 
1231 		static const struct rte_event_port_conf port_conf = {
1232 				.new_event_threshold = 128,
1233 				.dequeue_depth = 32,
1234 				.enqueue_depth = 64,
1235 		};
1236 		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1237 			printf("%d Error setting up port\n", __LINE__);
1238 			return -1;
1239 		}
1240 
1241 		int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1242 		if (links != 1) {
1243 			printf("%d: error mapping lb qid\n", __LINE__);
1244 			goto fail;
1245 		}
1246 
1247 		if (rte_event_dev_start(evdev) < 0) {
1248 			printf("%d: Error with start call\n", __LINE__);
1249 			goto fail;
1250 		}
1251 
1252 		const uint32_t NPKTS = 1;
1253 		uint32_t j;
1254 		for (j = 0; j < NPKTS; j++) {
1255 			struct rte_event ev;
1256 			struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1257 			if (!arp) {
1258 				printf("%d: gen of pkt failed\n", __LINE__);
1259 				goto fail;
1260 			}
1261 			ev.queue_id = t->qid[0];
1262 			ev.op = RTE_EVENT_OP_NEW;
1263 			ev.mbuf = arp;
1264 			int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1265 			if (err != 1) {
1266 				printf("%d: Failed to enqueue\n", __LINE__);
1267 				rte_event_dev_dump(0, stdout);
1268 				goto fail;
1269 			}
1270 		}
1271 
1272 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
1273 
1274 		struct rte_event ev[NPKTS];
1275 		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1276 							NPKTS, 0);
1277 		if (deq != 1)
1278 			printf("%d: error, no packet dequeued\n", __LINE__);
1279 
1280 		/* let cleanup below stop the device on last iter */
1281 		if (i != NUM_ITERS-1)
1282 			rte_event_dev_stop(evdev);
1283 	}
1284 
1285 	cleanup(t);
1286 	return 0;
1287 fail:
1288 	cleanup(t);
1289 	return -1;
1290 }
1291 
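/*
 * Reconfiguration corner case: unlink a port from a load-balanced queue,
 * relink it to a single-link queue (and link the LB queue to another
 * port), then confirm the device still starts.
 */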
1292 static int
1293 port_single_lb_reconfig(struct test *t)
1294 {
1295 	if (init(t, 2, 2) < 0) {
1296 		printf("%d: Error initializing device\n", __LINE__);
1297 		goto fail;
1298 	}
1299 
1300 	static const struct rte_event_queue_conf conf_lb_atomic = {
1301 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1302 		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1303 		.nb_atomic_flows = 1024,
1304 		.nb_atomic_order_sequences = 1024,
1305 	};
1306 	if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1307 		printf("%d: error creating qid\n", __LINE__);
1308 		goto fail;
1309 	}
1310 
1311 	static const struct rte_event_queue_conf conf_single_link = {
1312 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1313 		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1314 	};
1315 	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1316 		printf("%d: error creating qid\n", __LINE__);
1317 		goto fail;
1318 	}
1319 
1320 	struct rte_event_port_conf port_conf = {
1321 		.new_event_threshold = 128,
1322 		.dequeue_depth = 32,
1323 		.enqueue_depth = 64,
1324 	};
1325 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1326 		printf("%d Error setting up port\n", __LINE__);
1327 		goto fail;
1328 	}
1329 	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1330 		printf("%d Error setting up port\n", __LINE__);
1331 		goto fail;
1332 	}
1333 
1334 	/* link port to lb queue */
1335 	uint8_t queue_id = 0;
1336 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1337 		printf("%d: error creating link for qid\n", __LINE__);
1338 		goto fail;
1339 	}
1340 
1341 	int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1342 	if (ret != 1) {
1343 		printf("%d: Error unlinking lb port\n", __LINE__);
1344 		goto fail;
1345 	}
1346 
1347 	queue_id = 1;
1348 	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1349 		printf("%d: error creating link for qid\n", __LINE__);
1350 		goto fail;
1351 	}
1352 
1353 	queue_id = 0;
1354 	int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1355 	if (err != 1) {
1356 		printf("%d: error mapping lb qid\n", __LINE__);
1357 		goto fail;
1358 	}
1359 
1360 	if (rte_event_dev_start(evdev) < 0) {
1361 		printf("%d: Error with start call\n", __LINE__);
1362 		goto fail;
1363 	}
1364 
1365 	cleanup(t);
1366 	return 0;
1367 fail:
1368 	cleanup(t);
1369 	return -1;
1370 }
1371 
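/*
 * Brute-force the xstats APIs across every mode and id value; success here
 * simply means no crash and no memory corruption.
 */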
1372 static int
1373 xstats_brute_force(struct test *t)
1374 {
1375 	uint32_t i;
1376 	const uint32_t XSTATS_MAX = 1024;
1377 	uint64_t ids[XSTATS_MAX];
1378 	uint64_t values[XSTATS_MAX];
1379 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1380 
1381 
1382 	/* Create instance with 4 ports */
1383 	if (init(t, 1, 4) < 0 ||
1384 			create_ports(t, 4) < 0 ||
1385 			create_atomic_qids(t, 1) < 0) {
1386 		printf("%d: Error initializing device\n", __LINE__);
1387 		return -1;
1388 	}
1389 
1390 	int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1391 	if (err != 1) {
1392 		printf("%d: error mapping lb qid\n", __LINE__);
1393 		goto fail;
1394 	}
1395 
1396 	if (rte_event_dev_start(evdev) < 0) {
1397 		printf("%d: Error with start call\n", __LINE__);
1398 		goto fail;
1399 	}
1400 
1401 	for (i = 0; i < XSTATS_MAX; i++)
1402 		ids[i] = i;
1403 
1404 	for (i = 0; i < 3; i++) {
1405 		uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1406 		uint32_t j;
1407 		for (j = 0; j < UINT8_MAX; j++) {
1408 			rte_event_dev_xstats_names_get(evdev, mode,
1409 				j, xstats_names, ids, XSTATS_MAX);
1410 
1411 			rte_event_dev_xstats_get(evdev, mode, j, ids,
1412 						 values, XSTATS_MAX);
1413 		}
1414 	}
1415 
1416 	cleanup(t);
1417 	return 0;
1418 fail:
1419 	cleanup(t);
1420 	return -1;
1421 }
1422 
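/*
 * Per-id xstats reset: look every stat up by name, verify its id and
 * value, reset it individually, then confirm resettable counters read
 * zero while gauges (inflight, ring occupancy) are untouched.
 */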
1423 static int
1424 xstats_id_reset_tests(struct test *t)
1425 {
1426 	const int wrk_enq = 2;
1427 	int err;
1428 
1429 	/* Create instance with 4 ports */
1430 	if (init(t, 1, 4) < 0 ||
1431 			create_ports(t, 4) < 0 ||
1432 			create_atomic_qids(t, 1) < 0) {
1433 		printf("%d: Error initializing device\n", __LINE__);
1434 		return -1;
1435 	}
1436 
1437 	/* CQ mapping to QID */
1438 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1439 	if (err != 1) {
1440 		printf("%d: error mapping lb qid\n", __LINE__);
1441 		goto fail;
1442 	}
1443 
1444 	if (rte_event_dev_start(evdev) < 0) {
1445 		printf("%d: Error with start call\n", __LINE__);
1446 		goto fail;
1447 	}
1448 
1449 #define XSTATS_MAX 1024
1450 	int ret;
1451 	uint32_t i;
1452 	uint64_t ids[XSTATS_MAX];
1453 	uint64_t values[XSTATS_MAX];
1454 	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1455 
1456 	for (i = 0; i < XSTATS_MAX; i++)
1457 		ids[i] = i;
1458 
1459 #define NUM_DEV_STATS 8
1460 	/* Device names / values */
1461 	int num_stats = rte_event_dev_xstats_names_get(evdev,
1462 					RTE_EVENT_DEV_XSTATS_DEVICE,
1463 					0, xstats_names, ids, XSTATS_MAX);
1464 	if (num_stats != NUM_DEV_STATS) {
1465 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1466 				NUM_DEV_STATS, num_stats);
1467 		goto fail;
1468 	}
1469 	ret = rte_event_dev_xstats_get(evdev,
1470 					RTE_EVENT_DEV_XSTATS_DEVICE,
1471 					0, ids, values, num_stats);
1472 	if (ret != NUM_DEV_STATS) {
1473 		printf("%d: expected %d stats, got return %d\n", __LINE__,
1474 				NUM_DEV_STATS, ret);
1475 		goto fail;
1476 	}
1477 
1478 #define NPKTS 7
1479 	for (i = 0; i < NPKTS; i++) {
1480 		struct rte_event ev;
1481 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1482 		if (!arp) {
1483 			printf("%d: gen of pkt failed\n", __LINE__);
1484 			goto fail;
1485 		}
1486 		ev.queue_id = t->qid[i];
1487 		ev.flow_id = 0;
1488 		ev.op = RTE_EVENT_OP_NEW;
1489 		ev.mbuf = arp;
1490 		*rte_event_pmd_selftest_seqn(arp) = i;
1491 
1492 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1493 		if (err != 1) {
1494 			printf("%d: Failed to enqueue\n", __LINE__);
1495 			goto fail;
1496 		}
1497 	}
1498 
1499 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1500 
1501 	static const char * const dev_names[] = {
1502 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1503 		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1504 		"dev_sched_last_iter_bitmask",
1505 		"dev_sched_progress_last_iter"
1506 	};
1507 	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0, 4, 1};
1508 	for (i = 0; (int)i < ret; i++) {
1509 		uint64_t id;
1510 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1511 								dev_names[i],
1512 								&id);
1513 		if (id != i) {
1514 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1515 			       "\n",
1516 			       __LINE__, dev_names[i], i, id);
1517 			goto fail;
1518 		}
1519 		if (val != dev_expected[i]) {
1520 			printf("%d: %s value incorrect, expected %"
1521 				PRIu64" got %"PRIu64"\n", __LINE__,
1522 				dev_names[i], dev_expected[i], val);
1523 			goto fail;
1524 		}
1525 		/* reset to zero */
1526 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1527 						RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1528 						&id,
1529 						1);
1530 		if (reset_ret) {
1531 			printf("%d: failed to reset successfully\n", __LINE__);
1532 			goto fail;
1533 		}
1534 		dev_expected[i] = 0;
1535 		/* check value again */
1536 		val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1537 		if (val != dev_expected[i]) {
1538 			printf("%d: %s value incorrect, expected %"PRIu64
1539 				" got %"PRIu64"\n", __LINE__, dev_names[i],
1540 				dev_expected[i], val);
1541 			goto fail;
1542 		}
1543 	}
1544 
1545 /* 50 is this port's stat offset from the start of the device's whole
1546  * xstats. This WILL break every time we add a statistic to a port
1547  * or the device, but there is no other way to test.
1548  */
1549 #define PORT_OFF 50
1550 /* num stats for the tested port. CQ size adds more stats to a port */
1551 #define NUM_PORT_STATS 21
1552 /* the port to test. */
1553 #define PORT 2
1554 	num_stats = rte_event_dev_xstats_names_get(evdev,
1555 					RTE_EVENT_DEV_XSTATS_PORT, PORT,
1556 					xstats_names, ids, XSTATS_MAX);
1557 	if (num_stats != NUM_PORT_STATS) {
1558 		printf("%d: expected %d stats, got return %d\n",
1559 			__LINE__, NUM_PORT_STATS, num_stats);
1560 		goto fail;
1561 	}
1562 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1563 					ids, values, num_stats);
1564 
1565 	if (ret != NUM_PORT_STATS) {
1566 		printf("%d: expected %d stats, got return %d\n",
1567 				__LINE__, NUM_PORT_STATS, ret);
1568 		goto fail;
1569 	}
1570 	static const char * const port_names[] = {
1571 		"port_2_rx",
1572 		"port_2_tx",
1573 		"port_2_drop",
1574 		"port_2_inflight",
1575 		"port_2_avg_pkt_cycles",
1576 		"port_2_credits",
1577 		"port_2_rx_ring_used",
1578 		"port_2_rx_ring_free",
1579 		"port_2_cq_ring_used",
1580 		"port_2_cq_ring_free",
1581 		"port_2_dequeue_calls",
1582 		"port_2_dequeues_returning_0",
1583 		"port_2_dequeues_returning_1-4",
1584 		"port_2_dequeues_returning_5-8",
1585 		"port_2_dequeues_returning_9-12",
1586 		"port_2_dequeues_returning_13-16",
1587 		"port_2_dequeues_returning_17-20",
1588 		"port_2_dequeues_returning_21-24",
1589 		"port_2_dequeues_returning_25-28",
1590 		"port_2_dequeues_returning_29-32",
1591 		"port_2_dequeues_returning_33-36",
1592 	};
1593 	uint64_t port_expected[] = {
1594 		0, /* rx */
1595 		NPKTS, /* tx */
1596 		0, /* drop */
1597 		NPKTS, /* inflight */
1598 		0, /* avg pkt cycles */
1599 		0, /* credits */
1600 		0, /* rx ring used */
1601 		4096, /* rx ring free */
1602 		NPKTS,  /* cq ring used */
1603 		25, /* cq ring free */
1604 		0, /* dequeue zero calls */
1605 		0, 0, 0, 0, 0, /* 10 dequeue buckets */
1606 		0, 0, 0, 0, 0,
1607 	};
1608 	uint64_t port_expected_zero[] = {
1609 		0, /* rx */
1610 		0, /* tx */
1611 		0, /* drop */
1612 		NPKTS, /* inflight */
1613 		0, /* avg pkt cycles */
1614 		0, /* credits */
1615 		0, /* rx ring used */
1616 		4096, /* rx ring free */
1617 		NPKTS,  /* cq ring used */
1618 		25, /* cq ring free */
1619 		0, /* dequeue zero calls */
1620 		0, 0, 0, 0, 0, /* 10 dequeue buckets */
1621 		0, 0, 0, 0, 0,
1622 	};
1623 	if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1624 			RTE_DIM(port_names) != NUM_PORT_STATS) {
1625 		printf("%d: port array of wrong size\n", __LINE__);
1626 		goto fail;
1627 	}
1628 
1629 	int failed = 0;
1630 	for (i = 0; (int)i < ret; i++) {
1631 		uint64_t id;
1632 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1633 								port_names[i],
1634 								&id);
1635 		if (id != i + PORT_OFF) {
1636 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1637 			       "\n",
1638 			       __LINE__, port_names[i], i + PORT_OFF, id);
1639 			failed = 1;
1640 		}
1641 		if (val != port_expected[i]) {
1642 			printf("%d: %s value incorrect, expected %" PRIu64
1643 			       " got %" PRIu64 "\n",
1644 			       __LINE__, port_names[i], port_expected[i], val);
1645 			failed = 1;
1646 		}
1647 		/* reset to zero */
1648 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1649 						RTE_EVENT_DEV_XSTATS_PORT, PORT,
1650 						&id,
1651 						1);
1652 		if (reset_ret) {
1653 			printf("%d: failed to reset successfully\n", __LINE__);
1654 			failed = 1;
1655 		}
1656 		/* check value again */
1657 		val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1658 		if (val != port_expected_zero[i]) {
1659 			printf("%d: %s value incorrect, expected %"PRIu64
1660 				" got %"PRIu64"\n", __LINE__, port_names[i],
1661 				port_expected_zero[i], val);
1662 			failed = 1;
1663 		}
1664 	}
1665 	if (failed)
1666 		goto fail;
1667 
1668 /* num queue stats */
1669 #define NUM_Q_STATS 16
1670 /* queue stat offset from the start of the device's whole xstats.
1671  * This will break every time we add a statistic to a device/port/queue.
1672  */
1673 #define QUEUE_OFF 92
1674 	const uint32_t queue = 0;
1675 	num_stats = rte_event_dev_xstats_names_get(evdev,
1676 					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1677 					xstats_names, ids, XSTATS_MAX);
1678 	if (num_stats != NUM_Q_STATS) {
1679 		printf("%d: expected %d stats, got return %d\n",
1680 			__LINE__, NUM_Q_STATS, num_stats);
1681 		goto fail;
1682 	}
1683 	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1684 					queue, ids, values, num_stats);
1685 	if (ret != NUM_Q_STATS) {
1686 		printf("%d: expected %d stats, got return %d\n", __LINE__, NUM_Q_STATS, ret);
1687 		goto fail;
1688 	}
1689 	static const char * const queue_names[] = {
1690 		"qid_0_rx",
1691 		"qid_0_tx",
1692 		"qid_0_drop",
1693 		"qid_0_inflight",
1694 		"qid_0_iq_0_used",
1695 		"qid_0_iq_1_used",
1696 		"qid_0_iq_2_used",
1697 		"qid_0_iq_3_used",
1698 		"qid_0_port_0_pinned_flows",
1699 		"qid_0_port_0_packets",
1700 		"qid_0_port_1_pinned_flows",
1701 		"qid_0_port_1_packets",
1702 		"qid_0_port_2_pinned_flows",
1703 		"qid_0_port_2_packets",
1704 		"qid_0_port_3_pinned_flows",
1705 		"qid_0_port_3_packets",
1706 	};
1707 	uint64_t queue_expected[] = {
1708 		7, /* rx */
1709 		7, /* tx */
1710 		0, /* drop */
1711 		7, /* inflight */
1712 		0, /* iq 0 used */
1713 		0, /* iq 1 used */
1714 		0, /* iq 2 used */
1715 		0, /* iq 3 used */
1716 		/* QID-to-Port: pinned_flows, packets */
1717 		0, 0,
1718 		0, 0,
1719 		1, 7,
1720 		0, 0,
1721 	};
1722 	uint64_t queue_expected_zero[] = {
1723 		0, /* rx */
1724 		0, /* tx */
1725 		0, /* drop */
1726 		7, /* inflight */
1727 		0, /* iq 0 used */
1728 		0, /* iq 1 used */
1729 		0, /* iq 2 used */
1730 		0, /* iq 3 used */
1731 		/* QID-to-Port: pinned_flows, packets */
1732 		0, 0,
1733 		0, 0,
1734 		1, 0,
1735 		0, 0,
1736 	};
1737 	if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1738 			RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1739 			RTE_DIM(queue_names) != NUM_Q_STATS) {
1740 		printf("%d : queue array of wrong size\n", __LINE__);
1741 		goto fail;
1742 	}
1743 
1744 	failed = 0;
1745 	for (i = 0; (int)i < ret; i++) {
1746 		uint64_t id;
1747 		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1748 								queue_names[i],
1749 								&id);
1750 		if (id != i + QUEUE_OFF) {
1751 			printf("%d: %s id incorrect, expected %d got %" PRIu64
1752 			       "\n",
1753 			       __LINE__, queue_names[i], i + QUEUE_OFF, id);
1754 			failed = 1;
1755 		}
1756 		if (val != queue_expected[i]) {
1757 			printf("%d: %s value incorrect, expected %"PRIu64
1758 				" got %"PRIu64"\n", __LINE__,
1759 				queue_names[i], queue_expected[i], val);
1760 			failed = 1;
1761 		}
1762 		/* reset to zero */
1763 		int reset_ret = rte_event_dev_xstats_reset(evdev,
1764 						RTE_EVENT_DEV_XSTATS_QUEUE,
1765 						queue, &id, 1);
1766 		if (reset_ret) {
1767 			printf("%d: failed to reset successfully\n", __LINE__);
1768 			failed = 1;
1769 		}
1770 		/* check value again */
1771 		val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1772 							0);
1773 		if (val != queue_expected_zero[i]) {
1774 			printf("%d: %s value incorrect, expected %"PRIu64
1775 				" got %"PRIu64"\n", __LINE__, queue_names[i],
1776 				queue_expected_zero[i], val);
1777 			failed = 1;
1778 		}
1779 	}
1780 
1781 	if (failed)
1782 		goto fail;
1783 
1784 	cleanup(t);
1785 	return 0;
1786 fail:
1787 	cleanup(t);
1788 	return -1;
1789 }
1790 
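/* Setting up the same ordered queue twice before start must succeed. */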
1791 static int
1792 ordered_reconfigure(struct test *t)
1793 {
1794 	if (init(t, 1, 1) < 0 ||
1795 			create_ports(t, 1) < 0) {
1796 		printf("%d: Error initializing device\n", __LINE__);
1797 		return -1;
1798 	}
1799 
1800 	const struct rte_event_queue_conf conf = {
1801 			.schedule_type = RTE_SCHED_TYPE_ORDERED,
1802 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1803 			.nb_atomic_flows = 1024,
1804 			.nb_atomic_order_sequences = 1024,
1805 	};
1806 
1807 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1808 		printf("%d: error creating qid\n", __LINE__);
1809 		goto failed;
1810 	}
1811 
1812 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1813 		printf("%d: error creating qid, for 2nd time\n", __LINE__);
1814 		goto failed;
1815 	}
1816 
1817 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1818 	if (rte_event_dev_start(evdev) < 0) {
1819 		printf("%d: Error with start call\n", __LINE__);
1820 		return -1;
1821 	}
1822 
1823 	cleanup(t);
1824 	return 0;
1825 failed:
1826 	cleanup(t);
1827 	return -1;
1828 }
1829 
1830 static int
1831 qid_priorities(struct test *t)
1832 {
1833 	/* Test works by having a CQ with enough empty space for all packets,
1834 	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1835 	 * priority of the QID, not the ingress order, to pass the test
1836 	 */
1837 	unsigned int i;
1838 	/* Create instance with 1 ports, and 3 qids */
1839 	if (init(t, 3, 1) < 0 ||
1840 			create_ports(t, 1) < 0) {
1841 		printf("%d: Error initializing device\n", __LINE__);
1842 		return -1;
1843 	}
1844 
1845 	for (i = 0; i < 3; i++) {
1846 		/* Create QID */
1847 		const struct rte_event_queue_conf conf = {
1848 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1849 			/* increase priority (0 == highest), as we go */
1850 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1851 			.nb_atomic_flows = 1024,
1852 			.nb_atomic_order_sequences = 1024,
1853 		};
1854 
1855 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1856 			printf("%d: error creating qid %d\n", __LINE__, i);
1857 			return -1;
1858 		}
1859 		t->qid[i] = i;
1860 	}
1861 	t->nb_qids = i;
1862 	/* map all QIDs to port */
1863 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1864 
1865 	if (rte_event_dev_start(evdev) < 0) {
1866 		printf("%d: Error with start call\n", __LINE__);
1867 		return -1;
1868 	}
1869 
1870 	/* enqueue 3 packets, setting seqn and QID to check priority */
1871 	for (i = 0; i < 3; i++) {
1872 		struct rte_event ev;
1873 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1874 		if (!arp) {
1875 			printf("%d: gen of pkt failed\n", __LINE__);
1876 			return -1;
1877 		}
1878 		ev.queue_id = t->qid[i];
1879 		ev.op = RTE_EVENT_OP_NEW;
1880 		ev.mbuf = arp;
1881 		*rte_event_pmd_selftest_seqn(arp) = i;
1882 
1883 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1884 		if (err != 1) {
1885 			printf("%d: Failed to enqueue\n", __LINE__);
1886 			return -1;
1887 		}
1888 	}
1889 
1890 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1891 
1892 	/* dequeue packets, verify priority was upheld */
1893 	struct rte_event ev[32];
1894 	uint32_t deq_pkts =
1895 		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1896 	if (deq_pkts != 3) {
1897 		printf("%d: failed to deq packets\n", __LINE__);
1898 		rte_event_dev_dump(evdev, stdout);
1899 		return -1;
1900 	}
1901 	for (i = 0; i < 3; i++) {
1902 		if (*rte_event_pmd_selftest_seqn(ev[i].mbuf) != 2 - i) {
1903 			printf(
1904 				"%d: qid priority test: seqn %d incorrectly prioritized\n",
1905 					__LINE__, i);
1906 		}
1907 	}
1908 
1909 	cleanup(t);
1910 	return 0;
1911 }
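/*
 * Sketch added for exposition, not part of the test: how the priority
 * values above map to dequeue order. Lower numeric values schedule
 * first (RTE_EVENT_DEV_PRIORITY_HIGHEST == 0), so qid 2, configured
 * with priority NORMAL - 2, drains before qids 1 and 0. The name
 * nb_qids is a placeholder here.
 *
 *	struct rte_event_queue_conf qconf = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.nb_atomic_flows = 1024,
 *		.nb_atomic_order_sequences = 1024,
 *	};
 *	for (uint8_t q = 0; q < nb_qids; q++) {
 *		qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - q;
 *		if (rte_event_queue_setup(evdev, q, &qconf) < 0)
 *			return -1;
 *	}
 */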
1912 
1913 static int
1914 unlink_in_progress(struct test *t)
1915 {
1916 	/* Test the unlink API; in particular, that while an unlink request
1917 	 * has not yet been seen by the scheduler thread,
1918 	 * rte_event_port_unlinks_in_progress() returns the outstanding count.
1919 	 */
1920 	unsigned int i;
1921 	/* Create instance with 1 port and 3 qids */
1922 	if (init(t, 3, 1) < 0 ||
1923 			create_ports(t, 1) < 0) {
1924 		printf("%d: Error initializing device\n", __LINE__);
1925 		return -1;
1926 	}
1927 
1928 	for (i = 0; i < 3; i++) {
1929 		/* Create QID */
1930 		const struct rte_event_queue_conf conf = {
1931 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1932 			/* increase priority (0 == highest), as we go */
1933 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1934 			.nb_atomic_flows = 1024,
1935 			.nb_atomic_order_sequences = 1024,
1936 		};
1937 
1938 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1939 			printf("%d: error creating qid %d\n", __LINE__, i);
1940 			return -1;
1941 		}
1942 		t->qid[i] = i;
1943 	}
1944 	t->nb_qids = i;
1945 	/* map all QIDs to port */
1946 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1947 
1948 	if (rte_event_dev_start(evdev) < 0) {
1949 		printf("%d: Error with start call\n", __LINE__);
1950 		return -1;
1951 	}
1952 
1953 	/* unlink all queues from the port, to create outstanding unlink requests */
1954 	int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1955 	if (ret < 0) {
1956 		printf("%d: Failed to unlink queues\n", __LINE__);
1957 		return -1;
1958 	}
1959 
1960 	/* get active unlinks here, expect 3 */
1961 	int unlinks_in_progress =
1962 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1963 	if (unlinks_in_progress != 3) {
1964 		printf("%d: Expected num unlinks in progress == 3, got %d\n",
1965 				__LINE__, unlinks_in_progress);
1966 		return -1;
1967 	}
1968 
1969 	/* run scheduler service on this thread to ack the unlinks */
1970 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1971 
1972 	/* expect 0 active unlinks, as the scheduler thread has acked them */
1973 	unlinks_in_progress =
1974 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1975 	if (unlinks_in_progress != 0) {
1976 		printf("%d: Expected num unlinks in progress == 0, got %d\n",
1977 				__LINE__, unlinks_in_progress);
1978 	}
1979 
1980 	cleanup(t);
1981 	return 0;
1982 }
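/*
 * Sketch added for exposition (port and service_id are placeholder
 * names): the quiescence pattern an application would use before
 * reconfiguring a port - keep running the scheduler service until
 * every outstanding unlink has been acknowledged.
 *
 *	if (rte_event_port_unlink(evdev, port, NULL, 0) < 0)
 *		return -1;
 *	while (rte_event_port_unlinks_in_progress(evdev, port) > 0)
 *		rte_service_run_iter_on_app_lcore(service_id, 1);
 */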
1983 
1984 static int
1985 load_balancing(struct test *t)
1986 {
1987 	const int rx_enq = 0;
1988 	int err;
1989 	uint32_t i;
1990 
1991 	if (init(t, 1, 4) < 0 ||
1992 			create_ports(t, 4) < 0 ||
1993 			create_atomic_qids(t, 1) < 0) {
1994 		printf("%d: Error initializing device\n", __LINE__);
1995 		return -1;
1996 	}
1997 
1998 	for (i = 0; i < 3; i++) {
1999 		/* map port 1 - 3 inclusive */
2000 		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
2001 				NULL, 1) != 1) {
2002 			printf("%d: error mapping qid to port %d\n",
2003 					__LINE__, i);
2004 			return -1;
2005 		}
2006 	}
2007 
2008 	if (rte_event_dev_start(evdev) < 0) {
2009 		printf("%d: Error with start call\n", __LINE__);
2010 		return -1;
2011 	}
2012 
2013 	/************** FORWARD ****************/
2014 	/*
2015 	 * Create a set of flows that test the load-balancing operation of the
2016 	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2017 	 * with a new flow, which should be sent to the 3rd mapped CQ
2018 	 */
2019 	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
2020 
2021 	for (i = 0; i < RTE_DIM(flows); i++) {
2022 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2023 		if (!arp) {
2024 			printf("%d: gen of pkt failed\n", __LINE__);
2025 			return -1;
2026 		}
2027 
2028 		struct rte_event ev = {
2029 				.op = RTE_EVENT_OP_NEW,
2030 				.queue_id = t->qid[0],
2031 				.flow_id = flows[i],
2032 				.mbuf = arp,
2033 		};
2034 		/* generate pkt and enqueue */
2035 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2036 		if (err != 1) {
2037 			printf("%d: Failed to enqueue\n", __LINE__);
2038 			return -1;
2039 		}
2040 	}
2041 
2042 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2043 
2044 	struct test_event_dev_stats stats;
2045 	err = test_event_dev_stats_get(evdev, &stats);
2046 	if (err) {
2047 		printf("%d: failed to get stats\n", __LINE__);
2048 		return -1;
2049 	}
2050 
2051 	if (stats.port_inflight[1] != 4) {
2052 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2053 				__func__);
2054 		return -1;
2055 	}
2056 	if (stats.port_inflight[2] != 2) {
2057 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2058 				__func__);
2059 		return -1;
2060 	}
2061 	if (stats.port_inflight[3] != 3) {
2062 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2063 				__func__);
2064 		return -1;
2065 	}
2066 
2067 	cleanup(t);
2068 	return 0;
2069 }
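/*
 * Note added for exposition: the expected inflight counts above follow
 * from the flows[] array. Flow 0 appears 4 times, flow 1 twice and
 * flow 2 three times; with each atomic flow pinned to the first CQ it
 * lands on (ports 1, 2 and 3 respectively), the per-port checks of
 * 4, 2 and 3 inflights follow directly.
 */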
2070 
2071 static int
2072 load_balancing_history(struct test *t)
2073 {
2074 	struct test_event_dev_stats stats = {0};
2075 	const int rx_enq = 0;
2076 	int err;
2077 	uint32_t i;
2078 
2079 	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2080 	if (init(t, 1, 4) < 0 ||
2081 			create_ports(t, 4) < 0 ||
2082 			create_atomic_qids(t, 1) < 0)
2083 		return -1;
2084 
2085 	/* CQ mapping to QID */
2086 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2087 		printf("%d: error mapping port 1 qid\n", __LINE__);
2088 		return -1;
2089 	}
2090 	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2091 		printf("%d: error mapping port 2 qid\n", __LINE__);
2092 		return -1;
2093 	}
2094 	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2095 		printf("%d: error mapping port 3 qid\n", __LINE__);
2096 		return -1;
2097 	}
2098 	if (rte_event_dev_start(evdev) < 0) {
2099 		printf("%d: Error with start call\n", __LINE__);
2100 		return -1;
2101 	}
2102 
2103 	/*
2104 	 * Create a set of flows that test the load-balancing operation of the
2105 	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2106 	 * the packet from CQ 0, send in a new set of flows. Ensure that:
2107 	 *  1. The new flow 3 gets into the empty CQ0
2108 	 *  2. Packets for the existing flow 1 get added into CQ1
2109 	 *  3. The next flow 0 pkt now goes to CQ2, since CQ0 and CQ1 contain
2110 	 *     more outstanding pkts
2111 	 *
2112 	 *  This test makes sure that when a flow ends (i.e. all packets
2113 	 *  have been completed for that flow), the flow can be moved
2114 	 *  to a different CQ when new packets come in for it.
2115 	 */
2116 	static uint32_t flows1[] = {0, 1, 1, 2};
2117 
2118 	for (i = 0; i < RTE_DIM(flows1); i++) {
2119 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2120 		struct rte_event ev = {
2121 				.flow_id = flows1[i],
2122 				.op = RTE_EVENT_OP_NEW,
2123 				.queue_id = t->qid[0],
2124 				.event_type = RTE_EVENT_TYPE_CPU,
2125 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2126 				.mbuf = arp
2127 		};
2128 
2129 		if (!arp) {
2130 			printf("%d: gen of pkt failed\n", __LINE__);
2131 			return -1;
2132 		}
2133 		arp->hash.rss = flows1[i];
2134 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2135 		if (err != 1) {
2136 			printf("%d: Failed to enqueue\n", __LINE__);
2137 			return -1;
2138 		}
2139 	}
2140 
2141 	/* call the scheduler */
2142 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2143 
2144 	/* Dequeue the flow 0 packet from port 1, so that we can then drop it */
2145 	struct rte_event ev;
2146 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2147 		printf("%d: failed to dequeue\n", __LINE__);
2148 		return -1;
2149 	}
2150 	if (ev.mbuf->hash.rss != flows1[0]) {
2151 		printf("%d: unexpected flow received\n", __LINE__);
2152 		return -1;
2153 	}
2154 
2155 	/* drop the flow 0 packet from port 1 */
2156 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2157 
2158 	/* call the scheduler */
2159 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2160 
2161 	/*
2162 	 * Set up the next set of flows, first a new flow to fill up
2163 	 * CQ 0, so that the next flow 0 packet should go to CQ2
2164 	 */
2165 	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2166 
2167 	for (i = 0; i < RTE_DIM(flows2); i++) {
2168 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2169 		struct rte_event ev = {
2170 				.flow_id = flows2[i],
2171 				.op = RTE_EVENT_OP_NEW,
2172 				.queue_id = t->qid[0],
2173 				.event_type = RTE_EVENT_TYPE_CPU,
2174 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2175 				.mbuf = arp
2176 		};
2177 
2178 		if (!arp) {
2179 			printf("%d: gen of pkt failed\n", __LINE__);
2180 			return -1;
2181 		}
2182 		arp->hash.rss = flows2[i];
2183 
2184 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2185 		if (err != 1) {
2186 			printf("%d: Failed to enqueue\n", __LINE__);
2187 			return -1;
2188 		}
2189 	}
2190 
2191 	/* schedule */
2192 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2193 
2194 	err = test_event_dev_stats_get(evdev, &stats);
2195 	if (err) {
2196 		printf("%d:failed to get stats\n", __LINE__);
2197 		return -1;
2198 	}
2199 
2200 	/*
2201 	 * Now check the resulting inflights on each port.
2202 	 */
2203 	if (stats.port_inflight[1] != 3) {
2204 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2205 				__func__);
2206 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2207 				(unsigned int)stats.port_inflight[1],
2208 				(unsigned int)stats.port_inflight[2],
2209 				(unsigned int)stats.port_inflight[3]);
2210 		return -1;
2211 	}
2212 	if (stats.port_inflight[2] != 4) {
2213 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2214 				__func__);
2215 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2216 				(unsigned int)stats.port_inflight[1],
2217 				(unsigned int)stats.port_inflight[2],
2218 				(unsigned int)stats.port_inflight[3]);
2219 		return -1;
2220 	}
2221 	if (stats.port_inflight[3] != 2) {
2222 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2223 				__func__);
2224 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2225 				(unsigned int)stats.port_inflight[1],
2226 				(unsigned int)stats.port_inflight[2],
2227 				(unsigned int)stats.port_inflight[3]);
2228 		return -1;
2229 	}
2230 
2231 	for (i = 1; i <= 3; i++) {
2232 		struct rte_event ev;
2233 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2234 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2235 	}
2236 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2237 
2238 	cleanup(t);
2239 	return 0;
2240 }
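/*
 * Note added for exposition, working through the checks above: after
 * flows1 = {0, 1, 1, 2}, ports 1/2/3 hold 1/2/1 events. Dropping the
 * flow 0 event empties port 1. flows2 = {3, 3, 3, 1, 1, 0} then sends
 * new flow 3 to the empty port 1 (3 events), flow 1 back to port 2
 * (2 + 2 = 4), and the ended flow 0 to the least-loaded port 3
 * (1 + 1 = 2), giving the expected 3/4/2 inflights.
 */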
2241 
2242 static int
2243 invalid_qid(struct test *t)
2244 {
2245 	struct test_event_dev_stats stats;
2246 	const int rx_enq = 0;
2247 	int err;
2248 	uint32_t i;
2249 
2250 	if (init(t, 1, 4) < 0 ||
2251 			create_ports(t, 4) < 0 ||
2252 			create_atomic_qids(t, 1) < 0) {
2253 		printf("%d: Error initializing device\n", __LINE__);
2254 		return -1;
2255 	}
2256 
2257 	/* CQ mapping to QID */
2258 	for (i = 0; i < 4; i++) {
2259 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2260 				NULL, 1);
2261 		if (err != 1) {
2262 			printf("%d: error mapping port 1 qid\n", __LINE__);
2263 			return -1;
2264 		}
2265 	}
2266 
2267 	if (rte_event_dev_start(evdev) < 0) {
2268 		printf("%d: Error with start call\n", __LINE__);
2269 		return -1;
2270 	}
2271 
2272 	/*
2273 	 * Send in a packet with an invalid qid to the scheduler.
2274 	 * We should see the packet enqueued OK, but the inflights for
2275 	 * that packet should not be incremented, and the rx_dropped
2276 	 * should be incremented.
2277 	 */
2278 	static uint32_t flows1[] = {20};
2279 
2280 	for (i = 0; i < RTE_DIM(flows1); i++) {
2281 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2282 		if (!arp) {
2283 			printf("%d: gen of pkt failed\n", __LINE__);
2284 			return -1;
2285 		}
2286 
2287 		struct rte_event ev = {
2288 				.op = RTE_EVENT_OP_NEW,
2289 				.queue_id = t->qid[0] + flows1[i],
2290 				.flow_id = i,
2291 				.mbuf = arp,
2292 		};
2293 		/* generate pkt and enqueue */
2294 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2295 		if (err != 1) {
2296 			printf("%d: Failed to enqueue\n", __LINE__);
2297 			return -1;
2298 		}
2299 	}
2300 
2301 	/* call the scheduler */
2302 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2303 
2304 	err = test_event_dev_stats_get(evdev, &stats);
2305 	if (err) {
2306 		printf("%d: failed to get stats\n", __LINE__);
2307 		return -1;
2308 	}
2309 
2310 	/*
2311 	 * Now check the resulting inflights on the port, and the rx_dropped.
2312 	 */
2313 	if (stats.port_inflight[0] != 0) {
2314 		printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2315 				__func__);
2316 		rte_event_dev_dump(evdev, stdout);
2317 		return -1;
2318 	}
2319 	if (stats.port_rx_dropped[0] != 1) {
2320 		printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2321 		rte_event_dev_dump(evdev, stdout);
2322 		return -1;
2323 	}
2324 	/* each packet drop should only be counted in one place - port or dev */
2325 	if (stats.rx_dropped != 0) {
2326 		printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2327 				__func__);
2328 		rte_event_dev_dump(evdev, stdout);
2329 		return -1;
2330 	}
2331 
2332 	cleanup(t);
2333 	return 0;
2334 }
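/*
 * Sketch added for exposition of the invariant checked above: an event
 * carrying an out-of-range queue_id is accepted by enqueue but counted
 * exactly once, as a port-level rx drop rather than a device drop.
 *
 *	struct test_event_dev_stats s;
 *	if (test_event_dev_stats_get(evdev, &s) == 0 &&
 *			s.port_rx_dropped[0] + s.rx_dropped != 1)
 *		printf("drop counted zero or multiple times\n");
 */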
2335 
2336 static int
2337 single_packet(struct test *t)
2338 {
2339 	const uint32_t MAGIC_SEQN = 7321;
2340 	struct rte_event ev;
2341 	struct test_event_dev_stats stats;
2342 	const int rx_enq = 0;
2343 	const int wrk_enq = 2;
2344 	int err;
2345 
2346 	/* Create instance with 4 ports */
2347 	if (init(t, 1, 4) < 0 ||
2348 			create_ports(t, 4) < 0 ||
2349 			create_atomic_qids(t, 1) < 0) {
2350 		printf("%d: Error initializing device\n", __LINE__);
2351 		return -1;
2352 	}
2353 
2354 	/* CQ mapping to QID */
2355 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2356 	if (err != 1) {
2357 		printf("%d: error mapping lb qid\n", __LINE__);
2358 		cleanup(t);
2359 		return -1;
2360 	}
2361 
2362 	if (rte_event_dev_start(evdev) < 0) {
2363 		printf("%d: Error with start call\n", __LINE__);
2364 		return -1;
2365 	}
2366 
2367 	/************** Gen pkt and enqueue ****************/
2368 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2369 	if (!arp) {
2370 		printf("%d: gen of pkt failed\n", __LINE__);
2371 		return -1;
2372 	}
2373 
2374 	ev.op = RTE_EVENT_OP_NEW;
2375 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2376 	ev.mbuf = arp;
2377 	ev.queue_id = 0;
2378 	ev.flow_id = 3;
2379 	*rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
2380 
2381 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2382 	if (err != 1) {
2383 		printf("%d: Failed to enqueue\n", __LINE__);
2384 		return -1;
2385 	}
2386 
2387 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2388 
2389 	err = test_event_dev_stats_get(evdev, &stats);
2390 	if (err) {
2391 		printf("%d: failed to get stats\n", __LINE__);
2392 		return -1;
2393 	}
2394 
2395 	if (stats.rx_pkts != 1 ||
2396 			stats.tx_pkts != 1 ||
2397 			stats.port_inflight[wrk_enq] != 1) {
2398 		printf("%d: Sched core didn't handle pkt as expected\n",
2399 				__LINE__);
2400 		rte_event_dev_dump(evdev, stdout);
2401 		return -1;
2402 	}
2403 
2404 	uint32_t deq_pkts;
2405 
2406 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2407 	if (deq_pkts < 1) {
2408 		printf("%d: Failed to deq\n", __LINE__);
2409 		return -1;
2410 	}
2411 
2412 	err = test_event_dev_stats_get(evdev, &stats);
2413 	if (err) {
2414 		printf("%d: failed to get stats\n", __LINE__);
2415 		return -1;
2416 	}
2417 
2419 	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
2420 		printf("%d: magic sequence number not dequeued\n", __LINE__);
2421 		return -1;
2422 	}
2423 
2424 	rte_pktmbuf_free(ev.mbuf);
2425 	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2426 	if (err != 1) {
2427 		printf("%d: Failed to enqueue\n", __LINE__);
2428 		return -1;
2429 	}
2430 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2431 
2432 	err = test_event_dev_stats_get(evdev, &stats);
2433 	if (stats.port_inflight[wrk_enq] != 0) {
2434 		printf("%d: port inflight not correct\n", __LINE__);
2435 		return -1;
2436 	}
2437 
2438 	cleanup(t);
2439 	return 0;
2440 }
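/*
 * Minimal sketch, added for exposition (port and service_id are
 * placeholders), of the atomic event lifecycle exercised above:
 * NEW enqueue -> schedule -> dequeue -> RELEASE. The port inflight
 * count only returns to zero once the scheduler has processed the
 * release.
 *
 *	struct rte_event ev;
 *	if (rte_event_dequeue_burst(evdev, port, &ev, 1, 0) == 1) {
 *		rte_pktmbuf_free(ev.mbuf);
 *		ev.op = RTE_EVENT_OP_RELEASE;
 *		rte_event_enqueue_burst(evdev, port, &ev, 1);
 *		rte_service_run_iter_on_app_lcore(service_id, 1);
 *	}
 */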
2441 
2442 static int
2443 inflight_counts(struct test *t)
2444 {
2445 	struct rte_event ev;
2446 	struct test_event_dev_stats stats;
2447 	const int rx_enq = 0;
2448 	const int p1 = 1;
2449 	const int p2 = 2;
2450 	int err;
2451 	int i;
2452 
2453 	/* Create instance with 4 ports */
2454 	if (init(t, 2, 3) < 0 ||
2455 			create_ports(t, 3) < 0 ||
2456 			create_atomic_qids(t, 2) < 0) {
2457 		printf("%d: Error initializing device\n", __LINE__);
2458 		return -1;
2459 	}
2460 
2461 	/* CQ mapping to QID */
2462 	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2463 	if (err != 1) {
2464 		printf("%d: error mapping lb qid\n", __LINE__);
2465 		cleanup(t);
2466 		return -1;
2467 	}
2468 	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2469 	if (err != 1) {
2470 		printf("%d: error mapping lb qid\n", __LINE__);
2471 		cleanup(t);
2472 		return -1;
2473 	}
2474 
2475 	if (rte_event_dev_start(evdev) < 0) {
2476 		printf("%d: Error with start call\n", __LINE__);
2477 		return -1;
2478 	}
2479 
2480 	/************** FORWARD ****************/
2481 #define QID1_NUM 5
2482 	for (i = 0; i < QID1_NUM; i++) {
2483 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2484 
2485 		if (!arp) {
2486 			printf("%d: gen of pkt failed\n", __LINE__);
2487 			goto err;
2488 		}
2489 
2490 		ev.queue_id =  t->qid[0];
2491 		ev.op = RTE_EVENT_OP_NEW;
2492 		ev.mbuf = arp;
2493 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2494 		if (err != 1) {
2495 			printf("%d: Failed to enqueue\n", __LINE__);
2496 			goto err;
2497 		}
2498 	}
2499 #define QID2_NUM 3
2500 	for (i = 0; i < QID2_NUM; i++) {
2501 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2502 
2503 		if (!arp) {
2504 			printf("%d: gen of pkt failed\n", __LINE__);
2505 			goto err;
2506 		}
2507 		ev.queue_id =  t->qid[1];
2508 		ev.op = RTE_EVENT_OP_NEW;
2509 		ev.mbuf = arp;
2510 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2511 		if (err != 1) {
2512 			printf("%d: Failed to enqueue\n", __LINE__);
2513 			goto err;
2514 		}
2515 	}
2516 
2517 	/* schedule */
2518 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2519 
2520 	err = test_event_dev_stats_get(evdev, &stats);
2521 	if (err) {
2522 		printf("%d: failed to get stats\n", __LINE__);
2523 		goto err;
2524 	}
2525 
2526 	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2527 			stats.tx_pkts != QID1_NUM + QID2_NUM) {
2528 		printf("%d: Sched core didn't handle pkt as expected\n",
2529 				__LINE__);
2530 		goto err;
2531 	}
2532 
2533 	if (stats.port_inflight[p1] != QID1_NUM) {
2534 		printf("%d: %s port 1 inflight not correct\n", __LINE__,
2535 				__func__);
2536 		goto err;
2537 	}
2538 	if (stats.port_inflight[p2] != QID2_NUM) {
2539 		printf("%d: %s port 2 inflight not correct\n", __LINE__,
2540 				__func__);
2541 		goto err;
2542 	}
2543 
2544 	/************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2545 	/* port 1 */
2546 	struct rte_event events[QID1_NUM + QID2_NUM];
2547 	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2548 			RTE_DIM(events), 0);
2549 
2550 	if (deq_pkts != QID1_NUM) {
2551 		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2552 		goto err;
2553 	}
2554 	err = test_event_dev_stats_get(evdev, &stats);
2555 	if (stats.port_inflight[p1] != QID1_NUM) {
2556 		printf("%d: port 1 inflight decrement after DEQ != 0\n",
2557 				__LINE__);
2558 		goto err;
2559 	}
2560 	for (i = 0; i < QID1_NUM; i++) {
2561 		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2562 				1);
2563 		if (err != 1) {
2564 			printf("%d: %s rte enqueue of inf release failed\n",
2565 				__LINE__, __func__);
2566 			goto err;
2567 		}
2568 	}
2569 
2570 	/*
2571 	 * The scheduler core is what decrements inflights, so it must run
2572 	 * here to act on the release messages just enqueued
2573 	 */
2574 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2575 
2576 	err = test_event_dev_stats_get(evdev, &stats);
2577 	if (stats.port_inflight[p1] != 0) {
2578 		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2579 		goto err;
2580 	}
2581 
2582 	/* port2 */
2583 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2584 			RTE_DIM(events), 0);
2585 	if (deq_pkts != QID2_NUM) {
2586 		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2587 		goto err;
2588 	}
2589 	err = test_event_dev_stats_get(evdev, &stats);
2590 	if (stats.port_inflight[p2] != QID2_NUM) {
2591 		printf("%d: port 1 inflight decrement after DEQ != 0\n",
2592 				__LINE__);
2593 		goto err;
2594 	}
2595 	for (i = 0; i < QID2_NUM; i++) {
2596 		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2597 				1);
2598 		if (err != 1) {
2599 			printf("%d: %s rte enqueue of inf release failed\n",
2600 				__LINE__, __func__);
2601 			goto err;
2602 		}
2603 	}
2604 
2605 	/*
2606 	 * The scheduler core is what decrements inflights, so it must run
2607 	 * here to act on the release messages just enqueued
2608 	 */
2609 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2610 
2611 	err = test_event_dev_stats_get(evdev, &stats);
2612 	if (stats.port_inflight[p2] != 0) {
2613 		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2614 		goto err;
2615 	}
2616 	cleanup(t);
2617 	return 0;
2618 
2619 err:
2620 	rte_event_dev_dump(evdev, stdout);
2621 	cleanup(t);
2622 	return -1;
2623 }
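/*
 * Note added for exposition: inflight counts are decremented by the
 * scheduler core acting on RELEASE events, not by the enqueue of the
 * release itself, which is why each batch of releases above is
 * followed by a scheduler iteration before stats are re-read
 * (port, n and service_id are placeholder names):
 *
 *	for (i = 0; i < n; i++)
 *		rte_event_enqueue_burst(evdev, port, &release_ev, 1);
 *	rte_service_run_iter_on_app_lcore(service_id, 1);
 */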
2624 
2625 static int
2626 parallel_basic(struct test *t, int check_order)
2627 {
2628 	const uint8_t rx_port = 0;
2629 	const uint8_t w1_port = 1;
2630 	const uint8_t w3_port = 3;
2631 	const uint8_t tx_port = 4;
2632 	int err;
2633 	int i;
2634 	uint32_t deq_pkts, j;
2635 	struct rte_mbuf *mbufs[3];
2637 	const uint32_t MAGIC_SEQN = 1234;
2638 
2639 	/* Create instance with 5 ports */
2640 	if (init(t, 2, tx_port + 1) < 0 ||
2641 			create_ports(t, tx_port + 1) < 0 ||
2642 			(check_order ?  create_ordered_qids(t, 1) :
2643 				create_unordered_qids(t, 1)) < 0 ||
2644 			create_directed_qids(t, 1, &tx_port)) {
2645 		printf("%d: Error initializing device\n", __LINE__);
2646 		return -1;
2647 	}
2648 
2649 	/*
2650 	 * CQ mapping to QID
2651 	 * We need three ports, all mapped to the same ordered qid0. Then we'll
2652 	 * take a packet out to each port, re-enqueue in reverse order,
2653 	 * then make sure the reordering has taken place properly when we
2654 	 * dequeue from the tx_port.
2655 	 *
2656 	 * Simplified test setup diagram:
2657 	 *
2658 	 * rx_port        w1_port
2659 	 *        \     /         \
2660 	 *         qid0 - w2_port - qid1
2661 	 *              \         /     \
2662 	 *                w3_port        tx_port
2663 	 */
2664 	/* CQ mapping to QID for LB ports (directed mapped on create) */
2665 	for (i = w1_port; i <= w3_port; i++) {
2666 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2667 				1);
2668 		if (err != 1) {
2669 			printf("%d: error mapping lb qid\n", __LINE__);
2670 			cleanup(t);
2671 			return -1;
2672 		}
2673 	}
2674 
2675 	if (rte_event_dev_start(evdev) < 0) {
2676 		printf("%d: Error with start call\n", __LINE__);
2677 		return -1;
2678 	}
2679 
2680 	/* Enqueue 3 packets to the rx port */
2681 	for (i = 0; i < 3; i++) {
2682 		struct rte_event ev;
2683 		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2684 		if (!mbufs[i]) {
2685 			printf("%d: gen of pkt failed\n", __LINE__);
2686 			return -1;
2687 		}
2688 
2689 		ev.queue_id = t->qid[0];
2690 		ev.op = RTE_EVENT_OP_NEW;
2691 		ev.mbuf = mbufs[i];
2692 		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
2693 
2694 		/* generate pkt and enqueue */
2695 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2696 		if (err != 1) {
2697 			printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2698 					__LINE__, i, err);
2699 			return -1;
2700 		}
2701 	}
2702 
2703 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2704 
2705 	/* use extra slot to make logic in loops easier */
2706 	struct rte_event deq_ev[w3_port + 1];
2707 
2708 	/* Dequeue the 3 packets, one from each worker port */
2709 	for (i = w1_port; i <= w3_port; i++) {
2710 		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2711 				&deq_ev[i], 1, 0);
2712 		if (deq_pkts != 1) {
2713 			printf("%d: Failed to deq\n", __LINE__);
2714 			rte_event_dev_dump(evdev, stdout);
2715 			return -1;
2716 		}
2717 	}
2718 
2719 	/* Enqueue each packet in reverse order, flushing after each one */
2720 	for (i = w3_port; i >= w1_port; i--) {
2721 
2722 		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2723 		deq_ev[i].queue_id = t->qid[1];
2724 		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2725 		if (err != 1) {
2726 			printf("%d: Failed to enqueue\n", __LINE__);
2727 			return -1;
2728 		}
2729 	}
2730 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2731 
2732 	/* dequeue from the tx ports, we should get 3 packets */
2733 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2734 			3, 0);
2735 
2736 	/* Check to see if we've got all 3 packets */
2737 	if (deq_pkts != 3) {
2738 		printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2739 			__LINE__, deq_pkts, tx_port);
2740 		rte_event_dev_dump(evdev, stdout);
2741 		return -1;
2742 	}
2743 
2744 	/* Check to see if the sequence numbers are in expected order */
2745 	if (check_order) {
2746 		for (j = 0 ; j < deq_pkts ; j++) {
2747 			if (*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf) !=
2748 					MAGIC_SEQN + j) {
2749 				printf("%d: Incorrect sequence number(%d) from port %d\n",
2750 					__LINE__,
2751 					*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf),
2752 					tx_port);
2753 				return -1;
2754 			}
2755 		}
2756 	}
2757 
2758 	/* Destroy the instance */
2759 	cleanup(t);
2760 	return 0;
2761 }
2762 
2763 static int
2764 ordered_basic(struct test *t)
2765 {
2766 	return parallel_basic(t, 1);
2767 }
2768 
2769 static int
2770 unordered_basic(struct test *t)
2771 {
2772 	return parallel_basic(t, 0);
2773 }
2774 
2775 static int
2776 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2777 {
2778 	const struct rte_event new_ev = {
2779 			.op = RTE_EVENT_OP_NEW
2780 			/* all other fields zero */
2781 	};
2782 	struct rte_event ev = new_ev;
2783 	unsigned int rx_port = 0; /* port we get the first flow on */
2784 	char rx_port_used_stat[64];
2785 	char rx_port_free_stat[64];
2786 	char other_port_used_stat[64];
2787 
2788 	if (init(t, 1, 2) < 0 ||
2789 			create_ports(t, 2) < 0 ||
2790 			create_atomic_qids(t, 1) < 0) {
2791 		printf("%d: Error initializing device\n", __LINE__);
2792 		return -1;
2793 	}
2794 	int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2795 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2796 			nb_links != 1) {
2797 		printf("%d: Error links queue to ports\n", __LINE__);
2798 		goto err;
2799 	}
2800 	if (rte_event_dev_start(evdev) < 0) {
2801 		printf("%d: Error with start call\n", __LINE__);
2802 		goto err;
2803 	}
2804 
2805 	/* send one packet and see where it goes, port 0 or 1 */
2806 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2807 		printf("%d: Error doing first enqueue\n", __LINE__);
2808 		goto err;
2809 	}
2810 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2811 
2812 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2813 			!= 1)
2814 		rx_port = 1;
2815 
2816 	snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2817 			"port_%u_cq_ring_used", rx_port);
2818 	snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2819 			"port_%u_cq_ring_free", rx_port);
2820 	snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2821 			"port_%u_cq_ring_used", rx_port ^ 1);
2822 	if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2823 			!= 1) {
2824 		printf("%d: Error, first event not scheduled\n", __LINE__);
2825 		goto err;
2826 	}
2827 
2828 	/* now fill up the rx port's queue with one flow to cause HOLB */
2829 	do {
2830 		ev = new_ev;
2831 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2832 			printf("%d: Error with enqueue\n", __LINE__);
2833 			goto err;
2834 		}
2835 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
2836 	} while (rte_event_dev_xstats_by_name_get(evdev,
2837 				rx_port_free_stat, NULL) != 0);
2838 
2839 	/* one more packet, which needs to stay in IQ - i.e. HOLB */
2840 	ev = new_ev;
2841 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2842 		printf("%d: Error with enqueue\n", __LINE__);
2843 		goto err;
2844 	}
2845 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2846 
2847 	/* check that the other port still has an empty CQ */
2848 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2849 			!= 0) {
2850 		printf("%d: Error, second port CQ is not empty\n", __LINE__);
2851 		goto err;
2852 	}
2853 	/* check IQ now has one packet */
2854 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2855 			!= 1) {
2856 		printf("%d: Error, QID does not have exactly 1 packet\n",
2857 			__LINE__);
2858 		goto err;
2859 	}
2860 
2861 	/* send another flow, which should pass the other IQ entry */
2862 	ev = new_ev;
2863 	ev.flow_id = 1;
2864 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2865 		printf("%d: Error with enqueue\n", __LINE__);
2866 		goto err;
2867 	}
2868 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2869 
2870 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2871 			!= 1) {
2872 		printf("%d: Error, second flow did not pass out first\n",
2873 			__LINE__);
2874 		goto err;
2875 	}
2876 
2877 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2878 			!= 1) {
2879 		printf("%d: Error, QID does not have exactly 1 packet\n",
2880 			__LINE__);
2881 		goto err;
2882 	}
2883 	cleanup(t);
2884 	return 0;
2885 err:
2886 	rte_event_dev_dump(evdev, stdout);
2887 	cleanup(t);
2888 	return -1;
2889 }
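/*
 * Sketch added for exposition: how the per-port xstat names used above
 * are built and queried. "port_%u_cq_ring_used" and friends are names
 * exported by the sw PMD; port_id is a placeholder.
 *
 *	char name[64];
 *	snprintf(name, sizeof(name), "port_%u_cq_ring_used", port_id);
 *	uint64_t used = rte_event_dev_xstats_by_name_get(evdev, name, NULL);
 */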
2890 
2891 static void
2892 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2893 {
2894 	*((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2895 }
2896 
2897 static int
2898 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2899 {
2900 	const struct rte_event new_ev = {
2901 		.op = RTE_EVENT_OP_NEW,
2902 		.u64 = 0xCA11BACC,
2903 		.queue_id = 0
2904 	};
2905 	struct rte_event ev = new_ev;
2906 	uint8_t count = 0;
2907 	int i;
2908 
2909 	if (init(t, 1, 1) < 0 ||
2910 	    create_ports(t, 1) < 0 ||
2911 	    create_atomic_qids(t, 1) < 0) {
2912 		printf("%d: Error initializing device\n", __LINE__);
2913 		return -1;
2914 	}
2915 
2916 	/* Link the queue so *_start() doesn't error out */
2917 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2918 		printf("%d: Error linking queue to port\n", __LINE__);
2919 		goto err;
2920 	}
2921 
2922 	if (rte_event_dev_start(evdev) < 0) {
2923 		printf("%d: Error with start call\n", __LINE__);
2924 		goto err;
2925 	}
2926 
2927 	for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2928 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2929 			printf("%d: Error enqueuing events\n", __LINE__);
2930 			goto err;
2931 		}
2932 	}
2933 
2934 	/* Schedule the events from the port to the IQ. At least one event
2935 	 * should be remaining in the queue.
2936 	 */
2937 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2938 
2939 	if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2940 		printf("%d: Error installing the flush callback\n", __LINE__);
2941 		goto err;
2942 	}
2943 
2944 	cleanup(t);
2945 
2946 	if (count == 0) {
2947 		printf("%d: Error executing the flush callback\n", __LINE__);
2948 		goto err;
2949 	}
2950 
2951 	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2952 		printf("%d: Error uninstalling the flush callback\n", __LINE__);
2953 		goto err;
2954 	}
2955 
2956 	return 0;
2957 err:
2958 	rte_event_dev_dump(evdev, stdout);
2959 	cleanup(t);
2960 	return -1;
2961 }
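/*
 * Minimal sketch, added for exposition, of the stop-flush mechanism
 * verified above: a callback registered with
 * rte_event_dev_stop_flush_callback_register() is invoked once per
 * event still inside the device when rte_event_dev_stop() runs.
 * my_flush and flushed are placeholder names.
 *
 *	static void
 *	my_flush(uint8_t dev, struct rte_event ev, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *	}
 *	...
 *	unsigned int flushed = 0;
 *	rte_event_dev_stop_flush_callback_register(evdev, my_flush, &flushed);
 *	rte_event_dev_stop(evdev);
 */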
2962 
2963 static int
2964 ordered_atomic_hist_completion(struct test *t)
2965 {
2966 	const int rx_enq = 0;
2967 	int err;
2968 
2969 	/* Create instance with 1 ordered + 1 atomic QID, 1 worker + 1 prod port */
2970 	if (init(t, 2, 2) < 0 ||
2971 			create_ports(t, 2) < 0 ||
2972 			create_ordered_qids(t, 1) < 0 ||
2973 			create_atomic_qids(t, 1) < 0)
2974 		return -1;
2975 
2976 	/* Helpers to identify queues */
2977 	const uint8_t qid_ordered = t->qid[0];
2978 	const uint8_t qid_atomic = t->qid[1];
2979 
2980 	/* CQ mapping to QID */
2981 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2982 		printf("%d: error mapping port 1 qid\n", __LINE__);
2983 		return -1;
2984 	}
2985 	if (rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1) != 1) {
2986 		printf("%d: error mapping port 1 qid\n", __LINE__);
2987 		return -1;
2988 	}
2989 	if (rte_event_dev_start(evdev) < 0) {
2990 		printf("%d: Error with start call\n", __LINE__);
2991 		return -1;
2992 	}
2993 
2994 	/* Enqueue 1x ordered event, to be RELEASE-ed by the worker
2995 	 * CPU, which may cause hist-list corruption (by not comleting)
2996 	 */
2997 	struct rte_event ord_ev = {
2998 		.op = RTE_EVENT_OP_NEW,
2999 		.queue_id = qid_ordered,
3000 		.event_type = RTE_EVENT_TYPE_CPU,
3001 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
3002 	};
3003 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ord_ev, 1);
3004 	if (err != 1) {
3005 		printf("%d: Failed to enqueue\n", __LINE__);
3006 		return -1;
3007 	}
3008 
3009 	/* call the scheduler. This schedules the above event as a single
3010 	 * event in an ORDERED queue, to the worker.
3011 	 */
3012 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3013 
3014 	/* Dequeue ORDERED event 0 from port 1, so that we can then drop */
3015 	struct rte_event ev;
3016 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
3017 		printf("%d: failed to dequeue\n", __LINE__);
3018 		return -1;
3019 	}
3020 
3021 	/* drop the ORDERED event. Here the history list should be completed,
3022 	 * but might not be if the hist-list bug exists. Call scheduler to make
3023 	 * it act on the RELEASE that was enqueued.
3024 	 */
3025 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
3026 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3027 
3028 	/* Enqueue 1x atomic event, to then FORWARD to trigger atomic hist-list
3029 	 * completion. If the bug exists, the ORDERED entry may be completed in
3030 	 * error (aka, using the ORDERED-ROB for the ATOMIC event). This is the
3031 	 * main focus of this unit test.
3032 	 */
3033 	{
3034 		struct rte_event ev = {
3035 			.op = RTE_EVENT_OP_NEW,
3036 			.queue_id = qid_atomic,
3037 			.event_type = RTE_EVENT_TYPE_CPU,
3038 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
3039 			.flow_id = 123,
3040 		};
3041 
3042 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
3043 		if (err != 1) {
3044 			printf("%d: Failed to enqueue\n", __LINE__);
3045 			return -1;
3046 		}
3047 	}
3048 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3049 
3050 	/* Deq ATM event, then forward it more than SW_PORT_HIST_LIST times,
3051 	 * to re-use any history list entry that was corrupted previously.
3052 	 */
3053 	for (int i = 0; i < SW_PORT_HIST_LIST + 2; i++) {
3054 		if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
3055 			printf("%d: failed to dequeue, did corrupt ORD hist "
3056 				"list steal this ATM event?\n", __LINE__);
3057 			return -1;
3058 		}
3059 
3060 		/* Re-enqueue the ATM event as FWD, trigger hist-list. */
3061 		ev.op = RTE_EVENT_OP_FORWARD;
3062 		err = rte_event_enqueue_burst(evdev, t->port[1], &ev, 1);
3063 		if (err != 1) {
3064 			printf("%d: Failed to enqueue\n", __LINE__);
3065 			return -1;
3066 		}
3067 
3068 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
3069 	}
3070 
3071 	/* If HIST-LIST + N count of dequeues succeed above, the hist list
3072 	 * has not been corrupted. If it is corrupted, the ATM event is pushed
3073 	 * into the ORDERED-ROB and will not dequeue.
3074 	 */
3075 
3076 	/* release the ATM event that's been forwarded HIST_LIST times */
3077 	err = rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
3078 	if (err != 1) {
3079 		printf("%d: Failed to enqueue\n", __LINE__);
3080 		return -1;
3081 	}
3082 
3083 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3084 
3085 	cleanup(t);
3086 	return 0;
3087 }
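/*
 * Note added for exposition: SW_PORT_HIST_LIST is the per-port
 * history-list depth in the sw PMD, so forwarding the same atomic
 * event SW_PORT_HIST_LIST + 2 times guarantees every hist-list slot,
 * including any entry left corrupted by the earlier ordered RELEASE,
 * is reused at least once, which is what flushes the bug out.
 */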
3088 
3089 static int
3090 worker_loopback_worker_fn(void *arg)
3091 {
3092 	struct test *t = arg;
3093 	uint8_t port = t->port[1];
3094 	int count = 0;
3095 	int enqd;
3096 
3097 	/*
3098 	 * Takes packets from the input port and then loops them back through
3099 	 * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
3100 	 * so each packet is scheduled 8 * 16 = 128 times in total.
3101 	 */
3102 	printf("%d: \tWorker function started\n", __LINE__);
3103 	while (count < NUM_PACKETS) {
3104 #define BURST_SIZE 32
3105 		struct rte_event ev[BURST_SIZE];
3106 		uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
3107 				BURST_SIZE, 0);
3108 		if (nb_rx == 0) {
3109 			rte_pause();
3110 			continue;
3111 		}
3112 
3113 		for (i = 0; i < nb_rx; i++) {
3114 			ev[i].queue_id++;
3115 			if (ev[i].queue_id != 8) {
3116 				ev[i].op = RTE_EVENT_OP_FORWARD;
3117 				enqd = rte_event_enqueue_burst(evdev, port,
3118 						&ev[i], 1);
3119 				if (enqd != 1) {
3120 					printf("%d: Can't enqueue FWD!!\n",
3121 							__LINE__);
3122 					return -1;
3123 				}
3124 				continue;
3125 			}
3126 
3127 			ev[i].queue_id = 0;
3128 			(*counter_field(ev[i].mbuf))++;
3129 			if (*counter_field(ev[i].mbuf) != 16) {
3130 				ev[i].op = RTE_EVENT_OP_FORWARD;
3131 				enqd = rte_event_enqueue_burst(evdev, port,
3132 						&ev[i], 1);
3133 				if (enqd != 1) {
3134 					printf("%d: Can't enqueue FWD!!\n",
3135 							__LINE__);
3136 					return -1;
3137 				}
3138 				continue;
3139 			}
3140 			/* we have hit 16 iterations through system - drop */
3141 			rte_pktmbuf_free(ev[i].mbuf);
3142 			count++;
3143 			ev[i].op = RTE_EVENT_OP_RELEASE;
3144 			enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3145 			if (enqd != 1) {
3146 				printf("%d drop enqueue failed\n", __LINE__);
3147 				return -1;
3148 			}
3149 		}
3150 	}
3151 
3152 	return 0;
3153 }
3154 
3155 static int
3156 worker_loopback_producer_fn(void *arg)
3157 {
3158 	struct test *t = arg;
3159 	uint8_t port = t->port[0];
3160 	uint64_t count = 0;
3161 
3162 	printf("%d: \tProducer function started\n", __LINE__);
3163 	while (count < NUM_PACKETS) {
3164 		struct rte_mbuf *m = 0;
3165 		do {
3166 			m = rte_pktmbuf_alloc(t->mbuf_pool);
3167 		} while (m == NULL);
3168 
3169 		*counter_field(m) = 0;
3170 
3171 		struct rte_event ev = {
3172 				.op = RTE_EVENT_OP_NEW,
3173 				.queue_id = t->qid[0],
3174 				.flow_id = (uintptr_t)m & 0xFFFF,
3175 				.mbuf = m,
3176 		};
3177 
3178 		if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3179 			while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
3180 					1)
3181 				rte_pause();
3182 		}
3183 
3184 		count++;
3185 	}
3186 
3187 	return 0;
3188 }
3189 
3190 static int
3191 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3192 {
3193 	/* use a single producer core, and a worker core to see what happens
3194 	 * if the worker loops packets back multiple times
3195 	 */
3196 	struct test_event_dev_stats stats;
3197 	uint64_t print_cycles = 0, cycles = 0;
3198 	uint64_t tx_pkts = 0;
3199 	int err;
3200 	int w_lcore, p_lcore;
3201 
3202 	static const struct rte_mbuf_dynfield counter_dynfield_desc = {
3203 		.name = "rte_event_sw_dynfield_selftest_counter",
3204 		.size = sizeof(counter_dynfield_t),
3205 		.align = alignof(counter_dynfield_t),
3206 	};
3207 	counter_dynfield_offset =
3208 		rte_mbuf_dynfield_register(&counter_dynfield_desc);
3209 	if (counter_dynfield_offset < 0) {
3210 		printf("Error registering mbuf field\n");
3211 		return -rte_errno;
3212 	}
3213 
3214 	if (init(t, 8, 2) < 0 ||
3215 			create_atomic_qids(t, 8) < 0) {
3216 		printf("%d: Error initializing device\n", __LINE__);
3217 		return -1;
3218 	}
3219 
3220 	/* RX with low max events */
3221 	static struct rte_event_port_conf conf = {
3222 			.dequeue_depth = 32,
3223 			.enqueue_depth = 64,
3224 	};
3225 	/* beware: this cannot be initialized in the static above as it would
3226 	 * only be initialized once - and this needs to be set for multiple runs
3227 	 */
3228 	conf.new_event_threshold = 512;
3229 	conf.event_port_cfg = disable_implicit_release ?
3230 		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
3231 
3232 	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3233 		printf("Error setting up RX port\n");
3234 		return -1;
3235 	}
3236 	t->port[0] = 0;
3237 	/* TX with higher max events */
3238 	conf.new_event_threshold = 4096;
3239 	if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3240 		printf("Error setting up TX port\n");
3241 		return -1;
3242 	}
3243 	t->port[1] = 1;
3244 
3245 	/* CQ mapping to QID */
3246 	err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3247 	if (err != 8) { /* should have mapped all 8 queues */
3248 		printf("%d: error mapping port 2 to all qids\n", __LINE__);
3249 		return -1;
3250 	}
3251 
3252 	if (rte_event_dev_start(evdev) < 0) {
3253 		printf("%d: Error with start call\n", __LINE__);
3254 		return -1;
3255 	}
3256 
3257 	p_lcore = rte_get_next_lcore(
3258 			/* start core */ -1,
3259 			/* skip main */ 1,
3260 			/* wrap */ 0);
3261 	w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3262 
3263 	rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3264 	rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3265 
3266 	print_cycles = cycles = rte_get_timer_cycles();
3267 	while (rte_eal_get_lcore_state(p_lcore) != WAIT ||
3268 			rte_eal_get_lcore_state(w_lcore) != WAIT) {
3269 
3270 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
3271 
3272 		uint64_t new_cycles = rte_get_timer_cycles();
3273 
3274 		if (new_cycles - print_cycles > rte_get_timer_hz()) {
3275 			test_event_dev_stats_get(evdev, &stats);
3276 			printf(
3277 				"%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3278 				__LINE__, stats.rx_pkts, stats.tx_pkts);
3279 
3280 			print_cycles = new_cycles;
3281 		}
3282 		if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3283 			test_event_dev_stats_get(evdev, &stats);
3284 			if (stats.tx_pkts == tx_pkts) {
3285 				rte_event_dev_dump(evdev, stdout);
3286 				printf("Dumping xstats:\n");
3287 				xstats_print();
3288 				printf(
3289 				"%d: No schedules in 3 seconds, assuming deadlock\n",
3290 					__LINE__);
3291 				return -1;
3292 			}
3293 			tx_pkts = stats.tx_pkts;
3294 			cycles = new_cycles;
3295 		}
3296 	}
3297 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
3298 	/* ensure all completions are flushed */
3299 
3300 	rte_eal_mp_wait_lcore();
3301 
3302 	cleanup(t);
3303 	return 0;
3304 }
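/*
 * Sketch added for exposition (producer_fn and worker_fn are
 * placeholders): the launch pattern used above, with the main lcore
 * driving the scheduler service until both remote lcores return to
 * WAIT state.
 *
 *	int p = rte_get_next_lcore(-1, 1, 0);
 *	int w = rte_get_next_lcore(p, 1, 0);
 *	rte_eal_remote_launch(producer_fn, t, p);
 *	rte_eal_remote_launch(worker_fn, t, w);
 *	while (rte_eal_get_lcore_state(p) != WAIT ||
 *			rte_eal_get_lcore_state(w) != WAIT)
 *		rte_service_run_iter_on_app_lcore(t->service_id, 1);
 *	rte_eal_mp_wait_lcore();
 */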
3305 
3306 static struct rte_mempool *eventdev_func_mempool;
3307 
3308 int
3309 test_sw_eventdev(void)
3310 {
3311 	struct test *t;
3312 	int ret;
3313 
3314 	t = malloc(sizeof(struct test));
3315 	if (t == NULL)
3316 		return -1;
3317 	/* manually initialize the op; older gccs complain about static
3318 	 * initialization of struct members that are bitfields.
3319 	 */
3320 	release_ev.op = RTE_EVENT_OP_RELEASE;
3321 
3322 	const char *eventdev_name = "event_sw";
3323 	evdev = rte_event_dev_get_dev_id(eventdev_name);
3324 	if (evdev < 0) {
3325 		printf("%d: Eventdev %s not found - creating.\n",
3326 				__LINE__, eventdev_name);
3327 		if (rte_vdev_init(eventdev_name, NULL) < 0) {
3328 			printf("Error creating eventdev\n");
3329 			goto test_fail;
3330 		}
3331 		evdev = rte_event_dev_get_dev_id(eventdev_name);
3332 		if (evdev < 0) {
3333 			printf("Error finding newly created eventdev\n");
3334 			goto test_fail;
3335 		}
3336 	}
3337 
3338 	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3339 		printf("Failed to get service ID for software event dev\n");
3340 		goto test_fail;
3341 	}
3342 
3343 	rte_service_runstate_set(t->service_id, 1);
3344 	rte_service_set_runstate_mapped_check(t->service_id, 0);
3345 
3346 	/* Only create mbuf pool once, reuse for each test run */
3347 	if (!eventdev_func_mempool) {
3348 		eventdev_func_mempool = rte_pktmbuf_pool_create(
3349 				"EVENTDEV_SW_SA_MBUF_POOL",
3350 				(1<<12), /* 4k buffers */
3351 				32 /*MBUF_CACHE_SIZE*/,
3352 				0,
3353 				512, /* use very small mbufs */
3354 				rte_socket_id());
3355 		if (!eventdev_func_mempool) {
3356 			printf("ERROR creating mempool\n");
3357 			goto test_fail;
3358 		}
3359 	}
3360 	t->mbuf_pool = eventdev_func_mempool;
3361 	printf("*** Running Single Directed Packet test...\n");
3362 	ret = test_single_directed_packet(t);
3363 	if (ret != 0) {
3364 		printf("ERROR - Single Directed Packet test FAILED.\n");
3365 		goto test_fail;
3366 	}
3367 	printf("*** Running Directed Forward Credit test...\n");
3368 	ret = test_directed_forward_credits(t);
3369 	if (ret != 0) {
3370 		printf("ERROR - Directed Forward Credit test FAILED.\n");
3371 		goto test_fail;
3372 	}
3373 	printf("*** Running Single Load Balanced Packet test...\n");
3374 	ret = single_packet(t);
3375 	if (ret != 0) {
3376 		printf("ERROR - Single Packet test FAILED.\n");
3377 		goto test_fail;
3378 	}
3379 	printf("*** Running Unordered Basic test...\n");
3380 	ret = unordered_basic(t);
3381 	if (ret != 0) {
3382 		printf("ERROR -  Unordered Basic test FAILED.\n");
3383 		goto test_fail;
3384 	}
3385 	printf("*** Running Ordered Basic test...\n");
3386 	ret = ordered_basic(t);
3387 	if (ret != 0) {
3388 		printf("ERROR -  Ordered Basic test FAILED.\n");
3389 		goto test_fail;
3390 	}
3391 	printf("*** Running Burst Packets test...\n");
3392 	ret = burst_packets(t);
3393 	if (ret != 0) {
3394 		printf("ERROR - Burst Packets test FAILED.\n");
3395 		goto test_fail;
3396 	}
3397 	printf("*** Running Load Balancing test...\n");
3398 	ret = load_balancing(t);
3399 	if (ret != 0) {
3400 		printf("ERROR - Load Balancing test FAILED.\n");
3401 		goto test_fail;
3402 	}
3403 	printf("*** Running Prioritized Directed test...\n");
3404 	ret = test_priority_directed(t);
3405 	if (ret != 0) {
3406 		printf("ERROR - Prioritized Directed test FAILED.\n");
3407 		goto test_fail;
3408 	}
3409 	printf("*** Running Prioritized Atomic test...\n");
3410 	ret = test_priority_atomic(t);
3411 	if (ret != 0) {
3412 		printf("ERROR - Prioritized Atomic test FAILED.\n");
3413 		goto test_fail;
3414 	}
3415 
3416 	printf("*** Running Prioritized Ordered test...\n");
3417 	ret = test_priority_ordered(t);
3418 	if (ret != 0) {
3419 		printf("ERROR - Prioritized Ordered test FAILED.\n");
3420 		goto test_fail;
3421 	}
3422 	printf("*** Running Prioritized Unordered test...\n");
3423 	ret = test_priority_unordered(t);
3424 	if (ret != 0) {
3425 		printf("ERROR - Prioritized Unordered test FAILED.\n");
3426 		goto test_fail;
3427 	}
3428 	printf("*** Running Invalid QID test...\n");
3429 	ret = invalid_qid(t);
3430 	if (ret != 0) {
3431 		printf("ERROR - Invalid QID test FAILED.\n");
3432 		goto test_fail;
3433 	}
3434 	printf("*** Running Load Balancing History test...\n");
3435 	ret = load_balancing_history(t);
3436 	if (ret != 0) {
3437 		printf("ERROR - Load Balancing History test FAILED.\n");
3438 		goto test_fail;
3439 	}
3440 	printf("*** Running Inflight Count test...\n");
3441 	ret = inflight_counts(t);
3442 	if (ret != 0) {
3443 		printf("ERROR - Inflight Count test FAILED.\n");
3444 		goto test_fail;
3445 	}
3446 	printf("*** Running Abuse Inflights test...\n");
3447 	ret = abuse_inflights(t);
3448 	if (ret != 0) {
3449 		printf("ERROR - Abuse Inflights test FAILED.\n");
3450 		goto test_fail;
3451 	}
3452 	printf("*** Running XStats test...\n");
3453 	ret = xstats_tests(t);
3454 	if (ret != 0) {
3455 		printf("ERROR - XStats test FAILED.\n");
3456 		goto test_fail;
3457 	}
3458 	printf("*** Running XStats ID Reset test...\n");
3459 	ret = xstats_id_reset_tests(t);
3460 	if (ret != 0) {
3461 		printf("ERROR - XStats ID Reset test FAILED.\n");
3462 		goto test_fail;
3463 	}
3464 	printf("*** Running XStats Brute Force test...\n");
3465 	ret = xstats_brute_force(t);
3466 	if (ret != 0) {
3467 		printf("ERROR - XStats Brute Force test FAILED.\n");
3468 		goto test_fail;
3469 	}
3470 	printf("*** Running XStats ID Abuse test...\n");
3471 	ret = xstats_id_abuse_tests(t);
3472 	if (ret != 0) {
3473 		printf("ERROR - XStats ID Abuse test FAILED.\n");
3474 		goto test_fail;
3475 	}
3476 	printf("*** Running QID Priority test...\n");
3477 	ret = qid_priorities(t);
3478 	if (ret != 0) {
3479 		printf("ERROR - QID Priority test FAILED.\n");
3480 		goto test_fail;
3481 	}
3482 	printf("*** Running Unlink-in-progress test...\n");
3483 	ret = unlink_in_progress(t);
3484 	if (ret != 0) {
3485 		printf("ERROR - Unlink in progress test FAILED.\n");
3486 		goto test_fail;
3487 	}
3488 	printf("*** Running Ordered Reconfigure test...\n");
3489 	ret = ordered_reconfigure(t);
3490 	if (ret != 0) {
3491 		printf("ERROR - Ordered Reconfigure test FAILED.\n");
3492 		goto test_fail;
3493 	}
3494 	printf("*** Running Port LB Single Reconfig test...\n");
3495 	ret = port_single_lb_reconfig(t);
3496 	if (ret != 0) {
3497 		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3498 		goto test_fail;
3499 	}
3500 	printf("*** Running Port Reconfig Credits test...\n");
3501 	ret = port_reconfig_credits(t);
3502 	if (ret != 0) {
3503 		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3504 		goto test_fail;
3505 	}
3506 	printf("*** Running Head-of-line-blocking test...\n");
3507 	ret = holb(t);
3508 	if (ret != 0) {
3509 		printf("ERROR - Head-of-line-blocking test FAILED.\n");
3510 		goto test_fail;
3511 	}
3512 	printf("*** Running Stop Flush test...\n");
3513 	ret = dev_stop_flush(t);
3514 	if (ret != 0) {
3515 		printf("ERROR - Stop Flush test FAILED.\n");
3516 		goto test_fail;
3517 	}
3518 	printf("*** Running Ordered & Atomic hist-list completion test...\n");
3519 	ret = ordered_atomic_hist_completion(t);
3520 	if (ret != 0) {
3521 		printf("ERROR - Ordered & Atomic hist-list test FAILED.\n");
3522 		goto test_fail;
3523 	}
3524 	if (rte_lcore_count() >= 3) {
3525 		printf("*** Running Worker loopback test...\n");
3526 		ret = worker_loopback(t, 0);
3527 		if (ret != 0) {
3528 			printf("ERROR - Worker loopback test FAILED.\n");
3529 			return ret;
3530 		}
3531 
3532 		printf("*** Running Worker loopback test (implicit release disabled)...\n");
3533 		ret = worker_loopback(t, 1);
3534 		if (ret != 0) {
3535 			printf("ERROR - Worker loopback test FAILED.\n");
3536 			goto test_fail;
3537 		}
3538 	} else {
3539 		printf("### Not enough cores for worker loopback tests.\n");
3540 		printf("### Need at least 3 cores for the tests.\n");
3541 	}
3542 
3543 	/*
3544 	 * Free test instance, leaving mempool initialized, and a pointer to it
3545 	 * in static eventdev_func_mempool, as it is re-used on re-runs
3546 	 */
3547 	free(t);
3548 
3549 	printf("SW Eventdev Selftest Successful.\n");
3550 	return 0;
3551 test_fail:
3552 	free(t);
3553 	printf("SW Eventdev Selftest Failed.\n");
3554 	return -1;
3555 }
3556