xref: /dpdk/drivers/event/opdl/opdl_test.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1e07a3ed7SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
2e07a3ed7SBruce Richardson  * Copyright(c) 2017 Intel Corporation
3d548ef51SLiang Ma  */
4d548ef51SLiang Ma 
5d548ef51SLiang Ma #include <stdio.h>
6d548ef51SLiang Ma #include <string.h>
7d548ef51SLiang Ma #include <stdint.h>
872b452c5SDmitry Kozlyuk #include <stdlib.h>
9d548ef51SLiang Ma #include <errno.h>
10d548ef51SLiang Ma #include <unistd.h>
11d548ef51SLiang Ma #include <sys/queue.h>
12d548ef51SLiang Ma 
13d548ef51SLiang Ma #include <rte_memory.h>
14d548ef51SLiang Ma #include <rte_memzone.h>
15d548ef51SLiang Ma #include <rte_launch.h>
16d548ef51SLiang Ma #include <rte_eal.h>
17d548ef51SLiang Ma #include <rte_per_lcore.h>
18d548ef51SLiang Ma #include <rte_lcore.h>
19d548ef51SLiang Ma #include <rte_debug.h>
20d548ef51SLiang Ma #include <rte_ethdev.h>
21d548ef51SLiang Ma #include <rte_cycles.h>
22d548ef51SLiang Ma #include <rte_eventdev.h>
234851ef2bSDavid Marchand #include <bus_vdev_driver.h>
24d548ef51SLiang Ma #include <rte_pause.h>
25d548ef51SLiang Ma 
26d548ef51SLiang Ma #include "opdl_evdev.h"
27d548ef51SLiang Ma #include "opdl_log.h"
28d548ef51SLiang Ma 
29d548ef51SLiang Ma 
30d548ef51SLiang Ma #define MAX_PORTS 16
31d548ef51SLiang Ma #define MAX_QIDS 16
32d548ef51SLiang Ma #define NUM_PACKETS (1<<18)
33d548ef51SLiang Ma #define NUM_EVENTS 256
34d548ef51SLiang Ma #define BURST_SIZE 32
35d548ef51SLiang Ma 
36d548ef51SLiang Ma 
37d548ef51SLiang Ma 
38d548ef51SLiang Ma static int evdev;
39d548ef51SLiang Ma 
/* Per-test context shared by all self-test cases below; reset by init(). */
struct test {
	struct rte_mempool *mbuf_pool;	/* pool used by rte_gen_arp(); survives the memset in init() */
	uint8_t port[MAX_PORTS];	/* event port ids filled in by create_ports() */
	uint8_t qid[MAX_QIDS];		/* event queue ids filled in by create_queues_type() */
	int nb_qids;			/* number of queues created so far (next free qid slot) */
};
46d548ef51SLiang Ma 
47d548ef51SLiang Ma static struct rte_mempool *eventdev_func_mempool;
48d548ef51SLiang Ma 
49d548ef51SLiang Ma static __rte_always_inline struct rte_mbuf *
50d548ef51SLiang Ma rte_gen_arp(int portid, struct rte_mempool *mp)
51d548ef51SLiang Ma {
52d548ef51SLiang Ma 	/*
53d548ef51SLiang Ma 	 * len = 14 + 46
54d548ef51SLiang Ma 	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
55d548ef51SLiang Ma 	 */
56d548ef51SLiang Ma 	static const uint8_t arp_request[] = {
57d548ef51SLiang Ma 		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
58d548ef51SLiang Ma 		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
59d548ef51SLiang Ma 		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
60d548ef51SLiang Ma 		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
61d548ef51SLiang Ma 		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
62d548ef51SLiang Ma 		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
63d548ef51SLiang Ma 		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
64d548ef51SLiang Ma 		0x00, 0x00, 0x00, 0x00
65d548ef51SLiang Ma 	};
66d548ef51SLiang Ma 	struct rte_mbuf *m;
67d548ef51SLiang Ma 	int pkt_len = sizeof(arp_request) - 1;
68d548ef51SLiang Ma 
69d548ef51SLiang Ma 	m = rte_pktmbuf_alloc(mp);
70d548ef51SLiang Ma 	if (!m)
71d548ef51SLiang Ma 		return 0;
72d548ef51SLiang Ma 
73d548ef51SLiang Ma 	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
74d548ef51SLiang Ma 		arp_request, pkt_len);
75d548ef51SLiang Ma 	rte_pktmbuf_pkt_len(m) = pkt_len;
76d548ef51SLiang Ma 	rte_pktmbuf_data_len(m) = pkt_len;
77d548ef51SLiang Ma 
78d548ef51SLiang Ma 	RTE_SET_USED(portid);
79d548ef51SLiang Ma 
80d548ef51SLiang Ma 	return m;
81d548ef51SLiang Ma }
82d548ef51SLiang Ma 
83d548ef51SLiang Ma /* initialization and config */
84d548ef51SLiang Ma static __rte_always_inline int
85d548ef51SLiang Ma init(struct test *t, int nb_queues, int nb_ports)
86d548ef51SLiang Ma {
87d548ef51SLiang Ma 	struct rte_event_dev_config config = {
88d548ef51SLiang Ma 			.nb_event_queues = nb_queues,
89d548ef51SLiang Ma 			.nb_event_ports = nb_ports,
90d548ef51SLiang Ma 			.nb_event_queue_flows = 1024,
91d548ef51SLiang Ma 			.nb_events_limit = 4096,
92d548ef51SLiang Ma 			.nb_event_port_dequeue_depth = 128,
93d548ef51SLiang Ma 			.nb_event_port_enqueue_depth = 128,
94d548ef51SLiang Ma 	};
95d548ef51SLiang Ma 	int ret;
96d548ef51SLiang Ma 
97d548ef51SLiang Ma 	void *temp = t->mbuf_pool; /* save and restore mbuf pool */
98d548ef51SLiang Ma 
99d548ef51SLiang Ma 	memset(t, 0, sizeof(*t));
100d548ef51SLiang Ma 	t->mbuf_pool = temp;
101d548ef51SLiang Ma 
102d548ef51SLiang Ma 	ret = rte_event_dev_configure(evdev, &config);
103d548ef51SLiang Ma 	if (ret < 0)
104*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: Error configuring device", __LINE__);
105d548ef51SLiang Ma 	return ret;
106d548ef51SLiang Ma };
107d548ef51SLiang Ma 
108d548ef51SLiang Ma static __rte_always_inline int
109d548ef51SLiang Ma create_ports(struct test *t, int num_ports)
110d548ef51SLiang Ma {
111d548ef51SLiang Ma 	int i;
112d548ef51SLiang Ma 	static const struct rte_event_port_conf conf = {
113d548ef51SLiang Ma 			.new_event_threshold = 1024,
114d548ef51SLiang Ma 			.dequeue_depth = 32,
115d548ef51SLiang Ma 			.enqueue_depth = 32,
116d548ef51SLiang Ma 	};
117d548ef51SLiang Ma 	if (num_ports > MAX_PORTS)
118d548ef51SLiang Ma 		return -1;
119d548ef51SLiang Ma 
120d548ef51SLiang Ma 	for (i = 0; i < num_ports; i++) {
121d548ef51SLiang Ma 		if (rte_event_port_setup(evdev, i, &conf) < 0) {
122*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "Error setting up port %d", i);
123d548ef51SLiang Ma 			return -1;
124d548ef51SLiang Ma 		}
125d548ef51SLiang Ma 		t->port[i] = i;
126d548ef51SLiang Ma 	}
127d548ef51SLiang Ma 
128d548ef51SLiang Ma 	return 0;
129d548ef51SLiang Ma };
130d548ef51SLiang Ma 
131d548ef51SLiang Ma static __rte_always_inline int
132d548ef51SLiang Ma create_queues_type(struct test *t, int num_qids, enum queue_type flags)
133d548ef51SLiang Ma {
134d548ef51SLiang Ma 	int i;
135d548ef51SLiang Ma 	uint8_t type;
136d548ef51SLiang Ma 
137d548ef51SLiang Ma 	switch (flags) {
138d548ef51SLiang Ma 	case OPDL_Q_TYPE_ORDERED:
139d548ef51SLiang Ma 		type = RTE_SCHED_TYPE_ORDERED;
140d548ef51SLiang Ma 		break;
141d548ef51SLiang Ma 	case OPDL_Q_TYPE_ATOMIC:
142d548ef51SLiang Ma 		type = RTE_SCHED_TYPE_ATOMIC;
143d548ef51SLiang Ma 		break;
144d548ef51SLiang Ma 	default:
145d548ef51SLiang Ma 		type = 0;
146d548ef51SLiang Ma 	}
147d548ef51SLiang Ma 
148d548ef51SLiang Ma 	/* Q creation */
149d548ef51SLiang Ma 	const struct rte_event_queue_conf conf = {
150d548ef51SLiang Ma 		.event_queue_cfg =
151d548ef51SLiang Ma 		(flags == OPDL_Q_TYPE_SINGLE_LINK ?
152d548ef51SLiang Ma 		 RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
153d548ef51SLiang Ma 		.schedule_type = type,
154d548ef51SLiang Ma 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
155d548ef51SLiang Ma 		.nb_atomic_flows = 1024,
156d548ef51SLiang Ma 		.nb_atomic_order_sequences = 1024,
157d548ef51SLiang Ma 	};
158d548ef51SLiang Ma 
159d548ef51SLiang Ma 	for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) {
160d548ef51SLiang Ma 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
161*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%d: error creating qid %d ",
162d548ef51SLiang Ma 					__LINE__, i);
163d548ef51SLiang Ma 			return -1;
164d548ef51SLiang Ma 		}
165d548ef51SLiang Ma 		t->qid[i] = i;
166d548ef51SLiang Ma 	}
167d548ef51SLiang Ma 
168d548ef51SLiang Ma 	t->nb_qids += num_qids;
169d548ef51SLiang Ma 
170d548ef51SLiang Ma 	if (t->nb_qids > MAX_QIDS)
171d548ef51SLiang Ma 		return -1;
172d548ef51SLiang Ma 
173d548ef51SLiang Ma 	return 0;
174d548ef51SLiang Ma }
175d548ef51SLiang Ma 
176d548ef51SLiang Ma 
177d548ef51SLiang Ma /* destruction */
178d548ef51SLiang Ma static __rte_always_inline int
179d548ef51SLiang Ma cleanup(struct test *t __rte_unused)
180d548ef51SLiang Ma {
181d548ef51SLiang Ma 	rte_event_dev_stop(evdev);
182d548ef51SLiang Ma 	rte_event_dev_close(evdev);
183*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "clean up for test done");
184d548ef51SLiang Ma 	return 0;
185d548ef51SLiang Ma };
186d548ef51SLiang Ma 
/*
 * Ordered-queue reordering test: push 3 sequence-tagged packets through an
 * ORDERED qid fanned out over three worker ports, re-enqueue them in
 * reverse order, and verify the tx port still sees all 3 (i.e. the PMD
 * restored ordering). Returns 0 on pass, non-zero on failure.
 */
static int
ordered_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	uint32_t i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];

	/* Base value for the per-mbuf selftest sequence numbers. */
	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
	    create_ports(t, tx_port+1) < 0 ||
	    create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/*
	 * CQ mapping to QID
	 * We need three ports, all mapped to the same ordered qid0. Then we'll
	 * take a packet out to each port, re-enqueue in reverse order,
	 * then make sure the reordering has taken place properly when we
	 * dequeue from the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * rx_port        w1_port
	 *        \     /         \
	 *         qid0 - w2_port - qid1
	 *              \         /     \
	 *                w3_port        tx_port
	 */
	/* CQ mapping to QID for LB ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	/* The tx port consumes from the second (downstream) queue. */
	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX  qid", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__);
		return -1;
	}
	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		/* Tag each mbuf so reordering can be detected later. */
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];

	uint32_t  seq  = 0;

	/* Dequeue the 3 packets, one from each worker port */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				&deq_ev[i], 1, 0);
		if (deq_pkts != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to deq", __LINE__);
			rte_event_dev_dump(evdev, stdout);
			return -1;
		}
		/* Worker port i is expected to receive packet i-1 in order. */
		seq = *rte_event_pmd_selftest_seqn(deq_ev[i].mbuf)  - MAGIC_SEQN;

		if (seq != (i-1)) {
			PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
					"port number is %u", seq, i);
			return -1;
		}
	}

	/* Enqueue each packet in reverse order, flushing after each one */
	for (i = w3_port; i >= w1_port; i--) {

		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
		deq_ev[i].queue_id = t->qid[1];
		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue", __LINE__);
			return -1;
		}
	}

	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		/* NOTE(review): returns 1 (not -1) here; callers appear to
		 * treat any non-zero as failure - confirm before changing. */
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d",
			__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return 1;
	}

	/* Destroy the instance */
	cleanup(t);

	return 0;
}
323d548ef51SLiang Ma 
324d548ef51SLiang Ma 
/*
 * Atomic-queue test: enqueue 3 packets sharing flow_id 1 into an ATOMIC
 * qid linked to three worker ports, and verify the atomic guarantee -
 * only one worker port (port 2 here) receives the whole flow; the others
 * must dequeue nothing. The receiving worker forwards all 3 to qid1 and
 * the tx port must see all 3. Returns 0 on pass, non-zero on failure.
 */
static int
atomic_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	int i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	/* Base value for the per-mbuf selftest sequence numbers. */
	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
	    create_ports(t, tx_port+1) < 0 ||
	    create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}


	/*
	 * CQ mapping to QID
	 * We need three ports, all mapped to the same ordered qid0. Then we'll
	 * take a packet out to each port, re-enqueue in reverse order,
	 * then make sure the reordering has taken place properly when we
	 * dequeue from the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * rx_port        w1_port
	 *        \     /         \
	 *         qid0 - w2_port - qid1
	 *              \         /     \
	 *                w3_port        tx_port
	 */
	/* CQ mapping to QID for Atomic  ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	/* The tx port consumes from the second (downstream) queue. */
	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX  qid", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		/* All 3 events share one flow, pinning them to one worker. */
		ev.flow_id = 1;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];

	/* Dequeue the 3 packets, one from each worker port */
	for (i = w1_port; i <= w3_port; i++) {

		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				deq_ev, 3, 0);

		/* NOTE(review): port 2 is assumed to be the port the opdl
		 * PMD assigns flow_id 1 to - confirm against the scheduler. */
		if (t->port[i] != 2) {
			/* Non-owning worker ports must see no events. */
			if (deq_pkts != 0) {
				PMD_DRV_LOG(ERR, "%d: deq none zero !",
						__LINE__);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}
		} else {

			/* The owning port must receive the whole flow. */
			if (deq_pkts != 3) {
				PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !",
						__LINE__, deq_pkts);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}

			/* Forward all 3 events to the downstream queue. */
			int j;
			for (j = 0; j < 3; j++) {
				deq_ev[j].op = RTE_EVENT_OP_FORWARD;
				deq_ev[j].queue_id = t->qid[1];
			}

			err = rte_event_enqueue_burst(evdev, t->port[i],
					deq_ev, 3);

			if (err != 3) {
				PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
						"retval = %u",
						t->port[i], 3, err);
				return -1;
			}

		}

	}


	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		/* NOTE(review): returns 1 (not -1) here; callers appear to
		 * treat any non-zero as failure - confirm before changing. */
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d",
			__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return 1;
	}

	cleanup(t);

	return 0;
}
473d548ef51SLiang Ma static __rte_always_inline int
4741bdfe4d7SPavan Nikhilesh check_qid_stats(uint64_t id[], int index)
475d548ef51SLiang Ma {
476d548ef51SLiang Ma 
477d548ef51SLiang Ma 	if (index == 0) {
478d548ef51SLiang Ma 		if (id[0] != 3 || id[1] != 3
479d548ef51SLiang Ma 				|| id[2] != 3)
480d548ef51SLiang Ma 			return -1;
481d548ef51SLiang Ma 	} else if (index == 1) {
482d548ef51SLiang Ma 		if (id[0] != 5 || id[1] != 5
483d548ef51SLiang Ma 				|| id[2] != 2)
484d548ef51SLiang Ma 			return -1;
485d548ef51SLiang Ma 	} else if (index == 2) {
486d548ef51SLiang Ma 		if (id[0] != 3 || id[1] != 1
487d548ef51SLiang Ma 				|| id[2] != 1)
488d548ef51SLiang Ma 			return -1;
489d548ef51SLiang Ma 	}
490d548ef51SLiang Ma 
491d548ef51SLiang Ma 	return 0;
492d548ef51SLiang Ma }
493d548ef51SLiang Ma 
494d548ef51SLiang Ma 
495d548ef51SLiang Ma static int
496d548ef51SLiang Ma check_statistics(void)
497d548ef51SLiang Ma {
498d548ef51SLiang Ma 	int num_ports = 3; /* Hard-coded for this app */
499be455196SHarry van Haaren 	int i;
500d548ef51SLiang Ma 
501be455196SHarry van Haaren 	for (i = 0; i < num_ports; i++) {
502d548ef51SLiang Ma 		int num_stats, num_stats_returned;
503d548ef51SLiang Ma 
504d548ef51SLiang Ma 		num_stats = rte_event_dev_xstats_names_get(0,
505d548ef51SLiang Ma 				RTE_EVENT_DEV_XSTATS_PORT,
506d548ef51SLiang Ma 				i,
507d548ef51SLiang Ma 				NULL,
508d548ef51SLiang Ma 				NULL,
509d548ef51SLiang Ma 				0);
510d548ef51SLiang Ma 		if (num_stats > 0) {
511d548ef51SLiang Ma 
5121bdfe4d7SPavan Nikhilesh 			uint64_t id[num_stats];
513d548ef51SLiang Ma 			struct rte_event_dev_xstats_name names[num_stats];
514d548ef51SLiang Ma 			uint64_t values[num_stats];
515d548ef51SLiang Ma 
516d548ef51SLiang Ma 			num_stats_returned = rte_event_dev_xstats_names_get(0,
517d548ef51SLiang Ma 					RTE_EVENT_DEV_XSTATS_PORT,
518d548ef51SLiang Ma 					i,
519d548ef51SLiang Ma 					names,
520d548ef51SLiang Ma 					id,
521d548ef51SLiang Ma 					num_stats);
522d548ef51SLiang Ma 
523d548ef51SLiang Ma 			if (num_stats == num_stats_returned) {
524d548ef51SLiang Ma 				num_stats_returned = rte_event_dev_xstats_get(0,
525d548ef51SLiang Ma 						RTE_EVENT_DEV_XSTATS_PORT,
526d548ef51SLiang Ma 						i,
527d548ef51SLiang Ma 						id,
528d548ef51SLiang Ma 						values,
529d548ef51SLiang Ma 						num_stats);
530d548ef51SLiang Ma 
531d548ef51SLiang Ma 				if (num_stats == num_stats_returned) {
532d548ef51SLiang Ma 					int err;
533d548ef51SLiang Ma 
534d548ef51SLiang Ma 					err = check_qid_stats(id, i);
535d548ef51SLiang Ma 
536d548ef51SLiang Ma 					if (err)
537d548ef51SLiang Ma 						return err;
538d548ef51SLiang Ma 
539d548ef51SLiang Ma 				} else {
540d548ef51SLiang Ma 					return -1;
541d548ef51SLiang Ma 				}
542d548ef51SLiang Ma 			} else {
543d548ef51SLiang Ma 				return -1;
544d548ef51SLiang Ma 			}
545d548ef51SLiang Ma 		} else {
546d548ef51SLiang Ma 			return -1;
547d548ef51SLiang Ma 		}
548d548ef51SLiang Ma 	}
549d548ef51SLiang Ma 	return 0;
550d548ef51SLiang Ma }
551d548ef51SLiang Ma 
552d548ef51SLiang Ma #define OLD_NUM_PACKETS 3
553d548ef51SLiang Ma #define NEW_NUM_PACKETS 2
554d548ef51SLiang Ma static int
555d548ef51SLiang Ma single_link_w_stats(struct test *t)
556d548ef51SLiang Ma {
557d548ef51SLiang Ma 	const uint8_t rx_port = 0;
558d548ef51SLiang Ma 	const uint8_t w1_port = 1;
559d548ef51SLiang Ma 	const uint8_t tx_port = 2;
560d548ef51SLiang Ma 	int err;
561d548ef51SLiang Ma 	int i;
562d548ef51SLiang Ma 	uint32_t deq_pkts;
563d548ef51SLiang Ma 	struct rte_mbuf *mbufs[3];
564d548ef51SLiang Ma 	RTE_SET_USED(mbufs);
565d548ef51SLiang Ma 
566d548ef51SLiang Ma 	/* Create instance with 3 ports */
567d548ef51SLiang Ma 	if (init(t, 2, tx_port + 1) < 0 ||
568d548ef51SLiang Ma 	    create_ports(t, 3) < 0 || /* 0,1,2 */
569d548ef51SLiang Ma 	    create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
570d548ef51SLiang Ma 	    create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
571*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
572d548ef51SLiang Ma 		return -1;
573d548ef51SLiang Ma 	}
574d548ef51SLiang Ma 
575d548ef51SLiang Ma 
576d548ef51SLiang Ma 	/*
577d548ef51SLiang Ma 	 *
578d548ef51SLiang Ma 	 * Simplified test setup diagram:
579d548ef51SLiang Ma 	 *
580d548ef51SLiang Ma 	 * rx_port(0)
581d548ef51SLiang Ma 	 *           \
582d548ef51SLiang Ma 	 *            qid0 - w1_port(1) - qid1
583d548ef51SLiang Ma 	 *                                    \
584d548ef51SLiang Ma 	 *                                     tx_port(2)
585d548ef51SLiang Ma 	 */
586d548ef51SLiang Ma 
587d548ef51SLiang Ma 	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
588d548ef51SLiang Ma 				  1);
589d548ef51SLiang Ma 	if (err != 1) {
590*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]",
591d548ef51SLiang Ma 		       __LINE__,
592d548ef51SLiang Ma 		       t->port[1],
593d548ef51SLiang Ma 		       t->qid[0]);
594d548ef51SLiang Ma 		cleanup(t);
595d548ef51SLiang Ma 		return -1;
596d548ef51SLiang Ma 	}
597d548ef51SLiang Ma 
598d548ef51SLiang Ma 	err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
599d548ef51SLiang Ma 				  1);
600d548ef51SLiang Ma 	if (err != 1) {
601*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]",
602d548ef51SLiang Ma 		       __LINE__,
603d548ef51SLiang Ma 		       t->port[2],
604d548ef51SLiang Ma 		       t->qid[1]);
605d548ef51SLiang Ma 		cleanup(t);
606d548ef51SLiang Ma 		return -1;
607d548ef51SLiang Ma 	}
608d548ef51SLiang Ma 
609d548ef51SLiang Ma 	if (rte_event_dev_start(evdev) != 0) {
610*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: failed to start device", __LINE__);
611d548ef51SLiang Ma 		cleanup(t);
612d548ef51SLiang Ma 		return -1;
613d548ef51SLiang Ma 	}
614d548ef51SLiang Ma 
615d548ef51SLiang Ma 	/*
616d548ef51SLiang Ma 	 * Enqueue 3 packets to the rx port
617d548ef51SLiang Ma 	 */
618d548ef51SLiang Ma 	for (i = 0; i < 3; i++) {
619d548ef51SLiang Ma 		struct rte_event ev;
620d548ef51SLiang Ma 		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
621d548ef51SLiang Ma 		if (!mbufs[i]) {
622*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
623d548ef51SLiang Ma 			return -1;
624d548ef51SLiang Ma 		}
625d548ef51SLiang Ma 
626d548ef51SLiang Ma 		ev.queue_id = t->qid[0];
627d548ef51SLiang Ma 		ev.op = RTE_EVENT_OP_NEW;
628d548ef51SLiang Ma 		ev.mbuf = mbufs[i];
629ca4355e4SDavid Marchand 		*rte_event_pmd_selftest_seqn(mbufs[i]) = 1234 + i;
630d548ef51SLiang Ma 
631d548ef51SLiang Ma 		/* generate pkt and enqueue */
632d548ef51SLiang Ma 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
633d548ef51SLiang Ma 		if (err != 1) {
634*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u",
635d548ef51SLiang Ma 			       __LINE__,
636d548ef51SLiang Ma 			       t->port[rx_port],
637d548ef51SLiang Ma 			       err);
638d548ef51SLiang Ma 			return -1;
639d548ef51SLiang Ma 		}
640d548ef51SLiang Ma 	}
641d548ef51SLiang Ma 
642d548ef51SLiang Ma 	/* Dequeue the 3 packets, from SINGLE_LINK worker port */
643d548ef51SLiang Ma 	struct rte_event deq_ev[3];
644d548ef51SLiang Ma 
645d548ef51SLiang Ma 	deq_pkts = rte_event_dequeue_burst(evdev,
646d548ef51SLiang Ma 					   t->port[w1_port],
647d548ef51SLiang Ma 					   deq_ev, 3, 0);
648d548ef51SLiang Ma 
649d548ef51SLiang Ma 	if (deq_pkts != 3) {
650*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: deq not 3 !", __LINE__);
651d548ef51SLiang Ma 		cleanup(t);
652d548ef51SLiang Ma 		return -1;
653d548ef51SLiang Ma 	}
654d548ef51SLiang Ma 
655d548ef51SLiang Ma 	/* Just enqueue 2 onto new ring */
656d548ef51SLiang Ma 	for (i = 0; i < NEW_NUM_PACKETS; i++)
657d548ef51SLiang Ma 		deq_ev[i].queue_id = t->qid[1];
658d548ef51SLiang Ma 
659d548ef51SLiang Ma 	deq_pkts = rte_event_enqueue_burst(evdev,
660d548ef51SLiang Ma 					   t->port[w1_port],
661d548ef51SLiang Ma 					   deq_ev,
662d548ef51SLiang Ma 					   NEW_NUM_PACKETS);
663d548ef51SLiang Ma 
664d548ef51SLiang Ma 	if (deq_pkts != 2) {
665*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!", __LINE__, deq_pkts);
666d548ef51SLiang Ma 		cleanup(t);
667d548ef51SLiang Ma 		return -1;
668d548ef51SLiang Ma 	}
669d548ef51SLiang Ma 
670d548ef51SLiang Ma 	/* dequeue from the tx ports, we should get 2 packets */
671d548ef51SLiang Ma 	deq_pkts = rte_event_dequeue_burst(evdev,
672d548ef51SLiang Ma 					   t->port[tx_port],
673d548ef51SLiang Ma 					   deq_ev,
674d548ef51SLiang Ma 					   3,
675d548ef51SLiang Ma 					   0);
676d548ef51SLiang Ma 
677d548ef51SLiang Ma 	/* Check to see if we've got all 2 packets */
678d548ef51SLiang Ma 	if (deq_pkts != 2) {
679*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d",
680d548ef51SLiang Ma 			__LINE__, deq_pkts, tx_port);
681d548ef51SLiang Ma 		cleanup(t);
682d548ef51SLiang Ma 		return -1;
683d548ef51SLiang Ma 	}
684d548ef51SLiang Ma 
685d548ef51SLiang Ma 	if (!check_statistics()) {
686d548ef51SLiang Ma 		PMD_DRV_LOG(ERR, "xstats check failed");
687d548ef51SLiang Ma 		cleanup(t);
688d548ef51SLiang Ma 		return -1;
689d548ef51SLiang Ma 	}
690d548ef51SLiang Ma 
691d548ef51SLiang Ma 	cleanup(t);
692d548ef51SLiang Ma 
693d548ef51SLiang Ma 	return 0;
694d548ef51SLiang Ma }
695d548ef51SLiang Ma 
696d548ef51SLiang Ma static int
697d548ef51SLiang Ma single_link(struct test *t)
698d548ef51SLiang Ma {
699d548ef51SLiang Ma 	const uint8_t tx_port = 2;
700d548ef51SLiang Ma 	int err;
701d548ef51SLiang Ma 	struct rte_mbuf *mbufs[3];
702d548ef51SLiang Ma 	RTE_SET_USED(mbufs);
703d548ef51SLiang Ma 
704d548ef51SLiang Ma 	/* Create instance with 5 ports */
705d548ef51SLiang Ma 	if (init(t, 2, tx_port+1) < 0 ||
706d548ef51SLiang Ma 	    create_ports(t, 3) < 0 || /* 0,1,2 */
707d548ef51SLiang Ma 	    create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
708d548ef51SLiang Ma 	    create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
709*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
710d548ef51SLiang Ma 		return -1;
711d548ef51SLiang Ma 	}
712d548ef51SLiang Ma 
713d548ef51SLiang Ma 
714d548ef51SLiang Ma 	/*
715d548ef51SLiang Ma 	 *
716d548ef51SLiang Ma 	 * Simplified test setup diagram:
717d548ef51SLiang Ma 	 *
718d548ef51SLiang Ma 	 * rx_port(0)
719d548ef51SLiang Ma 	 *           \
720d548ef51SLiang Ma 	 *            qid0 - w1_port(1) - qid1
721d548ef51SLiang Ma 	 *                                    \
722d548ef51SLiang Ma 	 *                                     tx_port(2)
723d548ef51SLiang Ma 	 */
724d548ef51SLiang Ma 
725d548ef51SLiang Ma 	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
726d548ef51SLiang Ma 				  1);
727d548ef51SLiang Ma 	if (err != 1) {
728*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__);
729d548ef51SLiang Ma 		cleanup(t);
730d548ef51SLiang Ma 		return -1;
731d548ef51SLiang Ma 	}
732d548ef51SLiang Ma 
733d548ef51SLiang Ma 	err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
734d548ef51SLiang Ma 				  1);
735d548ef51SLiang Ma 	if (err != 1) {
736*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__);
737d548ef51SLiang Ma 		cleanup(t);
738d548ef51SLiang Ma 		return -1;
739d548ef51SLiang Ma 	}
740d548ef51SLiang Ma 
741d548ef51SLiang Ma 	if (rte_event_dev_start(evdev) == 0) {
742d548ef51SLiang Ma 		PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
743*f665790aSDavid Marchand 				"SINGLE_LINK PORT", __LINE__);
744d548ef51SLiang Ma 		cleanup(t);
745d548ef51SLiang Ma 		return -1;
746d548ef51SLiang Ma 	}
747d548ef51SLiang Ma 
748d548ef51SLiang Ma 	cleanup(t);
749d548ef51SLiang Ma 
750d548ef51SLiang Ma 	return 0;
751d548ef51SLiang Ma }
752d548ef51SLiang Ma 
753d548ef51SLiang Ma 
754d548ef51SLiang Ma static __rte_always_inline void
755d548ef51SLiang Ma populate_event_burst(struct rte_event ev[],
756d548ef51SLiang Ma 		     uint8_t qid,
757d548ef51SLiang Ma 		     uint16_t num_events)
758d548ef51SLiang Ma {
759d548ef51SLiang Ma 	uint16_t i;
760d548ef51SLiang Ma 	for (i = 0; i < num_events; i++) {
761d548ef51SLiang Ma 		ev[i].flow_id = 1;
762d548ef51SLiang Ma 		ev[i].op = RTE_EVENT_OP_NEW;
763d548ef51SLiang Ma 		ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
764d548ef51SLiang Ma 		ev[i].queue_id = qid;
765d548ef51SLiang Ma 		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
766d548ef51SLiang Ma 		ev[i].sub_event_type = 0;
767d548ef51SLiang Ma 		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
768d548ef51SLiang Ma 		ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
769d548ef51SLiang Ma 	}
770d548ef51SLiang Ma }
771d548ef51SLiang Ma 
772d548ef51SLiang Ma #define NUM_QUEUES 3
773d548ef51SLiang Ma #define BATCH_SIZE 32
774d548ef51SLiang Ma 
775d548ef51SLiang Ma static int
776d548ef51SLiang Ma qid_basic(struct test *t)
777d548ef51SLiang Ma {
778d548ef51SLiang Ma 	int err = 0;
779d548ef51SLiang Ma 
780d548ef51SLiang Ma 	uint8_t q_id = 0;
781d548ef51SLiang Ma 	uint8_t p_id = 0;
782d548ef51SLiang Ma 
783d548ef51SLiang Ma 	uint32_t num_events;
784d548ef51SLiang Ma 	uint32_t i;
785d548ef51SLiang Ma 
786d548ef51SLiang Ma 	struct rte_event ev[BATCH_SIZE];
787d548ef51SLiang Ma 
788d548ef51SLiang Ma 	/* Create instance with 4 ports */
789d548ef51SLiang Ma 	if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
790d548ef51SLiang Ma 	    create_ports(t, NUM_QUEUES+1) < 0 ||
791d548ef51SLiang Ma 	    create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
792*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
793d548ef51SLiang Ma 		return -1;
794d548ef51SLiang Ma 	}
795d548ef51SLiang Ma 
796d548ef51SLiang Ma 	for (i = 0; i < NUM_QUEUES; i++) {
797d548ef51SLiang Ma 		int nb_linked;
798d548ef51SLiang Ma 		q_id = i;
799d548ef51SLiang Ma 
800d548ef51SLiang Ma 		nb_linked = rte_event_port_link(evdev,
801d548ef51SLiang Ma 				i+1, /* port = q_id + 1*/
802d548ef51SLiang Ma 				&q_id,
803d548ef51SLiang Ma 				NULL,
804d548ef51SLiang Ma 				1);
805d548ef51SLiang Ma 
806d548ef51SLiang Ma 		if (nb_linked != 1) {
807d548ef51SLiang Ma 
808*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u",
809d548ef51SLiang Ma 					__FILE__,
810d548ef51SLiang Ma 					__LINE__,
811d548ef51SLiang Ma 					i + 1,
812d548ef51SLiang Ma 					q_id);
813d548ef51SLiang Ma 
814d548ef51SLiang Ma 			err = -1;
815d548ef51SLiang Ma 			break;
816d548ef51SLiang Ma 		}
817d548ef51SLiang Ma 
818d548ef51SLiang Ma 	}
819d548ef51SLiang Ma 
820d548ef51SLiang Ma 
821d548ef51SLiang Ma 	/* Try and link to the same port again */
822d548ef51SLiang Ma 	if (!err) {
823d548ef51SLiang Ma 		uint8_t t_qid = 0;
824d548ef51SLiang Ma 		if (rte_event_port_link(evdev,
825d548ef51SLiang Ma 					1,
826d548ef51SLiang Ma 					&t_qid,
827d548ef51SLiang Ma 					NULL,
828d548ef51SLiang Ma 					1) > 0) {
829*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail",
830d548ef51SLiang Ma 					__FILE__,
831d548ef51SLiang Ma 					__LINE__);
832d548ef51SLiang Ma 			err = -1;
833d548ef51SLiang Ma 		}
834d548ef51SLiang Ma 
835d548ef51SLiang Ma 		uint32_t test_num_events;
836d548ef51SLiang Ma 
837d548ef51SLiang Ma 		if (!err) {
838d548ef51SLiang Ma 			test_num_events = rte_event_dequeue_burst(evdev,
839d548ef51SLiang Ma 					p_id,
840d548ef51SLiang Ma 					ev,
841d548ef51SLiang Ma 					BATCH_SIZE,
842d548ef51SLiang Ma 					0);
843d548ef51SLiang Ma 			if (test_num_events != 0) {
844*f665790aSDavid Marchand 				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device",
845d548ef51SLiang Ma 						__FILE__,
846d548ef51SLiang Ma 						__LINE__,
847d548ef51SLiang Ma 						p_id);
848d548ef51SLiang Ma 				err = -1;
849d548ef51SLiang Ma 			}
850d548ef51SLiang Ma 		}
851d548ef51SLiang Ma 
852d548ef51SLiang Ma 		if (!err) {
853d548ef51SLiang Ma 			test_num_events = rte_event_enqueue_burst(evdev,
854d548ef51SLiang Ma 					p_id,
855d548ef51SLiang Ma 					ev,
856d548ef51SLiang Ma 					BATCH_SIZE);
857d548ef51SLiang Ma 			if (test_num_events != 0) {
858*f665790aSDavid Marchand 				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device",
859d548ef51SLiang Ma 						__FILE__,
860d548ef51SLiang Ma 						__LINE__,
861d548ef51SLiang Ma 						p_id);
862d548ef51SLiang Ma 				err = -1;
863d548ef51SLiang Ma 			}
864d548ef51SLiang Ma 		}
865d548ef51SLiang Ma 	}
866d548ef51SLiang Ma 
867d548ef51SLiang Ma 
8687be78d02SJosh Soref 	/* Start the device */
869d548ef51SLiang Ma 	if (!err) {
870d548ef51SLiang Ma 		if (rte_event_dev_start(evdev) < 0) {
871*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: Error with start call",
872d548ef51SLiang Ma 					__FILE__,
873d548ef51SLiang Ma 					__LINE__);
874d548ef51SLiang Ma 			err = -1;
875d548ef51SLiang Ma 		}
876d548ef51SLiang Ma 	}
877d548ef51SLiang Ma 
878d548ef51SLiang Ma 
879d548ef51SLiang Ma 	/* Check we can't do any more links now that device is started.*/
880d548ef51SLiang Ma 	if (!err) {
881d548ef51SLiang Ma 		uint8_t t_qid = 0;
882d548ef51SLiang Ma 		if (rte_event_port_link(evdev,
883d548ef51SLiang Ma 					1,
884d548ef51SLiang Ma 					&t_qid,
885d548ef51SLiang Ma 					NULL,
886d548ef51SLiang Ma 					1) > 0) {
887*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail",
888d548ef51SLiang Ma 					__FILE__,
889d548ef51SLiang Ma 					__LINE__);
890d548ef51SLiang Ma 			err = -1;
891d548ef51SLiang Ma 		}
892d548ef51SLiang Ma 	}
893d548ef51SLiang Ma 
894d548ef51SLiang Ma 	if (!err) {
895d548ef51SLiang Ma 
896d548ef51SLiang Ma 		q_id = 0;
897d548ef51SLiang Ma 
898d548ef51SLiang Ma 		populate_event_burst(ev,
899d548ef51SLiang Ma 				q_id,
900d548ef51SLiang Ma 				BATCH_SIZE);
901d548ef51SLiang Ma 
902d548ef51SLiang Ma 		num_events = rte_event_enqueue_burst(evdev,
903d548ef51SLiang Ma 				p_id,
904d548ef51SLiang Ma 				ev,
905d548ef51SLiang Ma 				BATCH_SIZE);
906d548ef51SLiang Ma 		if (num_events != BATCH_SIZE) {
907*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets",
908d548ef51SLiang Ma 					__FILE__,
909d548ef51SLiang Ma 					__LINE__);
910d548ef51SLiang Ma 			err = -1;
911d548ef51SLiang Ma 		}
912d548ef51SLiang Ma 	}
913d548ef51SLiang Ma 
914d548ef51SLiang Ma 	if (!err) {
915d548ef51SLiang Ma 		while (++p_id < NUM_QUEUES) {
916d548ef51SLiang Ma 
917d548ef51SLiang Ma 			num_events = rte_event_dequeue_burst(evdev,
918d548ef51SLiang Ma 					p_id,
919d548ef51SLiang Ma 					ev,
920d548ef51SLiang Ma 					BATCH_SIZE,
921d548ef51SLiang Ma 					0);
922d548ef51SLiang Ma 
923d548ef51SLiang Ma 			if (num_events != BATCH_SIZE) {
924*f665790aSDavid Marchand 				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u",
925d548ef51SLiang Ma 						__FILE__,
926d548ef51SLiang Ma 						__LINE__,
927d548ef51SLiang Ma 						p_id);
928d548ef51SLiang Ma 				err = -1;
929d548ef51SLiang Ma 				break;
930d548ef51SLiang Ma 			}
931d548ef51SLiang Ma 
932d548ef51SLiang Ma 			if (ev[0].queue_id != q_id) {
933*f665790aSDavid Marchand 				PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]",
934d548ef51SLiang Ma 						__FILE__,
935d548ef51SLiang Ma 						__LINE__,
936d548ef51SLiang Ma 						p_id,
937d548ef51SLiang Ma 						ev[0].queue_id,
938d548ef51SLiang Ma 						q_id);
939d548ef51SLiang Ma 				err = -1;
940d548ef51SLiang Ma 				break;
941d548ef51SLiang Ma 			}
942d548ef51SLiang Ma 
943d548ef51SLiang Ma 			populate_event_burst(ev,
944d548ef51SLiang Ma 					++q_id,
945d548ef51SLiang Ma 					BATCH_SIZE);
946d548ef51SLiang Ma 
947d548ef51SLiang Ma 			num_events = rte_event_enqueue_burst(evdev,
948d548ef51SLiang Ma 					p_id,
949d548ef51SLiang Ma 					ev,
950d548ef51SLiang Ma 					BATCH_SIZE);
951d548ef51SLiang Ma 			if (num_events != BATCH_SIZE) {
952*f665790aSDavid Marchand 				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u",
953d548ef51SLiang Ma 						__FILE__,
954d548ef51SLiang Ma 						__LINE__,
955d548ef51SLiang Ma 						p_id,
956d548ef51SLiang Ma 						q_id);
957d548ef51SLiang Ma 				err = -1;
958d548ef51SLiang Ma 				break;
959d548ef51SLiang Ma 			}
960d548ef51SLiang Ma 		}
961d548ef51SLiang Ma 	}
962d548ef51SLiang Ma 
963d548ef51SLiang Ma 	if (!err) {
964d548ef51SLiang Ma 		num_events = rte_event_dequeue_burst(evdev,
965d548ef51SLiang Ma 				p_id,
966d548ef51SLiang Ma 				ev,
967d548ef51SLiang Ma 				BATCH_SIZE,
968d548ef51SLiang Ma 				0);
969d548ef51SLiang Ma 		if (num_events != BATCH_SIZE) {
970*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u",
971d548ef51SLiang Ma 					__FILE__,
972d548ef51SLiang Ma 					__LINE__,
973d548ef51SLiang Ma 					p_id);
974d548ef51SLiang Ma 			err = -1;
975d548ef51SLiang Ma 		}
976d548ef51SLiang Ma 	}
977d548ef51SLiang Ma 
978d548ef51SLiang Ma 	cleanup(t);
979d548ef51SLiang Ma 
980d548ef51SLiang Ma 	return err;
981d548ef51SLiang Ma }
982d548ef51SLiang Ma 
983d548ef51SLiang Ma 
984d548ef51SLiang Ma 
985d548ef51SLiang Ma int
986d548ef51SLiang Ma opdl_selftest(void)
987d548ef51SLiang Ma {
988d548ef51SLiang Ma 	struct test *t = malloc(sizeof(struct test));
989d548ef51SLiang Ma 	int ret;
990d548ef51SLiang Ma 
991d548ef51SLiang Ma 	const char *eventdev_name = "event_opdl0";
992d548ef51SLiang Ma 
993d548ef51SLiang Ma 	evdev = rte_event_dev_get_dev_id(eventdev_name);
994d548ef51SLiang Ma 
995d548ef51SLiang Ma 	if (evdev < 0) {
996*f665790aSDavid Marchand 		PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.",
997d548ef51SLiang Ma 				__LINE__, eventdev_name);
998d548ef51SLiang Ma 		/* turn on stats by default */
999d548ef51SLiang Ma 		if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
1000*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "Error creating eventdev");
1001a2526b26SLiang Ma 			free(t);
1002d548ef51SLiang Ma 			return -1;
1003d548ef51SLiang Ma 		}
1004d548ef51SLiang Ma 		evdev = rte_event_dev_get_dev_id(eventdev_name);
1005d548ef51SLiang Ma 		if (evdev < 0) {
1006*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "Error finding newly created eventdev");
1007a2526b26SLiang Ma 			free(t);
1008d548ef51SLiang Ma 			return -1;
1009d548ef51SLiang Ma 		}
1010d548ef51SLiang Ma 	}
1011d548ef51SLiang Ma 
1012d548ef51SLiang Ma 	/* Only create mbuf pool once, reuse for each test run */
1013d548ef51SLiang Ma 	if (!eventdev_func_mempool) {
1014d548ef51SLiang Ma 		eventdev_func_mempool = rte_pktmbuf_pool_create(
1015d548ef51SLiang Ma 				"EVENTDEV_SW_SA_MBUF_POOL",
1016d548ef51SLiang Ma 				(1<<12), /* 4k buffers */
1017d548ef51SLiang Ma 				32 /*MBUF_CACHE_SIZE*/,
1018d548ef51SLiang Ma 				0,
1019d548ef51SLiang Ma 				512, /* use very small mbufs */
1020d548ef51SLiang Ma 				rte_socket_id());
1021d548ef51SLiang Ma 		if (!eventdev_func_mempool) {
1022*f665790aSDavid Marchand 			PMD_DRV_LOG(ERR, "ERROR creating mempool");
1023a2526b26SLiang Ma 			free(t);
1024d548ef51SLiang Ma 			return -1;
1025d548ef51SLiang Ma 		}
1026d548ef51SLiang Ma 	}
1027d548ef51SLiang Ma 	t->mbuf_pool = eventdev_func_mempool;
1028d548ef51SLiang Ma 
1029*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...");
1030d548ef51SLiang Ma 	ret = ordered_basic(t);
1031d548ef51SLiang Ma 
1032*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...");
1033d548ef51SLiang Ma 	ret = atomic_basic(t);
1034d548ef51SLiang Ma 
1035d548ef51SLiang Ma 
1036*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "*** Running QID  Basic test...");
1037d548ef51SLiang Ma 	ret = qid_basic(t);
1038d548ef51SLiang Ma 
1039*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...");
1040d548ef51SLiang Ma 	ret = single_link(t);
1041d548ef51SLiang Ma 
1042*f665790aSDavid Marchand 	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...");
1043d548ef51SLiang Ma 	ret = single_link_w_stats(t);
1044d548ef51SLiang Ma 
1045d548ef51SLiang Ma 	/*
1046a2526b26SLiang Ma 	 * Free test instance, free  mempool
1047d548ef51SLiang Ma 	 */
1048a2526b26SLiang Ma 	rte_mempool_free(t->mbuf_pool);
1049d548ef51SLiang Ma 	free(t);
1050d548ef51SLiang Ma 
1051d548ef51SLiang Ma 	if (ret != 0)
1052d548ef51SLiang Ma 		return ret;
1053d548ef51SLiang Ma 	return 0;
1054d548ef51SLiang Ma 
1055d548ef51SLiang Ma }
1056