xref: /dpdk/drivers/event/sw/sw_evdev.c (revision dca926ca9faa61b0ec7bdc93c605558d37b08fb1)
15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2016-2017 Intel Corporation
3aaa4a221SBruce Richardson  */
4aaa4a221SBruce Richardson 
5b1b3d9f9SJerin Jacob #include <inttypes.h>
6aaa4a221SBruce Richardson #include <string.h>
7aaa4a221SBruce Richardson 
8d4a586d2SJianfeng Tan #include <rte_bus_vdev.h>
9aaa4a221SBruce Richardson #include <rte_kvargs.h>
10aaa4a221SBruce Richardson #include <rte_ring.h>
11371a688fSBruce Richardson #include <rte_errno.h>
1286aed50aSBruce Richardson #include <rte_event_ring.h>
13a599eb31SHarry van Haaren #include <rte_service_component.h>
14aaa4a221SBruce Richardson 
15aaa4a221SBruce Richardson #include "sw_evdev.h"
16*dca926caSGage Eads #include "iq_chunk.h"
17aaa4a221SBruce Richardson 
18aaa4a221SBruce Richardson #define EVENTDEV_NAME_SW_PMD event_sw
19aaa4a221SBruce Richardson #define NUMA_NODE_ARG "numa_node"
20aaa4a221SBruce Richardson #define SCHED_QUANTA_ARG "sched_quanta"
21aaa4a221SBruce Richardson #define CREDIT_QUANTA_ARG "credit_quanta"
22aaa4a221SBruce Richardson 
2398dc055fSBruce Richardson static void
2498dc055fSBruce Richardson sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
2598dc055fSBruce Richardson 
2698dc055fSBruce Richardson static int
27371a688fSBruce Richardson sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
28371a688fSBruce Richardson 		const uint8_t priorities[], uint16_t num)
29371a688fSBruce Richardson {
30371a688fSBruce Richardson 	struct sw_port *p = port;
31371a688fSBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
32371a688fSBruce Richardson 	int i;
33371a688fSBruce Richardson 
34371a688fSBruce Richardson 	RTE_SET_USED(priorities);
35371a688fSBruce Richardson 	for (i = 0; i < num; i++) {
36371a688fSBruce Richardson 		struct sw_qid *q = &sw->qids[queues[i]];
37e1f2dcdbSGage Eads 		unsigned int j;
38371a688fSBruce Richardson 
39371a688fSBruce Richardson 		/* check for qid map overflow */
40371a688fSBruce Richardson 		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
41371a688fSBruce Richardson 			rte_errno = -EDQUOT;
42371a688fSBruce Richardson 			break;
43371a688fSBruce Richardson 		}
44371a688fSBruce Richardson 
45371a688fSBruce Richardson 		if (p->is_directed && p->num_qids_mapped > 0) {
46371a688fSBruce Richardson 			rte_errno = -EDQUOT;
47371a688fSBruce Richardson 			break;
48371a688fSBruce Richardson 		}
49371a688fSBruce Richardson 
50e1f2dcdbSGage Eads 		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
51e1f2dcdbSGage Eads 			if (q->cq_map[j] == p->id)
52e1f2dcdbSGage Eads 				break;
53e1f2dcdbSGage Eads 		}
54e1f2dcdbSGage Eads 
55e1f2dcdbSGage Eads 		/* check if port is already linked */
56e1f2dcdbSGage Eads 		if (j < q->cq_num_mapped_cqs)
57e1f2dcdbSGage Eads 			continue;
58e1f2dcdbSGage Eads 
59371a688fSBruce Richardson 		if (q->type == SW_SCHED_TYPE_DIRECT) {
60371a688fSBruce Richardson 			/* check directed qids only map to one port */
61371a688fSBruce Richardson 			if (p->num_qids_mapped > 0) {
62371a688fSBruce Richardson 				rte_errno = -EDQUOT;
63371a688fSBruce Richardson 				break;
64371a688fSBruce Richardson 			}
65371a688fSBruce Richardson 			/* check port only takes a directed flow */
66371a688fSBruce Richardson 			if (num > 1) {
67371a688fSBruce Richardson 				rte_errno = -EDQUOT;
68371a688fSBruce Richardson 				break;
69371a688fSBruce Richardson 			}
70371a688fSBruce Richardson 
71371a688fSBruce Richardson 			p->is_directed = 1;
72371a688fSBruce Richardson 			p->num_qids_mapped = 1;
73371a688fSBruce Richardson 		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
74371a688fSBruce Richardson 			p->num_ordered_qids++;
75371a688fSBruce Richardson 			p->num_qids_mapped++;
766da10cf0SHarry van Haaren 		} else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
776da10cf0SHarry van Haaren 				q->type == RTE_SCHED_TYPE_PARALLEL) {
78371a688fSBruce Richardson 			p->num_qids_mapped++;
79371a688fSBruce Richardson 		}
80371a688fSBruce Richardson 
81371a688fSBruce Richardson 		q->cq_map[q->cq_num_mapped_cqs] = p->id;
82371a688fSBruce Richardson 		rte_smp_wmb();
83371a688fSBruce Richardson 		q->cq_num_mapped_cqs++;
84371a688fSBruce Richardson 	}
85371a688fSBruce Richardson 	return i;
86371a688fSBruce Richardson }
87371a688fSBruce Richardson 
88371a688fSBruce Richardson static int
89371a688fSBruce Richardson sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
90371a688fSBruce Richardson 		uint16_t nb_unlinks)
91371a688fSBruce Richardson {
92371a688fSBruce Richardson 	struct sw_port *p = port;
93371a688fSBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
94371a688fSBruce Richardson 	unsigned int i, j;
95371a688fSBruce Richardson 
96371a688fSBruce Richardson 	int unlinked = 0;
97371a688fSBruce Richardson 	for (i = 0; i < nb_unlinks; i++) {
98371a688fSBruce Richardson 		struct sw_qid *q = &sw->qids[queues[i]];
99371a688fSBruce Richardson 		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
100371a688fSBruce Richardson 			if (q->cq_map[j] == p->id) {
101371a688fSBruce Richardson 				q->cq_map[j] =
102371a688fSBruce Richardson 					q->cq_map[q->cq_num_mapped_cqs - 1];
103371a688fSBruce Richardson 				rte_smp_wmb();
104371a688fSBruce Richardson 				q->cq_num_mapped_cqs--;
105371a688fSBruce Richardson 				unlinked++;
106371a688fSBruce Richardson 
107371a688fSBruce Richardson 				p->num_qids_mapped--;
108371a688fSBruce Richardson 
109371a688fSBruce Richardson 				if (q->type == RTE_SCHED_TYPE_ORDERED)
110371a688fSBruce Richardson 					p->num_ordered_qids--;
111371a688fSBruce Richardson 
112371a688fSBruce Richardson 				continue;
113371a688fSBruce Richardson 			}
114371a688fSBruce Richardson 		}
115371a688fSBruce Richardson 	}
116371a688fSBruce Richardson 	return unlinked;
117371a688fSBruce Richardson }
118371a688fSBruce Richardson 
119371a688fSBruce Richardson static int
12098dc055fSBruce Richardson sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
12198dc055fSBruce Richardson 		const struct rte_event_port_conf *conf)
12298dc055fSBruce Richardson {
12398dc055fSBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
12498dc055fSBruce Richardson 	struct sw_port *p = &sw->ports[port_id];
12586aed50aSBruce Richardson 	char buf[RTE_RING_NAMESIZE];
12698dc055fSBruce Richardson 	unsigned int i;
12798dc055fSBruce Richardson 
12898dc055fSBruce Richardson 	struct rte_event_dev_info info;
12998dc055fSBruce Richardson 	sw_info_get(dev, &info);
13098dc055fSBruce Richardson 
13198dc055fSBruce Richardson 	/* detect re-configuring and return credits to instance if needed */
13298dc055fSBruce Richardson 	if (p->initialized) {
13398dc055fSBruce Richardson 		/* taking credits from pool is done one quanta at a time, and
13498dc055fSBruce Richardson 		 * credits may be spend (counted in p->inflights) or still
13598dc055fSBruce Richardson 		 * available in the port (p->inflight_credits). We must return
13698dc055fSBruce Richardson 		 * the sum to no leak credits
13798dc055fSBruce Richardson 		 */
13898dc055fSBruce Richardson 		int possible_inflights = p->inflight_credits + p->inflights;
13998dc055fSBruce Richardson 		rte_atomic32_sub(&sw->inflights, possible_inflights);
14098dc055fSBruce Richardson 	}
14198dc055fSBruce Richardson 
14298dc055fSBruce Richardson 	*p = (struct sw_port){0}; /* zero entire structure */
14398dc055fSBruce Richardson 	p->id = port_id;
14498dc055fSBruce Richardson 	p->sw = sw;
14598dc055fSBruce Richardson 
14686aed50aSBruce Richardson 	/* check to see if rings exists - port_setup() can be called multiple
14786aed50aSBruce Richardson 	 * times legally (assuming device is stopped). If ring exists, free it
14886aed50aSBruce Richardson 	 * to so it gets re-created with the correct size
14986aed50aSBruce Richardson 	 */
15086aed50aSBruce Richardson 	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
15186aed50aSBruce Richardson 			port_id, "rx_worker_ring");
15286aed50aSBruce Richardson 	struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
15386aed50aSBruce Richardson 	if (existing_ring)
15486aed50aSBruce Richardson 		rte_event_ring_free(existing_ring);
15586aed50aSBruce Richardson 
15686aed50aSBruce Richardson 	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
15786aed50aSBruce Richardson 			dev->data->socket_id,
15886aed50aSBruce Richardson 			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
15998dc055fSBruce Richardson 	if (p->rx_worker_ring == NULL) {
16098dc055fSBruce Richardson 		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
16198dc055fSBruce Richardson 				port_id);
16298dc055fSBruce Richardson 		return -1;
16398dc055fSBruce Richardson 	}
16498dc055fSBruce Richardson 
16598dc055fSBruce Richardson 	p->inflight_max = conf->new_event_threshold;
16698dc055fSBruce Richardson 
16786aed50aSBruce Richardson 	/* check if ring exists, same as rx_worker above */
16886aed50aSBruce Richardson 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
16986aed50aSBruce Richardson 			port_id, "cq_worker_ring");
17086aed50aSBruce Richardson 	existing_ring = rte_event_ring_lookup(buf);
17186aed50aSBruce Richardson 	if (existing_ring)
17286aed50aSBruce Richardson 		rte_event_ring_free(existing_ring);
17386aed50aSBruce Richardson 
17486aed50aSBruce Richardson 	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
17586aed50aSBruce Richardson 			dev->data->socket_id,
17686aed50aSBruce Richardson 			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
17798dc055fSBruce Richardson 	if (p->cq_worker_ring == NULL) {
17886aed50aSBruce Richardson 		rte_event_ring_free(p->rx_worker_ring);
17998dc055fSBruce Richardson 		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
18098dc055fSBruce Richardson 				port_id);
18198dc055fSBruce Richardson 		return -1;
18298dc055fSBruce Richardson 	}
18398dc055fSBruce Richardson 	sw->cq_ring_space[port_id] = conf->dequeue_depth;
18498dc055fSBruce Richardson 
18598dc055fSBruce Richardson 	/* set hist list contents to empty */
18698dc055fSBruce Richardson 	for (i = 0; i < SW_PORT_HIST_LIST; i++) {
18798dc055fSBruce Richardson 		p->hist_list[i].fid = -1;
18898dc055fSBruce Richardson 		p->hist_list[i].qid = -1;
18998dc055fSBruce Richardson 	}
19098dc055fSBruce Richardson 	dev->data->ports[port_id] = p;
19198dc055fSBruce Richardson 
19298dc055fSBruce Richardson 	rte_smp_wmb();
19398dc055fSBruce Richardson 	p->initialized = 1;
19498dc055fSBruce Richardson 	return 0;
19598dc055fSBruce Richardson }
19698dc055fSBruce Richardson 
19798dc055fSBruce Richardson static void
19898dc055fSBruce Richardson sw_port_release(void *port)
19998dc055fSBruce Richardson {
20098dc055fSBruce Richardson 	struct sw_port *p = (void *)port;
20198dc055fSBruce Richardson 	if (p == NULL)
20298dc055fSBruce Richardson 		return;
20398dc055fSBruce Richardson 
20486aed50aSBruce Richardson 	rte_event_ring_free(p->rx_worker_ring);
20586aed50aSBruce Richardson 	rte_event_ring_free(p->cq_worker_ring);
20698dc055fSBruce Richardson 	memset(p, 0, sizeof(*p));
20798dc055fSBruce Richardson }
20898dc055fSBruce Richardson 
/* Initialize one queue (qid): its internal queues (IQs), flow-ID table,
 * and - for ordered queues - the reorder buffer and its freelist ring.
 *
 * Returns 0 on success, -EINVAL on any failure (all partially-created
 * resources are released via the cleanup path).
 */
static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
		const struct rte_event_queue_conf *queue_conf)
{
	unsigned int i;
	int dev_id = sw->data->dev_id;
	int socket_id = sw->data->socket_id;
	char buf[IQ_ROB_NAMESIZE];
	struct sw_qid *qid = &sw->qids[idx];

	/* one IQ per priority level */
	for (i = 0; i < SW_IQS_MAX; i++)
		iq_init(sw, &qid->iq[i]);

	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)
		qid->fids[i] = fid;

	qid->id = idx;
	qid->type = type;
	qid->priority = queue_conf->priority;

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		char ring_name[RTE_RING_NAMESIZE];
		uint32_t window_size;

		/* rte_ring and window_size_mask require window_size to
		 * be a power-of-2.
		 */
		window_size = rte_align32pow2(
				queue_conf->nb_atomic_order_sequences);

		/* window_size is used as a mask, hence the -1 */
		qid->window_size = window_size - 1;

		if (!window_size) {
			SW_LOG_DBG(
				"invalid reorder_window_size for ordered queue\n"
				);
			goto cleanup;
		}

		/* NOTE(review): "i" here is SW_IQS_MAX after the IQ-init loop
		 * above, so every ROB gets the same numeric suffix - the name
		 * is only a zone label for rte_zmalloc_socket, not a lookup
		 * key, so no collision results.
		 */
		snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
		qid->reorder_buffer = rte_zmalloc_socket(buf,
				window_size * sizeof(qid->reorder_buffer[0]),
				0, socket_id);
		if (!qid->reorder_buffer) {
			SW_LOG_DBG("reorder_buffer malloc failed\n");
			goto cleanup;
		}

		memset(&qid->reorder_buffer[0],
		       0,
		       window_size * sizeof(qid->reorder_buffer[0]));

		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
				dev_id, idx);

		/* lookup the ring, and if it already exists, free it */
		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
		if (cleanup)
			rte_ring_free(cleanup);

		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
				window_size,
				socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qid->reorder_buffer_freelist) {
			SW_LOG_DBG("freelist ring create failed");
			goto cleanup;
		}

		/* Populate the freelist with reorder buffer entries. Enqueue
		 * 'window_size - 1' entries because the rte_ring holds only
		 * that many.
		 */
		for (i = 0; i < window_size - 1; i++) {
			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
						&qid->reorder_buffer[i]) < 0)
				goto cleanup;
		}

		qid->reorder_buffer_index = 0;
		qid->cq_next_tx = 0;
	}

	qid->initialized = 1;

	return 0;

cleanup:
	/* return any chunks the IQs hold to the device-wide free list */
	for (i = 0; i < SW_IQS_MAX; i++) {
		if (qid->iq[i].head)
			iq_free_chunk(sw, qid->iq[i].head);
	}

	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;
	}

	if (qid->reorder_buffer_freelist) {
		rte_ring_free(qid->reorder_buffer_freelist);
		qid->reorder_buffer_freelist = NULL;
	}

	return -EINVAL;
}
3165ffb2f14SBruce Richardson 
317e1f2dcdbSGage Eads static void
318e1f2dcdbSGage Eads sw_queue_release(struct rte_eventdev *dev, uint8_t id)
319e1f2dcdbSGage Eads {
320e1f2dcdbSGage Eads 	struct sw_evdev *sw = sw_pmd_priv(dev);
321e1f2dcdbSGage Eads 	struct sw_qid *qid = &sw->qids[id];
322e1f2dcdbSGage Eads 	uint32_t i;
323e1f2dcdbSGage Eads 
324*dca926caSGage Eads 	for (i = 0; i < SW_IQS_MAX; i++) {
325*dca926caSGage Eads 		if (!qid->iq[i].head)
326*dca926caSGage Eads 			continue;
327*dca926caSGage Eads 		iq_free_chunk(sw, qid->iq[i].head);
328*dca926caSGage Eads 	}
329e1f2dcdbSGage Eads 
330e1f2dcdbSGage Eads 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
331e1f2dcdbSGage Eads 		rte_free(qid->reorder_buffer);
332e1f2dcdbSGage Eads 		rte_ring_free(qid->reorder_buffer_freelist);
333e1f2dcdbSGage Eads 	}
334e1f2dcdbSGage Eads 	memset(qid, 0, sizeof(*qid));
335e1f2dcdbSGage Eads }
336e1f2dcdbSGage Eads 
3375ffb2f14SBruce Richardson static int
3385ffb2f14SBruce Richardson sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
3395ffb2f14SBruce Richardson 		const struct rte_event_queue_conf *conf)
3405ffb2f14SBruce Richardson {
3415ffb2f14SBruce Richardson 	int type;
3425ffb2f14SBruce Richardson 
34313370a38SPavan Nikhilesh 	type = conf->schedule_type;
34413370a38SPavan Nikhilesh 
3455ffb2f14SBruce Richardson 	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
3465ffb2f14SBruce Richardson 		type = SW_SCHED_TYPE_DIRECT;
34713370a38SPavan Nikhilesh 	} else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
34813370a38SPavan Nikhilesh 			& conf->event_queue_cfg) {
3495ffb2f14SBruce Richardson 		SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
3505ffb2f14SBruce Richardson 		return -ENOTSUP;
3515ffb2f14SBruce Richardson 	}
3525ffb2f14SBruce Richardson 
3535ffb2f14SBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
354e1f2dcdbSGage Eads 
355e1f2dcdbSGage Eads 	if (sw->qids[queue_id].initialized)
356e1f2dcdbSGage Eads 		sw_queue_release(dev, queue_id);
357e1f2dcdbSGage Eads 
3585ffb2f14SBruce Richardson 	return qid_init(sw, queue_id, type, conf);
3595ffb2f14SBruce Richardson }
3605ffb2f14SBruce Richardson 
3615ffb2f14SBruce Richardson static void
3621a3a4531SBruce Richardson sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
3631a3a4531SBruce Richardson 				 struct rte_event_queue_conf *conf)
3641a3a4531SBruce Richardson {
3651a3a4531SBruce Richardson 	RTE_SET_USED(dev);
3661a3a4531SBruce Richardson 	RTE_SET_USED(queue_id);
3671a3a4531SBruce Richardson 
3681a3a4531SBruce Richardson 	static const struct rte_event_queue_conf default_conf = {
3691a3a4531SBruce Richardson 		.nb_atomic_flows = 4096,
3701a3a4531SBruce Richardson 		.nb_atomic_order_sequences = 1,
37113370a38SPavan Nikhilesh 		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
3721a3a4531SBruce Richardson 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
3731a3a4531SBruce Richardson 	};
3741a3a4531SBruce Richardson 
3751a3a4531SBruce Richardson 	*conf = default_conf;
3761a3a4531SBruce Richardson }
3771a3a4531SBruce Richardson 
3781a3a4531SBruce Richardson static void
3791a3a4531SBruce Richardson sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
3801a3a4531SBruce Richardson 		 struct rte_event_port_conf *port_conf)
3811a3a4531SBruce Richardson {
3821a3a4531SBruce Richardson 	RTE_SET_USED(dev);
3831a3a4531SBruce Richardson 	RTE_SET_USED(port_id);
3841a3a4531SBruce Richardson 
3851a3a4531SBruce Richardson 	port_conf->new_event_threshold = 1024;
3861a3a4531SBruce Richardson 	port_conf->dequeue_depth = 16;
3871a3a4531SBruce Richardson 	port_conf->enqueue_depth = 16;
3881a3a4531SBruce Richardson }
3891a3a4531SBruce Richardson 
3901c6c0e4cSBruce Richardson static int
3911c6c0e4cSBruce Richardson sw_dev_configure(const struct rte_eventdev *dev)
3921c6c0e4cSBruce Richardson {
3931c6c0e4cSBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
3941c6c0e4cSBruce Richardson 	const struct rte_eventdev_data *data = dev->data;
3951c6c0e4cSBruce Richardson 	const struct rte_event_dev_config *conf = &data->dev_conf;
396*dca926caSGage Eads 	int num_chunks, i;
3971c6c0e4cSBruce Richardson 
3981c6c0e4cSBruce Richardson 	sw->qid_count = conf->nb_event_queues;
3991c6c0e4cSBruce Richardson 	sw->port_count = conf->nb_event_ports;
4001c6c0e4cSBruce Richardson 	sw->nb_events_limit = conf->nb_events_limit;
401656af918SBruce Richardson 	rte_atomic32_set(&sw->inflights, 0);
4021c6c0e4cSBruce Richardson 
403*dca926caSGage Eads 	/* Number of chunks sized for worst-case spread of events across IQs */
404*dca926caSGage Eads 	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
405*dca926caSGage Eads 			sw->qid_count*SW_IQS_MAX*2;
406*dca926caSGage Eads 
407*dca926caSGage Eads 	/* If this is a reconfiguration, free the previous IQ allocation */
408*dca926caSGage Eads 	if (sw->chunks)
409*dca926caSGage Eads 		rte_free(sw->chunks);
410*dca926caSGage Eads 
411*dca926caSGage Eads 	sw->chunks = rte_malloc_socket(NULL,
412*dca926caSGage Eads 				       sizeof(struct sw_queue_chunk) *
413*dca926caSGage Eads 				       num_chunks,
414*dca926caSGage Eads 				       0,
415*dca926caSGage Eads 				       sw->data->socket_id);
416*dca926caSGage Eads 	if (!sw->chunks)
417*dca926caSGage Eads 		return -ENOMEM;
418*dca926caSGage Eads 
419*dca926caSGage Eads 	sw->chunk_list_head = NULL;
420*dca926caSGage Eads 	for (i = 0; i < num_chunks; i++)
421*dca926caSGage Eads 		iq_free_chunk(sw, &sw->chunks[i]);
422*dca926caSGage Eads 
4231c6c0e4cSBruce Richardson 	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
4241c6c0e4cSBruce Richardson 		return -ENOTSUP;
4251c6c0e4cSBruce Richardson 
4261c6c0e4cSBruce Richardson 	return 0;
4271c6c0e4cSBruce Richardson }
4281c6c0e4cSBruce Richardson 
42967255ee9SNikhil Rao struct rte_eth_dev;
43067255ee9SNikhil Rao 
43167255ee9SNikhil Rao static int
43267255ee9SNikhil Rao sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
43367255ee9SNikhil Rao 			const struct rte_eth_dev *eth_dev,
43467255ee9SNikhil Rao 			uint32_t *caps)
43567255ee9SNikhil Rao {
43667255ee9SNikhil Rao 	RTE_SET_USED(dev);
43767255ee9SNikhil Rao 	RTE_SET_USED(eth_dev);
43867255ee9SNikhil Rao 	*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
43967255ee9SNikhil Rao 	return 0;
44067255ee9SNikhil Rao }
44167255ee9SNikhil Rao 
442b88e2b73SBruce Richardson static void
443b88e2b73SBruce Richardson sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
444b88e2b73SBruce Richardson {
445b88e2b73SBruce Richardson 	RTE_SET_USED(dev);
446b88e2b73SBruce Richardson 
447b88e2b73SBruce Richardson 	static const struct rte_event_dev_info evdev_sw_info = {
448b88e2b73SBruce Richardson 			.driver_name = SW_PMD_NAME,
449b88e2b73SBruce Richardson 			.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
450b88e2b73SBruce Richardson 			.max_event_queue_flows = SW_QID_NUM_FIDS,
451b88e2b73SBruce Richardson 			.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
452b88e2b73SBruce Richardson 			.max_event_priority_levels = SW_IQS_MAX,
453b88e2b73SBruce Richardson 			.max_event_ports = SW_PORTS_MAX,
454b88e2b73SBruce Richardson 			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
455b88e2b73SBruce Richardson 			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
456b88e2b73SBruce Richardson 			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
457b88e2b73SBruce Richardson 			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
458315d9f40SJerin Jacob 					RTE_EVENT_DEV_CAP_BURST_MODE |
459b88e2b73SBruce Richardson 					RTE_EVENT_DEV_CAP_EVENT_QOS),
460b88e2b73SBruce Richardson 	};
461b88e2b73SBruce Richardson 
462b88e2b73SBruce Richardson 	*info = evdev_sw_info;
463b88e2b73SBruce Richardson }
464b88e2b73SBruce Richardson 
/* Dump human-readable device state to 'f' (ops->dump callback):
 * device-wide counters, then per-port and per-queue statistics.
 * Saturated/uninitialized resources are highlighted in red via ANSI
 * escape codes.
 */
static void
sw_dump(struct rte_eventdev *dev, FILE *f)
{
	const struct sw_evdev *sw = sw_pmd_priv(dev);

	/* indexed by qid->type; order must match the scheduler type values */
	static const char * const q_type_strings[] = {
			"Ordered", "Atomic", "Parallel", "Directed"
	};
	uint32_t i;
	fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
			sw->port_count, sw->qid_count);

	fprintf(f, "\trx   %"PRIu64"\n\tdrop %"PRIu64"\n\ttx   %"PRIu64"\n",
		sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
	fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
	fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
	fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
	fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
	uint32_t inflights = rte_atomic32_read(&sw->inflights);
	uint32_t credits = sw->nb_events_limit - inflights;
	fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);

/* ANSI colour escapes used to flag saturated/uninitialized resources */
#define COL_RED "\x1b[31m"
#define COL_RESET "\x1b[0m"

	/* per-port section */
	for (i = 0; i < sw->port_count; i++) {
		int max, j;
		const struct sw_port *p = &sw->ports[i];
		if (!p->initialized) {
			fprintf(f, "  %sPort %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		fprintf(f, "  Port %d %s\n", i,
			p->is_directed ? " (SingleCons)" : "");
		/* inflight count printed in red when the port is at its max */
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64
			"\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
			sw->ports[i].stats.rx_dropped,
			sw->ports[i].stats.tx_pkts,
			(p->inflights == p->inflight_max) ?
				COL_RED : COL_RESET,
			sw->ports[i].inflights, COL_RESET);

		fprintf(f, "\tMax New: %u"
			"\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
			sw->ports[i].inflight_max,
			sw->ports[i].avg_pkt_ticks,
			sw->ports[i].inflight_credits);
		fprintf(f, "\tReceive burst distribution:\n");
		float zp_percent = p->zero_polls * 100.0 / p->total_polls;
		fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
				zp_percent);
		/* find the highest non-empty poll bucket... */
		for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
			if (p->poll_buckets[max] != 0)
				break;
		/* ...then print the percentage for each non-empty bucket */
		for (j = 0; j <= max; j++) {
			if (p->poll_buckets[j] != 0) {
				float poll_pc = p->poll_buckets[j] * 100.0 /
					p->total_polls;
				fprintf(f, "%u-%u:%.02f%% ",
					((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
					((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
					poll_pc);
			}
		}
		fprintf(f, "\n");

		if (p->rx_worker_ring) {
			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->rx_worker_ring);
			/* a full ring (no free space) is printed in red */
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\trx ring not initialized.\n");

		if (p->cq_worker_ring) {
			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->cq_worker_ring);
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\tcq ring not initialized.\n");
	}

	/* per-queue section */
	for (i = 0; i < sw->qid_count; i++) {
		const struct sw_qid *qid = &sw->qids[i];
		if (!qid->initialized) {
			fprintf(f, "  %sQueue %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		int affinities_per_port[SW_PORTS_MAX] = {0};
		uint32_t inflights = 0;

		fprintf(f, "  Queue %d (%s)\n", i, q_type_strings[qid->type]);
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64"\n",
			qid->stats.rx_pkts, qid->stats.rx_dropped,
			qid->stats.tx_pkts);
		if (qid->type == RTE_SCHED_TYPE_ORDERED) {
			struct rte_ring *rob_buf_free =
				qid->reorder_buffer_freelist;
			if (rob_buf_free)
				fprintf(f, "\tReorder entries in use: %u\n",
					rte_ring_free_count(rob_buf_free));
			else
				fprintf(f,
					"\tReorder buffer not initialized\n");
		}

		/* tally, per port, the flow IDs currently pinned to it, and
		 * accumulate this queue's total inflight packet count
		 */
		uint32_t flow;
		for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
			if (qid->fids[flow].cq != -1) {
				affinities_per_port[qid->fids[flow].cq]++;
				inflights += qid->fids[flow].pcount;
			}

		uint32_t port;
		fprintf(f, "\tPer Port Stats:\n");
		for (port = 0; port < sw->port_count; port++) {
			fprintf(f, "\t  Port %d: Pkts: %"PRIu64, port,
					qid->to_port[port]);
			fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
		}

		uint32_t iq;
		uint32_t iq_printed = 0;
		for (iq = 0; iq < SW_IQS_MAX; iq++) {
			if (!qid->iq[iq].head) {
				fprintf(f, "\tiq %d is not initialized.\n", iq);
				iq_printed = 1;
				continue;
			}
			uint32_t used = iq_count(&qid->iq[iq]);
			const char *col = COL_RESET;
			if (used > 0) {
				fprintf(f, "\t%siq %d: Used %d"
					COL_RESET"\n", col, iq, used);
				iq_printed = 1;
			}
		}
		if (iq_printed == 0)
			fprintf(f, "\t-- iqs empty --\n");
	}
}
613c66baa68SBruce Richardson 
614aaa4a221SBruce Richardson static int
615374acbf7SBruce Richardson sw_start(struct rte_eventdev *dev)
616374acbf7SBruce Richardson {
617374acbf7SBruce Richardson 	unsigned int i, j;
618374acbf7SBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
619a599eb31SHarry van Haaren 
6209f9fad8fSPavan Nikhilesh 	rte_service_component_runstate_set(sw->service_id, 1);
6219f9fad8fSPavan Nikhilesh 
622a599eb31SHarry van Haaren 	/* check a service core is mapped to this service */
6239f9fad8fSPavan Nikhilesh 	if (!rte_service_runstate_get(sw->service_id)) {
624a599eb31SHarry van Haaren 		SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
625a894d481SHarry van Haaren 				sw->service_name);
6269f9fad8fSPavan Nikhilesh 		return -ENOENT;
6279f9fad8fSPavan Nikhilesh 	}
628a599eb31SHarry van Haaren 
629374acbf7SBruce Richardson 	/* check all ports are set up */
630374acbf7SBruce Richardson 	for (i = 0; i < sw->port_count; i++)
631374acbf7SBruce Richardson 		if (sw->ports[i].rx_worker_ring == NULL) {
632374acbf7SBruce Richardson 			SW_LOG_ERR("Port %d not configured\n", i);
633374acbf7SBruce Richardson 			return -ESTALE;
634374acbf7SBruce Richardson 		}
635374acbf7SBruce Richardson 
636374acbf7SBruce Richardson 	/* check all queues are configured and mapped to ports*/
637374acbf7SBruce Richardson 	for (i = 0; i < sw->qid_count; i++)
638*dca926caSGage Eads 		if (sw->qids[i].iq[0].head == NULL ||
639374acbf7SBruce Richardson 				sw->qids[i].cq_num_mapped_cqs == 0) {
640374acbf7SBruce Richardson 			SW_LOG_ERR("Queue %d not configured\n", i);
641374acbf7SBruce Richardson 			return -ENOLINK;
642374acbf7SBruce Richardson 		}
643374acbf7SBruce Richardson 
644374acbf7SBruce Richardson 	/* build up our prioritized array of qids */
645374acbf7SBruce Richardson 	/* We don't use qsort here, as if all/multiple entries have the same
646374acbf7SBruce Richardson 	 * priority, the result is non-deterministic. From "man 3 qsort":
647374acbf7SBruce Richardson 	 * "If two members compare as equal, their order in the sorted
648374acbf7SBruce Richardson 	 * array is undefined."
649374acbf7SBruce Richardson 	 */
650374acbf7SBruce Richardson 	uint32_t qidx = 0;
651374acbf7SBruce Richardson 	for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
652374acbf7SBruce Richardson 		for (i = 0; i < sw->qid_count; i++) {
653374acbf7SBruce Richardson 			if (sw->qids[i].priority == j) {
654374acbf7SBruce Richardson 				sw->qids_prioritized[qidx] = &sw->qids[i];
655374acbf7SBruce Richardson 				qidx++;
656374acbf7SBruce Richardson 			}
657374acbf7SBruce Richardson 		}
658374acbf7SBruce Richardson 	}
659374acbf7SBruce Richardson 
660c1ad03dfSBruce Richardson 	if (sw_xstats_init(sw) < 0)
661c1ad03dfSBruce Richardson 		return -EINVAL;
662c1ad03dfSBruce Richardson 
663374acbf7SBruce Richardson 	rte_smp_wmb();
664374acbf7SBruce Richardson 	sw->started = 1;
665374acbf7SBruce Richardson 
666374acbf7SBruce Richardson 	return 0;
667374acbf7SBruce Richardson }
668374acbf7SBruce Richardson 
669374acbf7SBruce Richardson static void
670374acbf7SBruce Richardson sw_stop(struct rte_eventdev *dev)
671374acbf7SBruce Richardson {
672374acbf7SBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
673c1ad03dfSBruce Richardson 	sw_xstats_uninit(sw);
674374acbf7SBruce Richardson 	sw->started = 0;
675374acbf7SBruce Richardson 	rte_smp_wmb();
676374acbf7SBruce Richardson }
677374acbf7SBruce Richardson 
678374acbf7SBruce Richardson static int
679374acbf7SBruce Richardson sw_close(struct rte_eventdev *dev)
680374acbf7SBruce Richardson {
681374acbf7SBruce Richardson 	struct sw_evdev *sw = sw_pmd_priv(dev);
682374acbf7SBruce Richardson 	uint32_t i;
683374acbf7SBruce Richardson 
684374acbf7SBruce Richardson 	for (i = 0; i < sw->qid_count; i++)
685374acbf7SBruce Richardson 		sw_queue_release(dev, i);
686374acbf7SBruce Richardson 	sw->qid_count = 0;
687374acbf7SBruce Richardson 
688374acbf7SBruce Richardson 	for (i = 0; i < sw->port_count; i++)
689374acbf7SBruce Richardson 		sw_port_release(&sw->ports[i]);
690374acbf7SBruce Richardson 	sw->port_count = 0;
691374acbf7SBruce Richardson 
692374acbf7SBruce Richardson 	memset(&sw->stats, 0, sizeof(sw->stats));
693374acbf7SBruce Richardson 	sw->sched_called = 0;
694374acbf7SBruce Richardson 	sw->sched_no_iq_enqueues = 0;
695374acbf7SBruce Richardson 	sw->sched_no_cq_enqueues = 0;
696374acbf7SBruce Richardson 	sw->sched_cq_qid_called = 0;
697374acbf7SBruce Richardson 
698374acbf7SBruce Richardson 	return 0;
699374acbf7SBruce Richardson }
700374acbf7SBruce Richardson 
701374acbf7SBruce Richardson static int
702aaa4a221SBruce Richardson assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
703aaa4a221SBruce Richardson {
704aaa4a221SBruce Richardson 	int *socket_id = opaque;
705aaa4a221SBruce Richardson 	*socket_id = atoi(value);
706aaa4a221SBruce Richardson 	if (*socket_id >= RTE_MAX_NUMA_NODES)
707aaa4a221SBruce Richardson 		return -1;
708aaa4a221SBruce Richardson 	return 0;
709aaa4a221SBruce Richardson }
710aaa4a221SBruce Richardson 
711aaa4a221SBruce Richardson static int
712aaa4a221SBruce Richardson set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
713aaa4a221SBruce Richardson {
714aaa4a221SBruce Richardson 	int *quanta = opaque;
715aaa4a221SBruce Richardson 	*quanta = atoi(value);
716aaa4a221SBruce Richardson 	if (*quanta < 0 || *quanta >= 4096)
717aaa4a221SBruce Richardson 		return -1;
718aaa4a221SBruce Richardson 	return 0;
719aaa4a221SBruce Richardson }
720aaa4a221SBruce Richardson 
721aaa4a221SBruce Richardson static int
722aaa4a221SBruce Richardson set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
723aaa4a221SBruce Richardson {
724aaa4a221SBruce Richardson 	int *credit = opaque;
725aaa4a221SBruce Richardson 	*credit = atoi(value);
726aaa4a221SBruce Richardson 	if (*credit < 0 || *credit >= 128)
727aaa4a221SBruce Richardson 		return -1;
728aaa4a221SBruce Richardson 	return 0;
729aaa4a221SBruce Richardson }
730aaa4a221SBruce Richardson 

/* Service callback invoked by the mapped service core: run one
 * scheduling iteration for the given eventdev instance.
 */
static int32_t sw_sched_service_func(void *userdata)
{
	sw_event_schedule((struct rte_eventdev *)userdata);
	return 0;
}
738a599eb31SHarry van Haaren 
739aaa4a221SBruce Richardson static int
7405d2aa461SJan Blunck sw_probe(struct rte_vdev_device *vdev)
741aaa4a221SBruce Richardson {
742aaa4a221SBruce Richardson 	static const struct rte_eventdev_ops evdev_sw_ops = {
7431c6c0e4cSBruce Richardson 			.dev_configure = sw_dev_configure,
744b88e2b73SBruce Richardson 			.dev_infos_get = sw_info_get,
745374acbf7SBruce Richardson 			.dev_close = sw_close,
746374acbf7SBruce Richardson 			.dev_start = sw_start,
747374acbf7SBruce Richardson 			.dev_stop = sw_stop,
748c66baa68SBruce Richardson 			.dump = sw_dump,
7491a3a4531SBruce Richardson 
7501a3a4531SBruce Richardson 			.queue_def_conf = sw_queue_def_conf,
7515ffb2f14SBruce Richardson 			.queue_setup = sw_queue_setup,
7525ffb2f14SBruce Richardson 			.queue_release = sw_queue_release,
7531a3a4531SBruce Richardson 			.port_def_conf = sw_port_def_conf,
75498dc055fSBruce Richardson 			.port_setup = sw_port_setup,
75598dc055fSBruce Richardson 			.port_release = sw_port_release,
756371a688fSBruce Richardson 			.port_link = sw_port_link,
757371a688fSBruce Richardson 			.port_unlink = sw_port_unlink,
758c1ad03dfSBruce Richardson 
75967255ee9SNikhil Rao 			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
76067255ee9SNikhil Rao 
761c1ad03dfSBruce Richardson 			.xstats_get = sw_xstats_get,
762c1ad03dfSBruce Richardson 			.xstats_get_names = sw_xstats_get_names,
763c1ad03dfSBruce Richardson 			.xstats_get_by_name = sw_xstats_get_by_name,
764c1ad03dfSBruce Richardson 			.xstats_reset = sw_xstats_reset,
765aaa4a221SBruce Richardson 	};
766aaa4a221SBruce Richardson 
767aaa4a221SBruce Richardson 	static const char *const args[] = {
768aaa4a221SBruce Richardson 		NUMA_NODE_ARG,
769aaa4a221SBruce Richardson 		SCHED_QUANTA_ARG,
770aaa4a221SBruce Richardson 		CREDIT_QUANTA_ARG,
771aaa4a221SBruce Richardson 		NULL
772aaa4a221SBruce Richardson 	};
7735d2aa461SJan Blunck 	const char *name;
7745d2aa461SJan Blunck 	const char *params;
775aaa4a221SBruce Richardson 	struct rte_eventdev *dev;
776aaa4a221SBruce Richardson 	struct sw_evdev *sw;
777aaa4a221SBruce Richardson 	int socket_id = rte_socket_id();
778aaa4a221SBruce Richardson 	int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
779aaa4a221SBruce Richardson 	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
780aaa4a221SBruce Richardson 
7815d2aa461SJan Blunck 	name = rte_vdev_device_name(vdev);
7825d2aa461SJan Blunck 	params = rte_vdev_device_args(vdev);
783aaa4a221SBruce Richardson 	if (params != NULL && params[0] != '\0') {
784aaa4a221SBruce Richardson 		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
785aaa4a221SBruce Richardson 
786aaa4a221SBruce Richardson 		if (!kvlist) {
787aaa4a221SBruce Richardson 			SW_LOG_INFO(
788aaa4a221SBruce Richardson 				"Ignoring unsupported parameters when creating device '%s'\n",
789aaa4a221SBruce Richardson 				name);
790aaa4a221SBruce Richardson 		} else {
791aaa4a221SBruce Richardson 			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
792aaa4a221SBruce Richardson 					assign_numa_node, &socket_id);
793aaa4a221SBruce Richardson 			if (ret != 0) {
794aaa4a221SBruce Richardson 				SW_LOG_ERR(
795aaa4a221SBruce Richardson 					"%s: Error parsing numa node parameter",
796aaa4a221SBruce Richardson 					name);
797aaa4a221SBruce Richardson 				rte_kvargs_free(kvlist);
798aaa4a221SBruce Richardson 				return ret;
799aaa4a221SBruce Richardson 			}
800aaa4a221SBruce Richardson 
801aaa4a221SBruce Richardson 			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
802aaa4a221SBruce Richardson 					set_sched_quanta, &sched_quanta);
803aaa4a221SBruce Richardson 			if (ret != 0) {
804aaa4a221SBruce Richardson 				SW_LOG_ERR(
805aaa4a221SBruce Richardson 					"%s: Error parsing sched quanta parameter",
806aaa4a221SBruce Richardson 					name);
807aaa4a221SBruce Richardson 				rte_kvargs_free(kvlist);
808aaa4a221SBruce Richardson 				return ret;
809aaa4a221SBruce Richardson 			}
810aaa4a221SBruce Richardson 
811aaa4a221SBruce Richardson 			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
812aaa4a221SBruce Richardson 					set_credit_quanta, &credit_quanta);
813aaa4a221SBruce Richardson 			if (ret != 0) {
814aaa4a221SBruce Richardson 				SW_LOG_ERR(
815aaa4a221SBruce Richardson 					"%s: Error parsing credit quanta parameter",
816aaa4a221SBruce Richardson 					name);
817aaa4a221SBruce Richardson 				rte_kvargs_free(kvlist);
818aaa4a221SBruce Richardson 				return ret;
819aaa4a221SBruce Richardson 			}
820aaa4a221SBruce Richardson 
821aaa4a221SBruce Richardson 			rte_kvargs_free(kvlist);
822aaa4a221SBruce Richardson 		}
823aaa4a221SBruce Richardson 	}
824aaa4a221SBruce Richardson 
825aaa4a221SBruce Richardson 	SW_LOG_INFO(
826aaa4a221SBruce Richardson 			"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
827aaa4a221SBruce Richardson 			name, socket_id, sched_quanta, credit_quanta);
828aaa4a221SBruce Richardson 
829aaa4a221SBruce Richardson 	dev = rte_event_pmd_vdev_init(name,
830aaa4a221SBruce Richardson 			sizeof(struct sw_evdev), socket_id);
831aaa4a221SBruce Richardson 	if (dev == NULL) {
832aaa4a221SBruce Richardson 		SW_LOG_ERR("eventdev vdev init() failed");
833aaa4a221SBruce Richardson 		return -EFAULT;
834aaa4a221SBruce Richardson 	}
835aaa4a221SBruce Richardson 	dev->dev_ops = &evdev_sw_ops;
836656af918SBruce Richardson 	dev->enqueue = sw_event_enqueue;
837656af918SBruce Richardson 	dev->enqueue_burst = sw_event_enqueue_burst;
83865293784SJerin Jacob 	dev->enqueue_new_burst = sw_event_enqueue_burst;
8395eea2d10SJerin Jacob 	dev->enqueue_forward_burst = sw_event_enqueue_burst;
840656af918SBruce Richardson 	dev->dequeue = sw_event_dequeue;
841656af918SBruce Richardson 	dev->dequeue_burst = sw_event_dequeue_burst;
842656af918SBruce Richardson 
843656af918SBruce Richardson 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
844656af918SBruce Richardson 		return 0;
845aaa4a221SBruce Richardson 
846aaa4a221SBruce Richardson 	sw = dev->data->dev_private;
847aaa4a221SBruce Richardson 	sw->data = dev->data;
848aaa4a221SBruce Richardson 
849aaa4a221SBruce Richardson 	/* copy values passed from vdev command line to instance */
850aaa4a221SBruce Richardson 	sw->credit_update_quanta = credit_quanta;
851aaa4a221SBruce Richardson 	sw->sched_quanta = sched_quanta;
852aaa4a221SBruce Richardson 
853a599eb31SHarry van Haaren 	/* register service with EAL */
854a599eb31SHarry van Haaren 	struct rte_service_spec service;
855a599eb31SHarry van Haaren 	memset(&service, 0, sizeof(struct rte_service_spec));
856a599eb31SHarry van Haaren 	snprintf(service.name, sizeof(service.name), "%s_service", name);
857a599eb31SHarry van Haaren 	snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
858a599eb31SHarry van Haaren 			name);
859a599eb31SHarry van Haaren 	service.socket_id = socket_id;
860a599eb31SHarry van Haaren 	service.callback = sw_sched_service_func;
861a599eb31SHarry van Haaren 	service.callback_userdata = (void *)dev;
862a599eb31SHarry van Haaren 
863a894d481SHarry van Haaren 	int32_t ret = rte_service_component_register(&service, &sw->service_id);
864a599eb31SHarry van Haaren 	if (ret) {
865a599eb31SHarry van Haaren 		SW_LOG_ERR("service register() failed");
866a599eb31SHarry van Haaren 		return -ENOEXEC;
867a599eb31SHarry van Haaren 	}
868a599eb31SHarry van Haaren 
8694c2fd979SPavan Nikhilesh 	dev->data->service_inited = 1;
8704c2fd979SPavan Nikhilesh 	dev->data->service_id = sw->service_id;
8714c2fd979SPavan Nikhilesh 
872aaa4a221SBruce Richardson 	return 0;
873aaa4a221SBruce Richardson }
874aaa4a221SBruce Richardson 
875aaa4a221SBruce Richardson static int
8765d2aa461SJan Blunck sw_remove(struct rte_vdev_device *vdev)
877aaa4a221SBruce Richardson {
8785d2aa461SJan Blunck 	const char *name;
8795d2aa461SJan Blunck 
8805d2aa461SJan Blunck 	name = rte_vdev_device_name(vdev);
881aaa4a221SBruce Richardson 	if (name == NULL)
882aaa4a221SBruce Richardson 		return -EINVAL;
883aaa4a221SBruce Richardson 
884aaa4a221SBruce Richardson 	SW_LOG_INFO("Closing eventdev sw device %s\n", name);
885aaa4a221SBruce Richardson 
886aaa4a221SBruce Richardson 	return rte_event_pmd_vdev_uninit(name);
887aaa4a221SBruce Richardson }
888aaa4a221SBruce Richardson 
889aaa4a221SBruce Richardson static struct rte_vdev_driver evdev_sw_pmd_drv = {
890aaa4a221SBruce Richardson 	.probe = sw_probe,
891aaa4a221SBruce Richardson 	.remove = sw_remove
892aaa4a221SBruce Richardson };
893aaa4a221SBruce Richardson 
894aaa4a221SBruce Richardson RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
895aaa4a221SBruce Richardson RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
896aaa4a221SBruce Richardson 		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
897