/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_event_ring.h>
#include <rte_service_component.h>

#include "sw_evdev.h"
#include "iq_chunk.h"
#include "event_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"
#define MIN_BURST_SIZE_ARG "min_burst"
#define DEQ_BURST_SIZE_ARG "deq_burst"
#define REFIL_ONCE_ARG "refill_once"
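/* The parameters above are passed as vdev arguments, e.g. (an illustrative
 * command line, not taken from this file):
 *   --vdev="event_sw0,sched_quanta=64,credit_quanta=32,refill_once=1"
 */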

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

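/* Link queues to a port; this op backs rte_event_port_link(). Returns the
 * number of queues actually linked; on an early stop rte_errno is set to
 * EDQUOT, e.g. when the qid's CQ map is full or a directed queue/port
 * constraint would be violated.
 */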
static int
sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t num)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int i;

	RTE_SET_USED(priorities);
	for (i = 0; i < num; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];
		unsigned int j;

		/* check for qid map overflow */
		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
			rte_errno = EDQUOT;
			break;
		}

		if (p->is_directed && p->num_qids_mapped > 0) {
			rte_errno = EDQUOT;
			break;
		}

		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
			if (q->cq_map[j] == p->id)
				break;
		}

		/* check if port is already linked */
		if (j < q->cq_num_mapped_cqs)
			continue;

		if (q->type == SW_SCHED_TYPE_DIRECT) {
			/* check directed qids only map to one port */
			if (p->num_qids_mapped > 0) {
				rte_errno = EDQUOT;
				break;
			}
			/* check port only takes a directed flow */
			if (num > 1) {
				rte_errno = EDQUOT;
				break;
			}

			p->is_directed = 1;
			p->num_qids_mapped = 1;
		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
			p->num_ordered_qids++;
			p->num_qids_mapped++;
		} else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
				q->type == RTE_SCHED_TYPE_PARALLEL) {
			p->num_qids_mapped++;
		}

		q->cq_map[q->cq_num_mapped_cqs] = p->id;
		rte_smp_wmb();
		q->cq_num_mapped_cqs++;
	}
	return i;
}

static int
sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	unsigned int i, j;

	int unlinked = 0;
	for (i = 0; i < nb_unlinks; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];
		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
			if (q->cq_map[j] == p->id) {
				q->cq_map[j] =
					q->cq_map[q->cq_num_mapped_cqs - 1];
				rte_smp_wmb();
				q->cq_num_mapped_cqs--;
				unlinked++;

				p->num_qids_mapped--;

				if (q->type == RTE_SCHED_TYPE_ORDERED)
					p->num_ordered_qids--;

				continue;
			}
		}
	}

	p->unlinks_in_progress += unlinked;
	rte_smp_mb();

	return unlinked;
}

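/* Backs rte_event_port_unlinks_in_progress(); the application can poll this
 * until it returns zero, at which point the unlinks requested above have
 * taken effect in the scheduler.
 */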
static int
sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
{
	RTE_SET_USED(dev);
	struct sw_port *p = port;
	return p->unlinks_in_progress;
}

static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *conf)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_port *p = &sw->ports[port_id];
	char buf[RTE_RING_NAMESIZE];
	unsigned int i;

	struct rte_event_dev_info info;
	sw_info_get(dev, &info);

	/* detect re-configuring and return credits to instance if needed */
	if (p->initialized) {
		/* taking credits from the pool is done one quantum at a
		 * time, and credits may be spent (counted in p->inflights)
		 * or still available in the port (p->inflight_credits). We
		 * must return the sum so as not to leak credits.
		 */
		int possible_inflights = p->inflight_credits + p->inflights;
		rte_atomic32_sub(&sw->inflights, possible_inflights);
	}

	*p = (struct sw_port){0}; /* zero entire structure */
	p->id = port_id;
	p->sw = sw;

	/* check to see if the ring exists - port_setup() can be called
	 * multiple times legally (assuming device is stopped). If the ring
	 * exists, free it so it gets re-created with the correct size.
	 */
	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
			port_id, "rx_worker_ring");
	struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
	rte_event_ring_free(existing_ring);

	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
			dev->data->socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
	if (p->rx_worker_ring == NULL) {
		SW_LOG_ERR("Error creating RX worker ring for port %d",
				port_id);
		return -1;
	}

	p->inflight_max = conf->new_event_threshold;
	p->implicit_release = !(conf->event_port_cfg &
				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
	/* check if ring exists, same as rx_worker above */
	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
			port_id, "cq_worker_ring");
	existing_ring = rte_event_ring_lookup(buf);
	rte_event_ring_free(existing_ring);

	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
			dev->data->socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
	if (p->cq_worker_ring == NULL) {
		rte_event_ring_free(p->rx_worker_ring);
		SW_LOG_ERR("Error creating CQ worker ring for port %d",
				port_id);
		return -1;
	}
	sw->cq_ring_space[port_id] = conf->dequeue_depth;

	/* set hist list contents to empty */
	for (i = 0; i < SW_PORT_HIST_LIST; i++) {
		p->hist_list[i].fid = -1;
		p->hist_list[i].qid = -1;
	}
	dev->data->ports[port_id] = p;

	rte_smp_wmb();
	p->initialized = 1;
	return 0;
}

static void
sw_port_release(void *port)
{
	struct sw_port *p = (void *)port;
	if (p == NULL)
		return;

	rte_event_ring_free(p->rx_worker_ring);
	rte_event_ring_free(p->cq_worker_ring);
	memset(p, 0, sizeof(*p));
}

static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
		const struct rte_event_queue_conf *queue_conf)
{
	unsigned int i;
	int socket_id = sw->data->socket_id;
	struct sw_qid *qid = &sw->qids[idx];

	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)
		qid->fids[i] = fid;

	qid->id = idx;
	qid->type = type;
	qid->priority = queue_conf->priority;

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		uint32_t window_size;

		/* the ring and window_size_mask require window_size to
		 * be a power-of-2.
		 */
		window_size = rte_align32pow2(
				queue_conf->nb_atomic_order_sequences);
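		/* e.g. (illustrative) nb_atomic_order_sequences = 1000 is
		 * rounded up to window_size = 1024; the value stored below
		 * is the corresponding mask, 1023.
		 */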

		qid->window_size = window_size - 1;

		if (!window_size) {
			SW_LOG_DBG(
				"invalid reorder_window_size for ordered queue"
				);
			goto cleanup;
		}

		qid->reorder_buffer = rte_zmalloc_socket(NULL,
				window_size * sizeof(qid->reorder_buffer[0]),
				0, socket_id);
		if (!qid->reorder_buffer) {
			SW_LOG_DBG("reorder_buffer malloc failed");
			goto cleanup;
		}

		memset(&qid->reorder_buffer[0],
		       0,
		       window_size * sizeof(qid->reorder_buffer[0]));

		qid->reorder_buffer_freelist = rob_ring_create(window_size,
				socket_id);
		if (!qid->reorder_buffer_freelist) {
			SW_LOG_DBG("freelist ring create failed");
			goto cleanup;
		}

		/* Populate the freelist with reorder buffer entries. Enqueue
		 * 'window_size - 1' entries because the ring holds only
		 * that many.
		 */
		for (i = 0; i < window_size - 1; i++) {
			if (rob_ring_enqueue(qid->reorder_buffer_freelist,
						&qid->reorder_buffer[i]) != 1)
				goto cleanup;
		}

		qid->reorder_buffer_index = 0;
		qid->cq_next_tx = 0;
	}

	qid->initialized = 1;

	return 0;

cleanup:
	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;
	}

	if (qid->reorder_buffer_freelist) {
		rob_ring_free(qid->reorder_buffer_freelist);
		qid->reorder_buffer_freelist = NULL;
	}

	return -EINVAL;
}

static void
sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_qid *qid = &sw->qids[id];

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		rte_free(qid->reorder_buffer);
		rob_ring_free(qid->reorder_buffer_freelist);
	}
	memset(qid, 0, sizeof(*qid));
}

static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	int type;

	type = conf->schedule_type;

	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
		type = SW_SCHED_TYPE_DIRECT;
	} else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
			& conf->event_queue_cfg) {
		SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported");
		return -ENOTSUP;
	}

	struct sw_evdev *sw = sw_pmd_priv(dev);

	if (sw->qids[queue_id].initialized)
		sw_queue_release(dev, queue_id);

	return qid_init(sw, queue_id, type, conf);
}

static void
sw_init_qid_iqs(struct sw_evdev *sw)
{
	int i, j;

	/* Initialize the IQ memory of all configured qids */
	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		struct sw_qid *qid = &sw->qids[i];

		if (!qid->initialized)
			continue;

		for (j = 0; j < SW_IQS_MAX; j++)
			iq_init(sw, &qid->iq[j]);
	}
}

static int
sw_qids_empty(struct sw_evdev *sw)
{
	unsigned int i, j;

	for (i = 0; i < sw->qid_count; i++) {
		for (j = 0; j < SW_IQS_MAX; j++) {
			if (iq_count(&sw->qids[i].iq[j]))
				return 0;
		}
	}

	return 1;
}

static int
sw_ports_empty(struct sw_evdev *sw)
{
	unsigned int i;

	for (i = 0; i < sw->port_count; i++) {
		if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
		     rte_event_ring_count(sw->ports[i].cq_worker_ring))
			return 0;
	}

	return 1;
}

static void
sw_drain_ports(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	eventdev_stop_flush_t flush;
	unsigned int i;
	uint8_t dev_id;
	void *arg;

	flush = dev->dev_ops->dev_stop_flush;
	dev_id = dev->data->dev_id;
	arg = dev->data->dev_stop_flush_arg;
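	/* The flush callback and its argument above are registered by the
	 * application via rte_event_dev_stop_flush_callback_register().
	 */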

	for (i = 0; i < sw->port_count; i++) {
		struct rte_event ev;

		while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
			if (flush)
				flush(dev_id, ev, arg);

			ev.op = RTE_EVENT_OP_RELEASE;
			rte_event_enqueue_burst(dev_id, i, &ev, 1);
		}
	}
}

static void
sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	eventdev_stop_flush_t flush;
	uint8_t dev_id;
	void *arg;

	flush = dev->dev_ops->dev_stop_flush;
	dev_id = dev->data->dev_id;
	arg = dev->data->dev_stop_flush_arg;

	while (iq_count(iq) > 0) {
		struct rte_event ev;

		iq_dequeue_burst(sw, iq, &ev, 1);

		if (flush)
			flush(dev_id, ev, arg);
	}
}

static void
sw_drain_queues(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	unsigned int i, j;

	for (i = 0; i < sw->qid_count; i++) {
		for (j = 0; j < SW_IQS_MAX; j++)
			sw_drain_queue(dev, &sw->qids[i].iq[j]);
	}
}

static void
sw_clean_qid_iqs(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int i, j;

	/* Release the IQ memory of all configured qids */
	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		struct sw_qid *qid = &sw->qids[i];

		for (j = 0; j < SW_IQS_MAX; j++) {
			if (!qid->iq[j].head)
				continue;
			iq_free_chunk_list(sw, qid->iq[j].head);
			qid->iq[j].head = NULL;
		}
	}
}

static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 4096,
		.nb_atomic_order_sequences = 1,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}

static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		 struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
	port_conf->event_port_cfg = 0;
}

static int
sw_dev_configure(const struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;
	int num_chunks, i;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;
	rte_atomic32_set(&sw->inflights, 0);

	/* Number of chunks sized for worst-case spread of events across IQs */
	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
			sw->qid_count*SW_IQS_MAX*2;
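	/* (Illustrative reading:) one chunk of headroom for the rounded-down
	 * division, plus up to two in-flight chunks per IQ.
	 */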

	/* If this is a reconfiguration, free the previous IQ allocation. All
	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
	 * will be reinitialized in sw_start().
	 */
	rte_free(sw->chunks);

	sw->chunks = rte_malloc_socket(NULL,
				       sizeof(struct sw_queue_chunk) *
				       num_chunks,
				       0,
				       sw->data->socket_id);
	if (!sw->chunks)
		return -ENOMEM;

	sw->chunk_list_head = NULL;
	for (i = 0; i < num_chunks; i++)
		iq_free_chunk(sw, &sw->chunks[i]);

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		return -ENOTSUP;

	return 0;
}

struct rte_eth_dev;

static int
sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev,
			uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	return 0;
}

static int
sw_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
			  uint32_t *caps,
			  const struct event_timer_adapter_ops **ops)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(flags);
	*caps = RTE_EVENT_TIMER_ADAPTER_SW_CAP;

	/* Use default SW ops */
	*ops = NULL;

	return 0;
}

static int
sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cdev,
			   uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(cdev);
	*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	return 0;
}

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_sw_info = {
			.driver_name = SW_PMD_NAME,
			.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
			.max_event_queue_flows = SW_QID_NUM_FIDS,
			.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
			.max_event_priority_levels = SW_IQS_MAX,
			.max_event_ports = SW_PORTS_MAX,
			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
			.event_dev_cap = (
				RTE_EVENT_DEV_CAP_ATOMIC |
				RTE_EVENT_DEV_CAP_ORDERED |
				RTE_EVENT_DEV_CAP_PARALLEL |
				RTE_EVENT_DEV_CAP_QUEUE_QOS |
				RTE_EVENT_DEV_CAP_BURST_MODE |
				RTE_EVENT_DEV_CAP_EVENT_QOS |
				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
				RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
			.max_profiles_per_port = 1,
	};

	*info = evdev_sw_info;
}

static void
sw_dump(struct rte_eventdev *dev, FILE *f)
{
	const struct sw_evdev *sw = sw_pmd_priv(dev);

	static const char * const q_type_strings[] = {
			"Ordered", "Atomic", "Parallel", "Directed"
	};
	uint32_t i;
	fprintf(f, "EventDev %s: ports %d, qids %d\n",
		dev->data->name, sw->port_count, sw->qid_count);

	fprintf(f, "\trx   %"PRIu64"\n\tdrop %"PRIu64"\n\ttx   %"PRIu64"\n",
		sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
	fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
	fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
	fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
	fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
	uint32_t inflights = rte_atomic32_read(&sw->inflights);
	uint32_t credits = sw->nb_events_limit - inflights;
	fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);

#define COL_RED "\x1b[31m"
#define COL_RESET "\x1b[0m"

	for (i = 0; i < sw->port_count; i++) {
		int max, j;
		const struct sw_port *p = &sw->ports[i];
		if (!p->initialized) {
			fprintf(f, "  %sPort %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		fprintf(f, "  Port %d %s\n", i,
			p->is_directed ? " (SingleCons)" : "");
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64
			"\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
			sw->ports[i].stats.rx_dropped,
			sw->ports[i].stats.tx_pkts,
			(p->inflights == p->inflight_max) ?
				COL_RED : COL_RESET,
			sw->ports[i].inflights, COL_RESET);

		fprintf(f, "\tMax New: %u"
			"\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
			sw->ports[i].inflight_max,
			sw->ports[i].avg_pkt_ticks,
			sw->ports[i].inflight_credits);
		fprintf(f, "\tReceive burst distribution:\n");
		float zp_percent = p->zero_polls * 100.0 / p->total_polls;
		fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
				zp_percent);
		for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
			if (p->poll_buckets[max] != 0)
				break;
		for (j = 0; j <= max; j++) {
			if (p->poll_buckets[j] != 0) {
				float poll_pc = p->poll_buckets[j] * 100.0 /
					p->total_polls;
				fprintf(f, "%u-%u:%.02f%% ",
					((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
					((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
					poll_pc);
			}
		}
		fprintf(f, "\n");

		if (p->rx_worker_ring) {
			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->rx_worker_ring);
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\trx ring not initialized.\n");

		if (p->cq_worker_ring) {
			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->cq_worker_ring);
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\tcq ring not initialized.\n");
	}

	for (i = 0; i < sw->qid_count; i++) {
		const struct sw_qid *qid = &sw->qids[i];
		if (!qid->initialized) {
			fprintf(f, "  %sQueue %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		int affinities_per_port[SW_PORTS_MAX] = {0};

		fprintf(f, "  Queue %d (%s)\n", i, q_type_strings[qid->type]);
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64"\n",
			qid->stats.rx_pkts, qid->stats.rx_dropped,
			qid->stats.tx_pkts);
		if (qid->type == RTE_SCHED_TYPE_ORDERED) {
			struct rob_ring *rob_buf_free =
				qid->reorder_buffer_freelist;
			if (rob_buf_free)
				fprintf(f, "\tReorder entries in use: %u\n",
					rob_ring_free_count(rob_buf_free));
			else
				fprintf(f,
					"\tReorder buffer not initialized\n");
		}

		uint32_t flow;
		for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
			if (qid->fids[flow].cq != -1) {
				affinities_per_port[qid->fids[flow].cq]++;
			}

		uint32_t port;
		fprintf(f, "\tPer Port Stats:\n");
		for (port = 0; port < sw->port_count; port++) {
			fprintf(f, "\t  Port %d: Pkts: %"PRIu64, port,
					qid->to_port[port]);
			fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
		}

		uint32_t iq;
		uint32_t iq_printed = 0;
		for (iq = 0; iq < SW_IQS_MAX; iq++) {
			if (!qid->iq[iq].head) {
				fprintf(f, "\tiq %d is not initialized.\n", iq);
				iq_printed = 1;
				continue;
			}
			uint32_t used = iq_count(&qid->iq[iq]);
			const char *col = COL_RESET;
			if (used > 0) {
				fprintf(f, "\t%siq %d: Used %d"
					COL_RESET"\n", col, iq, used);
				iq_printed = 1;
			}
		}
		if (iq_printed == 0)
			fprintf(f, "\t-- iqs empty --\n");
	}
}

static int
sw_start(struct rte_eventdev *dev)
{
	unsigned int i, j;
	struct sw_evdev *sw = sw_pmd_priv(dev);

	rte_service_component_runstate_set(sw->service_id, 1);

	/* check a service core is mapped to this service */
	if (!rte_service_runstate_get(sw->service_id)) {
		SW_LOG_ERR("Warning: No Service core enabled on service %s",
				sw->service_name);
		return -ENOENT;
	}
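	/* (Illustrative note:) the check above passes once the application
	 * has started the service and mapped a service lcore to it, e.g.
	 * with rte_service_runstate_set(sw->service_id, 1) and
	 * rte_service_map_lcore_set(sw->service_id, lcore_id, 1).
	 */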

	/* check all ports are set up */
	for (i = 0; i < sw->port_count; i++)
		if (sw->ports[i].rx_worker_ring == NULL) {
			SW_LOG_ERR("Port %d not configured", i);
			return -ESTALE;
		}

	/* check all queues are configured and mapped to ports */
	for (i = 0; i < sw->qid_count; i++)
		if (!sw->qids[i].initialized ||
		    sw->qids[i].cq_num_mapped_cqs == 0) {
			SW_LOG_ERR("Queue %d not configured", i);
			return -ENOLINK;
		}

	/* build up our prioritized array of qids */
	/* We don't use qsort here, as if all/multiple entries have the same
	 * priority, the result is non-deterministic. From "man 3 qsort":
	 * "If two members compare as equal, their order in the sorted
	 * array is undefined."
	 */
	uint32_t qidx = 0;
	for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
		for (i = 0; i < sw->qid_count; i++) {
			if (sw->qids[i].priority == j) {
				sw->qids_prioritized[qidx] = &sw->qids[i];
				qidx++;
			}
		}
	}

	sw_init_qid_iqs(sw);

	if (sw_xstats_init(sw) < 0)
		return -EINVAL;

	rte_smp_wmb();
	sw->started = 1;

	return 0;
}

static void
sw_stop(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int32_t runstate;

	/* Stop the scheduler if it's running */
	runstate = rte_service_runstate_get(sw->service_id);
	if (runstate == 1)
		rte_service_runstate_set(sw->service_id, 0);

	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	/* Flush all events out of the device */
	while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
		sw_event_schedule(dev);
		sw_drain_ports(dev);
		sw_drain_queues(dev);
	}

	sw_clean_qid_iqs(dev);
	sw_xstats_uninit(sw);
	sw->started = 0;
	rte_smp_wmb();

	if (runstate == 1)
		rte_service_runstate_set(sw->service_id, 1);
}

static int
sw_close(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i;

	for (i = 0; i < sw->qid_count; i++)
		sw_queue_release(dev, i);
	sw->qid_count = 0;

	for (i = 0; i < sw->port_count; i++)
		sw_port_release(&sw->ports[i]);
	sw->port_count = 0;

	memset(&sw->stats, 0, sizeof(sw->stats));
	sw->sched_called = 0;
	sw->sched_no_iq_enqueues = 0;
	sw->sched_no_cq_enqueues = 0;
	sw->sched_cq_qid_called = 0;

	return 0;
}

static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *quanta = opaque;
	*quanta = atoi(value);
	if (*quanta < 0 || *quanta >= 4096)
		return -1;
	return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *credit = opaque;
	*credit = atoi(value);
	if (*credit < 0 || *credit >= 128)
		return -1;
	return 0;
}

static int
set_deq_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
{
	int *deq_burst_sz = opaque;
	*deq_burst_sz = atoi(value);
	if (*deq_burst_sz < 0 || *deq_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
		return -1;
	return 0;
}

static int
set_min_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
{
	int *min_burst_sz = opaque;
	*min_burst_sz = atoi(value);
	if (*min_burst_sz < 0 || *min_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
		return -1;
	return 0;
}

static int
set_refill_once(const char *key __rte_unused, const char *value, void *opaque)
{
	int *refill_once_per_call = opaque;
	*refill_once_per_call = atoi(value);
	if (*refill_once_per_call < 0 || *refill_once_per_call > 1)
		return -1;
	return 0;
}

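/* Scheduler entry point, run from an EAL service lcore; the value returned
 * by sw_event_schedule() is passed back to the service framework (e.g. so
 * idle iterations can be accounted for).
 */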
static int32_t sw_sched_service_func(void *args)
{
	struct rte_eventdev *dev = args;
	return sw_event_schedule(dev);
}

static int
sw_probe(struct rte_vdev_device *vdev)
{
	static struct eventdev_ops evdev_sw_ops = {
			.dev_configure = sw_dev_configure,
			.dev_infos_get = sw_info_get,
			.dev_close = sw_close,
			.dev_start = sw_start,
			.dev_stop = sw_stop,
			.dump = sw_dump,

			.queue_def_conf = sw_queue_def_conf,
			.queue_setup = sw_queue_setup,
			.queue_release = sw_queue_release,
			.port_def_conf = sw_port_def_conf,
			.port_setup = sw_port_setup,
			.port_release = sw_port_release,
			.port_link = sw_port_link,
			.port_unlink = sw_port_unlink,
			.port_unlinks_in_progress = sw_port_unlinks_in_progress,

			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,

			.timer_adapter_caps_get = sw_timer_adapter_caps_get,

			.crypto_adapter_caps_get = sw_crypto_adapter_caps_get,

			.xstats_get = sw_xstats_get,
			.xstats_get_names = sw_xstats_get_names,
			.xstats_get_by_name = sw_xstats_get_by_name,
			.xstats_reset = sw_xstats_reset,

			.dev_selftest = test_sw_eventdev,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		SCHED_QUANTA_ARG,
		CREDIT_QUANTA_ARG,
		MIN_BURST_SIZE_ARG,
		DEQ_BURST_SIZE_ARG,
		REFIL_ONCE_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct sw_evdev *sw;
	int socket_id = rte_socket_id();
	int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
	int min_burst_size = 1;
	int deq_burst_size = SCHED_DEQUEUE_DEFAULT_BURST_SIZE;
	int refill_once = 0;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SW_LOG_INFO(
				"Ignoring unsupported parameters when creating device '%s'",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing sched quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing credit quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, MIN_BURST_SIZE_ARG,
					set_min_burst_sz, &min_burst_size);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing minimum burst size parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DEQ_BURST_SIZE_ARG,
					set_deq_burst_sz, &deq_burst_size);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing dequeue burst size parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, REFIL_ONCE_ARG,
					set_refill_once, &refill_once);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing refill once per call switch",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	SW_LOG_INFO(
			"Creating eventdev sw device %s, numa_node=%d, "
			"sched_quanta=%d, credit_quanta=%d "
			"min_burst=%d, deq_burst=%d, refill_once=%d",
			name, socket_id, sched_quanta, credit_quanta,
			min_burst_size, deq_burst_size, refill_once);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id, vdev);
	if (dev == NULL) {
		SW_LOG_ERR("eventdev vdev init() failed");
		return -EFAULT;
	}
	dev->dev_ops = &evdev_sw_ops;
	dev->enqueue_burst = sw_event_enqueue_burst;
	dev->enqueue_new_burst = sw_event_enqueue_burst;
	dev->enqueue_forward_burst = sw_event_enqueue_burst;
	dev->dequeue_burst = sw_event_dequeue_burst;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;
	sw->sched_min_burst_size = min_burst_size;
	sw->sched_deq_burst_size = deq_burst_size;
	sw->refill_once_per_iter = refill_once;

	/* register service with EAL */
	struct rte_service_spec service;
	memset(&service, 0, sizeof(struct rte_service_spec));
	snprintf(service.name, sizeof(service.name), "%s_service", name);
	snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
			name);
	service.socket_id = socket_id;
	service.callback = sw_sched_service_func;
	service.callback_userdata = (void *)dev;

	int32_t ret = rte_service_component_register(&service, &sw->service_id);
	if (ret) {
		SW_LOG_ERR("service register() failed");
		return -ENOEXEC;
	}

	dev->data->service_inited = 1;
	dev->data->service_id = sw->service_id;

	event_dev_probing_finish(dev);

	return 0;
}

static int
sw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	SW_LOG_INFO("Closing eventdev sw device %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
	.probe = sw_probe,
	.remove = sw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>"
		MIN_BURST_SIZE_ARG "=<int>" DEQ_BURST_SIZE_ARG "=<int>"
		REFIL_ONCE_ARG "=<int>");
RTE_LOG_REGISTER_DEFAULT(eventdev_sw_log_level, NOTICE);