xref: /dpdk/drivers/event/dlb2/dlb2.c (revision e20e2148cf9268fa16ad6d0baff943a3eaae5bf0)
1ef3da1e7STimothy McDaniel /* SPDX-License-Identifier: BSD-3-Clause
2d0ce87e4STimothy McDaniel  * Copyright(c) 2016-2022 Intel Corporation
3ef3da1e7STimothy McDaniel  */
4ef3da1e7STimothy McDaniel 
55433956dSTimothy McDaniel #include <assert.h>
65433956dSTimothy McDaniel #include <errno.h>
75433956dSTimothy McDaniel #include <nmmintrin.h>
85433956dSTimothy McDaniel #include <pthread.h>
95433956dSTimothy McDaniel #include <stdint.h>
105433956dSTimothy McDaniel #include <stdbool.h>
115433956dSTimothy McDaniel #include <stdio.h>
125433956dSTimothy McDaniel #include <string.h>
135433956dSTimothy McDaniel #include <sys/mman.h>
1429420808SThomas Monjalon #include <fcntl.h>
15ef3da1e7STimothy McDaniel 
165433956dSTimothy McDaniel #include <rte_common.h>
175433956dSTimothy McDaniel #include <rte_config.h>
185433956dSTimothy McDaniel #include <rte_cycles.h>
195433956dSTimothy McDaniel #include <rte_debug.h>
201acb7f54SDavid Marchand #include <dev_driver.h>
215433956dSTimothy McDaniel #include <rte_errno.h>
225433956dSTimothy McDaniel #include <rte_eventdev.h>
2325187042SBruce Richardson #include <eventdev_pmd.h>
245433956dSTimothy McDaniel #include <rte_io.h>
255433956dSTimothy McDaniel #include <rte_kvargs.h>
265433956dSTimothy McDaniel #include <rte_log.h>
275433956dSTimothy McDaniel #include <rte_malloc.h>
285433956dSTimothy McDaniel #include <rte_mbuf.h>
29a2e4f1f5STimothy McDaniel #include <rte_power_intrinsics.h>
305433956dSTimothy McDaniel #include <rte_prefetch.h>
315433956dSTimothy McDaniel #include <rte_ring.h>
325433956dSTimothy McDaniel #include <rte_string_fns.h>
335433956dSTimothy McDaniel 
345433956dSTimothy McDaniel #include "dlb2_priv.h"
35e7c9971aSTimothy McDaniel #include "dlb2_iface.h"
365433956dSTimothy McDaniel #include "dlb2_inline_fns.h"
375433956dSTimothy McDaniel 
385433956dSTimothy McDaniel /*
39d8c16de5SAbdullah Sevincer  * Bypass memory fencing instructions when the port is of Producer type.
40d8c16de5SAbdullah Sevincer  * Enable this very carefully, and only with the understanding that the
41d8c16de5SAbdullah Sevincer  * producer is not doing any writes that need fencing. The movdir64
42d8c16de5SAbdullah Sevincer  * instruction used to enqueue events to the DLB is weakly ordered, so a
43d8c16de5SAbdullah Sevincer  * movdir64 write to the DLB can be reordered ahead of related application
44d8c16de5SAbdullah Sevincer  * writes, such as updates to buffers being sent with the event.
45d8c16de5SAbdullah Sevincer  */
46d8c16de5SAbdullah Sevincer #define DLB2_BYPASS_FENCE_ON_PP 0  /* 1 == Bypass fence, 0 == do not bypass */
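
/*
 * Illustrative sketch only (kept under #if 0, not built): one way a flag
 * like DLB2_BYPASS_FENCE_ON_PP is typically consumed on the enqueue path.
 * The helper name and its parameters below are hypothetical; the real MMIO
 * write is issued by the movdir64b wrapper in dlb2_inline_fns.h.
 */
#if 0
static inline void
dlb2_example_pp_enqueue(void *pp_addr, struct dlb2_enqueue_qe *qe4,
			bool is_producer_port)
{
	/* Order application stores (e.g. updates to buffers carried by the
	 * events) before the weakly-ordered 64B MMIO store, unless the port
	 * is a pure producer and fencing is deliberately bypassed.
	 */
	if (!(DLB2_BYPASS_FENCE_ON_PP && is_producer_port))
		rte_wmb();

	dlb2_movdir64b(pp_addr, qe4);
}
#endif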
47d8c16de5SAbdullah Sevincer 
48d8c16de5SAbdullah Sevincer /*
495433956dSTimothy McDaniel  * Resources exposed to eventdev. Some values overridden at runtime using
505433956dSTimothy McDaniel  * values returned by the DLB kernel driver.
515433956dSTimothy McDaniel  */
525433956dSTimothy McDaniel #if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
535433956dSTimothy McDaniel #error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
545433956dSTimothy McDaniel #endif
55e7c9971aSTimothy McDaniel static struct rte_event_dev_info evdev_dlb2_default_info = {
56e7c9971aSTimothy McDaniel 	.driver_name = "", /* probe will set */
57e7c9971aSTimothy McDaniel 	.min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
58e7c9971aSTimothy McDaniel 	.max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
59e7c9971aSTimothy McDaniel #if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
60e7c9971aSTimothy McDaniel 	.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
61e7c9971aSTimothy McDaniel #else
62e7c9971aSTimothy McDaniel 	.max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
63e7c9971aSTimothy McDaniel #endif
64e7c9971aSTimothy McDaniel 	.max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
65e7c9971aSTimothy McDaniel 	.max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
66e7c9971aSTimothy McDaniel 	.max_event_priority_levels = DLB2_QID_PRIORITIES,
67e7c9971aSTimothy McDaniel 	.max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
6886fe66d4STimothy McDaniel 	.max_event_port_dequeue_depth = DLB2_DEFAULT_CQ_DEPTH,
69e7c9971aSTimothy McDaniel 	.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
70e7c9971aSTimothy McDaniel 	.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
71e7c9971aSTimothy McDaniel 	.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
72b66a418dSTimothy McDaniel 	.max_single_link_event_port_queue_pairs =
73b66a418dSTimothy McDaniel 		DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
74498f7f91SBruce Richardson 	.event_dev_cap = (RTE_EVENT_DEV_CAP_ATOMIC |
75498f7f91SBruce Richardson 			  RTE_EVENT_DEV_CAP_ORDERED |
76498f7f91SBruce Richardson 			  RTE_EVENT_DEV_CAP_PARALLEL |
77498f7f91SBruce Richardson 			  RTE_EVENT_DEV_CAP_EVENT_QOS |
78a992d9beSBruce Richardson 			  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
79e7c9971aSTimothy McDaniel 			  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
80bd991897SMattias Rönnblom 			  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
81d39e23f2STimothy McDaniel 			  RTE_EVENT_DEV_CAP_BURST_MODE |
82d39e23f2STimothy McDaniel 			  RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
83d39e23f2STimothy McDaniel 			  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
84d39e23f2STimothy McDaniel 			  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
856e2e98d6SAbdullah Sevincer 			  RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ |
86bd991897SMattias Rönnblom 			  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
87d007a7f3SPavan Nikhilesh 	.max_profiles_per_port = 1,
88e7c9971aSTimothy McDaniel };
895433956dSTimothy McDaniel 
905433956dSTimothy McDaniel struct process_local_port_data
91b66a418dSTimothy McDaniel dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];
925433956dSTimothy McDaniel 
93f3cad285STimothy McDaniel static void
94f3cad285STimothy McDaniel dlb2_free_qe_mem(struct dlb2_port *qm_port)
95f3cad285STimothy McDaniel {
96f3cad285STimothy McDaniel 	if (qm_port == NULL)
97f3cad285STimothy McDaniel 		return;
98f3cad285STimothy McDaniel 
99f3cad285STimothy McDaniel 	rte_free(qm_port->qe4);
100f3cad285STimothy McDaniel 	qm_port->qe4 = NULL;
101f3cad285STimothy McDaniel 
1026e2e98d6SAbdullah Sevincer 	if (qm_port->order) {
1036e2e98d6SAbdullah Sevincer 		rte_free(qm_port->order);
1046e2e98d6SAbdullah Sevincer 		qm_port->order = NULL;
1056e2e98d6SAbdullah Sevincer 	}
1066e2e98d6SAbdullah Sevincer 
107f3cad285STimothy McDaniel 	rte_free(qm_port->int_arm_qe);
108f3cad285STimothy McDaniel 	qm_port->int_arm_qe = NULL;
109f3cad285STimothy McDaniel 
110f3cad285STimothy McDaniel 	rte_free(qm_port->consume_qe);
111f3cad285STimothy McDaniel 	qm_port->consume_qe = NULL;
112f3cad285STimothy McDaniel 
113f3cad285STimothy McDaniel 	rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
114f3cad285STimothy McDaniel 	dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
115f3cad285STimothy McDaniel }
116f3cad285STimothy McDaniel 
117e7c9971aSTimothy McDaniel /* override defaults with value(s) provided on command line */
118e7c9971aSTimothy McDaniel static void
119e7c9971aSTimothy McDaniel dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
120e7c9971aSTimothy McDaniel 				 int *qid_depth_thresholds)
121e7c9971aSTimothy McDaniel {
122e7c9971aSTimothy McDaniel 	int q;
123e7c9971aSTimothy McDaniel 
124b66a418dSTimothy McDaniel 	for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
125e7c9971aSTimothy McDaniel 		if (qid_depth_thresholds[q] != 0)
126e7c9971aSTimothy McDaniel 			dlb2->ev_queues[q].depth_threshold =
127e7c9971aSTimothy McDaniel 				qid_depth_thresholds[q];
128e7c9971aSTimothy McDaniel 	}
129e7c9971aSTimothy McDaniel }
130e7c9971aSTimothy McDaniel 
131ffa46fc4STimothy McDaniel /* override defaults with value(s) provided on command line */
132ffa46fc4STimothy McDaniel static void
133bec8901bSTimothy McDaniel dlb2_init_port_cos(struct dlb2_eventdev *dlb2, int *port_cos)
134bec8901bSTimothy McDaniel {
135bec8901bSTimothy McDaniel 	int q;
136bec8901bSTimothy McDaniel 
137e3191f10SAbdullah Sevincer 	for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++) {
138bec8901bSTimothy McDaniel 		dlb2->ev_ports[q].cos_id = port_cos[q];
139728717ebSAbdullah Sevincer 		if (port_cos[q] != DLB2_COS_DEFAULT &&
140728717ebSAbdullah Sevincer 		    dlb2->cos_ports[port_cos[q]] < DLB2_MAX_NUM_LDB_PORTS_PER_COS) {
141bec8901bSTimothy McDaniel 			dlb2->cos_ports[port_cos[q]]++;
142728717ebSAbdullah Sevincer 			dlb2->max_cos_port = q;
143bec8901bSTimothy McDaniel 		}
144bec8901bSTimothy McDaniel 	}
145e3191f10SAbdullah Sevincer }
146bec8901bSTimothy McDaniel 
147bec8901bSTimothy McDaniel static void
148bec8901bSTimothy McDaniel dlb2_init_cos_bw(struct dlb2_eventdev *dlb2,
149bec8901bSTimothy McDaniel 		 struct dlb2_cos_bw *cos_bw)
150bec8901bSTimothy McDaniel {
151bec8901bSTimothy McDaniel 	int q;
15254089151STimothy McDaniel 
15354089151STimothy McDaniel 
15454089151STimothy McDaniel 	/* If cos_bw not set, then split evenly */
15554089151STimothy McDaniel 	if (cos_bw->val[0] == 0 && cos_bw->val[1] == 0 &&
15654089151STimothy McDaniel 		cos_bw->val[2] == 0 && cos_bw->val[3] == 0) {
15754089151STimothy McDaniel 		cos_bw->val[0] = 25;
15854089151STimothy McDaniel 		cos_bw->val[1] = 25;
15954089151STimothy McDaniel 		cos_bw->val[2] = 25;
16054089151STimothy McDaniel 		cos_bw->val[3] = 25;
16154089151STimothy McDaniel 	}
16254089151STimothy McDaniel 
163bec8901bSTimothy McDaniel 	for (q = 0; q < DLB2_COS_NUM_VALS; q++)
164bec8901bSTimothy McDaniel 		dlb2->cos_bw[q] = cos_bw->val[q];
165bec8901bSTimothy McDaniel 
166bec8901bSTimothy McDaniel }
167bec8901bSTimothy McDaniel 
168e7c9971aSTimothy McDaniel static int
169e7c9971aSTimothy McDaniel dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
170e7c9971aSTimothy McDaniel {
171e7c9971aSTimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
17286fe66d4STimothy McDaniel 	int num_ldb_ports;
173e7c9971aSTimothy McDaniel 	int ret;
174e7c9971aSTimothy McDaniel 
175e7c9971aSTimothy McDaniel 	/* Query driver resources provisioned for this device */
176e7c9971aSTimothy McDaniel 
177e7c9971aSTimothy McDaniel 	ret = dlb2_iface_get_num_resources(handle,
178e7c9971aSTimothy McDaniel 					   &dlb2->hw_rsrc_query_results);
179e7c9971aSTimothy McDaniel 	if (ret) {
180f665790aSDavid Marchand 		DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d", ret);
181e7c9971aSTimothy McDaniel 		return ret;
182e7c9971aSTimothy McDaniel 	}
183e7c9971aSTimothy McDaniel 
184e7c9971aSTimothy McDaniel 	/* Complete filling in device resource info returned to evdev app,
185e7c9971aSTimothy McDaniel 	 * overriding any default values.
186e7c9971aSTimothy McDaniel 	 * The capabilities (CAPs) were set at compile time.
187e7c9971aSTimothy McDaniel 	 */
188e7c9971aSTimothy McDaniel 
18986fe66d4STimothy McDaniel 	if (dlb2->max_cq_depth != DLB2_DEFAULT_CQ_DEPTH)
19086fe66d4STimothy McDaniel 		num_ldb_ports = DLB2_MAX_HL_ENTRIES / dlb2->max_cq_depth;
19186fe66d4STimothy McDaniel 	else
19286fe66d4STimothy McDaniel 		num_ldb_ports = dlb2->hw_rsrc_query_results.num_ldb_ports;
19386fe66d4STimothy McDaniel 
194e7c9971aSTimothy McDaniel 	evdev_dlb2_default_info.max_event_queues =
195e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_ldb_queues;
196e7c9971aSTimothy McDaniel 
19786fe66d4STimothy McDaniel 	evdev_dlb2_default_info.max_event_ports = num_ldb_ports;
198e7c9971aSTimothy McDaniel 
1994ce7bf9eSTimothy McDaniel 	if (dlb2->version == DLB2_HW_V2_5) {
2004ce7bf9eSTimothy McDaniel 		evdev_dlb2_default_info.max_num_events =
2014ce7bf9eSTimothy McDaniel 			dlb2->hw_rsrc_query_results.num_credits;
2024ce7bf9eSTimothy McDaniel 	} else {
203e7c9971aSTimothy McDaniel 		evdev_dlb2_default_info.max_num_events =
204e7c9971aSTimothy McDaniel 			dlb2->hw_rsrc_query_results.num_ldb_credits;
2054ce7bf9eSTimothy McDaniel 	}
206e7c9971aSTimothy McDaniel 	/* Save off values used when creating the scheduling domain. */
207e7c9971aSTimothy McDaniel 
208e7c9971aSTimothy McDaniel 	handle->info.num_sched_domains =
209e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_sched_domains;
210e7c9971aSTimothy McDaniel 
2114ce7bf9eSTimothy McDaniel 	if (dlb2->version == DLB2_HW_V2_5) {
2124ce7bf9eSTimothy McDaniel 		handle->info.hw_rsrc_max.nb_events_limit =
2134ce7bf9eSTimothy McDaniel 			dlb2->hw_rsrc_query_results.num_credits;
2144ce7bf9eSTimothy McDaniel 	} else {
215e7c9971aSTimothy McDaniel 		handle->info.hw_rsrc_max.nb_events_limit =
216e7c9971aSTimothy McDaniel 			dlb2->hw_rsrc_query_results.num_ldb_credits;
2174ce7bf9eSTimothy McDaniel 	}
218e7c9971aSTimothy McDaniel 	handle->info.hw_rsrc_max.num_queues =
219e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_ldb_queues +
220e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_dir_ports;
221e7c9971aSTimothy McDaniel 
222e7c9971aSTimothy McDaniel 	handle->info.hw_rsrc_max.num_ldb_queues =
223e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_ldb_queues;
224e7c9971aSTimothy McDaniel 
22586fe66d4STimothy McDaniel 	handle->info.hw_rsrc_max.num_ldb_ports = num_ldb_ports;
226e7c9971aSTimothy McDaniel 
227e7c9971aSTimothy McDaniel 	handle->info.hw_rsrc_max.num_dir_ports =
228e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_dir_ports;
229e7c9971aSTimothy McDaniel 
230e7c9971aSTimothy McDaniel 	handle->info.hw_rsrc_max.reorder_window_size =
231e7c9971aSTimothy McDaniel 		dlb2->hw_rsrc_query_results.num_hist_list_entries;
232e7c9971aSTimothy McDaniel 
233e7c9971aSTimothy McDaniel 	return 0;
234e7c9971aSTimothy McDaniel }
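
/*
 * Worked example (illustrative numbers): with a hypothetical history-list
 * budget of DLB2_MAX_HL_ENTRIES = 2048, a devarg override of
 * max_cq_depth=128 caps the advertised load-balanced ports at
 * 2048 / 128 = 16, instead of the port count returned by the resource
 * query above.
 */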
235e7c9971aSTimothy McDaniel 
2365433956dSTimothy McDaniel #define DLB2_BASE_10 10
2375433956dSTimothy McDaniel 
2385433956dSTimothy McDaniel static int
2395433956dSTimothy McDaniel dlb2_string_to_int(int *result, const char *str)
2405433956dSTimothy McDaniel {
2415433956dSTimothy McDaniel 	long ret;
2425433956dSTimothy McDaniel 	char *endptr;
2435433956dSTimothy McDaniel 
2445433956dSTimothy McDaniel 	if (str == NULL || result == NULL)
2455433956dSTimothy McDaniel 		return -EINVAL;
2465433956dSTimothy McDaniel 
2475433956dSTimothy McDaniel 	errno = 0;
2485433956dSTimothy McDaniel 	ret = strtol(str, &endptr, DLB2_BASE_10);
2495433956dSTimothy McDaniel 	if (errno)
2505433956dSTimothy McDaniel 		return -errno;
2515433956dSTimothy McDaniel 
2525433956dSTimothy McDaniel 	/* long int and int may have different widths on some architectures */
2535433956dSTimothy McDaniel 	if (ret < INT_MIN || ret > INT_MAX || endptr == str)
2545433956dSTimothy McDaniel 		return -EINVAL;
2555433956dSTimothy McDaniel 
2565433956dSTimothy McDaniel 	*result = ret;
2575433956dSTimothy McDaniel 	return 0;
2585433956dSTimothy McDaniel }
2595433956dSTimothy McDaniel 
2605433956dSTimothy McDaniel static int
2618d1d9070SAbdullah Sevincer set_producer_coremask(const char *key __rte_unused,
2628d1d9070SAbdullah Sevincer 		      const char *value,
2638d1d9070SAbdullah Sevincer 		      void *opaque)
2648d1d9070SAbdullah Sevincer {
2658d1d9070SAbdullah Sevincer 	const char **mask_str = opaque;
2668d1d9070SAbdullah Sevincer 
2678d1d9070SAbdullah Sevincer 	if (value == NULL || opaque == NULL) {
268f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
2698d1d9070SAbdullah Sevincer 		return -EINVAL;
2708d1d9070SAbdullah Sevincer 	}
2718d1d9070SAbdullah Sevincer 
2728d1d9070SAbdullah Sevincer 	*mask_str = value;
2738d1d9070SAbdullah Sevincer 
2748d1d9070SAbdullah Sevincer 	return 0;
2758d1d9070SAbdullah Sevincer }
2768d1d9070SAbdullah Sevincer 
2778d1d9070SAbdullah Sevincer static int
2785433956dSTimothy McDaniel set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
2795433956dSTimothy McDaniel {
2805433956dSTimothy McDaniel 	int *socket_id = opaque;
2815433956dSTimothy McDaniel 	int ret;
2825433956dSTimothy McDaniel 
2835433956dSTimothy McDaniel 	ret = dlb2_string_to_int(socket_id, value);
2845433956dSTimothy McDaniel 	if (ret < 0)
2855433956dSTimothy McDaniel 		return ret;
2865433956dSTimothy McDaniel 
2875433956dSTimothy McDaniel 	if (*socket_id > RTE_MAX_NUMA_NODES)
2885433956dSTimothy McDaniel 		return -EINVAL;
2895433956dSTimothy McDaniel 	return 0;
2905433956dSTimothy McDaniel }
2915433956dSTimothy McDaniel 
29286fe66d4STimothy McDaniel 
29386fe66d4STimothy McDaniel static int
29486fe66d4STimothy McDaniel set_max_cq_depth(const char *key __rte_unused,
29586fe66d4STimothy McDaniel 		 const char *value,
29686fe66d4STimothy McDaniel 		 void *opaque)
29786fe66d4STimothy McDaniel {
29886fe66d4STimothy McDaniel 	int *max_cq_depth = opaque;
29986fe66d4STimothy McDaniel 	int ret;
30086fe66d4STimothy McDaniel 
30186fe66d4STimothy McDaniel 	if (value == NULL || opaque == NULL) {
302f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
30386fe66d4STimothy McDaniel 		return -EINVAL;
30486fe66d4STimothy McDaniel 	}
30586fe66d4STimothy McDaniel 
30686fe66d4STimothy McDaniel 	ret = dlb2_string_to_int(max_cq_depth, value);
30786fe66d4STimothy McDaniel 	if (ret < 0)
30886fe66d4STimothy McDaniel 		return ret;
30986fe66d4STimothy McDaniel 
31086fe66d4STimothy McDaniel 	if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE ||
31186fe66d4STimothy McDaniel 	    *max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE ||
31286fe66d4STimothy McDaniel 	    !rte_is_power_of_2(*max_cq_depth)) {
3136e2e98d6SAbdullah Sevincer 		DLB2_LOG_ERR("dlb2: max_cq_depth must be between %d and %d and a power of 2",
31486fe66d4STimothy McDaniel 			     DLB2_MIN_CQ_DEPTH_OVERRIDE,
31586fe66d4STimothy McDaniel 			     DLB2_MAX_CQ_DEPTH_OVERRIDE);
31686fe66d4STimothy McDaniel 		return -EINVAL;
31786fe66d4STimothy McDaniel 	}
31886fe66d4STimothy McDaniel 
31986fe66d4STimothy McDaniel 	return 0;
32086fe66d4STimothy McDaniel }
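
/*
 * Usage example (illustrative): a power-of-2 value inside the override
 * range, e.g. max_cq_depth=64 (assuming 64 lies between
 * DLB2_MIN_CQ_DEPTH_OVERRIDE and DLB2_MAX_CQ_DEPTH_OVERRIDE), is accepted;
 * max_cq_depth=100 is rejected because it is not a power of 2.
 */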
32186fe66d4STimothy McDaniel 
3225433956dSTimothy McDaniel static int
3230fc71ad8STimothy McDaniel set_max_enq_depth(const char *key __rte_unused,
3240fc71ad8STimothy McDaniel 		  const char *value,
3250fc71ad8STimothy McDaniel 		  void *opaque)
3260fc71ad8STimothy McDaniel {
3270fc71ad8STimothy McDaniel 	int *max_enq_depth = opaque;
3280fc71ad8STimothy McDaniel 	int ret;
3290fc71ad8STimothy McDaniel 
3300fc71ad8STimothy McDaniel 	if (value == NULL || opaque == NULL) {
331f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
3320fc71ad8STimothy McDaniel 		return -EINVAL;
3330fc71ad8STimothy McDaniel 	}
3340fc71ad8STimothy McDaniel 
3350fc71ad8STimothy McDaniel 	ret = dlb2_string_to_int(max_enq_depth, value);
3360fc71ad8STimothy McDaniel 	if (ret < 0)
3370fc71ad8STimothy McDaniel 		return ret;
3380fc71ad8STimothy McDaniel 
3390fc71ad8STimothy McDaniel 	if (*max_enq_depth < DLB2_MIN_ENQ_DEPTH_OVERRIDE ||
3400fc71ad8STimothy McDaniel 	    *max_enq_depth > DLB2_MAX_ENQ_DEPTH_OVERRIDE ||
3410fc71ad8STimothy McDaniel 	    !rte_is_power_of_2(*max_enq_depth)) {
342f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: max_enq_depth must be between %d and %d and a power of 2",
3430fc71ad8STimothy McDaniel 		DLB2_MIN_ENQ_DEPTH_OVERRIDE,
3440fc71ad8STimothy McDaniel 		DLB2_MAX_ENQ_DEPTH_OVERRIDE);
3450fc71ad8STimothy McDaniel 		return -EINVAL;
3460fc71ad8STimothy McDaniel 	}
3470fc71ad8STimothy McDaniel 
3480fc71ad8STimothy McDaniel 	return 0;
3490fc71ad8STimothy McDaniel }
3500fc71ad8STimothy McDaniel 
3510fc71ad8STimothy McDaniel static int
3525433956dSTimothy McDaniel set_max_num_events(const char *key __rte_unused,
3535433956dSTimothy McDaniel 		   const char *value,
3545433956dSTimothy McDaniel 		   void *opaque)
3555433956dSTimothy McDaniel {
3565433956dSTimothy McDaniel 	int *max_num_events = opaque;
3575433956dSTimothy McDaniel 	int ret;
3585433956dSTimothy McDaniel 
3595433956dSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
360f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
3615433956dSTimothy McDaniel 		return -EINVAL;
3625433956dSTimothy McDaniel 	}
3635433956dSTimothy McDaniel 
3645433956dSTimothy McDaniel 	ret = dlb2_string_to_int(max_num_events, value);
3655433956dSTimothy McDaniel 	if (ret < 0)
3665433956dSTimothy McDaniel 		return ret;
3675433956dSTimothy McDaniel 
3685433956dSTimothy McDaniel 	if (*max_num_events < 0 || *max_num_events >
3695433956dSTimothy McDaniel 			DLB2_MAX_NUM_LDB_CREDITS) {
370f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d",
3715433956dSTimothy McDaniel 			     DLB2_MAX_NUM_LDB_CREDITS);
3725433956dSTimothy McDaniel 		return -EINVAL;
3735433956dSTimothy McDaniel 	}
3745433956dSTimothy McDaniel 
3755433956dSTimothy McDaniel 	return 0;
3765433956dSTimothy McDaniel }
3775433956dSTimothy McDaniel 
3785433956dSTimothy McDaniel static int
3795433956dSTimothy McDaniel set_num_dir_credits(const char *key __rte_unused,
3805433956dSTimothy McDaniel 		    const char *value,
3815433956dSTimothy McDaniel 		    void *opaque)
3825433956dSTimothy McDaniel {
3835433956dSTimothy McDaniel 	int *num_dir_credits = opaque;
3845433956dSTimothy McDaniel 	int ret;
3855433956dSTimothy McDaniel 
3865433956dSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
387f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
3885433956dSTimothy McDaniel 		return -EINVAL;
3895433956dSTimothy McDaniel 	}
3905433956dSTimothy McDaniel 
3915433956dSTimothy McDaniel 	ret = dlb2_string_to_int(num_dir_credits, value);
3925433956dSTimothy McDaniel 	if (ret < 0)
3935433956dSTimothy McDaniel 		return ret;
3945433956dSTimothy McDaniel 
3955433956dSTimothy McDaniel 	if (*num_dir_credits < 0 ||
396b66a418dSTimothy McDaniel 	    *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
397f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d",
398b66a418dSTimothy McDaniel 			     DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
3995433956dSTimothy McDaniel 		return -EINVAL;
4005433956dSTimothy McDaniel 	}
4015433956dSTimothy McDaniel 
4025433956dSTimothy McDaniel 	return 0;
4035433956dSTimothy McDaniel }
4045433956dSTimothy McDaniel 
4055433956dSTimothy McDaniel static int
4065433956dSTimothy McDaniel set_dev_id(const char *key __rte_unused,
4075433956dSTimothy McDaniel 	   const char *value,
4085433956dSTimothy McDaniel 	   void *opaque)
4095433956dSTimothy McDaniel {
4105433956dSTimothy McDaniel 	int *dev_id = opaque;
4115433956dSTimothy McDaniel 	int ret;
4125433956dSTimothy McDaniel 
4135433956dSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
414f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
4155433956dSTimothy McDaniel 		return -EINVAL;
4165433956dSTimothy McDaniel 	}
4175433956dSTimothy McDaniel 
4185433956dSTimothy McDaniel 	ret = dlb2_string_to_int(dev_id, value);
4195433956dSTimothy McDaniel 	if (ret < 0)
4205433956dSTimothy McDaniel 		return ret;
4215433956dSTimothy McDaniel 
4225433956dSTimothy McDaniel 	return 0;
4235433956dSTimothy McDaniel }
4245433956dSTimothy McDaniel 
4255433956dSTimothy McDaniel static int
4267be66a3bSTimothy McDaniel set_poll_interval(const char *key __rte_unused,
4277be66a3bSTimothy McDaniel 	const char *value,
4287be66a3bSTimothy McDaniel 	void *opaque)
4297be66a3bSTimothy McDaniel {
4307be66a3bSTimothy McDaniel 	int *poll_interval = opaque;
4317be66a3bSTimothy McDaniel 	int ret;
4327be66a3bSTimothy McDaniel 
4337be66a3bSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
434f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
4357be66a3bSTimothy McDaniel 		return -EINVAL;
4367be66a3bSTimothy McDaniel 	}
4377be66a3bSTimothy McDaniel 
4387be66a3bSTimothy McDaniel 	ret = dlb2_string_to_int(poll_interval, value);
4397be66a3bSTimothy McDaniel 	if (ret < 0)
4407be66a3bSTimothy McDaniel 		return ret;
4417be66a3bSTimothy McDaniel 
4427be66a3bSTimothy McDaniel 	return 0;
4437be66a3bSTimothy McDaniel }
4447be66a3bSTimothy McDaniel 
4457be66a3bSTimothy McDaniel static int
446bec8901bSTimothy McDaniel set_port_cos(const char *key __rte_unused,
447bec8901bSTimothy McDaniel 	     const char *value,
448bec8901bSTimothy McDaniel 	     void *opaque)
449bec8901bSTimothy McDaniel {
450bec8901bSTimothy McDaniel 	struct dlb2_port_cos *port_cos = opaque;
451bec8901bSTimothy McDaniel 	int first, last, cos_id, i;
452bec8901bSTimothy McDaniel 
453bec8901bSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
454f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
455bec8901bSTimothy McDaniel 		return -EINVAL;
456bec8901bSTimothy McDaniel 	}
457bec8901bSTimothy McDaniel 
458bec8901bSTimothy McDaniel 	/* command line override may take one of the following 2 forms:
459bec8901bSTimothy McDaniel 	 * port_cos=port-port:<cos_id> ... a range of ports
460bec8901bSTimothy McDaniel 	 * port_cos=port:<cos_id> ... just one port
461bec8901bSTimothy McDaniel 	 */
46254089151STimothy McDaniel 	if (sscanf(value, "%d-%d:%d", &first, &last, &cos_id) == 3) {
463bec8901bSTimothy McDaniel 		/* we have everything we need */
464bec8901bSTimothy McDaniel 	} else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) {
465bec8901bSTimothy McDaniel 		last = first;
466bec8901bSTimothy McDaniel 	} else {
467f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val");
468bec8901bSTimothy McDaniel 		return -EINVAL;
469bec8901bSTimothy McDaniel 	}
470bec8901bSTimothy McDaniel 
471bec8901bSTimothy McDaniel 	if (first > last || first < 0 ||
472bec8901bSTimothy McDaniel 		last >= DLB2_MAX_NUM_LDB_PORTS) {
473f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value");
474bec8901bSTimothy McDaniel 		return -EINVAL;
475bec8901bSTimothy McDaniel 	}
476bec8901bSTimothy McDaniel 
477bec8901bSTimothy McDaniel 	if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) {
478f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 3");
479bec8901bSTimothy McDaniel 		return -EINVAL;
480bec8901bSTimothy McDaniel 	}
481bec8901bSTimothy McDaniel 
482bec8901bSTimothy McDaniel 	for (i = first; i <= last; i++)
483bec8901bSTimothy McDaniel 		port_cos->cos_id[i] = cos_id; /* indexed by port */
484bec8901bSTimothy McDaniel 
485bec8901bSTimothy McDaniel 	return 0;
486bec8901bSTimothy McDaniel }
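
/*
 * Usage example (illustrative): port_cos=0-3:1 places load-balanced ports
 * 0 through 3 in class of service 1, while port_cos=5:2 places port 5
 * alone in class of service 2.
 */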
487bec8901bSTimothy McDaniel 
488bec8901bSTimothy McDaniel static int
489bec8901bSTimothy McDaniel set_cos_bw(const char *key __rte_unused,
490bec8901bSTimothy McDaniel 	     const char *value,
491bec8901bSTimothy McDaniel 	     void *opaque)
492bec8901bSTimothy McDaniel {
493bec8901bSTimothy McDaniel 	struct dlb2_cos_bw *cos_bw = opaque;
494bec8901bSTimothy McDaniel 
495bec8901bSTimothy McDaniel 	if (opaque == NULL) {
496f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
497bec8901bSTimothy McDaniel 		return -EINVAL;
498bec8901bSTimothy McDaniel 	}
499bec8901bSTimothy McDaniel 
500bec8901bSTimothy McDaniel 	/* format must be %d:%d:%d:%d */
501bec8901bSTimothy McDaniel 
50254089151STimothy McDaniel 	if (sscanf(value, "%d:%d:%d:%d", &cos_bw->val[0], &cos_bw->val[1],
503bec8901bSTimothy McDaniel 		   &cos_bw->val[2], &cos_bw->val[3]) != 4) {
504f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100");
505bec8901bSTimothy McDaniel 		return -EINVAL;
506bec8901bSTimothy McDaniel 	}
507bec8901bSTimothy McDaniel 	if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) {
508f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing cos bandwidth devarg. The four bandwidth values must sum to <= 100");
509bec8901bSTimothy McDaniel 		return -EINVAL;
510bec8901bSTimothy McDaniel 	}
511bec8901bSTimothy McDaniel 
512bec8901bSTimothy McDaniel 	return 0;
513bec8901bSTimothy McDaniel }
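
/*
 * Usage example (illustrative): cos_bw=40:20:20:20 reserves 40% of
 * scheduling bandwidth for class of service 0 and 20% for each of classes
 * 1-3; the four values must not sum to more than 100.
 */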
514bec8901bSTimothy McDaniel 
515bec8901bSTimothy McDaniel static int
5167be66a3bSTimothy McDaniel set_sw_credit_quanta(const char *key __rte_unused,
5177be66a3bSTimothy McDaniel 	const char *value,
5187be66a3bSTimothy McDaniel 	void *opaque)
5197be66a3bSTimothy McDaniel {
5207be66a3bSTimothy McDaniel 	int *sw_credit_quanta = opaque;
5217be66a3bSTimothy McDaniel 	int ret;
5227be66a3bSTimothy McDaniel 
5237be66a3bSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
524f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
5257be66a3bSTimothy McDaniel 		return -EINVAL;
5267be66a3bSTimothy McDaniel 	}
5277be66a3bSTimothy McDaniel 
5287be66a3bSTimothy McDaniel 	ret = dlb2_string_to_int(sw_credit_quanta, value);
5297be66a3bSTimothy McDaniel 	if (ret < 0)
5307be66a3bSTimothy McDaniel 		return ret;
5317be66a3bSTimothy McDaniel 
53287ecdd9eSTimothy McDaniel 	if (*sw_credit_quanta <= 0) {
533f665790aSDavid Marchand 		DLB2_LOG_ERR("sw_credit_quanta must be > 0");
53487ecdd9eSTimothy McDaniel 		return -EINVAL;
53587ecdd9eSTimothy McDaniel 	}
53687ecdd9eSTimothy McDaniel 
5377be66a3bSTimothy McDaniel 	return 0;
5387be66a3bSTimothy McDaniel }
5397be66a3bSTimothy McDaniel 
5407be66a3bSTimothy McDaniel static int
541e4869c0bSPravin Pathak set_hw_credit_quanta(const char *key __rte_unused,
542e4869c0bSPravin Pathak 	const char *value,
543e4869c0bSPravin Pathak 	void *opaque)
544e4869c0bSPravin Pathak {
545e4869c0bSPravin Pathak 	int *hw_credit_quanta = opaque;
546e4869c0bSPravin Pathak 	int ret;
547e4869c0bSPravin Pathak 
548e4869c0bSPravin Pathak 	if (value == NULL || opaque == NULL) {
549f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
550e4869c0bSPravin Pathak 		return -EINVAL;
551e4869c0bSPravin Pathak 	}
552e4869c0bSPravin Pathak 
553e4869c0bSPravin Pathak 	ret = dlb2_string_to_int(hw_credit_quanta, value);
554e4869c0bSPravin Pathak 	if (ret < 0)
555e4869c0bSPravin Pathak 		return ret;
556e4869c0bSPravin Pathak 
557e4869c0bSPravin Pathak 	return 0;
558e4869c0bSPravin Pathak }
559e4869c0bSPravin Pathak 
560e4869c0bSPravin Pathak static int
5617be66a3bSTimothy McDaniel set_default_depth_thresh(const char *key __rte_unused,
5627be66a3bSTimothy McDaniel 	const char *value,
5637be66a3bSTimothy McDaniel 	void *opaque)
5647be66a3bSTimothy McDaniel {
5657be66a3bSTimothy McDaniel 	int *default_depth_thresh = opaque;
5667be66a3bSTimothy McDaniel 	int ret;
5677be66a3bSTimothy McDaniel 
5687be66a3bSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
569f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
5707be66a3bSTimothy McDaniel 		return -EINVAL;
5717be66a3bSTimothy McDaniel 	}
5727be66a3bSTimothy McDaniel 
5737be66a3bSTimothy McDaniel 	ret = dlb2_string_to_int(default_depth_thresh, value);
5747be66a3bSTimothy McDaniel 	if (ret < 0)
5757be66a3bSTimothy McDaniel 		return ret;
5767be66a3bSTimothy McDaniel 
5777be66a3bSTimothy McDaniel 	return 0;
5787be66a3bSTimothy McDaniel }
5797be66a3bSTimothy McDaniel 
5807be66a3bSTimothy McDaniel static int
581fcc5489cSTimothy McDaniel set_vector_opts_enab(const char *key __rte_unused,
582000a7b8eSTimothy McDaniel 	const char *value,
583000a7b8eSTimothy McDaniel 	void *opaque)
584000a7b8eSTimothy McDaniel {
585fcc5489cSTimothy McDaniel 	bool *dlb2_vector_opts_enabled = opaque;
586000a7b8eSTimothy McDaniel 
587000a7b8eSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
588f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
589000a7b8eSTimothy McDaniel 		return -EINVAL;
590000a7b8eSTimothy McDaniel 	}
591000a7b8eSTimothy McDaniel 
592000a7b8eSTimothy McDaniel 	if ((*value == 'y') || (*value == 'Y'))
593fcc5489cSTimothy McDaniel 		*dlb2_vector_opts_enabled = true;
594000a7b8eSTimothy McDaniel 	else
595fcc5489cSTimothy McDaniel 		*dlb2_vector_opts_enabled = false;
596000a7b8eSTimothy McDaniel 
597000a7b8eSTimothy McDaniel 	return 0;
598000a7b8eSTimothy McDaniel }
599000a7b8eSTimothy McDaniel 
600000a7b8eSTimothy McDaniel static int
6018d1d9070SAbdullah Sevincer set_default_ldb_port_allocation(const char *key __rte_unused,
6028d1d9070SAbdullah Sevincer 		      const char *value,
6038d1d9070SAbdullah Sevincer 		      void *opaque)
6048d1d9070SAbdullah Sevincer {
6058d1d9070SAbdullah Sevincer 	bool *default_ldb_port_allocation = opaque;
6068d1d9070SAbdullah Sevincer 
6078d1d9070SAbdullah Sevincer 	if (value == NULL || opaque == NULL) {
608f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
6098d1d9070SAbdullah Sevincer 		return -EINVAL;
6108d1d9070SAbdullah Sevincer 	}
6118d1d9070SAbdullah Sevincer 
6128d1d9070SAbdullah Sevincer 	if ((*value == 'y') || (*value == 'Y'))
6138d1d9070SAbdullah Sevincer 		*default_ldb_port_allocation = true;
6148d1d9070SAbdullah Sevincer 	else
6158d1d9070SAbdullah Sevincer 		*default_ldb_port_allocation = false;
6168d1d9070SAbdullah Sevincer 
6178d1d9070SAbdullah Sevincer 	return 0;
6188d1d9070SAbdullah Sevincer }
6198d1d9070SAbdullah Sevincer 
6208d1d9070SAbdullah Sevincer static int
621b977a659SAbdullah Sevincer set_enable_cq_weight(const char *key __rte_unused,
622b977a659SAbdullah Sevincer 		      const char *value,
623b977a659SAbdullah Sevincer 		      void *opaque)
624b977a659SAbdullah Sevincer {
625b977a659SAbdullah Sevincer 	bool *enable_cq_weight = opaque;
626b977a659SAbdullah Sevincer 
627b977a659SAbdullah Sevincer 	if (value == NULL || opaque == NULL) {
628f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
629b977a659SAbdullah Sevincer 		return -EINVAL;
630b977a659SAbdullah Sevincer 	}
631b977a659SAbdullah Sevincer 
632b977a659SAbdullah Sevincer 	if ((*value == 'y') || (*value == 'Y'))
633b977a659SAbdullah Sevincer 		*enable_cq_weight = true;
634b977a659SAbdullah Sevincer 	else
635b977a659SAbdullah Sevincer 		*enable_cq_weight = false;
636b977a659SAbdullah Sevincer 
637b977a659SAbdullah Sevincer 	return 0;
638b977a659SAbdullah Sevincer }
639b977a659SAbdullah Sevincer 
640b977a659SAbdullah Sevincer static int
6415433956dSTimothy McDaniel set_qid_depth_thresh(const char *key __rte_unused,
6425433956dSTimothy McDaniel 		     const char *value,
6435433956dSTimothy McDaniel 		     void *opaque)
6445433956dSTimothy McDaniel {
6455433956dSTimothy McDaniel 	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
6465433956dSTimothy McDaniel 	int first, last, thresh, i;
6475433956dSTimothy McDaniel 
6485433956dSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
649f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
6505433956dSTimothy McDaniel 		return -EINVAL;
6515433956dSTimothy McDaniel 	}
6525433956dSTimothy McDaniel 
6535433956dSTimothy McDaniel 	/* command line override may take one of the following 3 forms:
6545433956dSTimothy McDaniel 	 * qid_depth_thresh=all:<threshold_value> ... all queues
6555433956dSTimothy McDaniel 	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
6565433956dSTimothy McDaniel 	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
6575433956dSTimothy McDaniel 	 */
6585433956dSTimothy McDaniel 	if (sscanf(value, "all:%d", &thresh) == 1) {
6595433956dSTimothy McDaniel 		first = 0;
660b66a418dSTimothy McDaniel 		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
6615433956dSTimothy McDaniel 	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
6625433956dSTimothy McDaniel 		/* we have everything we need */
6635433956dSTimothy McDaniel 	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
6645433956dSTimothy McDaniel 		last = first;
6655433956dSTimothy McDaniel 	} else {
666f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
6675433956dSTimothy McDaniel 		return -EINVAL;
6685433956dSTimothy McDaniel 	}
6695433956dSTimothy McDaniel 
670b66a418dSTimothy McDaniel 	if (first > last || first < 0 ||
671b66a418dSTimothy McDaniel 		last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
672f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
673b66a418dSTimothy McDaniel 		return -EINVAL;
674b66a418dSTimothy McDaniel 	}
675b66a418dSTimothy McDaniel 
676b66a418dSTimothy McDaniel 	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
677f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
678b66a418dSTimothy McDaniel 			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
679b66a418dSTimothy McDaniel 		return -EINVAL;
680b66a418dSTimothy McDaniel 	}
681b66a418dSTimothy McDaniel 
682b66a418dSTimothy McDaniel 	for (i = first; i <= last; i++)
683b66a418dSTimothy McDaniel 		qid_thresh->val[i] = thresh; /* indexed by qid */
684b66a418dSTimothy McDaniel 
685b66a418dSTimothy McDaniel 	return 0;
686b66a418dSTimothy McDaniel }
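
/*
 * Usage example (illustrative, assuming the thresholds are within
 * DLB2_MAX_QUEUE_DEPTH_THRESHOLD): qid_depth_thresh=all:256 applies a
 * depth threshold of 256 to every queue, 2-5:128 applies 128 to queues 2
 * through 5, and 7:64 applies 64 to queue 7 only.
 */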
687b66a418dSTimothy McDaniel 
688b66a418dSTimothy McDaniel static int
689b66a418dSTimothy McDaniel set_qid_depth_thresh_v2_5(const char *key __rte_unused,
690b66a418dSTimothy McDaniel 			  const char *value,
691b66a418dSTimothy McDaniel 			  void *opaque)
692b66a418dSTimothy McDaniel {
693b66a418dSTimothy McDaniel 	struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
694b66a418dSTimothy McDaniel 	int first, last, thresh, i;
695b66a418dSTimothy McDaniel 
696b66a418dSTimothy McDaniel 	if (value == NULL || opaque == NULL) {
697f665790aSDavid Marchand 		DLB2_LOG_ERR("NULL pointer");
698b66a418dSTimothy McDaniel 		return -EINVAL;
699b66a418dSTimothy McDaniel 	}
700b66a418dSTimothy McDaniel 
701b66a418dSTimothy McDaniel 	/* command line override may take one of the following 3 forms:
702b66a418dSTimothy McDaniel 	 * qid_depth_thresh=all:<threshold_value> ... all queues
703b66a418dSTimothy McDaniel 	 * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
704b66a418dSTimothy McDaniel 	 * qid_depth_thresh=qid:<threshold_value> ... just one queue
705b66a418dSTimothy McDaniel 	 */
706b66a418dSTimothy McDaniel 	if (sscanf(value, "all:%d", &thresh) == 1) {
707b66a418dSTimothy McDaniel 		first = 0;
708b66a418dSTimothy McDaniel 		last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
709b66a418dSTimothy McDaniel 	} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
710b66a418dSTimothy McDaniel 		/* we have everything we need */
711b66a418dSTimothy McDaniel 	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
712b66a418dSTimothy McDaniel 		last = first;
713b66a418dSTimothy McDaniel 	} else {
714f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
715b66a418dSTimothy McDaniel 		return -EINVAL;
716b66a418dSTimothy McDaniel 	}
717b66a418dSTimothy McDaniel 
718b66a418dSTimothy McDaniel 	if (first > last || first < 0 ||
719b66a418dSTimothy McDaniel 		last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
720f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
7215433956dSTimothy McDaniel 		return -EINVAL;
7225433956dSTimothy McDaniel 	}
7235433956dSTimothy McDaniel 
7245433956dSTimothy McDaniel 	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
725f665790aSDavid Marchand 		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
7265433956dSTimothy McDaniel 			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
7275433956dSTimothy McDaniel 		return -EINVAL;
7285433956dSTimothy McDaniel 	}
7295433956dSTimothy McDaniel 
7305433956dSTimothy McDaniel 	for (i = first; i <= last; i++)
7315433956dSTimothy McDaniel 		qid_thresh->val[i] = thresh; /* indexed by qid */
7325433956dSTimothy McDaniel 
7335433956dSTimothy McDaniel 	return 0;
7345433956dSTimothy McDaniel }
7355433956dSTimothy McDaniel 
736e7c9971aSTimothy McDaniel static void
737f3cad285STimothy McDaniel dlb2_eventdev_info_get(struct rte_eventdev *dev,
738f3cad285STimothy McDaniel 		       struct rte_event_dev_info *dev_info)
739f3cad285STimothy McDaniel {
740f3cad285STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
741f3cad285STimothy McDaniel 	int ret;
742f3cad285STimothy McDaniel 
743f3cad285STimothy McDaniel 	ret = dlb2_hw_query_resources(dlb2);
744f3cad285STimothy McDaniel 	if (ret) {
745f3cad285STimothy McDaniel 		const struct rte_eventdev_data *data = dev->data;
746f3cad285STimothy McDaniel 
747f665790aSDavid Marchand 		DLB2_LOG_ERR("get resources err=%d, devid=%d",
748f3cad285STimothy McDaniel 			     ret, data->dev_id);
749f3cad285STimothy McDaniel 		/* fn is void, so fall through and return values set up in
750f3cad285STimothy McDaniel 		 * probe
751f3cad285STimothy McDaniel 		 */
752f3cad285STimothy McDaniel 	}
753f3cad285STimothy McDaniel 
754f3cad285STimothy McDaniel 	/* Add num resources currently owned by this domain.
755f3cad285STimothy McDaniel 	 * These would become available if the scheduling domain were reset due
756f3cad285STimothy McDaniel 	 * to the application recalling eventdev_configure to *reconfigure* the
757f3cad285STimothy McDaniel 	 * domain.
758f3cad285STimothy McDaniel 	 */
759f3cad285STimothy McDaniel 	evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
760f3cad285STimothy McDaniel 	evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
76162e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2_5) {
76262e45206STimothy McDaniel 		evdev_dlb2_default_info.max_num_events +=
76362e45206STimothy McDaniel 			dlb2->max_credits;
76462e45206STimothy McDaniel 	} else {
76562e45206STimothy McDaniel 		evdev_dlb2_default_info.max_num_events +=
76662e45206STimothy McDaniel 			dlb2->max_ldb_credits;
76762e45206STimothy McDaniel 	}
768f3cad285STimothy McDaniel 	evdev_dlb2_default_info.max_event_queues =
769f3cad285STimothy McDaniel 		RTE_MIN(evdev_dlb2_default_info.max_event_queues,
770f3cad285STimothy McDaniel 			RTE_EVENT_MAX_QUEUES_PER_DEV);
771f3cad285STimothy McDaniel 
772f3cad285STimothy McDaniel 	evdev_dlb2_default_info.max_num_events =
773f3cad285STimothy McDaniel 		RTE_MIN(evdev_dlb2_default_info.max_num_events,
774f3cad285STimothy McDaniel 			dlb2->max_num_events_override);
775f3cad285STimothy McDaniel 
776f3cad285STimothy McDaniel 	*dev_info = evdev_dlb2_default_info;
777f3cad285STimothy McDaniel }
778f3cad285STimothy McDaniel 
779f3cad285STimothy McDaniel static int
780bec8901bSTimothy McDaniel dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
781bec8901bSTimothy McDaniel 			    struct dlb2_hw_dev *handle,
78262e45206STimothy McDaniel 			    const struct dlb2_hw_rsrcs *resources_asked,
78362e45206STimothy McDaniel 			    uint8_t device_version)
784f3cad285STimothy McDaniel {
785f3cad285STimothy McDaniel 	int ret = 0;
786bec8901bSTimothy McDaniel 	uint32_t cos_ports = 0;
787f3cad285STimothy McDaniel 	struct dlb2_create_sched_domain_args *cfg;
788f3cad285STimothy McDaniel 
789f3cad285STimothy McDaniel 	if (resources_asked == NULL) {
790f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter");
791f3cad285STimothy McDaniel 		ret = EINVAL;
792f3cad285STimothy McDaniel 		goto error_exit;
793f3cad285STimothy McDaniel 	}
794f3cad285STimothy McDaniel 
795f3cad285STimothy McDaniel 	/* Map generic qm resources to dlb2 resources */
796f3cad285STimothy McDaniel 	cfg = &handle->cfg.resources;
797f3cad285STimothy McDaniel 
798f3cad285STimothy McDaniel 	/* DIR ports and queues */
799f3cad285STimothy McDaniel 
800f3cad285STimothy McDaniel 	cfg->num_dir_ports = resources_asked->num_dir_ports;
80162e45206STimothy McDaniel 	if (device_version == DLB2_HW_V2_5)
80262e45206STimothy McDaniel 		cfg->num_credits = resources_asked->num_credits;
80362e45206STimothy McDaniel 	else
804f3cad285STimothy McDaniel 		cfg->num_dir_credits = resources_asked->num_dir_credits;
805f3cad285STimothy McDaniel 
806f3cad285STimothy McDaniel 	/* LDB queues */
807f3cad285STimothy McDaniel 
808f3cad285STimothy McDaniel 	cfg->num_ldb_queues = resources_asked->num_ldb_queues;
809f3cad285STimothy McDaniel 
810f3cad285STimothy McDaniel 	/* LDB ports */
811f3cad285STimothy McDaniel 
81254089151STimothy McDaniel 	/* tally of COS ports from cmd line */
81354089151STimothy McDaniel 	cos_ports = dlb2->cos_ports[0] + dlb2->cos_ports[1] +
81454089151STimothy McDaniel 		    dlb2->cos_ports[2] + dlb2->cos_ports[3];
815f3cad285STimothy McDaniel 
816728717ebSAbdullah Sevincer 	if (cos_ports > resources_asked->num_ldb_ports ||
817728717ebSAbdullah Sevincer 	    (cos_ports && dlb2->max_cos_port >= resources_asked->num_ldb_ports)) {
818f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports");
819bec8901bSTimothy McDaniel 		ret = EINVAL;
820bec8901bSTimothy McDaniel 		goto error_exit;
821f3cad285STimothy McDaniel 	}
822f3cad285STimothy McDaniel 
823bec8901bSTimothy McDaniel 	cfg->cos_strict = 0; /* Best effort */
824b828e0daSAbdullah Sevincer 	cfg->num_cos_ldb_ports[0] = dlb2->cos_ports[0];
825bec8901bSTimothy McDaniel 	cfg->num_cos_ldb_ports[1] = dlb2->cos_ports[1];
826bec8901bSTimothy McDaniel 	cfg->num_cos_ldb_ports[2] = dlb2->cos_ports[2];
827bec8901bSTimothy McDaniel 	cfg->num_cos_ldb_ports[3] = dlb2->cos_ports[3];
828b828e0daSAbdullah Sevincer 	cfg->num_ldb_ports = resources_asked->num_ldb_ports - cos_ports;
829bec8901bSTimothy McDaniel 
83062e45206STimothy McDaniel 	if (device_version == DLB2_HW_V2)
83162e45206STimothy McDaniel 		cfg->num_ldb_credits = resources_asked->num_ldb_credits;
832f3cad285STimothy McDaniel 
833f3cad285STimothy McDaniel 	cfg->num_atomic_inflights =
834f3cad285STimothy McDaniel 		DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
835f3cad285STimothy McDaniel 		cfg->num_ldb_queues;
836f3cad285STimothy McDaniel 
837f3cad285STimothy McDaniel 	cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
8389c9e7232SAbdullah Sevincer 		evdev_dlb2_default_info.max_event_port_dequeue_depth;
839f3cad285STimothy McDaniel 
84062e45206STimothy McDaniel 	if (device_version == DLB2_HW_V2_5) {
841e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d",
84262e45206STimothy McDaniel 			     cfg->num_ldb_queues,
84362e45206STimothy McDaniel 			     resources_asked->num_ldb_ports,
84462e45206STimothy McDaniel 			     cfg->num_dir_ports,
84562e45206STimothy McDaniel 			     cfg->num_atomic_inflights,
84662e45206STimothy McDaniel 			     cfg->num_hist_list_entries,
84762e45206STimothy McDaniel 			     cfg->num_credits);
84862e45206STimothy McDaniel 	} else {
849e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d",
850f3cad285STimothy McDaniel 			     cfg->num_ldb_queues,
851f3cad285STimothy McDaniel 			     resources_asked->num_ldb_ports,
852f3cad285STimothy McDaniel 			     cfg->num_dir_ports,
853f3cad285STimothy McDaniel 			     cfg->num_atomic_inflights,
854f3cad285STimothy McDaniel 			     cfg->num_hist_list_entries,
855f3cad285STimothy McDaniel 			     cfg->num_ldb_credits,
856f3cad285STimothy McDaniel 			     cfg->num_dir_credits);
85762e45206STimothy McDaniel 	}
858f3cad285STimothy McDaniel 
859f3cad285STimothy McDaniel 	/* Configure the QM */
860f3cad285STimothy McDaniel 
861f3cad285STimothy McDaniel 	ret = dlb2_iface_sched_domain_create(handle, cfg);
862f3cad285STimothy McDaniel 	if (ret < 0) {
863f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s",
864f3cad285STimothy McDaniel 			     ret,
865f3cad285STimothy McDaniel 			     dlb2_error_strings[cfg->response.status]);
866f3cad285STimothy McDaniel 
867f3cad285STimothy McDaniel 		goto error_exit;
868f3cad285STimothy McDaniel 	}
869f3cad285STimothy McDaniel 
870f3cad285STimothy McDaniel 	handle->domain_id = cfg->response.id;
871f3cad285STimothy McDaniel 	handle->cfg.configured = true;
872f3cad285STimothy McDaniel 
873f3cad285STimothy McDaniel error_exit:
874f3cad285STimothy McDaniel 
875f3cad285STimothy McDaniel 	return ret;
876f3cad285STimothy McDaniel }
877f3cad285STimothy McDaniel 
878f3cad285STimothy McDaniel static void
879f3cad285STimothy McDaniel dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
880f3cad285STimothy McDaniel {
881f3cad285STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
882f3cad285STimothy McDaniel 	enum dlb2_configuration_state config_state;
883f3cad285STimothy McDaniel 	int i, j;
884f3cad285STimothy McDaniel 
885f3cad285STimothy McDaniel 	dlb2_iface_domain_reset(dlb2);
886f3cad285STimothy McDaniel 
887f3cad285STimothy McDaniel 	/* Free all dynamically allocated port memory */
888f3cad285STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++)
889f3cad285STimothy McDaniel 		dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
890f3cad285STimothy McDaniel 
891f3cad285STimothy McDaniel 	/* If reconfiguring, mark the device's queues and ports as "previously
892f3cad285STimothy McDaniel 	 * configured." If the user doesn't reconfigure them, the PMD will
893f3cad285STimothy McDaniel 	 * reapply their previous configuration when the device is started.
894f3cad285STimothy McDaniel 	 */
895f3cad285STimothy McDaniel 	config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
896f3cad285STimothy McDaniel 		DLB2_NOT_CONFIGURED;
897f3cad285STimothy McDaniel 
898f3cad285STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
899f3cad285STimothy McDaniel 		dlb2->ev_ports[i].qm_port.config_state = config_state;
900f3cad285STimothy McDaniel 		/* Reset setup_done so ports can be reconfigured */
901f3cad285STimothy McDaniel 		dlb2->ev_ports[i].setup_done = false;
902f3cad285STimothy McDaniel 		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
903f3cad285STimothy McDaniel 			dlb2->ev_ports[i].link[j].mapped = false;
904f3cad285STimothy McDaniel 	}
905f3cad285STimothy McDaniel 
906f3cad285STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++)
907f3cad285STimothy McDaniel 		dlb2->ev_queues[i].qm_queue.config_state = config_state;
908f3cad285STimothy McDaniel 
909b66a418dSTimothy McDaniel 	for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
910f3cad285STimothy McDaniel 		dlb2->ev_queues[i].setup_done = false;
911f3cad285STimothy McDaniel 
912f3cad285STimothy McDaniel 	dlb2->num_ports = 0;
913f3cad285STimothy McDaniel 	dlb2->num_ldb_ports = 0;
914f3cad285STimothy McDaniel 	dlb2->num_dir_ports = 0;
915f3cad285STimothy McDaniel 	dlb2->num_queues = 0;
916f3cad285STimothy McDaniel 	dlb2->num_ldb_queues = 0;
917f3cad285STimothy McDaniel 	dlb2->num_dir_queues = 0;
918f3cad285STimothy McDaniel 	dlb2->configured = false;
919f3cad285STimothy McDaniel }
920f3cad285STimothy McDaniel 
921f3cad285STimothy McDaniel /* Note: 1 QM instance per QM device, QM instance/device == event device */
922f3cad285STimothy McDaniel static int
923f3cad285STimothy McDaniel dlb2_eventdev_configure(const struct rte_eventdev *dev)
924f3cad285STimothy McDaniel {
925f3cad285STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
926f3cad285STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
927f3cad285STimothy McDaniel 	struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
928f3cad285STimothy McDaniel 	const struct rte_eventdev_data *data = dev->data;
929f3cad285STimothy McDaniel 	const struct rte_event_dev_config *config = &data->dev_conf;
930f3cad285STimothy McDaniel 	int ret;
931f3cad285STimothy McDaniel 
932f3cad285STimothy McDaniel 	/* If this eventdev is already configured, we must release the current
933f3cad285STimothy McDaniel 	 * scheduling domain before attempting to configure a new one.
934f3cad285STimothy McDaniel 	 */
935f3cad285STimothy McDaniel 	if (dlb2->configured) {
936f3cad285STimothy McDaniel 		dlb2_hw_reset_sched_domain(dev, true);
937f3cad285STimothy McDaniel 		ret = dlb2_hw_query_resources(dlb2);
938f3cad285STimothy McDaniel 		if (ret) {
939f665790aSDavid Marchand 			DLB2_LOG_ERR("get resources err=%d, devid=%d",
940f3cad285STimothy McDaniel 				     ret, data->dev_id);
941f3cad285STimothy McDaniel 			return ret;
942f3cad285STimothy McDaniel 		}
943f3cad285STimothy McDaniel 	}
944f3cad285STimothy McDaniel 
945f3cad285STimothy McDaniel 	if (config->nb_event_queues > rsrcs->num_queues) {
946f665790aSDavid Marchand 		DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).",
947f3cad285STimothy McDaniel 			     config->nb_event_queues,
948f3cad285STimothy McDaniel 			     rsrcs->num_queues);
949f3cad285STimothy McDaniel 		return -EINVAL;
950f3cad285STimothy McDaniel 	}
951f3cad285STimothy McDaniel 	if (config->nb_event_ports > (rsrcs->num_ldb_ports
952f3cad285STimothy McDaniel 			+ rsrcs->num_dir_ports)) {
953f665790aSDavid Marchand 		DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).",
954f3cad285STimothy McDaniel 			     config->nb_event_ports,
955f3cad285STimothy McDaniel 			     (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
956f3cad285STimothy McDaniel 		return -EINVAL;
957f3cad285STimothy McDaniel 	}
958f3cad285STimothy McDaniel 	if (config->nb_events_limit > rsrcs->nb_events_limit) {
959f665790aSDavid Marchand 		DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).",
960f3cad285STimothy McDaniel 			     config->nb_events_limit,
961f3cad285STimothy McDaniel 			     rsrcs->nb_events_limit);
962f3cad285STimothy McDaniel 		return -EINVAL;
963f3cad285STimothy McDaniel 	}
964f3cad285STimothy McDaniel 
965f3cad285STimothy McDaniel 	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
966f3cad285STimothy McDaniel 		dlb2->global_dequeue_wait = false;
967f3cad285STimothy McDaniel 	else {
968f3cad285STimothy McDaniel 		uint32_t timeout32;
969f3cad285STimothy McDaniel 
970f3cad285STimothy McDaniel 		dlb2->global_dequeue_wait = true;
971f3cad285STimothy McDaniel 
972f3cad285STimothy McDaniel 		/* note size mismatch of timeout vals in eventdev lib. */
973f3cad285STimothy McDaniel 		timeout32 = config->dequeue_timeout_ns;
974f3cad285STimothy McDaniel 
975f3cad285STimothy McDaniel 		dlb2->global_dequeue_wait_ticks =
976f3cad285STimothy McDaniel 			timeout32 * (rte_get_timer_hz() / 1E9);
977f3cad285STimothy McDaniel 	}
978f3cad285STimothy McDaniel 
979f3cad285STimothy McDaniel 	/* Does this platform support umonitor/umwait? */
9807be66a3bSTimothy McDaniel 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG))
981f3cad285STimothy McDaniel 		dlb2->umwait_allowed = true;
982f3cad285STimothy McDaniel 
983f3cad285STimothy McDaniel 	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
984f3cad285STimothy McDaniel 	rsrcs->num_ldb_ports  = config->nb_event_ports - rsrcs->num_dir_ports;
985f3cad285STimothy McDaniel 	/* 1 dir queue per dir port */
986f3cad285STimothy McDaniel 	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
987f3cad285STimothy McDaniel 
98862e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2_5) {
98962e45206STimothy McDaniel 		rsrcs->num_credits = 0;
99062e45206STimothy McDaniel 		if (rsrcs->num_ldb_queues || rsrcs->num_dir_ports)
99162e45206STimothy McDaniel 			rsrcs->num_credits = config->nb_events_limit;
99262e45206STimothy McDaniel 	} else {
99362e45206STimothy McDaniel 		/* Scale down nb_events_limit by 2 for directed credits,
99462e45206STimothy McDaniel 		 * since there are 2x as many load-balanced credits.
995f3cad285STimothy McDaniel 		 */
996f3cad285STimothy McDaniel 		rsrcs->num_ldb_credits = 0;
997f3cad285STimothy McDaniel 		rsrcs->num_dir_credits = 0;
998f3cad285STimothy McDaniel 
999f3cad285STimothy McDaniel 		if (rsrcs->num_ldb_queues)
1000f3cad285STimothy McDaniel 			rsrcs->num_ldb_credits = config->nb_events_limit;
1001f3cad285STimothy McDaniel 		if (rsrcs->num_dir_ports)
1002e4869c0bSPravin Pathak 			rsrcs->num_dir_credits = config->nb_events_limit / 2;
1003f3cad285STimothy McDaniel 		if (dlb2->num_dir_credits_override != -1)
1004f3cad285STimothy McDaniel 			rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
100562e45206STimothy McDaniel 	}
1006f3cad285STimothy McDaniel 
1007bec8901bSTimothy McDaniel 	if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs,
1008bec8901bSTimothy McDaniel 					dlb2->version) < 0) {
1009f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed");
1010f3cad285STimothy McDaniel 		return -ENODEV;
1011f3cad285STimothy McDaniel 	}
1012f3cad285STimothy McDaniel 
1013f3cad285STimothy McDaniel 	dlb2->new_event_limit = config->nb_events_limit;
1014e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
1015f3cad285STimothy McDaniel 
1016f3cad285STimothy McDaniel 	/* Save number of ports/queues for this event dev */
1017f3cad285STimothy McDaniel 	dlb2->num_ports = config->nb_event_ports;
1018f3cad285STimothy McDaniel 	dlb2->num_queues = config->nb_event_queues;
1019f3cad285STimothy McDaniel 	dlb2->num_dir_ports = rsrcs->num_dir_ports;
1020f3cad285STimothy McDaniel 	dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
1021f3cad285STimothy McDaniel 	dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
1022f3cad285STimothy McDaniel 	dlb2->num_dir_queues = dlb2->num_dir_ports;
102362e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2_5) {
102462e45206STimothy McDaniel 		dlb2->credit_pool = rsrcs->num_credits;
102562e45206STimothy McDaniel 		dlb2->max_credits = rsrcs->num_credits;
102662e45206STimothy McDaniel 	} else {
1027f3cad285STimothy McDaniel 		dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
1028f3cad285STimothy McDaniel 		dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
1029f3cad285STimothy McDaniel 		dlb2->dir_credit_pool = rsrcs->num_dir_credits;
1030f3cad285STimothy McDaniel 		dlb2->max_dir_credits = rsrcs->num_dir_credits;
103162e45206STimothy McDaniel 	}
1032f3cad285STimothy McDaniel 
1033f3cad285STimothy McDaniel 	dlb2->configured = true;
1034f3cad285STimothy McDaniel 
1035f3cad285STimothy McDaniel 	return 0;
1036f3cad285STimothy McDaniel }
1037f3cad285STimothy McDaniel 
1038f3cad285STimothy McDaniel static void
103999f66f33STimothy McDaniel dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
104099f66f33STimothy McDaniel 				    uint8_t port_id,
104199f66f33STimothy McDaniel 				    struct rte_event_port_conf *port_conf)
104299f66f33STimothy McDaniel {
104399f66f33STimothy McDaniel 	RTE_SET_USED(port_id);
104499f66f33STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
104599f66f33STimothy McDaniel 
104699f66f33STimothy McDaniel 	port_conf->new_event_threshold = dlb2->new_event_limit;
104799f66f33STimothy McDaniel 	port_conf->dequeue_depth = 32;
104899f66f33STimothy McDaniel 	port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
104999f66f33STimothy McDaniel 	port_conf->event_port_cfg = 0;
105099f66f33STimothy McDaniel }
105199f66f33STimothy McDaniel 
105299f66f33STimothy McDaniel static void
105399f66f33STimothy McDaniel dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
105499f66f33STimothy McDaniel 				     uint8_t queue_id,
105599f66f33STimothy McDaniel 				     struct rte_event_queue_conf *queue_conf)
105699f66f33STimothy McDaniel {
105799f66f33STimothy McDaniel 	RTE_SET_USED(dev);
105899f66f33STimothy McDaniel 	RTE_SET_USED(queue_id);
105999f66f33STimothy McDaniel 
106099f66f33STimothy McDaniel 	queue_conf->nb_atomic_flows = 1024;
106199f66f33STimothy McDaniel 	queue_conf->nb_atomic_order_sequences = 64;
106299f66f33STimothy McDaniel 	queue_conf->event_queue_cfg = 0;
106399f66f33STimothy McDaniel 	queue_conf->priority = 0;
106499f66f33STimothy McDaniel }
106599f66f33STimothy McDaniel 
10667e668e57STimothy McDaniel static int32_t
10677e668e57STimothy McDaniel dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
10687e668e57STimothy McDaniel {
10697e668e57STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
10707e668e57STimothy McDaniel 	struct dlb2_get_sn_allocation_args cfg;
10717e668e57STimothy McDaniel 	int ret;
10727e668e57STimothy McDaniel 
10737e668e57STimothy McDaniel 	cfg.group = group;
10747e668e57STimothy McDaniel 
10757e668e57STimothy McDaniel 	ret = dlb2_iface_get_sn_allocation(handle, &cfg);
10767e668e57STimothy McDaniel 	if (ret < 0) {
1077f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)",
10787e668e57STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
10797e668e57STimothy McDaniel 		return ret;
10807e668e57STimothy McDaniel 	}
10817e668e57STimothy McDaniel 
10827e668e57STimothy McDaniel 	return cfg.response.id;
10837e668e57STimothy McDaniel }
10847e668e57STimothy McDaniel 
10857e668e57STimothy McDaniel static int
10867e668e57STimothy McDaniel dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
10877e668e57STimothy McDaniel {
10887e668e57STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
10897e668e57STimothy McDaniel 	struct dlb2_set_sn_allocation_args cfg;
10907e668e57STimothy McDaniel 	int ret;
10917e668e57STimothy McDaniel 
10927e668e57STimothy McDaniel 	cfg.num = num;
10937e668e57STimothy McDaniel 	cfg.group = group;
10947e668e57STimothy McDaniel 
10957e668e57STimothy McDaniel 	ret = dlb2_iface_set_sn_allocation(handle, &cfg);
10967e668e57STimothy McDaniel 	if (ret < 0) {
1097f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)",
10987e668e57STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
10997e668e57STimothy McDaniel 		return ret;
11007e668e57STimothy McDaniel 	}
11017e668e57STimothy McDaniel 
11027e668e57STimothy McDaniel 	return ret;
11037e668e57STimothy McDaniel }
11047e668e57STimothy McDaniel 
11057e668e57STimothy McDaniel static int32_t
11067e668e57STimothy McDaniel dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
11077e668e57STimothy McDaniel {
11087e668e57STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
11097e668e57STimothy McDaniel 	struct dlb2_get_sn_occupancy_args cfg;
11107e668e57STimothy McDaniel 	int ret;
11117e668e57STimothy McDaniel 
11127e668e57STimothy McDaniel 	cfg.group = group;
11137e668e57STimothy McDaniel 
11147e668e57STimothy McDaniel 	ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
11157e668e57STimothy McDaniel 	if (ret < 0) {
1116f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)",
11177e668e57STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
11187e668e57STimothy McDaniel 		return ret;
11197e668e57STimothy McDaniel 	}
11207e668e57STimothy McDaniel 
11217e668e57STimothy McDaniel 	return cfg.response.id;
11227e668e57STimothy McDaniel }
11237e668e57STimothy McDaniel 
11247e668e57STimothy McDaniel /* Query the current sequence number allocations and, if they conflict with the
11257e668e57STimothy McDaniel  * requested LDB queue configuration, attempt to re-allocate sequence numbers.
11267e668e57STimothy McDaniel  * This is best-effort; if it fails, the PMD will attempt to configure the
11277e668e57STimothy McDaniel  * load-balanced queue and return an error.
11287e668e57STimothy McDaniel  */
11297e668e57STimothy McDaniel static void
11307e668e57STimothy McDaniel dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
11317e668e57STimothy McDaniel 			   const struct rte_event_queue_conf *queue_conf)
11327e668e57STimothy McDaniel {
11337e668e57STimothy McDaniel 	int grp_occupancy[DLB2_NUM_SN_GROUPS];
11347e668e57STimothy McDaniel 	int grp_alloc[DLB2_NUM_SN_GROUPS];
11357e668e57STimothy McDaniel 	int i, sequence_numbers;
11367e668e57STimothy McDaniel 
11377e668e57STimothy McDaniel 	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
11387e668e57STimothy McDaniel 
11397e668e57STimothy McDaniel 	for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
11407e668e57STimothy McDaniel 		int total_slots;
11417e668e57STimothy McDaniel 
11427e668e57STimothy McDaniel 		grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
11437e668e57STimothy McDaniel 		if (grp_alloc[i] < 0)
11447e668e57STimothy McDaniel 			return;
11457e668e57STimothy McDaniel 
11467e668e57STimothy McDaniel 		total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
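		/*
		 * Illustration (assumes DLB2_MAX_LDB_SN_ALLOC == 1024): a
		 * group configured for 64 sequence numbers per queue yields
		 * total_slots == 1024 / 64 == 16 queue slots.
		 */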
11477e668e57STimothy McDaniel 
11487e668e57STimothy McDaniel 		grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
11497e668e57STimothy McDaniel 		if (grp_occupancy[i] < 0)
11507e668e57STimothy McDaniel 			return;
11517e668e57STimothy McDaniel 
11527e668e57STimothy McDaniel 		/* This group already provides the requested sequence numbers
11537e668e57STimothy McDaniel 		 * and has a free slot, so no further configuration is required.
11547e668e57STimothy McDaniel 		 */
11557e668e57STimothy McDaniel 		if (grp_alloc[i] == sequence_numbers &&
11567e668e57STimothy McDaniel 		    grp_occupancy[i] < total_slots)
11577e668e57STimothy McDaniel 			return;
11587e668e57STimothy McDaniel 	}
11597e668e57STimothy McDaniel 
11607e668e57STimothy McDaniel 	/* None of the sequence number groups are configured for the requested
11617e668e57STimothy McDaniel 	 * sequence numbers, so we have to reconfigure one of them. This is
11627e668e57STimothy McDaniel 	 * only possible if a group is not in use.
11637e668e57STimothy McDaniel 	 */
11647e668e57STimothy McDaniel 	for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
11657e668e57STimothy McDaniel 		if (grp_occupancy[i] == 0)
11667e668e57STimothy McDaniel 			break;
11677e668e57STimothy McDaniel 	}
11687e668e57STimothy McDaniel 
11697e668e57STimothy McDaniel 	if (i == DLB2_NUM_SN_GROUPS) {
1170f665790aSDavid Marchand 		DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots",
11717e668e57STimothy McDaniel 		       __func__, sequence_numbers);
11727e668e57STimothy McDaniel 		return;
11737e668e57STimothy McDaniel 	}
11747e668e57STimothy McDaniel 
11757e668e57STimothy McDaniel 	/* Attempt to configure group i with the requested number of sequence
11767e668e57STimothy McDaniel 	 * numbers. Ignore the return value -- if this fails, the error will be
11777e668e57STimothy McDaniel 	 * caught during subsequent queue configuration.
11787e668e57STimothy McDaniel 	 */
11797e668e57STimothy McDaniel 	dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
11807e668e57STimothy McDaniel }
11817e668e57STimothy McDaniel 
11827e668e57STimothy McDaniel static int32_t
11837e668e57STimothy McDaniel dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
11847e668e57STimothy McDaniel 			 struct dlb2_eventdev_queue *ev_queue,
11857e668e57STimothy McDaniel 			 const struct rte_event_queue_conf *evq_conf)
11867e668e57STimothy McDaniel {
11877e668e57STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
11887e668e57STimothy McDaniel 	struct dlb2_queue *queue = &ev_queue->qm_queue;
11897e668e57STimothy McDaniel 	struct dlb2_create_ldb_queue_args cfg;
11907e668e57STimothy McDaniel 	int32_t ret;
11917e668e57STimothy McDaniel 	uint32_t qm_qid;
11927e668e57STimothy McDaniel 	int sched_type = -1;
11937e668e57STimothy McDaniel 
11947e668e57STimothy McDaniel 	if (evq_conf == NULL)
11957e668e57STimothy McDaniel 		return -EINVAL;
11967e668e57STimothy McDaniel 
11977e668e57STimothy McDaniel 	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
11987e668e57STimothy McDaniel 		if (evq_conf->nb_atomic_order_sequences != 0)
11997e668e57STimothy McDaniel 			sched_type = RTE_SCHED_TYPE_ORDERED;
12007e668e57STimothy McDaniel 		else
12017e668e57STimothy McDaniel 			sched_type = RTE_SCHED_TYPE_PARALLEL;
12027e668e57STimothy McDaniel 	} else
12037e668e57STimothy McDaniel 		sched_type = evq_conf->schedule_type;
12047e668e57STimothy McDaniel 
12057e668e57STimothy McDaniel 	cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
12067e668e57STimothy McDaniel 	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
12077e668e57STimothy McDaniel 	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
12087e668e57STimothy McDaniel 
12097e668e57STimothy McDaniel 	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
12107e668e57STimothy McDaniel 		cfg.num_sequence_numbers = 0;
12117e668e57STimothy McDaniel 		cfg.num_qid_inflights = 2048;
12127e668e57STimothy McDaniel 	}
12137e668e57STimothy McDaniel 
12147e668e57STimothy McDaniel 	/* The application should set this to the number of hardware flows it
12157e668e57STimothy McDaniel 	 * wants, not the overall number of flows it will use. E.g. if the app
12167e668e57STimothy McDaniel 	 * uses 64 flows and sets the compression level to 64, in the best case
12177e668e57STimothy McDaniel 	 * it gets 64 unique hashed flows in hardware.
12187e668e57STimothy McDaniel 	 */
12197e668e57STimothy McDaniel 	switch (evq_conf->nb_atomic_flows) {
12207e668e57STimothy McDaniel 	/* Valid DLB2 compression levels */
12217e668e57STimothy McDaniel 	case 64:
12227e668e57STimothy McDaniel 	case 128:
12237e668e57STimothy McDaniel 	case 256:
12247e668e57STimothy McDaniel 	case 512:
12257e668e57STimothy McDaniel 	case (1 * 1024): /* 1K */
12267e668e57STimothy McDaniel 	case (2 * 1024): /* 2K */
12277e668e57STimothy McDaniel 	case (4 * 1024): /* 4K */
12287e668e57STimothy McDaniel 	case (64 * 1024): /* 64K */
12297e668e57STimothy McDaniel 		cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
12307e668e57STimothy McDaniel 		break;
12317e668e57STimothy McDaniel 	default:
12327e668e57STimothy McDaniel 		/* Invalid compression level */
12337e668e57STimothy McDaniel 		cfg.lock_id_comp_level = 0; /* no compression */
12347e668e57STimothy McDaniel 	}
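	/*
	 * Example for illustration: nb_atomic_flows == 1024 maps directly to
	 * lock_id_comp_level == 1024, while an unsupported value such as 1000
	 * falls through to the default case and disables compression.
	 */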
12357e668e57STimothy McDaniel 
12367e668e57STimothy McDaniel 	if (ev_queue->depth_threshold == 0) {
12377be66a3bSTimothy McDaniel 		cfg.depth_threshold = dlb2->default_depth_thresh;
12387be66a3bSTimothy McDaniel 		ev_queue->depth_threshold =
12397be66a3bSTimothy McDaniel 			dlb2->default_depth_thresh;
12407e668e57STimothy McDaniel 	} else
12417e668e57STimothy McDaniel 		cfg.depth_threshold = ev_queue->depth_threshold;
12427e668e57STimothy McDaniel 
12437e668e57STimothy McDaniel 	ret = dlb2_iface_ldb_queue_create(handle, &cfg);
12447e668e57STimothy McDaniel 	if (ret < 0) {
1245f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)",
12467e668e57STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
12477e668e57STimothy McDaniel 		return -EINVAL;
12487e668e57STimothy McDaniel 	}
12497e668e57STimothy McDaniel 
12507e668e57STimothy McDaniel 	qm_qid = cfg.response.id;
12517e668e57STimothy McDaniel 
12527e668e57STimothy McDaniel 	/* Save off queue config for debug, resource lookups, and reconfig */
12537e668e57STimothy McDaniel 	queue->num_qid_inflights = cfg.num_qid_inflights;
12547e668e57STimothy McDaniel 	queue->num_atm_inflights = cfg.num_atomic_inflights;
12557e668e57STimothy McDaniel 
12567e668e57STimothy McDaniel 	queue->sched_type = sched_type;
12577e668e57STimothy McDaniel 	queue->config_state = DLB2_CONFIGURED;
12587e668e57STimothy McDaniel 
1259e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d",
12607e668e57STimothy McDaniel 		     qm_qid,
12617e668e57STimothy McDaniel 		     cfg.num_atomic_inflights,
12627e668e57STimothy McDaniel 		     cfg.num_sequence_numbers,
12637e668e57STimothy McDaniel 		     cfg.num_qid_inflights);
12647e668e57STimothy McDaniel 
12657e668e57STimothy McDaniel 	return qm_qid;
12667e668e57STimothy McDaniel }
12677e668e57STimothy McDaniel 
12687e668e57STimothy McDaniel static int
12697e668e57STimothy McDaniel dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
12707e668e57STimothy McDaniel 			      struct dlb2_eventdev_queue *ev_queue,
12717e668e57STimothy McDaniel 			      const struct rte_event_queue_conf *queue_conf)
12727e668e57STimothy McDaniel {
12737e668e57STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
12747e668e57STimothy McDaniel 	int32_t qm_qid;
12757e668e57STimothy McDaniel 
12767e668e57STimothy McDaniel 	if (queue_conf->nb_atomic_order_sequences)
12777e668e57STimothy McDaniel 		dlb2_program_sn_allocation(dlb2, queue_conf);
12787e668e57STimothy McDaniel 
12797e668e57STimothy McDaniel 	qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
12807e668e57STimothy McDaniel 	if (qm_qid < 0) {
1281f665790aSDavid Marchand 		DLB2_LOG_ERR("Failed to create the load-balanced queue");
12827e668e57STimothy McDaniel 
12837e668e57STimothy McDaniel 		return qm_qid;
12847e668e57STimothy McDaniel 	}
12857e668e57STimothy McDaniel 
12867e668e57STimothy McDaniel 	dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
12877e668e57STimothy McDaniel 
12887e668e57STimothy McDaniel 	ev_queue->qm_queue.id = qm_qid;
12897e668e57STimothy McDaniel 
12907e668e57STimothy McDaniel 	return 0;
12917e668e57STimothy McDaniel }
12927e668e57STimothy McDaniel 
12937e668e57STimothy McDaniel static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
12947e668e57STimothy McDaniel {
12957e668e57STimothy McDaniel 	int i, num = 0;
12967e668e57STimothy McDaniel 
12977e668e57STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
12987e668e57STimothy McDaniel 		if (dlb2->ev_queues[i].setup_done &&
12997e668e57STimothy McDaniel 		    dlb2->ev_queues[i].qm_queue.is_directed)
13007e668e57STimothy McDaniel 			num++;
13017e668e57STimothy McDaniel 	}
13027e668e57STimothy McDaniel 
13037e668e57STimothy McDaniel 	return num;
13047e668e57STimothy McDaniel }
13057e668e57STimothy McDaniel 
13067e668e57STimothy McDaniel static void
13077e668e57STimothy McDaniel dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
13087e668e57STimothy McDaniel 			 struct dlb2_eventdev_queue *ev_queue)
13097e668e57STimothy McDaniel {
13107e668e57STimothy McDaniel 	struct dlb2_eventdev_port *ev_port;
13117e668e57STimothy McDaniel 	int i, j;
13127e668e57STimothy McDaniel 
13137e668e57STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
13147e668e57STimothy McDaniel 		ev_port = &dlb2->ev_ports[i];
13157e668e57STimothy McDaniel 
13167e668e57STimothy McDaniel 		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
13177e668e57STimothy McDaniel 			if (!ev_port->link[j].valid ||
13187e668e57STimothy McDaniel 			    ev_port->link[j].queue_id != ev_queue->id)
13197e668e57STimothy McDaniel 				continue;
13207e668e57STimothy McDaniel 
13217e668e57STimothy McDaniel 			ev_port->link[j].valid = false;
13227e668e57STimothy McDaniel 			ev_port->num_links--;
13237e668e57STimothy McDaniel 		}
13247e668e57STimothy McDaniel 	}
13257e668e57STimothy McDaniel 
13267e668e57STimothy McDaniel 	ev_queue->num_links = 0;
13277e668e57STimothy McDaniel }
13287e668e57STimothy McDaniel 
13297e668e57STimothy McDaniel static int
13307e668e57STimothy McDaniel dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
13317e668e57STimothy McDaniel 			  uint8_t ev_qid,
13327e668e57STimothy McDaniel 			  const struct rte_event_queue_conf *queue_conf)
13337e668e57STimothy McDaniel {
13347e668e57STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
13357e668e57STimothy McDaniel 	struct dlb2_eventdev_queue *ev_queue;
13367e668e57STimothy McDaniel 	int ret;
13377e668e57STimothy McDaniel 
13387e668e57STimothy McDaniel 	if (queue_conf == NULL)
13397e668e57STimothy McDaniel 		return -EINVAL;
13407e668e57STimothy McDaniel 
13417e668e57STimothy McDaniel 	if (ev_qid >= dlb2->num_queues)
13427e668e57STimothy McDaniel 		return -EINVAL;
13437e668e57STimothy McDaniel 
13447e668e57STimothy McDaniel 	ev_queue = &dlb2->ev_queues[ev_qid];
13457e668e57STimothy McDaniel 
13467e668e57STimothy McDaniel 	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
13477e668e57STimothy McDaniel 		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
13487e668e57STimothy McDaniel 	ev_queue->id = ev_qid;
13497e668e57STimothy McDaniel 	ev_queue->conf = *queue_conf;
13507e668e57STimothy McDaniel 
13517e668e57STimothy McDaniel 	if (!ev_queue->qm_queue.is_directed) {
13527e668e57STimothy McDaniel 		ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
13537e668e57STimothy McDaniel 	} else {
13547e668e57STimothy McDaniel 		/* The directed queue isn't setup until link time, at which
13557e668e57STimothy McDaniel 		 * point we know its directed port ID. Directed queue setup
13567e668e57STimothy McDaniel 		 * will only fail if this queue is already setup or there are
13577e668e57STimothy McDaniel 		 * no directed queues left to configure.
13587e668e57STimothy McDaniel 		 */
13597e668e57STimothy McDaniel 		ret = 0;
13607e668e57STimothy McDaniel 
13617e668e57STimothy McDaniel 		ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
13627e668e57STimothy McDaniel 
13637e668e57STimothy McDaniel 		if (ev_queue->setup_done ||
13647e668e57STimothy McDaniel 		    dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
13657e668e57STimothy McDaniel 			ret = -EINVAL;
13667e668e57STimothy McDaniel 	}
13677e668e57STimothy McDaniel 
13687e668e57STimothy McDaniel 	/* Tear down pre-existing port->queue links */
13697e668e57STimothy McDaniel 	if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
13707e668e57STimothy McDaniel 		dlb2_queue_link_teardown(dlb2, ev_queue);
13717e668e57STimothy McDaniel 
13727e668e57STimothy McDaniel 	if (!ret)
13737e668e57STimothy McDaniel 		ev_queue->setup_done = true;
13747e668e57STimothy McDaniel 
13757e668e57STimothy McDaniel 	return ret;
13767e668e57STimothy McDaniel }
13777e668e57STimothy McDaniel 
13783a6d0c04STimothy McDaniel static int
13793a6d0c04STimothy McDaniel dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
13803a6d0c04STimothy McDaniel {
13813a6d0c04STimothy McDaniel 	struct dlb2_cq_pop_qe *qe;
13823a6d0c04STimothy McDaniel 
13833a6d0c04STimothy McDaniel 	qe = rte_zmalloc(mz_name,
13843a6d0c04STimothy McDaniel 			DLB2_NUM_QES_PER_CACHE_LINE *
13853a6d0c04STimothy McDaniel 				sizeof(struct dlb2_cq_pop_qe),
13863a6d0c04STimothy McDaniel 			RTE_CACHE_LINE_SIZE);
13873a6d0c04STimothy McDaniel 
13883a6d0c04STimothy McDaniel 	if (qe == NULL) {
1389f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: no memory for consume_qe");
13903a6d0c04STimothy McDaniel 		return -ENOMEM;
13913a6d0c04STimothy McDaniel 	}
13923a6d0c04STimothy McDaniel 	qm_port->consume_qe = qe;
13933a6d0c04STimothy McDaniel 
13943a6d0c04STimothy McDaniel 	qe->qe_valid = 0;
13953a6d0c04STimothy McDaniel 	qe->qe_frag = 0;
13963a6d0c04STimothy McDaniel 	qe->qe_comp = 0;
13973a6d0c04STimothy McDaniel 	qe->cq_token = 1;
13983a6d0c04STimothy McDaniel 	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
13993a6d0c04STimothy McDaniel 	 * and so on.
14003a6d0c04STimothy McDaniel 	 */
14013a6d0c04STimothy McDaniel 	qe->tokens = 0;	/* set at run time */
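	/*
	 * Illustration: to have the pop QE return four tokens at run time,
	 * the tokens field would be written with 3 (0-based, per the note
	 * above).
	 */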
14023a6d0c04STimothy McDaniel 	qe->meas_lat = 0;
14033a6d0c04STimothy McDaniel 	qe->no_dec = 0;
14043a6d0c04STimothy McDaniel 	/* Completion IDs are disabled */
14053a6d0c04STimothy McDaniel 	qe->cmp_id = 0;
14063a6d0c04STimothy McDaniel 
14073a6d0c04STimothy McDaniel 	return 0;
14083a6d0c04STimothy McDaniel }
14093a6d0c04STimothy McDaniel 
14103a6d0c04STimothy McDaniel static int
14113a6d0c04STimothy McDaniel dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
14123a6d0c04STimothy McDaniel {
14133a6d0c04STimothy McDaniel 	struct dlb2_enqueue_qe *qe;
14143a6d0c04STimothy McDaniel 
14153a6d0c04STimothy McDaniel 	qe = rte_zmalloc(mz_name,
14163a6d0c04STimothy McDaniel 			DLB2_NUM_QES_PER_CACHE_LINE *
14173a6d0c04STimothy McDaniel 				sizeof(struct dlb2_enqueue_qe),
14183a6d0c04STimothy McDaniel 			RTE_CACHE_LINE_SIZE);
14193a6d0c04STimothy McDaniel 
14203a6d0c04STimothy McDaniel 	if (qe == NULL) {
1421f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: no memory for complete_qe");
14223a6d0c04STimothy McDaniel 		return -ENOMEM;
14233a6d0c04STimothy McDaniel 	}
14243a6d0c04STimothy McDaniel 	qm_port->int_arm_qe = qe;
14253a6d0c04STimothy McDaniel 
14263a6d0c04STimothy McDaniel 	/* V2 - INT ARM is CQ_TOKEN + FRAG */
14273a6d0c04STimothy McDaniel 	qe->qe_valid = 0;
14283a6d0c04STimothy McDaniel 	qe->qe_frag = 1;
14293a6d0c04STimothy McDaniel 	qe->qe_comp = 0;
14303a6d0c04STimothy McDaniel 	qe->cq_token = 1;
14313a6d0c04STimothy McDaniel 	qe->meas_lat = 0;
14323a6d0c04STimothy McDaniel 	qe->no_dec = 0;
14333a6d0c04STimothy McDaniel 	/* Completion IDs are disabled */
14343a6d0c04STimothy McDaniel 	qe->cmp_id = 0;
14353a6d0c04STimothy McDaniel 
14363a6d0c04STimothy McDaniel 	return 0;
14373a6d0c04STimothy McDaniel }
14383a6d0c04STimothy McDaniel 
14393a6d0c04STimothy McDaniel static int
14403a6d0c04STimothy McDaniel dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
14413a6d0c04STimothy McDaniel {
14423a6d0c04STimothy McDaniel 	int ret, sz;
14433a6d0c04STimothy McDaniel 
14443a6d0c04STimothy McDaniel 	sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
14453a6d0c04STimothy McDaniel 
14463a6d0c04STimothy McDaniel 	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
14473a6d0c04STimothy McDaniel 
14483a6d0c04STimothy McDaniel 	if (qm_port->qe4 == NULL) {
1449f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: no qe4 memory");
14503a6d0c04STimothy McDaniel 		ret = -ENOMEM;
14513a6d0c04STimothy McDaniel 		goto error_exit;
14523a6d0c04STimothy McDaniel 	}
14533a6d0c04STimothy McDaniel 
14546e2e98d6SAbdullah Sevincer 	if (qm_port->reorder_en) {
14556e2e98d6SAbdullah Sevincer 		sz = sizeof(struct dlb2_reorder);
14566e2e98d6SAbdullah Sevincer 		qm_port->order = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
14576e2e98d6SAbdullah Sevincer 
14586e2e98d6SAbdullah Sevincer 		if (qm_port->order == NULL) {
14596e2e98d6SAbdullah Sevincer 			DLB2_LOG_ERR("dlb2: no reorder memory");
14606e2e98d6SAbdullah Sevincer 			ret = -ENOMEM;
14616e2e98d6SAbdullah Sevincer 			goto error_exit;
14626e2e98d6SAbdullah Sevincer 		}
14636e2e98d6SAbdullah Sevincer 	}
14646e2e98d6SAbdullah Sevincer 
14653a6d0c04STimothy McDaniel 	ret = dlb2_init_int_arm_qe(qm_port, mz_name);
14663a6d0c04STimothy McDaniel 	if (ret < 0) {
1467f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d", ret);
14683a6d0c04STimothy McDaniel 		goto error_exit;
14693a6d0c04STimothy McDaniel 	}
14703a6d0c04STimothy McDaniel 
14713a6d0c04STimothy McDaniel 	ret = dlb2_init_consume_qe(qm_port, mz_name);
14723a6d0c04STimothy McDaniel 	if (ret < 0) {
1473f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d", ret);
14743a6d0c04STimothy McDaniel 		goto error_exit;
14753a6d0c04STimothy McDaniel 	}
14763a6d0c04STimothy McDaniel 
14773a6d0c04STimothy McDaniel 	return 0;
14783a6d0c04STimothy McDaniel 
14793a6d0c04STimothy McDaniel error_exit:
14803a6d0c04STimothy McDaniel 
14813a6d0c04STimothy McDaniel 	dlb2_free_qe_mem(qm_port);
14823a6d0c04STimothy McDaniel 
14833a6d0c04STimothy McDaniel 	return ret;
14843a6d0c04STimothy McDaniel }
14853a6d0c04STimothy McDaniel 
148607d55c41STimothy McDaniel static inline uint16_t
148707d55c41STimothy McDaniel dlb2_event_enqueue_burst_delayed(void *event_port,
148807d55c41STimothy McDaniel 				 const struct rte_event events[],
148907d55c41STimothy McDaniel 				 uint16_t num);
149007d55c41STimothy McDaniel 
149107d55c41STimothy McDaniel static inline uint16_t
149207d55c41STimothy McDaniel dlb2_event_enqueue_new_burst_delayed(void *event_port,
149307d55c41STimothy McDaniel 				     const struct rte_event events[],
149407d55c41STimothy McDaniel 				     uint16_t num);
149507d55c41STimothy McDaniel 
149607d55c41STimothy McDaniel static inline uint16_t
149707d55c41STimothy McDaniel dlb2_event_enqueue_forward_burst_delayed(void *event_port,
149807d55c41STimothy McDaniel 					 const struct rte_event events[],
149907d55c41STimothy McDaniel 					 uint16_t num);
150007d55c41STimothy McDaniel 
1501000a7b8eSTimothy McDaniel /* Generate the required bitmask for rotate-style expected QE gen bits.
1502000a7b8eSTimothy McDaniel  * This requires a pattern of 1's and 0's, starting with the expected
1503000a7b8eSTimothy McDaniel  * entries as 1 bits, so that when hardware writes 0's they read as "new".
1504000a7b8eSTimothy McDaniel  * The ring size must be a power of 2 for the mask to wrap correctly.
1505000a7b8eSTimothy McDaniel  */
1506000a7b8eSTimothy McDaniel static void
1507000a7b8eSTimothy McDaniel dlb2_hw_cq_bitmask_init(struct dlb2_port *qm_port, uint32_t cq_depth)
1508000a7b8eSTimothy McDaniel {
1509000a7b8eSTimothy McDaniel 	uint64_t cq_build_mask = 0;
1510000a7b8eSTimothy McDaniel 	uint32_t i;
1511000a7b8eSTimothy McDaniel 
1512000a7b8eSTimothy McDaniel 	if (cq_depth > 64)
1513000a7b8eSTimothy McDaniel 		return; /* need to fall back to scalar code */
1514000a7b8eSTimothy McDaniel 
1515000a7b8eSTimothy McDaniel 	/*
1516000a7b8eSTimothy McDaniel 	 * All 1's in one u64 and all 0's in the other is the correct starting
1517000a7b8eSTimothy McDaniel 	 * pattern; special-casing cq_depth == 64 is simpler than the loop below.
1518000a7b8eSTimothy McDaniel 	 */
1519000a7b8eSTimothy McDaniel 	if (cq_depth == 64) {
1520000a7b8eSTimothy McDaniel 		qm_port->cq_rolling_mask = 0;
1521000a7b8eSTimothy McDaniel 		qm_port->cq_rolling_mask_2 = -1;
1522000a7b8eSTimothy McDaniel 		return;
1523000a7b8eSTimothy McDaniel 	}
1524000a7b8eSTimothy McDaniel 
1525000a7b8eSTimothy McDaniel 	for (i = 0; i < 64; i += (cq_depth * 2))
1526000a7b8eSTimothy McDaniel 		cq_build_mask |= ((1ULL << cq_depth) - 1) << (i + cq_depth);
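	/*
	 * Worked example (illustrative): for cq_depth == 8 the loop sets
	 * bits 8-15, 24-31, 40-47 and 56-63, i.e.
	 * cq_build_mask == 0xFF00FF00FF00FF00, alternating runs of eight
	 * expected gen-bit values per CQ wrap.
	 */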
1527000a7b8eSTimothy McDaniel 
1528000a7b8eSTimothy McDaniel 	qm_port->cq_rolling_mask = cq_build_mask;
1529000a7b8eSTimothy McDaniel 	qm_port->cq_rolling_mask_2 = cq_build_mask;
1530000a7b8eSTimothy McDaniel }
1531000a7b8eSTimothy McDaniel 
15323a6d0c04STimothy McDaniel static int
15333a6d0c04STimothy McDaniel dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
15343a6d0c04STimothy McDaniel 			struct dlb2_eventdev_port *ev_port,
15353a6d0c04STimothy McDaniel 			uint32_t dequeue_depth,
15363a6d0c04STimothy McDaniel 			uint32_t enqueue_depth)
15373a6d0c04STimothy McDaniel {
15383a6d0c04STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
15393a6d0c04STimothy McDaniel 	struct dlb2_create_ldb_port_args cfg = { {0} };
15403a6d0c04STimothy McDaniel 	int ret;
15413a6d0c04STimothy McDaniel 	struct dlb2_port *qm_port = NULL;
15423a6d0c04STimothy McDaniel 	char mz_name[RTE_MEMZONE_NAMESIZE];
15433a6d0c04STimothy McDaniel 	uint32_t qm_port_id;
154462e45206STimothy McDaniel 	uint16_t ldb_credit_high_watermark = 0;
154562e45206STimothy McDaniel 	uint16_t dir_credit_high_watermark = 0;
154662e45206STimothy McDaniel 	uint16_t credit_high_watermark = 0;
15473a6d0c04STimothy McDaniel 
15483a6d0c04STimothy McDaniel 	if (handle == NULL)
15493a6d0c04STimothy McDaniel 		return -EINVAL;
15503a6d0c04STimothy McDaniel 
15513a6d0c04STimothy McDaniel 	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1552f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d",
15533a6d0c04STimothy McDaniel 			     DLB2_MIN_CQ_DEPTH);
15543a6d0c04STimothy McDaniel 		return -EINVAL;
15553a6d0c04STimothy McDaniel 	}
15563a6d0c04STimothy McDaniel 
15573a6d0c04STimothy McDaniel 	rte_spinlock_lock(&handle->resource_lock);
15583a6d0c04STimothy McDaniel 
15593a6d0c04STimothy McDaniel 	/* We round up to the next power of 2 if necessary */
15603a6d0c04STimothy McDaniel 	cfg.cq_depth = rte_align32pow2(dequeue_depth);
15613a6d0c04STimothy McDaniel 	cfg.cq_depth_threshold = 1;
15623a6d0c04STimothy McDaniel 
15639c9e7232SAbdullah Sevincer 	cfg.cq_history_list_size = cfg.cq_depth;
15643a6d0c04STimothy McDaniel 
1565bec8901bSTimothy McDaniel 	cfg.cos_id = ev_port->cos_id;
1566bec8901bSTimothy McDaniel 	cfg.cos_strict = 0; /* best effort */
15673a6d0c04STimothy McDaniel 
15683a6d0c04STimothy McDaniel 	/* User controls the LDB high watermark via enqueue depth. The DIR high
15693a6d0c04STimothy McDaniel 	 * watermark is equal, unless the directed credit pool is too small.
15703a6d0c04STimothy McDaniel 	 */
157162e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2) {
15723a6d0c04STimothy McDaniel 		ldb_credit_high_watermark = enqueue_depth;
157362e45206STimothy McDaniel 		/* If there are no directed ports, the kernel driver will
157462e45206STimothy McDaniel 		 * ignore this port's directed credit settings. Don't use
157562e45206STimothy McDaniel 		 * enqueue_depth if it would require more directed credits
157662e45206STimothy McDaniel 		 * than are available.
15773a6d0c04STimothy McDaniel 		 */
15783a6d0c04STimothy McDaniel 		dir_credit_high_watermark =
15793a6d0c04STimothy McDaniel 			RTE_MIN(enqueue_depth,
15803a6d0c04STimothy McDaniel 				handle->cfg.num_dir_credits / dlb2->num_ports);
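		/*
		 * Example (illustrative values): enqueue_depth == 128 with
		 * 2048 directed credits shared by 64 ports gives
		 * dir_credit_high_watermark == RTE_MIN(128, 2048 / 64) == 32.
		 */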
158162e45206STimothy McDaniel 	} else
158262e45206STimothy McDaniel 		credit_high_watermark = enqueue_depth;
15833a6d0c04STimothy McDaniel 
15843a6d0c04STimothy McDaniel 	/* Per QM values */
15853a6d0c04STimothy McDaniel 
15863a6d0c04STimothy McDaniel 	ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode);
15873a6d0c04STimothy McDaniel 	if (ret < 0) {
1588f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)",
15893a6d0c04STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
15903a6d0c04STimothy McDaniel 		goto error_exit;
15913a6d0c04STimothy McDaniel 	}
15923a6d0c04STimothy McDaniel 
15933a6d0c04STimothy McDaniel 	qm_port_id = cfg.response.id;
15943a6d0c04STimothy McDaniel 
1595e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<",
15963a6d0c04STimothy McDaniel 		     ev_port->id, qm_port_id);
15973a6d0c04STimothy McDaniel 
15983a6d0c04STimothy McDaniel 	qm_port = &ev_port->qm_port;
15993a6d0c04STimothy McDaniel 	qm_port->ev_port = ev_port; /* back ptr */
16003a6d0c04STimothy McDaniel 	qm_port->dlb2 = dlb2; /* back ptr */
16013a6d0c04STimothy McDaniel 	/*
16023a6d0c04STimothy McDaniel 	 * Allocate and init local qe struct(s).
16033a6d0c04STimothy McDaniel 	 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
16043a6d0c04STimothy McDaniel 	 */
16053a6d0c04STimothy McDaniel 
16063a6d0c04STimothy McDaniel 	snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
16073a6d0c04STimothy McDaniel 		 ev_port->id);
16083a6d0c04STimothy McDaniel 
16093a6d0c04STimothy McDaniel 	ret = dlb2_init_qe_mem(qm_port, mz_name);
16103a6d0c04STimothy McDaniel 	if (ret < 0) {
1611f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
16123a6d0c04STimothy McDaniel 		goto error_exit;
16133a6d0c04STimothy McDaniel 	}
16143a6d0c04STimothy McDaniel 
16153a6d0c04STimothy McDaniel 	qm_port->id = qm_port_id;
16163a6d0c04STimothy McDaniel 
1617b977a659SAbdullah Sevincer 	if (dlb2->version == DLB2_HW_V2_5 && (dlb2->enable_cq_weight == true)) {
1618b977a659SAbdullah Sevincer 		struct dlb2_enable_cq_weight_args cq_weight_args = { {0} };
1619ffa46fc4STimothy McDaniel 		cq_weight_args.port_id = qm_port->id;
1620b977a659SAbdullah Sevincer 		cq_weight_args.limit = dequeue_depth;
1621ffa46fc4STimothy McDaniel 		ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args);
1622b977a659SAbdullah Sevincer 
1623ffa46fc4STimothy McDaniel 		if (ret < 0) {
1624f665790aSDavid Marchand 			DLB2_LOG_ERR("dlb2: dlb2_iface_enable_cq_weight error, ret=%d (driver status: %s)",
1625ffa46fc4STimothy McDaniel 					ret,
1626ffa46fc4STimothy McDaniel 					dlb2_error_strings[cq_weight_args.response.status]);
1627ffa46fc4STimothy McDaniel 			goto error_exit;
1628ffa46fc4STimothy McDaniel 		}
1629ffa46fc4STimothy McDaniel 	}
163062e45206STimothy McDaniel 
16313a6d0c04STimothy McDaniel 	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
16323a6d0c04STimothy McDaniel 	 * the effective depth is smaller.
16333a6d0c04STimothy McDaniel 	 */
16343a6d0c04STimothy McDaniel 	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
16353a6d0c04STimothy McDaniel 	qm_port->cq_idx = 0;
16363a6d0c04STimothy McDaniel 	qm_port->cq_idx_unmasked = 0;
16373a6d0c04STimothy McDaniel 
16383a6d0c04STimothy McDaniel 	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
16393a6d0c04STimothy McDaniel 		qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
16403a6d0c04STimothy McDaniel 	else
16413a6d0c04STimothy McDaniel 		qm_port->cq_depth_mask = qm_port->cq_depth - 1;
16423a6d0c04STimothy McDaniel 
16433d4e27fdSDavid Marchand 	qm_port->gen_bit_shift = rte_popcount32(qm_port->cq_depth_mask);
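	/*
	 * Illustration: a port created with dequeue_depth == 32 in sparse CQ
	 * poll mode gets cq_depth == 32, cq_depth_mask == 127 and
	 * gen_bit_shift == 7; in dense mode the mask is 31 and the shift is 5.
	 * A dequeue_depth below 8 still uses an 8-entry CQ, with the extra
	 * credits withheld as described above.
	 */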
16443a6d0c04STimothy McDaniel 	/* starting value of gen bit - it toggles at wrap time */
16453a6d0c04STimothy McDaniel 	qm_port->gen_bit = 1;
16463a6d0c04STimothy McDaniel 
1647000a7b8eSTimothy McDaniel 	dlb2_hw_cq_bitmask_init(qm_port, qm_port->cq_depth);
1648000a7b8eSTimothy McDaniel 
16493a6d0c04STimothy McDaniel 	qm_port->int_armed = false;
16503a6d0c04STimothy McDaniel 
16513a6d0c04STimothy McDaniel 	/* Save off for later use in info and lookup APIs. */
16523a6d0c04STimothy McDaniel 	qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
16533a6d0c04STimothy McDaniel 
16543a6d0c04STimothy McDaniel 	qm_port->dequeue_depth = dequeue_depth;
1655c667583dSTimothy McDaniel 	qm_port->token_pop_thresh = dequeue_depth;
165607d55c41STimothy McDaniel 
165707d55c41STimothy McDaniel 	/* The default enqueue functions do not include delayed-pop support for
165807d55c41STimothy McDaniel 	 * performance reasons.
165907d55c41STimothy McDaniel 	 */
166007d55c41STimothy McDaniel 	if (qm_port->token_pop_mode == DELAYED_POP) {
166107d55c41STimothy McDaniel 		dlb2->event_dev->enqueue_burst =
166207d55c41STimothy McDaniel 			dlb2_event_enqueue_burst_delayed;
166307d55c41STimothy McDaniel 		dlb2->event_dev->enqueue_new_burst =
166407d55c41STimothy McDaniel 			dlb2_event_enqueue_new_burst_delayed;
166507d55c41STimothy McDaniel 		dlb2->event_dev->enqueue_forward_burst =
166607d55c41STimothy McDaniel 			dlb2_event_enqueue_forward_burst_delayed;
166707d55c41STimothy McDaniel 	}
166807d55c41STimothy McDaniel 
16693a6d0c04STimothy McDaniel 	qm_port->owed_tokens = 0;
16703a6d0c04STimothy McDaniel 	qm_port->issued_releases = 0;
16713a6d0c04STimothy McDaniel 
16723a6d0c04STimothy McDaniel 	/* Save config message too. */
16733a6d0c04STimothy McDaniel 	rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
16743a6d0c04STimothy McDaniel 
16753a6d0c04STimothy McDaniel 	/* update state */
16763a6d0c04STimothy McDaniel 	qm_port->state = PORT_STARTED; /* enabled at create time */
16773a6d0c04STimothy McDaniel 	qm_port->config_state = DLB2_CONFIGURED;
16783a6d0c04STimothy McDaniel 
167962e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2) {
16803a6d0c04STimothy McDaniel 		qm_port->dir_credits = dir_credit_high_watermark;
16813a6d0c04STimothy McDaniel 		qm_port->ldb_credits = ldb_credit_high_watermark;
16823a6d0c04STimothy McDaniel 		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
16833a6d0c04STimothy McDaniel 		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
16843a6d0c04STimothy McDaniel 
1685e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d",
16863a6d0c04STimothy McDaniel 			     qm_port_id,
16873a6d0c04STimothy McDaniel 			     dequeue_depth,
16883a6d0c04STimothy McDaniel 			     qm_port->ldb_credits,
16893a6d0c04STimothy McDaniel 			     qm_port->dir_credits);
169062e45206STimothy McDaniel 	} else {
169162e45206STimothy McDaniel 		qm_port->credits = credit_high_watermark;
169262e45206STimothy McDaniel 		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
16933a6d0c04STimothy McDaniel 
1694e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: created ldb port %d, depth = %d, credits=%d",
169562e45206STimothy McDaniel 			     qm_port_id,
169662e45206STimothy McDaniel 			     dequeue_depth,
169762e45206STimothy McDaniel 			     qm_port->credits);
169862e45206STimothy McDaniel 	}
1699000a7b8eSTimothy McDaniel 
1700000a7b8eSTimothy McDaniel 	qm_port->use_scalar = false;
1701000a7b8eSTimothy McDaniel 
1702000a7b8eSTimothy McDaniel #if (!defined RTE_ARCH_X86_64)
1703000a7b8eSTimothy McDaniel 	qm_port->use_scalar = true;
1704000a7b8eSTimothy McDaniel #else
1705000a7b8eSTimothy McDaniel 	if ((qm_port->cq_depth > 64) ||
1706000a7b8eSTimothy McDaniel 	    (!rte_is_power_of_2(qm_port->cq_depth)) ||
1707fcc5489cSTimothy McDaniel 	    (dlb2->vector_opts_enabled == false))
1708000a7b8eSTimothy McDaniel 		qm_port->use_scalar = true;
1709000a7b8eSTimothy McDaniel #endif
1710000a7b8eSTimothy McDaniel 
17113a6d0c04STimothy McDaniel 	rte_spinlock_unlock(&handle->resource_lock);
17123a6d0c04STimothy McDaniel 
17133a6d0c04STimothy McDaniel 	return 0;
17143a6d0c04STimothy McDaniel 
17153a6d0c04STimothy McDaniel error_exit:
17163a6d0c04STimothy McDaniel 
17173a6d0c04STimothy McDaniel 	if (qm_port)
17183a6d0c04STimothy McDaniel 		dlb2_free_qe_mem(qm_port);
17193a6d0c04STimothy McDaniel 
17203a6d0c04STimothy McDaniel 	rte_spinlock_unlock(&handle->resource_lock);
17213a6d0c04STimothy McDaniel 
1722f665790aSDavid Marchand 	DLB2_LOG_ERR("dlb2: create ldb port failed!");
17233a6d0c04STimothy McDaniel 
17243a6d0c04STimothy McDaniel 	return ret;
17253a6d0c04STimothy McDaniel }
17263a6d0c04STimothy McDaniel 
17273a6d0c04STimothy McDaniel static void
17283a6d0c04STimothy McDaniel dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
17293a6d0c04STimothy McDaniel 			struct dlb2_eventdev_port *ev_port)
17303a6d0c04STimothy McDaniel {
17313a6d0c04STimothy McDaniel 	struct dlb2_eventdev_queue *ev_queue;
17323a6d0c04STimothy McDaniel 	int i;
17333a6d0c04STimothy McDaniel 
17343a6d0c04STimothy McDaniel 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
17353a6d0c04STimothy McDaniel 		if (!ev_port->link[i].valid)
17363a6d0c04STimothy McDaniel 			continue;
17373a6d0c04STimothy McDaniel 
17383a6d0c04STimothy McDaniel 		ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
17393a6d0c04STimothy McDaniel 
17403a6d0c04STimothy McDaniel 		ev_port->link[i].valid = false;
17413a6d0c04STimothy McDaniel 		ev_port->num_links--;
17423a6d0c04STimothy McDaniel 		ev_queue->num_links--;
17433a6d0c04STimothy McDaniel 	}
17443a6d0c04STimothy McDaniel }
17453a6d0c04STimothy McDaniel 
17463a6d0c04STimothy McDaniel static int
17473a6d0c04STimothy McDaniel dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
17483a6d0c04STimothy McDaniel 			struct dlb2_eventdev_port *ev_port,
17493a6d0c04STimothy McDaniel 			uint32_t dequeue_depth,
17503a6d0c04STimothy McDaniel 			uint32_t enqueue_depth)
17513a6d0c04STimothy McDaniel {
17523a6d0c04STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
17533a6d0c04STimothy McDaniel 	struct dlb2_create_dir_port_args cfg = { {0} };
17543a6d0c04STimothy McDaniel 	int ret;
17553a6d0c04STimothy McDaniel 	struct dlb2_port *qm_port = NULL;
17563a6d0c04STimothy McDaniel 	char mz_name[RTE_MEMZONE_NAMESIZE];
17573a6d0c04STimothy McDaniel 	uint32_t qm_port_id;
175862e45206STimothy McDaniel 	uint16_t ldb_credit_high_watermark = 0;
175962e45206STimothy McDaniel 	uint16_t dir_credit_high_watermark = 0;
176062e45206STimothy McDaniel 	uint16_t credit_high_watermark = 0;
17613a6d0c04STimothy McDaniel 
17623a6d0c04STimothy McDaniel 	if (dlb2 == NULL || handle == NULL)
17633a6d0c04STimothy McDaniel 		return -EINVAL;
17643a6d0c04STimothy McDaniel 
17653a6d0c04STimothy McDaniel 	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1766f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d",
17673a6d0c04STimothy McDaniel 			     DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
17683a6d0c04STimothy McDaniel 		return -EINVAL;
17693a6d0c04STimothy McDaniel 	}
17703a6d0c04STimothy McDaniel 
17713a6d0c04STimothy McDaniel 	if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1772f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d",
17733a6d0c04STimothy McDaniel 			     DLB2_MIN_ENQUEUE_DEPTH);
17743a6d0c04STimothy McDaniel 		return -EINVAL;
17753a6d0c04STimothy McDaniel 	}
17763a6d0c04STimothy McDaniel 
17773a6d0c04STimothy McDaniel 	rte_spinlock_lock(&handle->resource_lock);
17783a6d0c04STimothy McDaniel 
17793a6d0c04STimothy McDaniel 	/* Directed queues are configured at link time. */
17803a6d0c04STimothy McDaniel 	cfg.queue_id = -1;
17813a6d0c04STimothy McDaniel 
17823a6d0c04STimothy McDaniel 	/* We round up to the next power of 2 if necessary */
17833a6d0c04STimothy McDaniel 	cfg.cq_depth = rte_align32pow2(dequeue_depth);
17843a6d0c04STimothy McDaniel 	cfg.cq_depth_threshold = 1;
17853a6d0c04STimothy McDaniel 
17863a6d0c04STimothy McDaniel 	/* User controls the LDB high watermark via enqueue depth. The DIR high
17873a6d0c04STimothy McDaniel 	 * watermark is equal, unless the directed credit pool is too small.
17883a6d0c04STimothy McDaniel 	 */
178962e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2) {
17903a6d0c04STimothy McDaniel 		ldb_credit_high_watermark = enqueue_depth;
179162e45206STimothy McDaniel 		/* Don't use enqueue_depth if it would require more directed
179262e45206STimothy McDaniel 		 * credits than are available.
17933a6d0c04STimothy McDaniel 		 */
17943a6d0c04STimothy McDaniel 		dir_credit_high_watermark =
17953a6d0c04STimothy McDaniel 			RTE_MIN(enqueue_depth,
17963a6d0c04STimothy McDaniel 				handle->cfg.num_dir_credits / dlb2->num_ports);
179762e45206STimothy McDaniel 	} else
179862e45206STimothy McDaniel 		credit_high_watermark = enqueue_depth;
17993a6d0c04STimothy McDaniel 
18008d1d9070SAbdullah Sevincer 	if (ev_port->conf.event_port_cfg & RTE_EVENT_PORT_CFG_HINT_PRODUCER)
18018d1d9070SAbdullah Sevincer 		cfg.is_producer = 1;
18028d1d9070SAbdullah Sevincer 
18033a6d0c04STimothy McDaniel 	/* Per QM values */
18043a6d0c04STimothy McDaniel 
18053a6d0c04STimothy McDaniel 	ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode);
18063a6d0c04STimothy McDaniel 	if (ret < 0) {
1807f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)",
18083a6d0c04STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
18093a6d0c04STimothy McDaniel 		goto error_exit;
18103a6d0c04STimothy McDaniel 	}
18113a6d0c04STimothy McDaniel 
18123a6d0c04STimothy McDaniel 	qm_port_id = cfg.response.id;
18133a6d0c04STimothy McDaniel 
1814e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<",
18153a6d0c04STimothy McDaniel 		     ev_port->id, qm_port_id);
18163a6d0c04STimothy McDaniel 
18173a6d0c04STimothy McDaniel 	qm_port = &ev_port->qm_port;
18183a6d0c04STimothy McDaniel 	qm_port->ev_port = ev_port; /* back ptr */
18193a6d0c04STimothy McDaniel 	qm_port->dlb2 = dlb2;  /* back ptr */
18203a6d0c04STimothy McDaniel 
18213a6d0c04STimothy McDaniel 	/*
18223a6d0c04STimothy McDaniel 	 * Init local qe struct(s).
18233a6d0c04STimothy McDaniel 	 * Note: MOVDIR64 requires the enqueue QE to be aligned
18243a6d0c04STimothy McDaniel 	 */
18253a6d0c04STimothy McDaniel 
18263a6d0c04STimothy McDaniel 	snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
18273a6d0c04STimothy McDaniel 		 ev_port->id);
18283a6d0c04STimothy McDaniel 
18293a6d0c04STimothy McDaniel 	ret = dlb2_init_qe_mem(qm_port, mz_name);
18303a6d0c04STimothy McDaniel 
18313a6d0c04STimothy McDaniel 	if (ret < 0) {
1832f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
18333a6d0c04STimothy McDaniel 		goto error_exit;
18343a6d0c04STimothy McDaniel 	}
18353a6d0c04STimothy McDaniel 
18363a6d0c04STimothy McDaniel 	qm_port->id = qm_port_id;
18373a6d0c04STimothy McDaniel 
183862e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2) {
18393a6d0c04STimothy McDaniel 		qm_port->cached_ldb_credits = 0;
18403a6d0c04STimothy McDaniel 		qm_port->cached_dir_credits = 0;
184162e45206STimothy McDaniel 	} else
184262e45206STimothy McDaniel 		qm_port->cached_credits = 0;
184362e45206STimothy McDaniel 
18443a6d0c04STimothy McDaniel 	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
18453a6d0c04STimothy McDaniel 	 * the effective depth is smaller.
18463a6d0c04STimothy McDaniel 	 */
18473a6d0c04STimothy McDaniel 	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
18483a6d0c04STimothy McDaniel 	qm_port->cq_idx = 0;
18493a6d0c04STimothy McDaniel 	qm_port->cq_idx_unmasked = 0;
18503a6d0c04STimothy McDaniel 
18513a6d0c04STimothy McDaniel 	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
18523a6d0c04STimothy McDaniel 		qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
18533a6d0c04STimothy McDaniel 	else
18543a6d0c04STimothy McDaniel 		qm_port->cq_depth_mask = cfg.cq_depth - 1;
18553a6d0c04STimothy McDaniel 
18563d4e27fdSDavid Marchand 	qm_port->gen_bit_shift = rte_popcount32(qm_port->cq_depth_mask);
18573a6d0c04STimothy McDaniel 	/* starting value of gen bit - it toggles at wrap time */
18583a6d0c04STimothy McDaniel 	qm_port->gen_bit = 1;
1859000a7b8eSTimothy McDaniel 	dlb2_hw_cq_bitmask_init(qm_port, qm_port->cq_depth);
18603a6d0c04STimothy McDaniel 
18613a6d0c04STimothy McDaniel 	qm_port->int_armed = false;
18623a6d0c04STimothy McDaniel 
18633a6d0c04STimothy McDaniel 	/* Save off for later use in info and lookup APIs. */
18643a6d0c04STimothy McDaniel 	qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
18653a6d0c04STimothy McDaniel 
18663a6d0c04STimothy McDaniel 	qm_port->dequeue_depth = dequeue_depth;
18673a6d0c04STimothy McDaniel 
1868c667583dSTimothy McDaniel 	/* Directed ports are auto-pop by default. */
1869c667583dSTimothy McDaniel 	qm_port->token_pop_mode = AUTO_POP;
18703a6d0c04STimothy McDaniel 	qm_port->owed_tokens = 0;
18713a6d0c04STimothy McDaniel 	qm_port->issued_releases = 0;
18723a6d0c04STimothy McDaniel 
18733a6d0c04STimothy McDaniel 	/* Save config message too. */
18743a6d0c04STimothy McDaniel 	rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
18753a6d0c04STimothy McDaniel 
18763a6d0c04STimothy McDaniel 	/* update state */
18773a6d0c04STimothy McDaniel 	qm_port->state = PORT_STARTED; /* enabled at create time */
18783a6d0c04STimothy McDaniel 	qm_port->config_state = DLB2_CONFIGURED;
18793a6d0c04STimothy McDaniel 
188062e45206STimothy McDaniel 	if (dlb2->version == DLB2_HW_V2) {
18813a6d0c04STimothy McDaniel 		qm_port->dir_credits = dir_credit_high_watermark;
18823a6d0c04STimothy McDaniel 		qm_port->ldb_credits = ldb_credit_high_watermark;
18833a6d0c04STimothy McDaniel 		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
18843a6d0c04STimothy McDaniel 		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
18853a6d0c04STimothy McDaniel 
1886e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d",
18873a6d0c04STimothy McDaniel 			     qm_port_id,
18883a6d0c04STimothy McDaniel 			     dequeue_depth,
18893a6d0c04STimothy McDaniel 			     dir_credit_high_watermark,
18903a6d0c04STimothy McDaniel 			     ldb_credit_high_watermark);
189162e45206STimothy McDaniel 	} else {
189262e45206STimothy McDaniel 		qm_port->credits = credit_high_watermark;
189362e45206STimothy McDaniel 		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
18943a6d0c04STimothy McDaniel 
1895e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: created dir port %d, depth = %d cr=%d",
189662e45206STimothy McDaniel 			     qm_port_id,
189762e45206STimothy McDaniel 			     dequeue_depth,
189862e45206STimothy McDaniel 			     credit_high_watermark);
189962e45206STimothy McDaniel 	}
1900000a7b8eSTimothy McDaniel 
1901000a7b8eSTimothy McDaniel #if (!defined RTE_ARCH_X86_64)
1902000a7b8eSTimothy McDaniel 	qm_port->use_scalar = true;
1903000a7b8eSTimothy McDaniel #else
1904000a7b8eSTimothy McDaniel 	if ((qm_port->cq_depth > 64) ||
1905000a7b8eSTimothy McDaniel 	    (!rte_is_power_of_2(qm_port->cq_depth)) ||
1906fcc5489cSTimothy McDaniel 	    (dlb2->vector_opts_enabled == false))
1907000a7b8eSTimothy McDaniel 		qm_port->use_scalar = true;
1908000a7b8eSTimothy McDaniel #endif
1909000a7b8eSTimothy McDaniel 
19103a6d0c04STimothy McDaniel 	rte_spinlock_unlock(&handle->resource_lock);
19113a6d0c04STimothy McDaniel 
19123a6d0c04STimothy McDaniel 	return 0;
19133a6d0c04STimothy McDaniel 
19143a6d0c04STimothy McDaniel error_exit:
19153a6d0c04STimothy McDaniel 
19163a6d0c04STimothy McDaniel 	if (qm_port)
19173a6d0c04STimothy McDaniel 		dlb2_free_qe_mem(qm_port);
19183a6d0c04STimothy McDaniel 
19193a6d0c04STimothy McDaniel 	rte_spinlock_unlock(&handle->resource_lock);
19203a6d0c04STimothy McDaniel 
1921f665790aSDavid Marchand 	DLB2_LOG_ERR("dlb2: create dir port failed!");
19223a6d0c04STimothy McDaniel 
19233a6d0c04STimothy McDaniel 	return ret;
19243a6d0c04STimothy McDaniel }
19253a6d0c04STimothy McDaniel 
19263a6d0c04STimothy McDaniel static int
19273a6d0c04STimothy McDaniel dlb2_eventdev_port_setup(struct rte_eventdev *dev,
19283a6d0c04STimothy McDaniel 			 uint8_t ev_port_id,
19293a6d0c04STimothy McDaniel 			 const struct rte_event_port_conf *port_conf)
19303a6d0c04STimothy McDaniel {
19313a6d0c04STimothy McDaniel 	struct dlb2_eventdev *dlb2;
19323a6d0c04STimothy McDaniel 	struct dlb2_eventdev_port *ev_port;
1933e4869c0bSPravin Pathak 	uint32_t hw_credit_quanta, sw_credit_quanta;
1934bdd0b609SAbdullah Sevincer 	int ret;
19353a6d0c04STimothy McDaniel 
19363a6d0c04STimothy McDaniel 	if (dev == NULL || port_conf == NULL) {
1937f665790aSDavid Marchand 		DLB2_LOG_ERR("Null parameter");
19383a6d0c04STimothy McDaniel 		return -EINVAL;
19393a6d0c04STimothy McDaniel 	}
19403a6d0c04STimothy McDaniel 
19413a6d0c04STimothy McDaniel 	dlb2 = dlb2_pmd_priv(dev);
19423a6d0c04STimothy McDaniel 
1943b66a418dSTimothy McDaniel 	if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
19443a6d0c04STimothy McDaniel 		return -EINVAL;
19453a6d0c04STimothy McDaniel 
19463a6d0c04STimothy McDaniel 	if (port_conf->dequeue_depth >
19473a6d0c04STimothy McDaniel 		evdev_dlb2_default_info.max_event_port_dequeue_depth ||
19483a6d0c04STimothy McDaniel 	    port_conf->enqueue_depth >
19493a6d0c04STimothy McDaniel 		evdev_dlb2_default_info.max_event_port_enqueue_depth)
19503a6d0c04STimothy McDaniel 		return -EINVAL;
19513a6d0c04STimothy McDaniel 
19526e2e98d6SAbdullah Sevincer 	if ((port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ) &&
19536e2e98d6SAbdullah Sevincer 	    port_conf->dequeue_depth > DLB2_MAX_CQ_DEPTH_REORDER) {
19546e2e98d6SAbdullah Sevincer 		DLB2_LOG_ERR("evport %d: Max dequeue depth supported with reorder is %d",
19556e2e98d6SAbdullah Sevincer 			     ev_port_id, DLB2_MAX_CQ_DEPTH_REORDER);
19566e2e98d6SAbdullah Sevincer 		return -EINVAL;
19576e2e98d6SAbdullah Sevincer 	}
19586e2e98d6SAbdullah Sevincer 
19593a6d0c04STimothy McDaniel 	ev_port = &dlb2->ev_ports[ev_port_id];
19603a6d0c04STimothy McDaniel 	/* configured? */
19613a6d0c04STimothy McDaniel 	if (ev_port->setup_done) {
1962f665790aSDavid Marchand 		DLB2_LOG_ERR("evport %d is already configured", ev_port_id);
19633a6d0c04STimothy McDaniel 		return -EINVAL;
19643a6d0c04STimothy McDaniel 	}
19653a6d0c04STimothy McDaniel 
196687ecdd9eSTimothy McDaniel 	/* Default for worker ports */
196787ecdd9eSTimothy McDaniel 	sw_credit_quanta = dlb2->sw_credit_quanta;
196887ecdd9eSTimothy McDaniel 	hw_credit_quanta = dlb2->hw_credit_quanta;
196987ecdd9eSTimothy McDaniel 
1970d8c16de5SAbdullah Sevincer 	ev_port->qm_port.is_producer = false;
19713a6d0c04STimothy McDaniel 	ev_port->qm_port.is_directed = port_conf->event_port_cfg &
19723a6d0c04STimothy McDaniel 		RTE_EVENT_PORT_CFG_SINGLE_LINK;
19733a6d0c04STimothy McDaniel 
197487ecdd9eSTimothy McDaniel 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_HINT_PRODUCER) {
197587ecdd9eSTimothy McDaniel 		/* Producer type ports. Mostly enqueue */
197687ecdd9eSTimothy McDaniel 		sw_credit_quanta = DLB2_SW_CREDIT_P_QUANTA_DEFAULT;
197787ecdd9eSTimothy McDaniel 		hw_credit_quanta = DLB2_SW_CREDIT_P_BATCH_SZ;
1978d8c16de5SAbdullah Sevincer 		ev_port->qm_port.is_producer = true;
197987ecdd9eSTimothy McDaniel 	}
198087ecdd9eSTimothy McDaniel 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_HINT_CONSUMER) {
198187ecdd9eSTimothy McDaniel 		/* Consumer type ports. Mostly dequeue */
198287ecdd9eSTimothy McDaniel 		sw_credit_quanta = DLB2_SW_CREDIT_C_QUANTA_DEFAULT;
198387ecdd9eSTimothy McDaniel 		hw_credit_quanta = DLB2_SW_CREDIT_C_BATCH_SZ;
198487ecdd9eSTimothy McDaniel 	}
198587ecdd9eSTimothy McDaniel 	ev_port->credit_update_quanta = sw_credit_quanta;
198687ecdd9eSTimothy McDaniel 	ev_port->qm_port.hw_credit_quanta = hw_credit_quanta;
198787ecdd9eSTimothy McDaniel 
1988d8c16de5SAbdullah Sevincer 	/*
1989d8c16de5SAbdullah Sevincer 	 * Validate credit config before creating port
1990d8c16de5SAbdullah Sevincer 	 */
1991d8c16de5SAbdullah Sevincer 
199287ecdd9eSTimothy McDaniel 	if (port_conf->enqueue_depth > sw_credit_quanta ||
199387ecdd9eSTimothy McDaniel 	    port_conf->enqueue_depth > hw_credit_quanta) {
1994f665790aSDavid Marchand 		DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d",
199587ecdd9eSTimothy McDaniel 			     port_conf->enqueue_depth,
199687ecdd9eSTimothy McDaniel 			     sw_credit_quanta,
199787ecdd9eSTimothy McDaniel 			     hw_credit_quanta);
199887ecdd9eSTimothy McDaniel 		return -EINVAL;
199987ecdd9eSTimothy McDaniel 	}
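	/*
	 * Example (hypothetical values): enqueue_depth == 64 with an
	 * effective sw_credit_quanta or hw_credit_quanta of 32 fails the
	 * check above and port setup returns -EINVAL.
	 */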
20006e2e98d6SAbdullah Sevincer 	ev_port->enq_retries = port_conf->enqueue_depth;
20016e2e98d6SAbdullah Sevincer 
20026e2e98d6SAbdullah Sevincer 	ev_port->qm_port.reorder_id = 0;
20036e2e98d6SAbdullah Sevincer 	ev_port->qm_port.reorder_en = port_conf->event_port_cfg &
20046e2e98d6SAbdullah Sevincer 				      RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ;
200587ecdd9eSTimothy McDaniel 
20068d1d9070SAbdullah Sevincer 	/* Save off port config for reconfig */
20078d1d9070SAbdullah Sevincer 	ev_port->conf = *port_conf;
20088d1d9070SAbdullah Sevincer 
201087ecdd9eSTimothy McDaniel 	/*
201187ecdd9eSTimothy McDaniel 	 * Create port
201287ecdd9eSTimothy McDaniel 	 */
201387ecdd9eSTimothy McDaniel 
20143a6d0c04STimothy McDaniel 	if (!ev_port->qm_port.is_directed) {
20153a6d0c04STimothy McDaniel 		ret = dlb2_hw_create_ldb_port(dlb2,
20163a6d0c04STimothy McDaniel 					      ev_port,
20173a6d0c04STimothy McDaniel 					      port_conf->dequeue_depth,
20183a6d0c04STimothy McDaniel 					      port_conf->enqueue_depth);
20193a6d0c04STimothy McDaniel 		if (ret < 0) {
2020f665790aSDavid Marchand 			DLB2_LOG_ERR("Failed to create the LB port, ev_port_id=%d",
20213a6d0c04STimothy McDaniel 				     ev_port_id);
20223a6d0c04STimothy McDaniel 
20233a6d0c04STimothy McDaniel 			return ret;
20243a6d0c04STimothy McDaniel 		}
20253a6d0c04STimothy McDaniel 	} else {
20263a6d0c04STimothy McDaniel 		ret = dlb2_hw_create_dir_port(dlb2,
20273a6d0c04STimothy McDaniel 					      ev_port,
20283a6d0c04STimothy McDaniel 					      port_conf->dequeue_depth,
20293a6d0c04STimothy McDaniel 					      port_conf->enqueue_depth);
20303a6d0c04STimothy McDaniel 		if (ret < 0) {
2031f665790aSDavid Marchand 			DLB2_LOG_ERR("Failed to create the DIR port");
20323a6d0c04STimothy McDaniel 			return ret;
20333a6d0c04STimothy McDaniel 		}
20343a6d0c04STimothy McDaniel 	}
20353a6d0c04STimothy McDaniel 
20363a6d0c04STimothy McDaniel 	ev_port->id = ev_port_id;
20373a6d0c04STimothy McDaniel 	ev_port->enq_configured = true;
20383a6d0c04STimothy McDaniel 	ev_port->setup_done = true;
20393a6d0c04STimothy McDaniel 	ev_port->inflight_max = port_conf->new_event_threshold;
20403a6d0c04STimothy McDaniel 	ev_port->implicit_release = !(port_conf->event_port_cfg &
20413a6d0c04STimothy McDaniel 		  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
20423a6d0c04STimothy McDaniel 	ev_port->outstanding_releases = 0;
20433a6d0c04STimothy McDaniel 	ev_port->inflight_credits = 0;
20443a6d0c04STimothy McDaniel 	ev_port->dlb2 = dlb2; /* reverse link */
20453a6d0c04STimothy McDaniel 
2046bdd0b609SAbdullah Sevincer 	/* Default for worker ports */
2047bdd0b609SAbdullah Sevincer 	sw_credit_quanta = dlb2->sw_credit_quanta;
2048bdd0b609SAbdullah Sevincer 	hw_credit_quanta = dlb2->hw_credit_quanta;
2049bdd0b609SAbdullah Sevincer 
2050bdd0b609SAbdullah Sevincer 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_HINT_PRODUCER) {
2051bdd0b609SAbdullah Sevincer 		/* Producer type ports. Mostly enqueue */
2052bdd0b609SAbdullah Sevincer 		sw_credit_quanta = DLB2_SW_CREDIT_P_QUANTA_DEFAULT;
2053bdd0b609SAbdullah Sevincer 		hw_credit_quanta = DLB2_SW_CREDIT_P_BATCH_SZ;
2054bdd0b609SAbdullah Sevincer 	}
2055bdd0b609SAbdullah Sevincer 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_HINT_CONSUMER) {
2056bdd0b609SAbdullah Sevincer 		/* Consumer type ports. Mostly dequeue */
2057bdd0b609SAbdullah Sevincer 		sw_credit_quanta = DLB2_SW_CREDIT_C_QUANTA_DEFAULT;
2058bdd0b609SAbdullah Sevincer 		hw_credit_quanta = DLB2_SW_CREDIT_C_BATCH_SZ;
2059bdd0b609SAbdullah Sevincer 	}
2060bdd0b609SAbdullah Sevincer 	ev_port->credit_update_quanta = sw_credit_quanta;
2061bdd0b609SAbdullah Sevincer 	ev_port->qm_port.hw_credit_quanta = hw_credit_quanta;
2062bdd0b609SAbdullah Sevincer 
2063bdd0b609SAbdullah Sevincer 
20643a6d0c04STimothy McDaniel 	/* Tear down pre-existing port->queue links */
20653a6d0c04STimothy McDaniel 	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
20663a6d0c04STimothy McDaniel 		dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
20673a6d0c04STimothy McDaniel 
20683a6d0c04STimothy McDaniel 	dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
20693a6d0c04STimothy McDaniel 
2070d0ce87e4STimothy McDaniel 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL) &&
2071d0ce87e4STimothy McDaniel 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
2072d0ce87e4STimothy McDaniel 		ev_port->qm_port.use_avx512 = true;
2073d0ce87e4STimothy McDaniel 	else
2074d0ce87e4STimothy McDaniel 		ev_port->qm_port.use_avx512 = false;
2075d0ce87e4STimothy McDaniel 
20763a6d0c04STimothy McDaniel 	return 0;
20773a6d0c04STimothy McDaniel }
20783a6d0c04STimothy McDaniel 
20791acd82c0STimothy McDaniel static int16_t
20801acd82c0STimothy McDaniel dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
20811acd82c0STimothy McDaniel 			    uint32_t qm_port_id,
20821acd82c0STimothy McDaniel 			    uint16_t qm_qid,
20831acd82c0STimothy McDaniel 			    uint8_t priority)
20841acd82c0STimothy McDaniel {
20851acd82c0STimothy McDaniel 	struct dlb2_map_qid_args cfg;
20861acd82c0STimothy McDaniel 	int32_t ret;
20871acd82c0STimothy McDaniel 
20881acd82c0STimothy McDaniel 	if (handle == NULL)
20891acd82c0STimothy McDaniel 		return -EINVAL;
20901acd82c0STimothy McDaniel 
20911acd82c0STimothy McDaniel 	/* Build message */
20921acd82c0STimothy McDaniel 	cfg.port_id = qm_port_id;
20931acd82c0STimothy McDaniel 	cfg.qid = qm_qid;
20941acd82c0STimothy McDaniel 	cfg.priority = EV_TO_DLB2_PRIO(priority);
20951acd82c0STimothy McDaniel 
20961acd82c0STimothy McDaniel 	ret = dlb2_iface_map_qid(handle, &cfg);
20971acd82c0STimothy McDaniel 	if (ret < 0) {
2098f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)",
20991acd82c0STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
2100f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d",
21011acd82c0STimothy McDaniel 			     handle->domain_id, cfg.port_id,
21021acd82c0STimothy McDaniel 			     cfg.qid,
21031acd82c0STimothy McDaniel 			     cfg.priority);
21041acd82c0STimothy McDaniel 	} else {
2105e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: mapped queue %d to qm_port %d",
21061acd82c0STimothy McDaniel 			     qm_qid, qm_port_id);
21071acd82c0STimothy McDaniel 	}
21081acd82c0STimothy McDaniel 
21091acd82c0STimothy McDaniel 	return ret;
21101acd82c0STimothy McDaniel }
21111acd82c0STimothy McDaniel 
21121acd82c0STimothy McDaniel static int
21131acd82c0STimothy McDaniel dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
21141acd82c0STimothy McDaniel 			  struct dlb2_eventdev_port *ev_port,
21151acd82c0STimothy McDaniel 			  struct dlb2_eventdev_queue *ev_queue,
21161acd82c0STimothy McDaniel 			  uint8_t priority)
21171acd82c0STimothy McDaniel {
21181acd82c0STimothy McDaniel 	int first_avail = -1;
21191acd82c0STimothy McDaniel 	int ret, i;
21201acd82c0STimothy McDaniel 
21211acd82c0STimothy McDaniel 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
21221acd82c0STimothy McDaniel 		if (ev_port->link[i].valid) {
21231acd82c0STimothy McDaniel 			if (ev_port->link[i].queue_id == ev_queue->id &&
21241acd82c0STimothy McDaniel 			    ev_port->link[i].priority == priority) {
21251acd82c0STimothy McDaniel 				if (ev_port->link[i].mapped)
21261acd82c0STimothy McDaniel 					return 0; /* already mapped */
21271acd82c0STimothy McDaniel 				first_avail = i;
21281acd82c0STimothy McDaniel 			}
21291acd82c0STimothy McDaniel 		} else if (first_avail == -1)
21301acd82c0STimothy McDaniel 			first_avail = i;
21311acd82c0STimothy McDaniel 	}
21321acd82c0STimothy McDaniel 	if (first_avail == -1) {
2133f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.",
21341acd82c0STimothy McDaniel 			     ev_port->qm_port.id);
21351acd82c0STimothy McDaniel 		return -EINVAL;
21361acd82c0STimothy McDaniel 	}
21371acd82c0STimothy McDaniel 
21381acd82c0STimothy McDaniel 	ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
21391acd82c0STimothy McDaniel 					  ev_port->qm_port.id,
21401acd82c0STimothy McDaniel 					  ev_queue->qm_queue.id,
21411acd82c0STimothy McDaniel 					  priority);
21421acd82c0STimothy McDaniel 
21431acd82c0STimothy McDaniel 	if (!ret)
21441acd82c0STimothy McDaniel 		ev_port->link[first_avail].mapped = true;
21451acd82c0STimothy McDaniel 
21461acd82c0STimothy McDaniel 	return ret;
21471acd82c0STimothy McDaniel }
21481acd82c0STimothy McDaniel 
21491acd82c0STimothy McDaniel static int32_t
21501acd82c0STimothy McDaniel dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
21511acd82c0STimothy McDaniel 			 struct dlb2_eventdev_queue *ev_queue,
21521acd82c0STimothy McDaniel 			 int32_t qm_port_id)
21531acd82c0STimothy McDaniel {
21541acd82c0STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
21551acd82c0STimothy McDaniel 	struct dlb2_create_dir_queue_args cfg;
21561acd82c0STimothy McDaniel 	int32_t ret;
21571acd82c0STimothy McDaniel 
21581acd82c0STimothy McDaniel 	/* The directed port is always configured before its queue */
21591acd82c0STimothy McDaniel 	cfg.port_id = qm_port_id;
21601acd82c0STimothy McDaniel 
21611acd82c0STimothy McDaniel 	if (ev_queue->depth_threshold == 0) {
21627be66a3bSTimothy McDaniel 		cfg.depth_threshold = dlb2->default_depth_thresh;
21637be66a3bSTimothy McDaniel 		ev_queue->depth_threshold =
21647be66a3bSTimothy McDaniel 			dlb2->default_depth_thresh;
21651acd82c0STimothy McDaniel 	} else
21661acd82c0STimothy McDaniel 		cfg.depth_threshold = ev_queue->depth_threshold;
21671acd82c0STimothy McDaniel 
21681acd82c0STimothy McDaniel 	ret = dlb2_iface_dir_queue_create(handle, &cfg);
21691acd82c0STimothy McDaniel 	if (ret < 0) {
2170f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)",
21711acd82c0STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
21721acd82c0STimothy McDaniel 		return -EINVAL;
21731acd82c0STimothy McDaniel 	}
21741acd82c0STimothy McDaniel 
21751acd82c0STimothy McDaniel 	return cfg.response.id;
21761acd82c0STimothy McDaniel }
21771acd82c0STimothy McDaniel 
21781acd82c0STimothy McDaniel static int
21791acd82c0STimothy McDaniel dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
21801acd82c0STimothy McDaniel 			      struct dlb2_eventdev_queue *ev_queue,
21811acd82c0STimothy McDaniel 			      struct dlb2_eventdev_port *ev_port)
21821acd82c0STimothy McDaniel {
21831acd82c0STimothy McDaniel 	int32_t qm_qid;
21841acd82c0STimothy McDaniel 
21851acd82c0STimothy McDaniel 	qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
21861acd82c0STimothy McDaniel 
21871acd82c0STimothy McDaniel 	if (qm_qid < 0) {
2188f665790aSDavid Marchand 		DLB2_LOG_ERR("Failed to create the DIR queue");
21891acd82c0STimothy McDaniel 		return qm_qid;
21901acd82c0STimothy McDaniel 	}
21911acd82c0STimothy McDaniel 
21921acd82c0STimothy McDaniel 	dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
21931acd82c0STimothy McDaniel 
21941acd82c0STimothy McDaniel 	ev_queue->qm_queue.id = qm_qid;
21951acd82c0STimothy McDaniel 
21961acd82c0STimothy McDaniel 	return 0;
21971acd82c0STimothy McDaniel }
21981acd82c0STimothy McDaniel 
21991acd82c0STimothy McDaniel static int
22001acd82c0STimothy McDaniel dlb2_do_port_link(struct rte_eventdev *dev,
22011acd82c0STimothy McDaniel 		  struct dlb2_eventdev_queue *ev_queue,
22021acd82c0STimothy McDaniel 		  struct dlb2_eventdev_port *ev_port,
22031acd82c0STimothy McDaniel 		  uint8_t prio)
22041acd82c0STimothy McDaniel {
22051acd82c0STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
22061acd82c0STimothy McDaniel 	int err;
22071acd82c0STimothy McDaniel 
22081acd82c0STimothy McDaniel 	/* Don't link until start time. */
22091acd82c0STimothy McDaniel 	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
22101acd82c0STimothy McDaniel 		return 0;
22111acd82c0STimothy McDaniel 
22121acd82c0STimothy McDaniel 	if (ev_queue->qm_queue.is_directed)
22131acd82c0STimothy McDaniel 		err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
22141acd82c0STimothy McDaniel 	else
22151acd82c0STimothy McDaniel 		err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
22161acd82c0STimothy McDaniel 
22171acd82c0STimothy McDaniel 	if (err) {
2218f665790aSDavid Marchand 		DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d",
22191acd82c0STimothy McDaniel 			     ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
22201acd82c0STimothy McDaniel 			     ev_queue->id, ev_port->id);
22211acd82c0STimothy McDaniel 
22221acd82c0STimothy McDaniel 		rte_errno = err;
22231acd82c0STimothy McDaniel 		return -1;
22241acd82c0STimothy McDaniel 	}
22251acd82c0STimothy McDaniel 
22261acd82c0STimothy McDaniel 	return 0;
22271acd82c0STimothy McDaniel }
22281acd82c0STimothy McDaniel 
22291acd82c0STimothy McDaniel static int
22301acd82c0STimothy McDaniel dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
22311acd82c0STimothy McDaniel 			uint8_t queue_id,
22321acd82c0STimothy McDaniel 			bool link_exists,
22331acd82c0STimothy McDaniel 			int index)
22341acd82c0STimothy McDaniel {
22351acd82c0STimothy McDaniel 	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
22361acd82c0STimothy McDaniel 	struct dlb2_eventdev_queue *ev_queue;
22371acd82c0STimothy McDaniel 	bool port_is_dir, queue_is_dir;
22381acd82c0STimothy McDaniel 
22391acd82c0STimothy McDaniel 	if (queue_id > dlb2->num_queues) {
22401acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
22411acd82c0STimothy McDaniel 		return -1;
22421acd82c0STimothy McDaniel 	}
22431acd82c0STimothy McDaniel 
22441acd82c0STimothy McDaniel 	ev_queue = &dlb2->ev_queues[queue_id];
22451acd82c0STimothy McDaniel 
22461acd82c0STimothy McDaniel 	if (!ev_queue->setup_done &&
22471acd82c0STimothy McDaniel 	    ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
22481acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
22491acd82c0STimothy McDaniel 		return -1;
22501acd82c0STimothy McDaniel 	}
22511acd82c0STimothy McDaniel 
22521acd82c0STimothy McDaniel 	port_is_dir = ev_port->qm_port.is_directed;
22531acd82c0STimothy McDaniel 	queue_is_dir = ev_queue->qm_queue.is_directed;
22541acd82c0STimothy McDaniel 
22551acd82c0STimothy McDaniel 	if (port_is_dir != queue_is_dir) {
2256f665790aSDavid Marchand 		DLB2_LOG_ERR("%s queue %u can't link to %s port %u",
22571acd82c0STimothy McDaniel 			     queue_is_dir ? "DIR" : "LDB", ev_queue->id,
22581acd82c0STimothy McDaniel 			     port_is_dir ? "DIR" : "LDB", ev_port->id);
22591acd82c0STimothy McDaniel 
22601acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
22611acd82c0STimothy McDaniel 		return -1;
22621acd82c0STimothy McDaniel 	}
22631acd82c0STimothy McDaniel 
22641acd82c0STimothy McDaniel 	/* Check if there is space for the requested link */
22651acd82c0STimothy McDaniel 	if (!link_exists && index == -1) {
2266f665790aSDavid Marchand 		DLB2_LOG_ERR("no space for new link");
22671acd82c0STimothy McDaniel 		rte_errno = -ENOSPC;
22681acd82c0STimothy McDaniel 		return -1;
22691acd82c0STimothy McDaniel 	}
22701acd82c0STimothy McDaniel 
22711acd82c0STimothy McDaniel 	/* Check if the directed port is already linked */
22721acd82c0STimothy McDaniel 	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
22731acd82c0STimothy McDaniel 	    !link_exists) {
2274f665790aSDavid Marchand 		DLB2_LOG_ERR("Can't link DIR port %d to >1 queues",
22751acd82c0STimothy McDaniel 			     ev_port->id);
22761acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
22771acd82c0STimothy McDaniel 		return -1;
22781acd82c0STimothy McDaniel 	}
22791acd82c0STimothy McDaniel 
22801acd82c0STimothy McDaniel 	/* Check if the directed queue is already linked */
22811acd82c0STimothy McDaniel 	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
22821acd82c0STimothy McDaniel 	    !link_exists) {
2283f665790aSDavid Marchand 		DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports",
22841acd82c0STimothy McDaniel 			     ev_queue->id);
22851acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
22861acd82c0STimothy McDaniel 		return -1;
22871acd82c0STimothy McDaniel 	}
22881acd82c0STimothy McDaniel 
22891acd82c0STimothy McDaniel 	return 0;
22901acd82c0STimothy McDaniel }
22911acd82c0STimothy McDaniel 
22921acd82c0STimothy McDaniel static int
22931acd82c0STimothy McDaniel dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
22941acd82c0STimothy McDaniel 			const uint8_t queues[], const uint8_t priorities[],
22951acd82c0STimothy McDaniel 			uint16_t nb_links)
22961acd82c0STimothy McDaniel 
22971acd82c0STimothy McDaniel {
22981acd82c0STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
22991acd82c0STimothy McDaniel 	struct dlb2_eventdev *dlb2;
23001acd82c0STimothy McDaniel 	int i, j;
23011acd82c0STimothy McDaniel 
23021acd82c0STimothy McDaniel 	RTE_SET_USED(dev);
23031acd82c0STimothy McDaniel 
23041acd82c0STimothy McDaniel 	if (ev_port == NULL) {
2305f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: evport not setup");
23061acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
23071acd82c0STimothy McDaniel 		return 0;
23081acd82c0STimothy McDaniel 	}
23091acd82c0STimothy McDaniel 
23101acd82c0STimothy McDaniel 	if (!ev_port->setup_done &&
23111acd82c0STimothy McDaniel 	    ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
2312f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: evport not setup");
23131acd82c0STimothy McDaniel 		rte_errno = -EINVAL;
23141acd82c0STimothy McDaniel 		return 0;
23151acd82c0STimothy McDaniel 	}
23161acd82c0STimothy McDaniel 
23171acd82c0STimothy McDaniel 	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
23181acd82c0STimothy McDaniel 	 * queues pointer.
23191acd82c0STimothy McDaniel 	 */
23201acd82c0STimothy McDaniel 	if (nb_links == 0) {
2321e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: nb_links is 0");
23221acd82c0STimothy McDaniel 		return 0; /* Ignore and return success */
23231acd82c0STimothy McDaniel 	}
23241acd82c0STimothy McDaniel 
23251acd82c0STimothy McDaniel 	dlb2 = ev_port->dlb2;
23261acd82c0STimothy McDaniel 
2327e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("Linking %u queues to %s port %d",
23281acd82c0STimothy McDaniel 		     nb_links,
23291acd82c0STimothy McDaniel 		     ev_port->qm_port.is_directed ? "DIR" : "LDB",
23301acd82c0STimothy McDaniel 		     ev_port->id);
23311acd82c0STimothy McDaniel 
23321acd82c0STimothy McDaniel 	for (i = 0; i < nb_links; i++) {
23331acd82c0STimothy McDaniel 		struct dlb2_eventdev_queue *ev_queue;
23341acd82c0STimothy McDaniel 		uint8_t queue_id, prio;
23351acd82c0STimothy McDaniel 		bool found = false;
23361acd82c0STimothy McDaniel 		int index = -1;
23371acd82c0STimothy McDaniel 
23381acd82c0STimothy McDaniel 		queue_id = queues[i];
23391acd82c0STimothy McDaniel 		prio = priorities[i];
23401acd82c0STimothy McDaniel 
23411acd82c0STimothy McDaniel 		/* Check if the link already exists. */
23421acd82c0STimothy McDaniel 		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
23431acd82c0STimothy McDaniel 			if (ev_port->link[j].valid) {
23441acd82c0STimothy McDaniel 				if (ev_port->link[j].queue_id == queue_id) {
23451acd82c0STimothy McDaniel 					found = true;
23461acd82c0STimothy McDaniel 					index = j;
23471acd82c0STimothy McDaniel 					break;
23481acd82c0STimothy McDaniel 				}
23491acd82c0STimothy McDaniel 			} else if (index == -1) {
23501acd82c0STimothy McDaniel 				index = j;
23511acd82c0STimothy McDaniel 			}
23521acd82c0STimothy McDaniel 
23531acd82c0STimothy McDaniel 		/* could not link */
23541acd82c0STimothy McDaniel 		if (index == -1)
23551acd82c0STimothy McDaniel 			break;
23561acd82c0STimothy McDaniel 
23571acd82c0STimothy McDaniel 		/* Check if already linked at the requested priority */
23581acd82c0STimothy McDaniel 		if (found && ev_port->link[j].priority == prio)
23591acd82c0STimothy McDaniel 			continue;
23601acd82c0STimothy McDaniel 
23611acd82c0STimothy McDaniel 		if (dlb2_validate_port_link(ev_port, queue_id, found, index))
23621acd82c0STimothy McDaniel 			break; /* return index of offending queue */
23631acd82c0STimothy McDaniel 
23641acd82c0STimothy McDaniel 		ev_queue = &dlb2->ev_queues[queue_id];
23651acd82c0STimothy McDaniel 
23661acd82c0STimothy McDaniel 		if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
23671acd82c0STimothy McDaniel 			break; /* return index of offending queue */
23681acd82c0STimothy McDaniel 
23691acd82c0STimothy McDaniel 		ev_queue->num_links++;
23701acd82c0STimothy McDaniel 
23711acd82c0STimothy McDaniel 		ev_port->link[index].queue_id = queue_id;
23721acd82c0STimothy McDaniel 		ev_port->link[index].priority = prio;
23731acd82c0STimothy McDaniel 		ev_port->link[index].valid = true;
23741acd82c0STimothy McDaniel 		/* If the entry already existed, this was only a priority change */
23751acd82c0STimothy McDaniel 		if (!found)
23761acd82c0STimothy McDaniel 			ev_port->num_links++;
23771acd82c0STimothy McDaniel 	}
23781acd82c0STimothy McDaniel 	return i;
23791acd82c0STimothy McDaniel }
23801acd82c0STimothy McDaniel 
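/*
 * Illustrative usage, not part of the driver: a minimal sketch of how an
 * application reaches dlb2_eventdev_port_link() through the public eventdev
 * API. On partial failure rte_event_port_link() returns the number of links
 * established and sets rte_errno. The dev_id/port_id values and the
 * two-queue layout are assumptions made for this example only.
 *
 *	#include <rte_eventdev.h>
 *
 *	static int
 *	link_two_queues(uint8_t dev_id, uint8_t port_id)
 *	{
 *		const uint8_t queues[2] = {0, 1};
 *		const uint8_t prios[2] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
 *					  RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 *		if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *			return -1;
 *		return 0;
 *	}
 *
 * A load-balanced port may hold up to DLB2_MAX_NUM_QIDS_PER_LDB_CQ links,
 * while a directed port accepts exactly one.
 */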
2381a29248b5STimothy McDaniel static int16_t
2382a29248b5STimothy McDaniel dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
2383a29248b5STimothy McDaniel 				uint32_t qm_port_id,
2384a29248b5STimothy McDaniel 				uint16_t qm_qid)
2385a29248b5STimothy McDaniel {
2386a29248b5STimothy McDaniel 	struct dlb2_unmap_qid_args cfg;
2387a29248b5STimothy McDaniel 	int32_t ret;
2388a29248b5STimothy McDaniel 
2389a29248b5STimothy McDaniel 	if (handle == NULL)
2390a29248b5STimothy McDaniel 		return -EINVAL;
2391a29248b5STimothy McDaniel 
2392a29248b5STimothy McDaniel 	cfg.port_id = qm_port_id;
2393a29248b5STimothy McDaniel 	cfg.qid = qm_qid;
2394a29248b5STimothy McDaniel 
2395a29248b5STimothy McDaniel 	ret = dlb2_iface_unmap_qid(handle, &cfg);
2396a29248b5STimothy McDaniel 	if (ret < 0)
2397f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)",
2398a29248b5STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
2399a29248b5STimothy McDaniel 
2400a29248b5STimothy McDaniel 	return ret;
2401a29248b5STimothy McDaniel }
2402a29248b5STimothy McDaniel 
2403a29248b5STimothy McDaniel static int
2404a29248b5STimothy McDaniel dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
2405a29248b5STimothy McDaniel 			    struct dlb2_eventdev_port *ev_port,
2406a29248b5STimothy McDaniel 			    struct dlb2_eventdev_queue *ev_queue)
2407a29248b5STimothy McDaniel {
2408a29248b5STimothy McDaniel 	int ret, i;
2409a29248b5STimothy McDaniel 
2410a29248b5STimothy McDaniel 	/* Don't unlink until start time. */
2411a29248b5STimothy McDaniel 	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
2412a29248b5STimothy McDaniel 		return 0;
2413a29248b5STimothy McDaniel 
2414a29248b5STimothy McDaniel 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2415a29248b5STimothy McDaniel 		if (ev_port->link[i].valid &&
2416a29248b5STimothy McDaniel 		    ev_port->link[i].queue_id == ev_queue->id)
2417a29248b5STimothy McDaniel 			break; /* found */
2418a29248b5STimothy McDaniel 	}
2419a29248b5STimothy McDaniel 
2420a29248b5STimothy McDaniel 	/* This is expected with the eventdev API, which blindly
24217be78d02SJosh Soref 	 * attempts to unmap all queues.
2422a29248b5STimothy McDaniel 	 */
2423a29248b5STimothy McDaniel 	if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
2424e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.",
2425a29248b5STimothy McDaniel 			     ev_queue->qm_queue.id,
2426a29248b5STimothy McDaniel 			     ev_port->qm_port.id);
2427a29248b5STimothy McDaniel 		return 0;
2428a29248b5STimothy McDaniel 	}
2429a29248b5STimothy McDaniel 
2430a29248b5STimothy McDaniel 	ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
2431a29248b5STimothy McDaniel 					      ev_port->qm_port.id,
2432a29248b5STimothy McDaniel 					      ev_queue->qm_queue.id);
2433a29248b5STimothy McDaniel 	if (!ret)
2434a29248b5STimothy McDaniel 		ev_port->link[i].mapped = false;
2435a29248b5STimothy McDaniel 
2436a29248b5STimothy McDaniel 	return ret;
2437a29248b5STimothy McDaniel }
2438a29248b5STimothy McDaniel 
2439a29248b5STimothy McDaniel static int
2440a29248b5STimothy McDaniel dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
2441a29248b5STimothy McDaniel 			  uint8_t queues[], uint16_t nb_unlinks)
2442a29248b5STimothy McDaniel {
2443a29248b5STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
2444a29248b5STimothy McDaniel 	struct dlb2_eventdev *dlb2;
2445a29248b5STimothy McDaniel 	int i;
2446a29248b5STimothy McDaniel 
2447a29248b5STimothy McDaniel 	RTE_SET_USED(dev);
2448a29248b5STimothy McDaniel 
2449a29248b5STimothy McDaniel 	if (!ev_port->setup_done) {
2450f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: evport %d is not configured",
2451a29248b5STimothy McDaniel 			     ev_port->id);
2452a29248b5STimothy McDaniel 		rte_errno = -EINVAL;
2453a29248b5STimothy McDaniel 		return 0;
2454a29248b5STimothy McDaniel 	}
2455a29248b5STimothy McDaniel 
2456a29248b5STimothy McDaniel 	if (queues == NULL || nb_unlinks == 0) {
2457e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: queues is NULL or nb_unlinks is 0");
2458a29248b5STimothy McDaniel 		return 0; /* Ignore and return success */
2459a29248b5STimothy McDaniel 	}
2460a29248b5STimothy McDaniel 
2461a29248b5STimothy McDaniel 	if (ev_port->qm_port.is_directed) {
2462e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: ignore unlink from dir port %d",
2463a29248b5STimothy McDaniel 			     ev_port->id);
2464a29248b5STimothy McDaniel 		rte_errno = 0;
2465a29248b5STimothy McDaniel 		return nb_unlinks; /* as if success */
2466a29248b5STimothy McDaniel 	}
2467a29248b5STimothy McDaniel 
2468a29248b5STimothy McDaniel 	dlb2 = ev_port->dlb2;
2469a29248b5STimothy McDaniel 
2470a29248b5STimothy McDaniel 	for (i = 0; i < nb_unlinks; i++) {
2471a29248b5STimothy McDaniel 		struct dlb2_eventdev_queue *ev_queue;
2472a29248b5STimothy McDaniel 		int ret, j;
2473a29248b5STimothy McDaniel 
2474a29248b5STimothy McDaniel 		if (queues[i] >= dlb2->num_queues) {
2475f665790aSDavid Marchand 			DLB2_LOG_ERR("dlb2: invalid queue id %d", queues[i]);
2476a29248b5STimothy McDaniel 			rte_errno = -EINVAL;
2477a29248b5STimothy McDaniel 			return i; /* return index of offending queue */
2478a29248b5STimothy McDaniel 		}
2479a29248b5STimothy McDaniel 
2480a29248b5STimothy McDaniel 		ev_queue = &dlb2->ev_queues[queues[i]];
2481a29248b5STimothy McDaniel 
2482a29248b5STimothy McDaniel 		/* Does a link exist? */
2483a29248b5STimothy McDaniel 		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
2484a29248b5STimothy McDaniel 			if (ev_port->link[j].queue_id == queues[i] &&
2485a29248b5STimothy McDaniel 			    ev_port->link[j].valid)
2486a29248b5STimothy McDaniel 				break;
2487a29248b5STimothy McDaniel 
2488a29248b5STimothy McDaniel 		if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
2489a29248b5STimothy McDaniel 			continue;
2490a29248b5STimothy McDaniel 
2491a29248b5STimothy McDaniel 		ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
2492a29248b5STimothy McDaniel 		if (ret) {
2493f665790aSDavid Marchand 			DLB2_LOG_ERR("unlink err=%d for port %d queue %d",
2494a29248b5STimothy McDaniel 				     ret, ev_port->id, queues[i]);
2495a29248b5STimothy McDaniel 			rte_errno = -ENOENT;
2496a29248b5STimothy McDaniel 			return i; /* return index of offending queue */
2497a29248b5STimothy McDaniel 		}
2498a29248b5STimothy McDaniel 
2499a29248b5STimothy McDaniel 		ev_port->link[j].valid = false;
2500a29248b5STimothy McDaniel 		ev_port->num_links--;
2501a29248b5STimothy McDaniel 		ev_queue->num_links--;
2502a29248b5STimothy McDaniel 	}
2503a29248b5STimothy McDaniel 
2504a29248b5STimothy McDaniel 	return nb_unlinks;
2505a29248b5STimothy McDaniel }
2506a29248b5STimothy McDaniel 
2507a29248b5STimothy McDaniel static int
2508a29248b5STimothy McDaniel dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
2509a29248b5STimothy McDaniel 				       void *event_port)
2510a29248b5STimothy McDaniel {
2511a29248b5STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
2512a29248b5STimothy McDaniel 	struct dlb2_eventdev *dlb2;
2513a29248b5STimothy McDaniel 	struct dlb2_hw_dev *handle;
2514a29248b5STimothy McDaniel 	struct dlb2_pending_port_unmaps_args cfg;
2515a29248b5STimothy McDaniel 	int ret;
2516a29248b5STimothy McDaniel 
2517a29248b5STimothy McDaniel 	RTE_SET_USED(dev);
2518a29248b5STimothy McDaniel 
2519a29248b5STimothy McDaniel 	if (!ev_port->setup_done) {
2520f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: evport %d is not configured",
2521a29248b5STimothy McDaniel 			     ev_port->id);
2522a29248b5STimothy McDaniel 		rte_errno = -EINVAL;
2523a29248b5STimothy McDaniel 		return 0;
2524a29248b5STimothy McDaniel 	}
2525a29248b5STimothy McDaniel 
2526a29248b5STimothy McDaniel 	cfg.port_id = ev_port->qm_port.id;
2527a29248b5STimothy McDaniel 	dlb2 = ev_port->dlb2;
2528a29248b5STimothy McDaniel 	handle = &dlb2->qm_instance;
2529a29248b5STimothy McDaniel 	ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
2530a29248b5STimothy McDaniel 
2531a29248b5STimothy McDaniel 	if (ret < 0) {
2532f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)",
2533a29248b5STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
2534a29248b5STimothy McDaniel 		return ret;
2535a29248b5STimothy McDaniel 	}
2536a29248b5STimothy McDaniel 
2537a29248b5STimothy McDaniel 	return cfg.response.id;
2538a29248b5STimothy McDaniel }
2539a29248b5STimothy McDaniel 
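/*
 * Illustrative usage, not part of the driver: unmapping a load-balanced
 * queue completes asynchronously in hardware, so an application normally
 * polls rte_event_port_unlinks_in_progress() after unlinking and before
 * tearing the port down. The dev_id/port_id/queue values are assumptions
 * made for this sketch only.
 *
 *	#include <rte_eventdev.h>
 *	#include <rte_pause.h>
 *
 *	static void
 *	unlink_and_drain(uint8_t dev_id, uint8_t port_id, uint8_t queue)
 *	{
 *		uint8_t queues[1] = {queue};
 *
 *		rte_event_port_unlink(dev_id, port_id, queues, 1);
 *		while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *			rte_pause();
 *	}
 */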
254059e1a966STimothy McDaniel static int
254159e1a966STimothy McDaniel dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
254259e1a966STimothy McDaniel {
254359e1a966STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
254459e1a966STimothy McDaniel 	int ret, i;
254559e1a966STimothy McDaniel 
254659e1a966STimothy McDaniel 	/* If an event queue or port was previously configured, but hasn't been
254759e1a966STimothy McDaniel 	 * reconfigured, reapply its original configuration.
254859e1a966STimothy McDaniel 	 */
254959e1a966STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
255059e1a966STimothy McDaniel 		struct dlb2_eventdev_queue *ev_queue;
255159e1a966STimothy McDaniel 
255259e1a966STimothy McDaniel 		ev_queue = &dlb2->ev_queues[i];
255359e1a966STimothy McDaniel 
255459e1a966STimothy McDaniel 		if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
255559e1a966STimothy McDaniel 			continue;
255659e1a966STimothy McDaniel 
255759e1a966STimothy McDaniel 		ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
255859e1a966STimothy McDaniel 		if (ret < 0) {
255959e1a966STimothy McDaniel 			DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d", i);
256059e1a966STimothy McDaniel 			return ret;
256159e1a966STimothy McDaniel 		}
256259e1a966STimothy McDaniel 	}
256359e1a966STimothy McDaniel 
256459e1a966STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
256559e1a966STimothy McDaniel 		struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
256659e1a966STimothy McDaniel 
256759e1a966STimothy McDaniel 		if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
256859e1a966STimothy McDaniel 			continue;
256959e1a966STimothy McDaniel 
257059e1a966STimothy McDaniel 		ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
257159e1a966STimothy McDaniel 		if (ret < 0) {
257259e1a966STimothy McDaniel 			DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d",
257359e1a966STimothy McDaniel 				     i);
257459e1a966STimothy McDaniel 			return ret;
257559e1a966STimothy McDaniel 		}
257659e1a966STimothy McDaniel 	}
257759e1a966STimothy McDaniel 
257859e1a966STimothy McDaniel 	return 0;
257959e1a966STimothy McDaniel }
258059e1a966STimothy McDaniel 
258159e1a966STimothy McDaniel static int
258259e1a966STimothy McDaniel dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
258359e1a966STimothy McDaniel {
258459e1a966STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
258559e1a966STimothy McDaniel 	int i;
258659e1a966STimothy McDaniel 
258759e1a966STimothy McDaniel 	/* Perform requested port->queue links */
258859e1a966STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
258959e1a966STimothy McDaniel 		struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
259059e1a966STimothy McDaniel 		int j;
259159e1a966STimothy McDaniel 
259259e1a966STimothy McDaniel 		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
259359e1a966STimothy McDaniel 			struct dlb2_eventdev_queue *ev_queue;
259459e1a966STimothy McDaniel 			uint8_t prio, queue_id;
259559e1a966STimothy McDaniel 
259659e1a966STimothy McDaniel 			if (!ev_port->link[j].valid)
259759e1a966STimothy McDaniel 				continue;
259859e1a966STimothy McDaniel 
259959e1a966STimothy McDaniel 			prio = ev_port->link[j].priority;
260059e1a966STimothy McDaniel 			queue_id = ev_port->link[j].queue_id;
260159e1a966STimothy McDaniel 
260259e1a966STimothy McDaniel 			if (dlb2_validate_port_link(ev_port, queue_id, true, j))
260359e1a966STimothy McDaniel 				return -EINVAL;
260459e1a966STimothy McDaniel 
260559e1a966STimothy McDaniel 			ev_queue = &dlb2->ev_queues[queue_id];
260659e1a966STimothy McDaniel 
260759e1a966STimothy McDaniel 			if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
260859e1a966STimothy McDaniel 				return -EINVAL;
260959e1a966STimothy McDaniel 		}
261059e1a966STimothy McDaniel 	}
261159e1a966STimothy McDaniel 
261259e1a966STimothy McDaniel 	return 0;
261359e1a966STimothy McDaniel }
261459e1a966STimothy McDaniel 
261559e1a966STimothy McDaniel static int
261659e1a966STimothy McDaniel dlb2_eventdev_start(struct rte_eventdev *dev)
261759e1a966STimothy McDaniel {
261859e1a966STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
261959e1a966STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
262059e1a966STimothy McDaniel 	struct dlb2_start_domain_args cfg;
262159e1a966STimothy McDaniel 	int ret, i;
262259e1a966STimothy McDaniel 
262359e1a966STimothy McDaniel 	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
262459e1a966STimothy McDaniel 	if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
2625f665790aSDavid Marchand 		DLB2_LOG_ERR("bad state %d for dev_start",
262659e1a966STimothy McDaniel 			     (int)dlb2->run_state);
262759e1a966STimothy McDaniel 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
262859e1a966STimothy McDaniel 		return -EINVAL;
262959e1a966STimothy McDaniel 	}
263059e1a966STimothy McDaniel 	dlb2->run_state = DLB2_RUN_STATE_STARTING;
263159e1a966STimothy McDaniel 	rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
263259e1a966STimothy McDaniel 
263359e1a966STimothy McDaniel 	/* If the device was configured more than once, some event ports and/or
263459e1a966STimothy McDaniel 	 * queues may need to be reconfigured.
263559e1a966STimothy McDaniel 	 */
263659e1a966STimothy McDaniel 	ret = dlb2_eventdev_reapply_configuration(dev);
263759e1a966STimothy McDaniel 	if (ret)
263859e1a966STimothy McDaniel 		return ret;
263959e1a966STimothy McDaniel 
264059e1a966STimothy McDaniel 	/* The DLB PMD delays port links until the device is started. */
264159e1a966STimothy McDaniel 	ret = dlb2_eventdev_apply_port_links(dev);
264259e1a966STimothy McDaniel 	if (ret)
264359e1a966STimothy McDaniel 		return ret;
264459e1a966STimothy McDaniel 
264559e1a966STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
264659e1a966STimothy McDaniel 		if (!dlb2->ev_ports[i].setup_done) {
264759e1a966STimothy McDaniel 			DLB2_LOG_ERR("dlb2: port %d not setup", i);
264859e1a966STimothy McDaniel 			return -ESTALE;
264959e1a966STimothy McDaniel 		}
265059e1a966STimothy McDaniel 	}
265159e1a966STimothy McDaniel 
265259e1a966STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
265359e1a966STimothy McDaniel 		if (dlb2->ev_queues[i].num_links == 0) {
265459e1a966STimothy McDaniel 			DLB2_LOG_ERR("dlb2: queue %d is not linked", i);
265559e1a966STimothy McDaniel 			return -ENOLINK;
265659e1a966STimothy McDaniel 		}
265759e1a966STimothy McDaniel 	}
265859e1a966STimothy McDaniel 
265959e1a966STimothy McDaniel 	ret = dlb2_iface_sched_domain_start(handle, &cfg);
266059e1a966STimothy McDaniel 	if (ret < 0) {
2661f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)",
266259e1a966STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
266359e1a966STimothy McDaniel 		return ret;
266459e1a966STimothy McDaniel 	}
266559e1a966STimothy McDaniel 
266659e1a966STimothy McDaniel 	dlb2->run_state = DLB2_RUN_STATE_STARTED;
2667e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("dlb2: sched_domain_start completed OK");
266859e1a966STimothy McDaniel 
266959e1a966STimothy McDaniel 	return 0;
267059e1a966STimothy McDaniel }
267159e1a966STimothy McDaniel 
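/*
 * Illustrative bring-up order, not part of the driver: because this PMD
 * defers the actual queue->port maps until device start (see
 * dlb2_eventdev_apply_port_links() above), every configured queue must be
 * linked to at least one port before rte_event_dev_start() is called, or
 * start fails with -ENOLINK. A minimal sketch, assuming a single queue and
 * port on a device already configured with rte_event_dev_configure():
 *
 *	#include <rte_eventdev.h>
 *
 *	static int
 *	bring_up(uint8_t dev_id, const struct rte_event_queue_conf *qconf,
 *		 const struct rte_event_port_conf *pconf)
 *	{
 *		if (rte_event_queue_setup(dev_id, 0, qconf) < 0)
 *			return -1;
 *		if (rte_event_port_setup(dev_id, 0, pconf) < 0)
 *			return -1;
 *		if (rte_event_port_link(dev_id, 0, NULL, NULL, 0) != 1)
 *			return -1;
 *		return rte_event_dev_start(dev_id);
 *	}
 *
 * Passing NULL queues to rte_event_port_link() links the port to every
 * configured queue at normal priority (one link here, since only queue 0
 * exists in this sketch); rte_event_dev_start() lands in
 * dlb2_eventdev_start() above.
 */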
2672f7cc194bSTimothy McDaniel static inline uint32_t
2673f7cc194bSTimothy McDaniel dlb2_port_credits_get(struct dlb2_port *qm_port,
2674f7cc194bSTimothy McDaniel 		      enum dlb2_hw_queue_types type)
2675f7cc194bSTimothy McDaniel {
2676f7cc194bSTimothy McDaniel 	uint32_t credits = *qm_port->credit_pool[type];
2677e4869c0bSPravin Pathak 	/* By default hw_credit_quanta is DLB2_SW_CREDIT_BATCH_SZ */
2678e4869c0bSPravin Pathak 	uint32_t batch_size = qm_port->hw_credit_quanta;
2679f7cc194bSTimothy McDaniel 
2680f7cc194bSTimothy McDaniel 	if (unlikely(credits < batch_size))
2681f7cc194bSTimothy McDaniel 		batch_size = credits;
2682f7cc194bSTimothy McDaniel 
2683f7cc194bSTimothy McDaniel 	if (likely(credits &&
2684e12a0166STyler Retzlaff 		   rte_atomic_compare_exchange_strong_explicit(
2685f7cc194bSTimothy McDaniel 			qm_port->credit_pool[type],
2686e12a0166STyler Retzlaff 			&credits, credits - batch_size,
2687e12a0166STyler Retzlaff 			rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
2688f7cc194bSTimothy McDaniel 		return batch_size;
2689f7cc194bSTimothy McDaniel 	else
2690f7cc194bSTimothy McDaniel 		return 0;
2691f7cc194bSTimothy McDaniel }
2692f7cc194bSTimothy McDaniel 
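/*
 * Worked example of the batching above (values are examples only): with a
 * hw_credit_quanta of 32, a pool holding 100 credits yields a 32-credit
 * claim; a pool holding only 12 yields a 12-credit claim; an empty pool, or
 * a lost compare-exchange race against another port, yields 0 and the caller
 * treats the enqueue as out of hardware credits.
 */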
2693f7cc194bSTimothy McDaniel static inline void
2694f7cc194bSTimothy McDaniel dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
2695f7cc194bSTimothy McDaniel 			  struct dlb2_eventdev_port *ev_port)
2696f7cc194bSTimothy McDaniel {
2697f7cc194bSTimothy McDaniel 	uint16_t quanta = ev_port->credit_update_quanta;
2698f7cc194bSTimothy McDaniel 
2699f7cc194bSTimothy McDaniel 	if (ev_port->inflight_credits >= quanta * 2) {
2700f7cc194bSTimothy McDaniel 		/* Replenish credits, saving one quanta for enqueues */
2701f7cc194bSTimothy McDaniel 		uint16_t val = ev_port->inflight_credits - quanta;
2702f7cc194bSTimothy McDaniel 
2703e12a0166STyler Retzlaff 		rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
2704f7cc194bSTimothy McDaniel 		ev_port->inflight_credits -= val;
2705f7cc194bSTimothy McDaniel 	}
2706f7cc194bSTimothy McDaniel }
2707f7cc194bSTimothy McDaniel 
2708f7cc194bSTimothy McDaniel static inline int
2709f7cc194bSTimothy McDaniel dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
2710f7cc194bSTimothy McDaniel 			      struct dlb2_eventdev_port *ev_port)
2711f7cc194bSTimothy McDaniel {
2712e12a0166STyler Retzlaff 	uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
2713e12a0166STyler Retzlaff 						rte_memory_order_seq_cst);
2714f7cc194bSTimothy McDaniel 	const int num = 1;
2715f7cc194bSTimothy McDaniel 
2716f7cc194bSTimothy McDaniel 	if (unlikely(ev_port->inflight_max < sw_inflights)) {
2717f7cc194bSTimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2718f7cc194bSTimothy McDaniel 		rte_errno = -ENOSPC;
2719f7cc194bSTimothy McDaniel 		return 1;
2720f7cc194bSTimothy McDaniel 	}
2721f7cc194bSTimothy McDaniel 
2722f7cc194bSTimothy McDaniel 	if (ev_port->inflight_credits < num) {
2723f7cc194bSTimothy McDaniel 		/* check if event enqueue brings ev_port over max threshold */
2724f7cc194bSTimothy McDaniel 		uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2725f7cc194bSTimothy McDaniel 
2726f7cc194bSTimothy McDaniel 		if (sw_inflights + credit_update_quanta >
2727f7cc194bSTimothy McDaniel 				dlb2->new_event_limit) {
2728f7cc194bSTimothy McDaniel 			DLB2_INC_STAT(
2729f7cc194bSTimothy McDaniel 			ev_port->stats.traffic.tx_nospc_new_event_limit,
2730f7cc194bSTimothy McDaniel 			1);
2731f7cc194bSTimothy McDaniel 			rte_errno = -ENOSPC;
2732f7cc194bSTimothy McDaniel 			return 1;
2733f7cc194bSTimothy McDaniel 		}
2734f7cc194bSTimothy McDaniel 
2735e12a0166STyler Retzlaff 		rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
2736e12a0166STyler Retzlaff 				   rte_memory_order_seq_cst);
2737f7cc194bSTimothy McDaniel 		ev_port->inflight_credits += (credit_update_quanta);
2738f7cc194bSTimothy McDaniel 
2739f7cc194bSTimothy McDaniel 		if (ev_port->inflight_credits < num) {
2740f7cc194bSTimothy McDaniel 			DLB2_INC_STAT(
2741f7cc194bSTimothy McDaniel 			ev_port->stats.traffic.tx_nospc_inflight_credits,
2742f7cc194bSTimothy McDaniel 			1);
2743f7cc194bSTimothy McDaniel 			rte_errno = -ENOSPC;
2744f7cc194bSTimothy McDaniel 			return 1;
2745f7cc194bSTimothy McDaniel 		}
2746f7cc194bSTimothy McDaniel 	}
2747f7cc194bSTimothy McDaniel 
2748f7cc194bSTimothy McDaniel 	return 0;
2749f7cc194bSTimothy McDaniel }
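/*
 * Worked example of the software credit flow above (values are examples
 * only): with a credit_update_quanta of 32 and a new_event_limit of 1024, a
 * port holding no inflight credits pulls 32 credits from the device-wide
 * inflights counter in a single atomic add, and the NEW enqueue consumes one
 * of them; later enqueues on the same port spend the cached remainder
 * without touching the shared counter. If the add would push inflights past
 * new_event_limit, the enqueue fails with -ENOSPC instead.
 */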
2750f7cc194bSTimothy McDaniel 
2751f7cc194bSTimothy McDaniel static inline int
2752f7cc194bSTimothy McDaniel dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
2753f7cc194bSTimothy McDaniel {
2754f7cc194bSTimothy McDaniel 	if (unlikely(qm_port->cached_ldb_credits == 0)) {
2755f7cc194bSTimothy McDaniel 		qm_port->cached_ldb_credits =
2756f7cc194bSTimothy McDaniel 			dlb2_port_credits_get(qm_port,
2757f7cc194bSTimothy McDaniel 					      DLB2_LDB_QUEUE);
2758f7cc194bSTimothy McDaniel 		if (unlikely(qm_port->cached_ldb_credits == 0)) {
2759f7cc194bSTimothy McDaniel 			DLB2_INC_STAT(
2760f7cc194bSTimothy McDaniel 			qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2761f7cc194bSTimothy McDaniel 			1);
2762e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("ldb credits exhausted");
2763f7cc194bSTimothy McDaniel 			return 1; /* credits exhausted */
2764f7cc194bSTimothy McDaniel 		}
2765f7cc194bSTimothy McDaniel 	}
2766f7cc194bSTimothy McDaniel 
2767f7cc194bSTimothy McDaniel 	return 0;
2768f7cc194bSTimothy McDaniel }
2769f7cc194bSTimothy McDaniel 
2770f7cc194bSTimothy McDaniel static inline int
2771f7cc194bSTimothy McDaniel dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
2772f7cc194bSTimothy McDaniel {
2773f7cc194bSTimothy McDaniel 	if (unlikely(qm_port->cached_dir_credits == 0)) {
2774f7cc194bSTimothy McDaniel 		qm_port->cached_dir_credits =
2775f7cc194bSTimothy McDaniel 			dlb2_port_credits_get(qm_port,
2776f7cc194bSTimothy McDaniel 					      DLB2_DIR_QUEUE);
2777f7cc194bSTimothy McDaniel 		if (unlikely(qm_port->cached_dir_credits == 0)) {
2778f7cc194bSTimothy McDaniel 			DLB2_INC_STAT(
2779f7cc194bSTimothy McDaniel 			qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2780f7cc194bSTimothy McDaniel 			1);
2781e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("dir credits exhausted");
2782f7cc194bSTimothy McDaniel 			return 1; /* credits exhausted */
2783f7cc194bSTimothy McDaniel 		}
2784f7cc194bSTimothy McDaniel 	}
2785f7cc194bSTimothy McDaniel 
2786f7cc194bSTimothy McDaniel 	return 0;
2787f7cc194bSTimothy McDaniel }
2788f7cc194bSTimothy McDaniel 
278962e45206STimothy McDaniel static inline int
279062e45206STimothy McDaniel dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
279162e45206STimothy McDaniel {
279262e45206STimothy McDaniel 	if (unlikely(qm_port->cached_credits == 0)) {
279362e45206STimothy McDaniel 		qm_port->cached_credits =
279462e45206STimothy McDaniel 			dlb2_port_credits_get(qm_port,
279562e45206STimothy McDaniel 					      DLB2_COMBINED_POOL);
279662e45206STimothy McDaniel 		if (unlikely(qm_port->cached_credits == 0)) {
279762e45206STimothy McDaniel 			DLB2_INC_STAT(
279862e45206STimothy McDaniel 			qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
2799e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("credits exhausted");
280062e45206STimothy McDaniel 			return 1; /* credits exhausted */
280162e45206STimothy McDaniel 		}
280262e45206STimothy McDaniel 	}
280362e45206STimothy McDaniel 
280462e45206STimothy McDaniel 	return 0;
280562e45206STimothy McDaniel }
280662e45206STimothy McDaniel 
2807f7cc194bSTimothy McDaniel static __rte_always_inline void
28086e2e98d6SAbdullah Sevincer dlb2_pp_write(struct process_local_port_data *port_data, struct dlb2_enqueue_qe *qe4)
2809f7cc194bSTimothy McDaniel {
2810f7cc194bSTimothy McDaniel 	dlb2_movdir64b(port_data->pp_addr, qe4);
2811f7cc194bSTimothy McDaniel }
2812f7cc194bSTimothy McDaniel 
28136e2e98d6SAbdullah Sevincer static __rte_always_inline void
28146e2e98d6SAbdullah Sevincer dlb2_pp_write_reorder(struct process_local_port_data *port_data,
28156e2e98d6SAbdullah Sevincer 	      struct dlb2_enqueue_qe *qe4)
28166e2e98d6SAbdullah Sevincer {
28176e2e98d6SAbdullah Sevincer 	for (uint8_t i = 0; i < 4; i++) {
28186e2e98d6SAbdullah Sevincer 		if (qe4[i].cmd_byte != DLB2_NOOP_CMD_BYTE) {
28196e2e98d6SAbdullah Sevincer 			dlb2_movdir64b(port_data->pp_addr, qe4);
28206e2e98d6SAbdullah Sevincer 			return;
28216e2e98d6SAbdullah Sevincer 		}
28226e2e98d6SAbdullah Sevincer 	}
28236e2e98d6SAbdullah Sevincer }
28246e2e98d6SAbdullah Sevincer 
28256e2e98d6SAbdullah Sevincer static __rte_always_inline int
28266e2e98d6SAbdullah Sevincer dlb2_pp_check4_write(struct process_local_port_data *port_data,
28276e2e98d6SAbdullah Sevincer 	      struct dlb2_enqueue_qe *qe4)
28286e2e98d6SAbdullah Sevincer {
28296e2e98d6SAbdullah Sevincer 	for (uint8_t i = 0; i < DLB2_NUM_QES_PER_CACHE_LINE; i++)
28306e2e98d6SAbdullah Sevincer 		if (((uint64_t *)&qe4[i])[1] == 0)
28316e2e98d6SAbdullah Sevincer 			return 0;
28326e2e98d6SAbdullah Sevincer 
28336e2e98d6SAbdullah Sevincer 	dlb2_movdir64b(port_data->pp_addr, qe4);
28346e2e98d6SAbdullah Sevincer 	memset(qe4, 0, DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe));
28356e2e98d6SAbdullah Sevincer 	return DLB2_NUM_QES_PER_CACHE_LINE;
28366e2e98d6SAbdullah Sevincer }
28376e2e98d6SAbdullah Sevincer 
2838a2e4f1f5STimothy McDaniel static inline int
2839a2e4f1f5STimothy McDaniel dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
2840a2e4f1f5STimothy McDaniel {
2841a2e4f1f5STimothy McDaniel 	struct process_local_port_data *port_data;
2842a2e4f1f5STimothy McDaniel 	struct dlb2_cq_pop_qe *qe;
2843a2e4f1f5STimothy McDaniel 
2844a2e4f1f5STimothy McDaniel 	RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);
2845a2e4f1f5STimothy McDaniel 
2846a2e4f1f5STimothy McDaniel 	qe = qm_port->consume_qe;
2847a2e4f1f5STimothy McDaniel 
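	/* The token count field is zero-based: writing num - 1 returns num CQ tokens. */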
2848a2e4f1f5STimothy McDaniel 	qe->tokens = num - 1;
2849a2e4f1f5STimothy McDaniel 
2850a2e4f1f5STimothy McDaniel 	/* No store fence needed since no pointer is being sent, and CQ token
2851a2e4f1f5STimothy McDaniel 	 * pops can be safely reordered with other HCWs.
2852a2e4f1f5STimothy McDaniel 	 */
2853a2e4f1f5STimothy McDaniel 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2854a2e4f1f5STimothy McDaniel 
28556e2e98d6SAbdullah Sevincer 	dlb2_movdir64b_single(port_data->pp_addr, qe);
2856a2e4f1f5STimothy McDaniel 
2857e99981afSDavid Marchand 	DLB2_LOG_LINE_DBG("dlb2: consume immediate - %d QEs", num);
2858a2e4f1f5STimothy McDaniel 
2859a2e4f1f5STimothy McDaniel 	qm_port->owed_tokens = 0;
2860a2e4f1f5STimothy McDaniel 
2861a2e4f1f5STimothy McDaniel 	return 0;
2862a2e4f1f5STimothy McDaniel }
2863a2e4f1f5STimothy McDaniel 
2864f7cc194bSTimothy McDaniel static inline void
2865f7cc194bSTimothy McDaniel dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2866f7cc194bSTimothy McDaniel 		   bool do_sfence,
2867f7cc194bSTimothy McDaniel 		   struct process_local_port_data *port_data)
2868f7cc194bSTimothy McDaniel {
2869f7cc194bSTimothy McDaniel 	/* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2870f7cc194bSTimothy McDaniel 	 * application writes complete before enqueueing the QE.
2871f7cc194bSTimothy McDaniel 	 */
2872f7cc194bSTimothy McDaniel 	if (do_sfence)
2873f7cc194bSTimothy McDaniel 		rte_wmb();
2874f7cc194bSTimothy McDaniel 
28756e2e98d6SAbdullah Sevincer 	dlb2_pp_write(port_data, qm_port->qe4);
2876f7cc194bSTimothy McDaniel }
2877f7cc194bSTimothy McDaniel 
2878f7cc194bSTimothy McDaniel static inline void
2879c667583dSTimothy McDaniel dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2880c667583dSTimothy McDaniel {
2881c667583dSTimothy McDaniel 	struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2882c667583dSTimothy McDaniel 	int num = qm_port->owed_tokens;
2883c667583dSTimothy McDaniel 
2884c667583dSTimothy McDaniel 	qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2885c667583dSTimothy McDaniel 	qe[idx].tokens = num - 1;
2886c667583dSTimothy McDaniel 
2887c667583dSTimothy McDaniel 	qm_port->owed_tokens = 0;
2888c667583dSTimothy McDaniel }
2889c667583dSTimothy McDaniel 
2890f7cc194bSTimothy McDaniel static inline int
2891f7cc194bSTimothy McDaniel dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2892f7cc194bSTimothy McDaniel 			struct dlb2_port *qm_port,
2893f7cc194bSTimothy McDaniel 			const struct rte_event ev[],
2894f7cc194bSTimothy McDaniel 			uint8_t *sched_type,
2895f7cc194bSTimothy McDaniel 			uint8_t *queue_id)
2896f7cc194bSTimothy McDaniel {
2897f7cc194bSTimothy McDaniel 	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2898f7cc194bSTimothy McDaniel 	struct dlb2_eventdev_queue *ev_queue;
2899f7cc194bSTimothy McDaniel 	uint16_t *cached_credits = NULL;
2900f7cc194bSTimothy McDaniel 	struct dlb2_queue *qm_queue;
2901f7cc194bSTimothy McDaniel 
2902f7cc194bSTimothy McDaniel 	ev_queue = &dlb2->ev_queues[ev->queue_id];
2903f7cc194bSTimothy McDaniel 	qm_queue = &ev_queue->qm_queue;
2904f7cc194bSTimothy McDaniel 	*queue_id = qm_queue->id;
2905f7cc194bSTimothy McDaniel 
2906f7cc194bSTimothy McDaniel 	/* Ignore sched_type and hardware credits on release events */
2907f7cc194bSTimothy McDaniel 	if (ev->op == RTE_EVENT_OP_RELEASE)
2908f7cc194bSTimothy McDaniel 		goto op_check;
2909f7cc194bSTimothy McDaniel 
2910f7cc194bSTimothy McDaniel 	if (!qm_queue->is_directed) {
2911f7cc194bSTimothy McDaniel 		/* Load balanced destination queue */
2912f7cc194bSTimothy McDaniel 
291362e45206STimothy McDaniel 		if (dlb2->version == DLB2_HW_V2) {
2914f7cc194bSTimothy McDaniel 			if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2915f7cc194bSTimothy McDaniel 				rte_errno = -ENOSPC;
2916f7cc194bSTimothy McDaniel 				return 1;
2917f7cc194bSTimothy McDaniel 			}
2918f7cc194bSTimothy McDaniel 			cached_credits = &qm_port->cached_ldb_credits;
291962e45206STimothy McDaniel 		} else {
292062e45206STimothy McDaniel 			if (dlb2_check_enqueue_hw_credits(qm_port)) {
292162e45206STimothy McDaniel 				rte_errno = -ENOSPC;
292262e45206STimothy McDaniel 				return 1;
292362e45206STimothy McDaniel 			}
292462e45206STimothy McDaniel 			cached_credits = &qm_port->cached_credits;
292562e45206STimothy McDaniel 		}
2926f7cc194bSTimothy McDaniel 		switch (ev->sched_type) {
2927f7cc194bSTimothy McDaniel 		case RTE_SCHED_TYPE_ORDERED:
2928e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED");
2929f7cc194bSTimothy McDaniel 			if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2930f665790aSDavid Marchand 				DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d",
2931f7cc194bSTimothy McDaniel 					     *queue_id);
2932f7cc194bSTimothy McDaniel 				rte_errno = -EINVAL;
2933f7cc194bSTimothy McDaniel 				return 1;
2934f7cc194bSTimothy McDaniel 			}
2935f7cc194bSTimothy McDaniel 			*sched_type = DLB2_SCHED_ORDERED;
2936f7cc194bSTimothy McDaniel 			break;
2937f7cc194bSTimothy McDaniel 		case RTE_SCHED_TYPE_ATOMIC:
2938e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC");
2939f7cc194bSTimothy McDaniel 			*sched_type = DLB2_SCHED_ATOMIC;
2940f7cc194bSTimothy McDaniel 			break;
2941f7cc194bSTimothy McDaniel 		case RTE_SCHED_TYPE_PARALLEL:
2942e99981afSDavid Marchand 			DLB2_LOG_LINE_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL");
2943f7cc194bSTimothy McDaniel 			if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2944f7cc194bSTimothy McDaniel 				*sched_type = DLB2_SCHED_ORDERED;
2945f7cc194bSTimothy McDaniel 			else
2946f7cc194bSTimothy McDaniel 				*sched_type = DLB2_SCHED_UNORDERED;
2947f7cc194bSTimothy McDaniel 			break;
2948f7cc194bSTimothy McDaniel 		default:
2949f665790aSDavid Marchand 			DLB2_LOG_ERR("Unsupported LDB sched type in put_qe");
2950f7cc194bSTimothy McDaniel 			DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2951f7cc194bSTimothy McDaniel 			rte_errno = -EINVAL;
2952f7cc194bSTimothy McDaniel 			return 1;
2953f7cc194bSTimothy McDaniel 		}
2954f7cc194bSTimothy McDaniel 	} else {
2955f7cc194bSTimothy McDaniel 		/* Directed destination queue */
2956f7cc194bSTimothy McDaniel 
295762e45206STimothy McDaniel 		if (dlb2->version == DLB2_HW_V2) {
2958f7cc194bSTimothy McDaniel 			if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2959f7cc194bSTimothy McDaniel 				rte_errno = -ENOSPC;
2960f7cc194bSTimothy McDaniel 				return 1;
2961f7cc194bSTimothy McDaniel 			}
2962f7cc194bSTimothy McDaniel 			cached_credits = &qm_port->cached_dir_credits;
296362e45206STimothy McDaniel 		} else {
296462e45206STimothy McDaniel 			if (dlb2_check_enqueue_hw_credits(qm_port)) {
296562e45206STimothy McDaniel 				rte_errno = -ENOSPC;
296662e45206STimothy McDaniel 				return 1;
296762e45206STimothy McDaniel 			}
296862e45206STimothy McDaniel 			cached_credits = &qm_port->cached_credits;
296962e45206STimothy McDaniel 		}
2970e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED");
2971f7cc194bSTimothy McDaniel 
2972f7cc194bSTimothy McDaniel 		*sched_type = DLB2_SCHED_DIRECTED;
2973f7cc194bSTimothy McDaniel 	}
2974f7cc194bSTimothy McDaniel 
2975f7cc194bSTimothy McDaniel op_check:
2976f7cc194bSTimothy McDaniel 	switch (ev->op) {
2977f7cc194bSTimothy McDaniel 	case RTE_EVENT_OP_NEW:
2978f7cc194bSTimothy McDaniel 		/* Check that a sw credit is available */
2979f7cc194bSTimothy McDaniel 		if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2980f7cc194bSTimothy McDaniel 			rte_errno = -ENOSPC;
2981f7cc194bSTimothy McDaniel 			return 1;
2982f7cc194bSTimothy McDaniel 		}
2983f7cc194bSTimothy McDaniel 		ev_port->inflight_credits--;
2984f7cc194bSTimothy McDaniel 		(*cached_credits)--;
2985f7cc194bSTimothy McDaniel 		break;
2986f7cc194bSTimothy McDaniel 	case RTE_EVENT_OP_FORWARD:
2987f7cc194bSTimothy McDaniel 		/* Check for outstanding_releases underflow. If this occurs,
2988f7cc194bSTimothy McDaniel 		 * the application is not using the EVENT_OPs correctly; for
2989f7cc194bSTimothy McDaniel 		 * example, forwarding or releasing events that were not
2990f7cc194bSTimothy McDaniel 		 * dequeued.
2991f7cc194bSTimothy McDaniel 		 */
2992f7cc194bSTimothy McDaniel 		RTE_ASSERT(ev_port->outstanding_releases > 0);
2993f7cc194bSTimothy McDaniel 		ev_port->outstanding_releases--;
2994f7cc194bSTimothy McDaniel 		qm_port->issued_releases++;
2995f7cc194bSTimothy McDaniel 		(*cached_credits)--;
2996f7cc194bSTimothy McDaniel 		break;
2997f7cc194bSTimothy McDaniel 	case RTE_EVENT_OP_RELEASE:
2998f7cc194bSTimothy McDaniel 		ev_port->inflight_credits++;
2999f7cc194bSTimothy McDaniel 		/* Check for outstanding_releases underflow. If this occurs,
3000f7cc194bSTimothy McDaniel 		 * the application is not using the EVENT_OPs correctly; for
3001f7cc194bSTimothy McDaniel 		 * example, forwarding or releasing events that were not
3002f7cc194bSTimothy McDaniel 		 * dequeued.
3003f7cc194bSTimothy McDaniel 		 */
3004f7cc194bSTimothy McDaniel 		RTE_ASSERT(ev_port->outstanding_releases > 0);
3005f7cc194bSTimothy McDaniel 		ev_port->outstanding_releases--;
3006f7cc194bSTimothy McDaniel 		qm_port->issued_releases++;
3007f7cc194bSTimothy McDaniel 
3008f7cc194bSTimothy McDaniel 		/* Replenish s/w credits if enough are cached */
3009f7cc194bSTimothy McDaniel 		dlb2_replenish_sw_credits(dlb2, ev_port);
3010f7cc194bSTimothy McDaniel 		break;
3011f7cc194bSTimothy McDaniel 	}
3012f7cc194bSTimothy McDaniel 
3013f7cc194bSTimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
3014f7cc194bSTimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
3015f7cc194bSTimothy McDaniel 
30167be66a3bSTimothy McDaniel #ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
3017f7cc194bSTimothy McDaniel 	if (ev->op != RTE_EVENT_OP_RELEASE) {
3018f7cc194bSTimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
3019f7cc194bSTimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
3020f7cc194bSTimothy McDaniel 	}
3021f7cc194bSTimothy McDaniel #endif
3022f7cc194bSTimothy McDaniel 
3023f7cc194bSTimothy McDaniel 	return 0;
3024f7cc194bSTimothy McDaniel }
3025f7cc194bSTimothy McDaniel 
30266e2e98d6SAbdullah Sevincer static inline __m128i
30276e2e98d6SAbdullah Sevincer dlb2_event_to_qe(const struct rte_event *ev, uint8_t cmd, uint8_t sched_type, uint8_t qid)
30286e2e98d6SAbdullah Sevincer {
30296e2e98d6SAbdullah Sevincer 	__m128i dlb2_to_qe_shuffle = _mm_set_epi8(
30306e2e98d6SAbdullah Sevincer 	    0xFF, 0xFF,			 /* zero out cmd word */
30316e2e98d6SAbdullah Sevincer 	    1, 0,			 /* low 16-bits of flow id */
30326e2e98d6SAbdullah Sevincer 	    0xFF, 0xFF, /* zero QID, sched_type etc fields to be filled later */
30336e2e98d6SAbdullah Sevincer 	    3, 2,			 /* top of flow id, event type and subtype */
30346e2e98d6SAbdullah Sevincer 	    15, 14, 13, 12, 11, 10, 9, 8 /* data from end of event goes at start */
30356e2e98d6SAbdullah Sevincer 	);
30366e2e98d6SAbdullah Sevincer 
30376e2e98d6SAbdullah Sevincer 	/* event may not be 16 byte aligned. Use 16 byte unaligned load */
30386e2e98d6SAbdullah Sevincer 	__m128i tmp = _mm_lddqu_si128((const __m128i *)ev);
30396e2e98d6SAbdullah Sevincer 	__m128i qe = _mm_shuffle_epi8(tmp, dlb2_to_qe_shuffle);
30406e2e98d6SAbdullah Sevincer 	struct dlb2_enqueue_qe *dq = (struct dlb2_enqueue_qe *)&qe;
30416e2e98d6SAbdullah Sevincer 	/* set the cmd field */
30426e2e98d6SAbdullah Sevincer 	qe = _mm_insert_epi8(qe, cmd, 15);
30436e2e98d6SAbdullah Sevincer 	/* insert missing 16-bits with qid, sched_type and priority */
30446e2e98d6SAbdullah Sevincer 	uint16_t qid_stype_prio =
30456e2e98d6SAbdullah Sevincer 	    qid | (uint16_t)sched_type << 8 | ((uint16_t)ev->priority & 0xE0) << 5;
30466e2e98d6SAbdullah Sevincer 	qe = _mm_insert_epi16(qe, qid_stype_prio, 5);
30476e2e98d6SAbdullah Sevincer 	dq->weight = RTE_PMD_DLB2_GET_QE_WEIGHT(ev);
30486e2e98d6SAbdullah Sevincer 	return qe;
30496e2e98d6SAbdullah Sevincer }
30506e2e98d6SAbdullah Sevincer 
30516e2e98d6SAbdullah Sevincer static inline uint16_t
30526e2e98d6SAbdullah Sevincer __dlb2_event_enqueue_burst_reorder(void *event_port,
30536e2e98d6SAbdullah Sevincer 		const struct rte_event events[],
30546e2e98d6SAbdullah Sevincer 		uint16_t num,
30556e2e98d6SAbdullah Sevincer 		bool use_delayed)
30566e2e98d6SAbdullah Sevincer {
30576e2e98d6SAbdullah Sevincer 	struct dlb2_eventdev_port *ev_port = event_port;
30586e2e98d6SAbdullah Sevincer 	struct dlb2_port *qm_port = &ev_port->qm_port;
30596e2e98d6SAbdullah Sevincer 	struct dlb2_reorder *order = qm_port->order;
30606e2e98d6SAbdullah Sevincer 	struct process_local_port_data *port_data;
30616e2e98d6SAbdullah Sevincer 	bool is_directed = qm_port->is_directed;
30626e2e98d6SAbdullah Sevincer 	uint8_t n = order->next_to_enqueue;
30636e2e98d6SAbdullah Sevincer 	uint8_t p_cnt = 0;
30646e2e98d6SAbdullah Sevincer 	int retries = ev_port->enq_retries;
30656e2e98d6SAbdullah Sevincer 	__m128i new_qes[4], *from = NULL;
30666e2e98d6SAbdullah Sevincer 	int num_new = 0;
30676e2e98d6SAbdullah Sevincer 	int num_tx;
30686e2e98d6SAbdullah Sevincer 	int i;
30696e2e98d6SAbdullah Sevincer 
30706e2e98d6SAbdullah Sevincer 	RTE_ASSERT(ev_port->enq_configured);
30716e2e98d6SAbdullah Sevincer 	RTE_ASSERT(events != NULL);
30726e2e98d6SAbdullah Sevincer 
30736e2e98d6SAbdullah Sevincer 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
30746e2e98d6SAbdullah Sevincer 
30756e2e98d6SAbdullah Sevincer 	num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth);
30766e2e98d6SAbdullah Sevincer #if DLB2_BYPASS_FENCE_ON_PP == 1
30776e2e98d6SAbdullah Sevincer 	if (!qm_port->is_producer) /* Call memory fence once at the start */
30786e2e98d6SAbdullah Sevincer 		rte_wmb();	   /* calls _mm_sfence() */
30796e2e98d6SAbdullah Sevincer #else
30806e2e98d6SAbdullah Sevincer 	rte_wmb(); /*  calls _mm_sfence() */
30816e2e98d6SAbdullah Sevincer #endif
30826e2e98d6SAbdullah Sevincer 	for (i = 0; i < num_tx; i++) {
30836e2e98d6SAbdullah Sevincer 		uint8_t sched_type = 0;
30846e2e98d6SAbdullah Sevincer 		uint8_t reorder_idx = events[i].impl_opaque;
30856e2e98d6SAbdullah Sevincer 		int16_t thresh = qm_port->token_pop_thresh;
30866e2e98d6SAbdullah Sevincer 		uint8_t qid = 0;
30876e2e98d6SAbdullah Sevincer 		int ret;
30886e2e98d6SAbdullah Sevincer 
30896e2e98d6SAbdullah Sevincer 		while ((ret = dlb2_event_enqueue_prep(ev_port, qm_port, &events[i],
30906e2e98d6SAbdullah Sevincer 						      &sched_type, &qid)) != 0 &&
30916e2e98d6SAbdullah Sevincer 		       rte_errno == -ENOSPC && --retries > 0)
30926e2e98d6SAbdullah Sevincer 			rte_pause();
30936e2e98d6SAbdullah Sevincer 
30946e2e98d6SAbdullah Sevincer 		if (ret != 0) /* Either there is an error or retries were exceeded */
30956e2e98d6SAbdullah Sevincer 			break;
30966e2e98d6SAbdullah Sevincer 
30976e2e98d6SAbdullah Sevincer 		switch (events[i].op) {
30986e2e98d6SAbdullah Sevincer 		case RTE_EVENT_OP_NEW:
30996e2e98d6SAbdullah Sevincer 			new_qes[num_new++] = dlb2_event_to_qe(
31006e2e98d6SAbdullah Sevincer 			    &events[i], DLB2_NEW_CMD_BYTE, sched_type, qid);
31016e2e98d6SAbdullah Sevincer 			if (num_new == RTE_DIM(new_qes)) {
31026e2e98d6SAbdullah Sevincer 				dlb2_pp_write(port_data, (struct dlb2_enqueue_qe *)&new_qes);
31036e2e98d6SAbdullah Sevincer 				num_new = 0;
31046e2e98d6SAbdullah Sevincer 			}
31056e2e98d6SAbdullah Sevincer 			break;
31066e2e98d6SAbdullah Sevincer 		case RTE_EVENT_OP_FORWARD: {
31076e2e98d6SAbdullah Sevincer 			order->enq_reorder[reorder_idx].m128 = dlb2_event_to_qe(
31086e2e98d6SAbdullah Sevincer 			    &events[i], is_directed ? DLB2_NEW_CMD_BYTE : DLB2_FWD_CMD_BYTE,
31096e2e98d6SAbdullah Sevincer 			    sched_type, qid);
31106e2e98d6SAbdullah Sevincer 			n += dlb2_pp_check4_write(port_data, &order->enq_reorder[n].qe);
31116e2e98d6SAbdullah Sevincer 			break;
31126e2e98d6SAbdullah Sevincer 		}
31136e2e98d6SAbdullah Sevincer 		case RTE_EVENT_OP_RELEASE: {
31146e2e98d6SAbdullah Sevincer 			order->enq_reorder[reorder_idx].m128 = dlb2_event_to_qe(
31156e2e98d6SAbdullah Sevincer 			    &events[i], is_directed ? DLB2_NOOP_CMD_BYTE : DLB2_COMP_CMD_BYTE,
31166e2e98d6SAbdullah Sevincer 			    sched_type, 0xFF);
31176e2e98d6SAbdullah Sevincer 			break;
31186e2e98d6SAbdullah Sevincer 		}
31196e2e98d6SAbdullah Sevincer 		}
31206e2e98d6SAbdullah Sevincer 
31216e2e98d6SAbdullah Sevincer 		if (use_delayed && qm_port->token_pop_mode == DELAYED_POP &&
31226e2e98d6SAbdullah Sevincer 		    (events[i].op == RTE_EVENT_OP_FORWARD ||
31236e2e98d6SAbdullah Sevincer 		     events[i].op == RTE_EVENT_OP_RELEASE) &&
31246e2e98d6SAbdullah Sevincer 		    qm_port->issued_releases >= thresh - 1) {
31256e2e98d6SAbdullah Sevincer 
31266e2e98d6SAbdullah Sevincer 			dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
31276e2e98d6SAbdullah Sevincer 
31286e2e98d6SAbdullah Sevincer 			/* Reset the releases for the next QE batch */
31296e2e98d6SAbdullah Sevincer 			qm_port->issued_releases -= thresh;
31306e2e98d6SAbdullah Sevincer 
31316e2e98d6SAbdullah Sevincer 			/* When using delayed token pop mode, the
31326e2e98d6SAbdullah Sevincer 			 * initial token threshold is the full CQ
31336e2e98d6SAbdullah Sevincer 			 * depth. After the first token pop, we need to
31346e2e98d6SAbdullah Sevincer 			 * reset it to the dequeue_depth.
31356e2e98d6SAbdullah Sevincer 			 */
31366e2e98d6SAbdullah Sevincer 			qm_port->token_pop_thresh =
31376e2e98d6SAbdullah Sevincer 			    qm_port->dequeue_depth;
31386e2e98d6SAbdullah Sevincer 		}
31396e2e98d6SAbdullah Sevincer 	}
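	/*
	 * Drain the reorder ring: starting at next_to_enqueue, send each
	 * contiguous run of filled slots to the port in groups of up to four
	 * QEs, then clear the slots that were sent.
	 */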
31406e2e98d6SAbdullah Sevincer 	while (order->enq_reorder[n].u64[1] != 0) {
31416e2e98d6SAbdullah Sevincer 		__m128i tmp[4] = {0}, *send = NULL;
31426e2e98d6SAbdullah Sevincer 		bool enq;
31436e2e98d6SAbdullah Sevincer 
31446e2e98d6SAbdullah Sevincer 		if (!p_cnt)
31456e2e98d6SAbdullah Sevincer 			from = &order->enq_reorder[n].m128;
31466e2e98d6SAbdullah Sevincer 
31476e2e98d6SAbdullah Sevincer 		p_cnt++;
31486e2e98d6SAbdullah Sevincer 		n++;
31496e2e98d6SAbdullah Sevincer 
31506e2e98d6SAbdullah Sevincer 		enq = !n || p_cnt == 4 || !order->enq_reorder[n].u64[1];
31516e2e98d6SAbdullah Sevincer 		if (!enq)
31526e2e98d6SAbdullah Sevincer 			continue;
31536e2e98d6SAbdullah Sevincer 
31546e2e98d6SAbdullah Sevincer 		if (p_cnt < 4) {
31556e2e98d6SAbdullah Sevincer 			memcpy(tmp, from, p_cnt * sizeof(struct dlb2_enqueue_qe));
31566e2e98d6SAbdullah Sevincer 			send = tmp;
31576e2e98d6SAbdullah Sevincer 		} else {
31586e2e98d6SAbdullah Sevincer 			send  = from;
31596e2e98d6SAbdullah Sevincer 		}
31606e2e98d6SAbdullah Sevincer 
31616e2e98d6SAbdullah Sevincer 		if (is_directed)
31626e2e98d6SAbdullah Sevincer 			dlb2_pp_write_reorder(port_data, (struct dlb2_enqueue_qe *)send);
31636e2e98d6SAbdullah Sevincer 		else
31646e2e98d6SAbdullah Sevincer 			dlb2_pp_write(port_data, (struct dlb2_enqueue_qe *)send);
31656e2e98d6SAbdullah Sevincer 		memset(from, 0, p_cnt * sizeof(struct dlb2_enqueue_qe));
31666e2e98d6SAbdullah Sevincer 		p_cnt = 0;
31676e2e98d6SAbdullah Sevincer 	}
31686e2e98d6SAbdullah Sevincer 	order->next_to_enqueue = n;
31696e2e98d6SAbdullah Sevincer 
31706e2e98d6SAbdullah Sevincer 	if (num_new > 0) {
31716e2e98d6SAbdullah Sevincer 		switch (num_new) {
31726e2e98d6SAbdullah Sevincer 		case 1:
31736e2e98d6SAbdullah Sevincer 			new_qes[1] = _mm_setzero_si128(); /* fall-through */
31746e2e98d6SAbdullah Sevincer 		case 2:
31756e2e98d6SAbdullah Sevincer 			new_qes[2] = _mm_setzero_si128(); /* fall-through */
31766e2e98d6SAbdullah Sevincer 		case 3:
31776e2e98d6SAbdullah Sevincer 			new_qes[3] = _mm_setzero_si128();
31786e2e98d6SAbdullah Sevincer 		}
31796e2e98d6SAbdullah Sevincer 		dlb2_pp_write(port_data, (struct dlb2_enqueue_qe *)&new_qes);
31806e2e98d6SAbdullah Sevincer 		num_new = 0;
31816e2e98d6SAbdullah Sevincer 	}
31826e2e98d6SAbdullah Sevincer 
31836e2e98d6SAbdullah Sevincer 	return i;
31846e2e98d6SAbdullah Sevincer }
31856e2e98d6SAbdullah Sevincer 
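/*
 * Common enqueue path. Events are converted to HCWs one cache line (four QEs)
 * at a time; when delayed token pop is in use and the release threshold is
 * reached, a token-pop QE is inserted in place of the next event and the
 * batch is cut short. Enqueue preparation is retried on -ENOSPC up to
 * enq_retries times before giving up.
 */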
3186f7cc194bSTimothy McDaniel static inline uint16_t
318707d55c41STimothy McDaniel __dlb2_event_enqueue_burst(void *event_port,
3188f7cc194bSTimothy McDaniel 			   const struct rte_event events[],
318907d55c41STimothy McDaniel 			   uint16_t num,
319007d55c41STimothy McDaniel 			   bool use_delayed)
3191f7cc194bSTimothy McDaniel {
3192f7cc194bSTimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
3193f7cc194bSTimothy McDaniel 	struct dlb2_port *qm_port = &ev_port->qm_port;
3194f7cc194bSTimothy McDaniel 	struct process_local_port_data *port_data;
319587ecdd9eSTimothy McDaniel 	int retries = ev_port->enq_retries;
31969c9e7232SAbdullah Sevincer 	int num_tx;
319707d55c41STimothy McDaniel 	int i;
3198f7cc194bSTimothy McDaniel 
3199f7cc194bSTimothy McDaniel 	RTE_ASSERT(ev_port->enq_configured);
3200f7cc194bSTimothy McDaniel 	RTE_ASSERT(events != NULL);
3201f7cc194bSTimothy McDaniel 
32026e2e98d6SAbdullah Sevincer 	if (qm_port->reorder_en)
32036e2e98d6SAbdullah Sevincer 		return __dlb2_event_enqueue_burst_reorder(event_port, events, num, use_delayed);
32046e2e98d6SAbdullah Sevincer 
320507d55c41STimothy McDaniel 	i = 0;
3206f7cc194bSTimothy McDaniel 
3207f7cc194bSTimothy McDaniel 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
32089c9e7232SAbdullah Sevincer 	num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth);
32099c9e7232SAbdullah Sevincer 	while (i < num_tx) {
3210f7cc194bSTimothy McDaniel 		uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
3211f7cc194bSTimothy McDaniel 		uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
321207d55c41STimothy McDaniel 		int pop_offs = 0;
3213f7cc194bSTimothy McDaniel 		int j = 0;
3214f7cc194bSTimothy McDaniel 
321507d55c41STimothy McDaniel 		memset(qm_port->qe4,
321607d55c41STimothy McDaniel 		       0,
321707d55c41STimothy McDaniel 		       DLB2_NUM_QES_PER_CACHE_LINE *
321807d55c41STimothy McDaniel 		       sizeof(struct dlb2_enqueue_qe));
321907d55c41STimothy McDaniel 
3220f7cc194bSTimothy McDaniel 		for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
3221f7cc194bSTimothy McDaniel 			const struct rte_event *ev = &events[i + j];
322207d55c41STimothy McDaniel 			int16_t thresh = qm_port->token_pop_thresh;
322387ecdd9eSTimothy McDaniel 			int ret;
322407d55c41STimothy McDaniel 
322507d55c41STimothy McDaniel 			if (use_delayed &&
322607d55c41STimothy McDaniel 			    qm_port->token_pop_mode == DELAYED_POP &&
322707d55c41STimothy McDaniel 			    (ev->op == RTE_EVENT_OP_FORWARD ||
322807d55c41STimothy McDaniel 			     ev->op == RTE_EVENT_OP_RELEASE) &&
322907d55c41STimothy McDaniel 			    qm_port->issued_releases >= thresh - 1) {
323007d55c41STimothy McDaniel 				/* Insert the token pop QE and break out. This
323107d55c41STimothy McDaniel 				 * may result in a partial HCW, but that is
323207d55c41STimothy McDaniel 				 * simpler than supporting arbitrary QE
323307d55c41STimothy McDaniel 				 * insertion.
323407d55c41STimothy McDaniel 				 */
323507d55c41STimothy McDaniel 				dlb2_construct_token_pop_qe(qm_port, j);
323607d55c41STimothy McDaniel 
323707d55c41STimothy McDaniel 				/* Reset the releases for the next QE batch */
323807d55c41STimothy McDaniel 				qm_port->issued_releases -= thresh;
323907d55c41STimothy McDaniel 
324007d55c41STimothy McDaniel 				pop_offs = 1;
324107d55c41STimothy McDaniel 				j++;
324207d55c41STimothy McDaniel 				break;
324307d55c41STimothy McDaniel 			}
3244f7cc194bSTimothy McDaniel 
324587ecdd9eSTimothy McDaniel 			/*
324687ecdd9eSTimothy McDaniel 			 * Retry if insufficient credits
324787ecdd9eSTimothy McDaniel 			 */
324887ecdd9eSTimothy McDaniel 			do {
324987ecdd9eSTimothy McDaniel 				ret = dlb2_event_enqueue_prep(ev_port,
325087ecdd9eSTimothy McDaniel 							      qm_port,
325187ecdd9eSTimothy McDaniel 							      ev,
3252f7cc194bSTimothy McDaniel 							      &sched_types[j],
325387ecdd9eSTimothy McDaniel 							      &queue_ids[j]);
325487ecdd9eSTimothy McDaniel 			} while ((ret == -ENOSPC) && (retries-- > 0));
325587ecdd9eSTimothy McDaniel 
325687ecdd9eSTimothy McDaniel 			if (ret != 0)
3257f7cc194bSTimothy McDaniel 				break;
3258f7cc194bSTimothy McDaniel 		}
3259f7cc194bSTimothy McDaniel 
3260f7cc194bSTimothy McDaniel 		if (j == 0)
3261f7cc194bSTimothy McDaniel 			break;
3262f7cc194bSTimothy McDaniel 
326307d55c41STimothy McDaniel 		dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
3264f7cc194bSTimothy McDaniel 				      sched_types, queue_ids);
3265f7cc194bSTimothy McDaniel 
3266d8c16de5SAbdullah Sevincer #if DLB2_BYPASS_FENCE_ON_PP == 1
3267d8c16de5SAbdullah Sevincer 		/* Bypass fence instruction for producer ports */
3268d8c16de5SAbdullah Sevincer 		dlb2_hw_do_enqueue(qm_port, i == 0 && !qm_port->is_producer, port_data);
3269d8c16de5SAbdullah Sevincer #else
3270f7cc194bSTimothy McDaniel 		dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
3271d8c16de5SAbdullah Sevincer #endif
3272f7cc194bSTimothy McDaniel 
327307d55c41STimothy McDaniel 		/* Don't include the token pop QE in the enqueue count */
327407d55c41STimothy McDaniel 		i += j - pop_offs;
3275f7cc194bSTimothy McDaniel 
327607d55c41STimothy McDaniel 		/* Don't interpret j < DLB2_NUM_... as out-of-credits if
327707d55c41STimothy McDaniel 		 * pop_offs != 0
327807d55c41STimothy McDaniel 		 */
327907d55c41STimothy McDaniel 		if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
3280f7cc194bSTimothy McDaniel 			break;
3281f7cc194bSTimothy McDaniel 	}
3282f7cc194bSTimothy McDaniel 
328307d55c41STimothy McDaniel 	return i;
3284c667583dSTimothy McDaniel }
328507d55c41STimothy McDaniel 
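/*
 * Thin wrappers around __dlb2_event_enqueue_burst(); the *_delayed variants
 * differ only in passing use_delayed = true, which only takes effect when the
 * port's token pop mode is DELAYED_POP.
 */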
328607d55c41STimothy McDaniel static uint16_t
328707d55c41STimothy McDaniel dlb2_event_enqueue_burst(void *event_port,
328807d55c41STimothy McDaniel 			     const struct rte_event events[],
328907d55c41STimothy McDaniel 			     uint16_t num)
329007d55c41STimothy McDaniel {
329107d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
329207d55c41STimothy McDaniel }
329307d55c41STimothy McDaniel 
329407d55c41STimothy McDaniel static uint16_t
329507d55c41STimothy McDaniel dlb2_event_enqueue_burst_delayed(void *event_port,
329607d55c41STimothy McDaniel 				     const struct rte_event events[],
329707d55c41STimothy McDaniel 				     uint16_t num)
329807d55c41STimothy McDaniel {
329907d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
3300f7cc194bSTimothy McDaniel }
3301f7cc194bSTimothy McDaniel 
3302f7cc194bSTimothy McDaniel static uint16_t
3303f7cc194bSTimothy McDaniel dlb2_event_enqueue_new_burst(void *event_port,
3304f7cc194bSTimothy McDaniel 			     const struct rte_event events[],
3305f7cc194bSTimothy McDaniel 			     uint16_t num)
3306f7cc194bSTimothy McDaniel {
330707d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
330807d55c41STimothy McDaniel }
330907d55c41STimothy McDaniel 
331007d55c41STimothy McDaniel static uint16_t
331107d55c41STimothy McDaniel dlb2_event_enqueue_new_burst_delayed(void *event_port,
331207d55c41STimothy McDaniel 				     const struct rte_event events[],
331307d55c41STimothy McDaniel 				     uint16_t num)
331407d55c41STimothy McDaniel {
331507d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
3316f7cc194bSTimothy McDaniel }
3317f7cc194bSTimothy McDaniel 
3318f7cc194bSTimothy McDaniel static uint16_t
3319f7cc194bSTimothy McDaniel dlb2_event_enqueue_forward_burst(void *event_port,
3320f7cc194bSTimothy McDaniel 				 const struct rte_event events[],
3321f7cc194bSTimothy McDaniel 				 uint16_t num)
3322f7cc194bSTimothy McDaniel {
332307d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, false);
332407d55c41STimothy McDaniel }
332507d55c41STimothy McDaniel 
332607d55c41STimothy McDaniel static uint16_t
332707d55c41STimothy McDaniel dlb2_event_enqueue_forward_burst_delayed(void *event_port,
332807d55c41STimothy McDaniel 					 const struct rte_event events[],
332907d55c41STimothy McDaniel 					 uint16_t num)
333007d55c41STimothy McDaniel {
333107d55c41STimothy McDaniel 	return __dlb2_event_enqueue_burst(event_port, events, num, true);
333207d55c41STimothy McDaniel }
333307d55c41STimothy McDaniel 
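/*
 * Release n events on behalf of a port. For load-balanced ports this writes
 * batches of completion (DLB2_COMP_CMD_BYTE) QEs, inserting a token-pop QE
 * when delayed token pop is in use and its threshold is reached; directed
 * ports skip the hardware writes entirely. In both cases the software credit
 * accounting (outstanding_releases, inflight_credits) is updated afterwards.
 */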
333407d55c41STimothy McDaniel static void
333507d55c41STimothy McDaniel dlb2_event_release(struct dlb2_eventdev *dlb2,
333607d55c41STimothy McDaniel 		   uint8_t port_id,
333707d55c41STimothy McDaniel 		   int n)
333807d55c41STimothy McDaniel {
333907d55c41STimothy McDaniel 	struct process_local_port_data *port_data;
334007d55c41STimothy McDaniel 	struct dlb2_eventdev_port *ev_port;
334107d55c41STimothy McDaniel 	struct dlb2_port *qm_port;
334207d55c41STimothy McDaniel 	int i;
334307d55c41STimothy McDaniel 
334407d55c41STimothy McDaniel 	if (port_id > dlb2->num_ports) {
3345f665790aSDavid Marchand 		DLB2_LOG_ERR("Invalid port id %d in dlb2_event_release",
334607d55c41STimothy McDaniel 			     port_id);
334707d55c41STimothy McDaniel 		rte_errno = -EINVAL;
334807d55c41STimothy McDaniel 		return;
334907d55c41STimothy McDaniel 	}
335007d55c41STimothy McDaniel 
335107d55c41STimothy McDaniel 	ev_port = &dlb2->ev_ports[port_id];
335207d55c41STimothy McDaniel 	qm_port = &ev_port->qm_port;
335307d55c41STimothy McDaniel 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
335407d55c41STimothy McDaniel 
335507d55c41STimothy McDaniel 	i = 0;
335607d55c41STimothy McDaniel 
335707d55c41STimothy McDaniel 	if (qm_port->is_directed) {
335807d55c41STimothy McDaniel 		i = n;
335907d55c41STimothy McDaniel 		goto sw_credit_update;
336007d55c41STimothy McDaniel 	}
336107d55c41STimothy McDaniel 
336207d55c41STimothy McDaniel 	while (i < n) {
336307d55c41STimothy McDaniel 		int pop_offs = 0;
336407d55c41STimothy McDaniel 		int j = 0;
336507d55c41STimothy McDaniel 
336607d55c41STimothy McDaniel 		/* Zero-out QEs */
3367000a7b8eSTimothy McDaniel 		_mm_storeu_si128((void *)&qm_port->qe4[0], _mm_setzero_si128());
3368000a7b8eSTimothy McDaniel 		_mm_storeu_si128((void *)&qm_port->qe4[1], _mm_setzero_si128());
3369000a7b8eSTimothy McDaniel 		_mm_storeu_si128((void *)&qm_port->qe4[2], _mm_setzero_si128());
3370000a7b8eSTimothy McDaniel 		_mm_storeu_si128((void *)&qm_port->qe4[3], _mm_setzero_si128());
3371000a7b8eSTimothy McDaniel 
337307d55c41STimothy McDaniel 		for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
337407d55c41STimothy McDaniel 			int16_t thresh = qm_port->token_pop_thresh;
337507d55c41STimothy McDaniel 
337607d55c41STimothy McDaniel 			if (qm_port->token_pop_mode == DELAYED_POP &&
337707d55c41STimothy McDaniel 			    qm_port->issued_releases >= thresh - 1) {
337807d55c41STimothy McDaniel 				/* Insert the token pop QE */
337907d55c41STimothy McDaniel 				dlb2_construct_token_pop_qe(qm_port, j);
338007d55c41STimothy McDaniel 
338107d55c41STimothy McDaniel 				/* Reset the releases for the next QE batch */
338207d55c41STimothy McDaniel 				qm_port->issued_releases -= thresh;
338307d55c41STimothy McDaniel 
338407d55c41STimothy McDaniel 				pop_offs = 1;
338507d55c41STimothy McDaniel 				j++;
338607d55c41STimothy McDaniel 				break;
338707d55c41STimothy McDaniel 			}
338807d55c41STimothy McDaniel 
338907d55c41STimothy McDaniel 			qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
339007d55c41STimothy McDaniel 			qm_port->issued_releases++;
339107d55c41STimothy McDaniel 		}
339207d55c41STimothy McDaniel 
339307d55c41STimothy McDaniel 		dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
339407d55c41STimothy McDaniel 
339507d55c41STimothy McDaniel 		/* Don't include the token pop QE in the release count */
339607d55c41STimothy McDaniel 		i += j - pop_offs;
339707d55c41STimothy McDaniel 	}
339807d55c41STimothy McDaniel 
339907d55c41STimothy McDaniel sw_credit_update:
340007d55c41STimothy McDaniel 	/* each release returns one credit */
3401000a7b8eSTimothy McDaniel 	if (unlikely(!ev_port->outstanding_releases)) {
3402f665790aSDavid Marchand 		DLB2_LOG_ERR("%s: Outstanding releases underflowed.",
340307d55c41STimothy McDaniel 			     __func__);
340407d55c41STimothy McDaniel 		return;
340507d55c41STimothy McDaniel 	}
340607d55c41STimothy McDaniel 	ev_port->outstanding_releases -= i;
340707d55c41STimothy McDaniel 	ev_port->inflight_credits += i;
340807d55c41STimothy McDaniel 
340907d55c41STimothy McDaniel 	/* Replenish s/w credits if enough releases are performed */
341007d55c41STimothy McDaniel 	dlb2_replenish_sw_credits(dlb2, ev_port);
3411f7cc194bSTimothy McDaniel }
3412f7cc194bSTimothy McDaniel 
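/*
 * Return num hardware credits to the port's local cache and, once the cache
 * holds at least two batches (2 * hw_credit_quanta), push one batch back to
 * the shared credit pool. On DLB2 HW V2, load-balanced and directed ports use
 * separate LDB/DIR pools; otherwise a single combined pool is used.
 */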
3413a2e4f1f5STimothy McDaniel static inline void
3414a2e4f1f5STimothy McDaniel dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
3415a2e4f1f5STimothy McDaniel {
3416e4869c0bSPravin Pathak 	uint32_t batch_size = qm_port->hw_credit_quanta;
3417a2e4f1f5STimothy McDaniel 
3418a2e4f1f5STimothy McDaniel 	/* increment port credits, and return a batch to the pool if the cache exceeds the threshold */
3419a2e4f1f5STimothy McDaniel 	if (!qm_port->is_directed) {
342062e45206STimothy McDaniel 		if (qm_port->dlb2->version == DLB2_HW_V2) {
3421a2e4f1f5STimothy McDaniel 			qm_port->cached_ldb_credits += num;
3422a2e4f1f5STimothy McDaniel 			if (qm_port->cached_ldb_credits >= 2 * batch_size) {
3423e12a0166STyler Retzlaff 				rte_atomic_fetch_add_explicit(
3424a2e4f1f5STimothy McDaniel 					qm_port->credit_pool[DLB2_LDB_QUEUE],
3425e12a0166STyler Retzlaff 					batch_size, rte_memory_order_seq_cst);
3426a2e4f1f5STimothy McDaniel 				qm_port->cached_ldb_credits -= batch_size;
3427a2e4f1f5STimothy McDaniel 			}
3428a2e4f1f5STimothy McDaniel 		} else {
342962e45206STimothy McDaniel 			qm_port->cached_credits += num;
343062e45206STimothy McDaniel 			if (qm_port->cached_credits >= 2 * batch_size) {
3431e12a0166STyler Retzlaff 				rte_atomic_fetch_add_explicit(
343262e45206STimothy McDaniel 				      qm_port->credit_pool[DLB2_COMBINED_POOL],
3433e12a0166STyler Retzlaff 				      batch_size, rte_memory_order_seq_cst);
343462e45206STimothy McDaniel 				qm_port->cached_credits -= batch_size;
343562e45206STimothy McDaniel 			}
343662e45206STimothy McDaniel 		}
343762e45206STimothy McDaniel 	} else {
343862e45206STimothy McDaniel 		if (qm_port->dlb2->version == DLB2_HW_V2) {
3439a2e4f1f5STimothy McDaniel 			qm_port->cached_dir_credits += num;
3440a2e4f1f5STimothy McDaniel 			if (qm_port->cached_dir_credits >= 2 * batch_size) {
3441e12a0166STyler Retzlaff 				rte_atomic_fetch_add_explicit(
3442a2e4f1f5STimothy McDaniel 					qm_port->credit_pool[DLB2_DIR_QUEUE],
3443e12a0166STyler Retzlaff 					batch_size, rte_memory_order_seq_cst);
3444a2e4f1f5STimothy McDaniel 				qm_port->cached_dir_credits -= batch_size;
3445a2e4f1f5STimothy McDaniel 			}
344662e45206STimothy McDaniel 		} else {
344762e45206STimothy McDaniel 			qm_port->cached_credits += num;
344862e45206STimothy McDaniel 			if (qm_port->cached_credits >= 2 * batch_size) {
3449e12a0166STyler Retzlaff 				rte_atomic_fetch_add_explicit(
345062e45206STimothy McDaniel 				      qm_port->credit_pool[DLB2_COMBINED_POOL],
3451e12a0166STyler Retzlaff 				      batch_size, rte_memory_order_seq_cst);
345262e45206STimothy McDaniel 				qm_port->cached_credits -= batch_size;
345362e45206STimothy McDaniel 			}
345462e45206STimothy McDaniel 		}
3455a2e4f1f5STimothy McDaniel 	}
3456a2e4f1f5STimothy McDaniel }
3457a2e4f1f5STimothy McDaniel 
34586afc4bafSAnatoly Burakov #define CLB_MASK_IDX 0
34596afc4bafSAnatoly Burakov #define CLB_VAL_IDX 1
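/*
 * rte_power_monitor() callback: return -1 (stop waiting) once the monitored
 * CQ word, masked with opaque[CLB_MASK_IDX], equals the expected gen-bit
 * value in opaque[CLB_VAL_IDX], i.e. once a new QE has arrived.
 */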
34606afc4bafSAnatoly Burakov static int
34616afc4bafSAnatoly Burakov dlb2_monitor_callback(const uint64_t val,
34626afc4bafSAnatoly Burakov 		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
34636afc4bafSAnatoly Burakov {
34646afc4bafSAnatoly Burakov 	/* abort if the value matches */
34656afc4bafSAnatoly Burakov 	return (val & opaque[CLB_MASK_IDX]) == opaque[CLB_VAL_IDX] ? -1 : 0;
34666afc4bafSAnatoly Burakov }
34676afc4bafSAnatoly Burakov 
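/*
 * Wait for work to arrive on an empty CQ. Returns 1 when the dequeue timeout
 * has expired. Otherwise, if umwait/umonitor is allowed, arm
 * rte_power_monitor() on the gen-bit word of the next CQ entry; if not,
 * busy-poll the timestamp counter for up to poll_interval ticks. Returns 0
 * so the caller re-checks the CQ.
 */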
3468a2e4f1f5STimothy McDaniel static inline int
3469a2e4f1f5STimothy McDaniel dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
3470a2e4f1f5STimothy McDaniel 		  struct dlb2_eventdev_port *ev_port,
3471a2e4f1f5STimothy McDaniel 		  struct dlb2_port *qm_port,
3472a2e4f1f5STimothy McDaniel 		  uint64_t timeout,
3473a2e4f1f5STimothy McDaniel 		  uint64_t start_ticks)
3474a2e4f1f5STimothy McDaniel {
3475a2e4f1f5STimothy McDaniel 	struct process_local_port_data *port_data;
3476a2e4f1f5STimothy McDaniel 	uint64_t elapsed_ticks;
3477a2e4f1f5STimothy McDaniel 
3478a2e4f1f5STimothy McDaniel 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
3479a2e4f1f5STimothy McDaniel 
3480a2e4f1f5STimothy McDaniel 	elapsed_ticks = rte_get_timer_cycles() - start_ticks;
3481a2e4f1f5STimothy McDaniel 
3482a2e4f1f5STimothy McDaniel 	/* Wait/poll time expired */
3483a2e4f1f5STimothy McDaniel 	if (elapsed_ticks >= timeout) {
3484a2e4f1f5STimothy McDaniel 		return 1;
3485a2e4f1f5STimothy McDaniel 	} else if (dlb2->umwait_allowed) {
34866a17919bSAnatoly Burakov 		struct rte_power_monitor_cond pmc;
3487a2e4f1f5STimothy McDaniel 		volatile struct dlb2_dequeue_qe *cq_base;
3488a2e4f1f5STimothy McDaniel 		union {
3489a2e4f1f5STimothy McDaniel 			uint64_t raw_qe[2];
3490a2e4f1f5STimothy McDaniel 			struct dlb2_dequeue_qe qe;
3491a2e4f1f5STimothy McDaniel 		} qe_mask;
3492a2e4f1f5STimothy McDaniel 		uint64_t expected_value;
3493a2e4f1f5STimothy McDaniel 		volatile uint64_t *monitor_addr;
3494a2e4f1f5STimothy McDaniel 
3495a2e4f1f5STimothy McDaniel 		qe_mask.qe.cq_gen = 1; /* set mask */
3496a2e4f1f5STimothy McDaniel 
3497a2e4f1f5STimothy McDaniel 		cq_base = port_data->cq_base;
3498a2e4f1f5STimothy McDaniel 		monitor_addr = (volatile uint64_t *)(volatile void *)
3499a2e4f1f5STimothy McDaniel 			&cq_base[qm_port->cq_idx];
3500a2e4f1f5STimothy McDaniel 		monitor_addr++; /* cq_gen bit is in second 64bit location */
3501a2e4f1f5STimothy McDaniel 
3502a2e4f1f5STimothy McDaniel 		if (qm_port->gen_bit)
3503a2e4f1f5STimothy McDaniel 			expected_value = qe_mask.raw_qe[1];
3504a2e4f1f5STimothy McDaniel 		else
3505a2e4f1f5STimothy McDaniel 			expected_value = 0;
3506a2e4f1f5STimothy McDaniel 
35076a17919bSAnatoly Burakov 		pmc.addr = monitor_addr;
35086afc4bafSAnatoly Burakov 		/* store expected value and comparison mask in opaque data */
35096afc4bafSAnatoly Burakov 		pmc.opaque[CLB_VAL_IDX] = expected_value;
35106afc4bafSAnatoly Burakov 		pmc.opaque[CLB_MASK_IDX] = qe_mask.raw_qe[1];
35116afc4bafSAnatoly Burakov 		/* set up callback */
35126afc4bafSAnatoly Burakov 		pmc.fn = dlb2_monitor_callback;
3513f400ea0bSAnatoly Burakov 		pmc.size = sizeof(uint64_t);
35146a17919bSAnatoly Burakov 
35156a17919bSAnatoly Burakov 		rte_power_monitor(&pmc, timeout + start_ticks);
3516a2e4f1f5STimothy McDaniel 
3517a2e4f1f5STimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
3518a2e4f1f5STimothy McDaniel 	} else {
35197be66a3bSTimothy McDaniel 		uint64_t poll_interval = dlb2->poll_interval;
3520a2e4f1f5STimothy McDaniel 		uint64_t curr_ticks = rte_get_timer_cycles();
3521a2e4f1f5STimothy McDaniel 		uint64_t init_ticks = curr_ticks;
3522a2e4f1f5STimothy McDaniel 
3523a2e4f1f5STimothy McDaniel 		while ((curr_ticks - start_ticks < timeout) &&
3524a2e4f1f5STimothy McDaniel 		       (curr_ticks - init_ticks < poll_interval))
3525a2e4f1f5STimothy McDaniel 			curr_ticks = rte_get_timer_cycles();
3526a2e4f1f5STimothy McDaniel 	}
3527a2e4f1f5STimothy McDaniel 
3528a2e4f1f5STimothy McDaniel 	return 0;
3529a2e4f1f5STimothy McDaniel }
3530a2e4f1f5STimothy McDaniel 
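/*
 * Scalar path: convert up to cnt dequeue QEs into rte_events. QEs with the
 * error bit set are dropped (their CQ token is returned immediately via
 * dlb2_consume_qe_immediate()) and produce no event; hardware queue ids and
 * sched types are remapped to their eventdev equivalents. Returns the number
 * of events written.
 */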
3531000a7b8eSTimothy McDaniel static __rte_noinline int
3532a2e4f1f5STimothy McDaniel dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
3533a2e4f1f5STimothy McDaniel 			 struct dlb2_port *qm_port,
3534a2e4f1f5STimothy McDaniel 			 struct rte_event *events,
3535a2e4f1f5STimothy McDaniel 			 struct dlb2_dequeue_qe *qes,
3536a2e4f1f5STimothy McDaniel 			 int cnt)
3537a2e4f1f5STimothy McDaniel {
3538a2e4f1f5STimothy McDaniel 	uint8_t *qid_mappings = qm_port->qid_mappings;
3539a2e4f1f5STimothy McDaniel 	int i, num, evq_id;
3540a2e4f1f5STimothy McDaniel 
3541a2e4f1f5STimothy McDaniel 	for (i = 0, num = 0; i < cnt; i++) {
3542a2e4f1f5STimothy McDaniel 		struct dlb2_dequeue_qe *qe = &qes[i];
3543a2e4f1f5STimothy McDaniel 		int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
3544a2e4f1f5STimothy McDaniel 			[DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3545a2e4f1f5STimothy McDaniel 			[DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3546a2e4f1f5STimothy McDaniel 			[DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3547a2e4f1f5STimothy McDaniel 			[DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3548a2e4f1f5STimothy McDaniel 		};
3549a2e4f1f5STimothy McDaniel 
3550a2e4f1f5STimothy McDaniel 		/* Fill in event information.
3551a2e4f1f5STimothy McDaniel 		 * Note that flow_id must be embedded in the data by
3552a2e4f1f5STimothy McDaniel 		 * the app, such as the mbuf RSS hash field if the data
3553a2e4f1f5STimothy McDaniel 		 * buffer is a mbuf.
3554a2e4f1f5STimothy McDaniel 		 */
3555a2e4f1f5STimothy McDaniel 		if (unlikely(qe->error)) {
3556f665790aSDavid Marchand 			DLB2_LOG_ERR("QE error bit ON");
3557a2e4f1f5STimothy McDaniel 			DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
3558a2e4f1f5STimothy McDaniel 			dlb2_consume_qe_immediate(qm_port, 1);
3559a2e4f1f5STimothy McDaniel 			continue; /* Ignore */
3560a2e4f1f5STimothy McDaniel 		}
3561a2e4f1f5STimothy McDaniel 
3562a2e4f1f5STimothy McDaniel 		events[num].u64 = qe->data;
3563a2e4f1f5STimothy McDaniel 		events[num].flow_id = qe->flow_id;
3564a2e4f1f5STimothy McDaniel 		events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
3565a2e4f1f5STimothy McDaniel 		events[num].event_type = qe->u.event_type.major;
3566a2e4f1f5STimothy McDaniel 		events[num].sub_event_type = qe->u.event_type.sub;
3567a2e4f1f5STimothy McDaniel 		events[num].sched_type = sched_type_map[qe->sched_type];
35686e2e98d6SAbdullah Sevincer 		events[num].impl_opaque = qm_port->reorder_id++;
35696e2e98d6SAbdullah Sevincer 		RTE_PMD_DLB2_SET_QID_DEPTH(&events[num], qe->qid_depth);
3570a2e4f1f5STimothy McDaniel 
3571a2e4f1f5STimothy McDaniel 		/* qid not preserved for directed queues */
3572a2e4f1f5STimothy McDaniel 		if (qm_port->is_directed)
3573a2e4f1f5STimothy McDaniel 			evq_id = ev_port->link[0].queue_id;
3574a2e4f1f5STimothy McDaniel 		else
3575a2e4f1f5STimothy McDaniel 			evq_id = qid_mappings[qe->qid];
3576a2e4f1f5STimothy McDaniel 
3577a2e4f1f5STimothy McDaniel 		events[num].queue_id = evq_id;
3578a2e4f1f5STimothy McDaniel 		DLB2_INC_STAT(
3579a2e4f1f5STimothy McDaniel 			ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
3580a2e4f1f5STimothy McDaniel 			1);
3581a2e4f1f5STimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
3582a2e4f1f5STimothy McDaniel 		num++;
3583a2e4f1f5STimothy McDaniel 	}
3584a2e4f1f5STimothy McDaniel 
3585a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
3586a2e4f1f5STimothy McDaniel 
3587a2e4f1f5STimothy McDaniel 	return num;
3588a2e4f1f5STimothy McDaniel }
3589a2e4f1f5STimothy McDaniel 
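/*
 * Convert a full cache line of four QEs into four rte_events. If any QE has
 * its error bit set, fall back to the scalar path above. Otherwise each QE is
 * converted with one byte shuffle plus a 32-bit insert carrying the
 * sched_type, queue id, priority, qid depth and the per-port reorder id.
 */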
3590a2e4f1f5STimothy McDaniel static inline int
3591a2e4f1f5STimothy McDaniel dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
3592a2e4f1f5STimothy McDaniel 			      struct dlb2_port *qm_port,
3593a2e4f1f5STimothy McDaniel 			      struct rte_event *events,
3594a2e4f1f5STimothy McDaniel 			      struct dlb2_dequeue_qe *qes)
3595a2e4f1f5STimothy McDaniel {
3596a2e4f1f5STimothy McDaniel 	int sched_type_map[] = {
3597a2e4f1f5STimothy McDaniel 		[DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3598a2e4f1f5STimothy McDaniel 		[DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3599a2e4f1f5STimothy McDaniel 		[DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3600a2e4f1f5STimothy McDaniel 		[DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3601a2e4f1f5STimothy McDaniel 	};
3602a2e4f1f5STimothy McDaniel 	const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
3603a2e4f1f5STimothy McDaniel 	uint8_t *qid_mappings = qm_port->qid_mappings;
3604a2e4f1f5STimothy McDaniel 
3605a2e4f1f5STimothy McDaniel 	/* In the unlikely case that any of the QE error bits are set, process
3606a2e4f1f5STimothy McDaniel 	 * them one at a time.
3607a2e4f1f5STimothy McDaniel 	 */
3608a2e4f1f5STimothy McDaniel 	if (unlikely(qes[0].error || qes[1].error ||
3609a2e4f1f5STimothy McDaniel 		     qes[2].error || qes[3].error))
3610a2e4f1f5STimothy McDaniel 		return dlb2_process_dequeue_qes(ev_port, qm_port, events,
3611a2e4f1f5STimothy McDaniel 						 qes, num_events);
36126e2e98d6SAbdullah Sevincer 	const __m128i qe_to_ev_shuffle =
36136e2e98d6SAbdullah Sevincer 	    _mm_set_epi8(7, 6, 5, 4, 3, 2, 1, 0, /* last 8-bytes = data from first 8 */
36146e2e98d6SAbdullah Sevincer 			 0xFF, 0xFF, 0xFF, 0xFF, /* fill in later as 32-bit value */
36156e2e98d6SAbdullah Sevincer 			 9, 8,			 /* event type and sub-event, + 4 zero bits */
36166e2e98d6SAbdullah Sevincer 			 13, 12 /* flow id, 16 bits */);
36176e2e98d6SAbdullah Sevincer 	for (int i = 0; i < 4; i++) {
36186e2e98d6SAbdullah Sevincer 		const __m128i hw_qe = _mm_load_si128((void *)&qes[i]);
36196e2e98d6SAbdullah Sevincer 		const __m128i event = _mm_shuffle_epi8(hw_qe, qe_to_ev_shuffle);
36206e2e98d6SAbdullah Sevincer 		/* prepare missing 32-bits for op, sched_type, QID, Priority and
36216e2e98d6SAbdullah Sevincer 		 * sequence number in impl_opaque
3622a2e4f1f5STimothy McDaniel 		 */
36236e2e98d6SAbdullah Sevincer 		const uint16_t qid_sched_prio = _mm_extract_epi16(hw_qe, 5);
36246e2e98d6SAbdullah Sevincer 		/* Extract qid_depth and format it as per event header */
36256e2e98d6SAbdullah Sevincer 		const uint8_t qid_depth = (_mm_extract_epi8(hw_qe, 15) & 0x6) << 1;
36266e2e98d6SAbdullah Sevincer 		const uint32_t qid =  (qm_port->is_directed) ? ev_port->link[0].queue_id :
36276e2e98d6SAbdullah Sevincer 					qid_mappings[(uint8_t)qid_sched_prio];
36286e2e98d6SAbdullah Sevincer 		const uint32_t sched_type = sched_type_map[(qid_sched_prio >> 8) & 0x3];
36296e2e98d6SAbdullah Sevincer 		const uint32_t priority = (qid_sched_prio >> 5) & 0xE0;
3630a2e4f1f5STimothy McDaniel 
36316e2e98d6SAbdullah Sevincer 		const uint32_t dword1 = qid_depth |
36326e2e98d6SAbdullah Sevincer 		    sched_type << 6 | qid << 8 | priority << 16 | (qm_port->reorder_id + i) << 24;
3633a2e4f1f5STimothy McDaniel 
36346e2e98d6SAbdullah Sevincer 		/* events[] may not be 16 byte aligned. So use separate load and store */
36356e2e98d6SAbdullah Sevincer 		const __m128i tmpEv = _mm_insert_epi32(event, dword1, 1);
36366e2e98d6SAbdullah Sevincer 		_mm_storeu_si128((__m128i *) &events[i], tmpEv);
36376e2e98d6SAbdullah Sevincer 	}
36386e2e98d6SAbdullah Sevincer 	qm_port->reorder_id += 4;
3639a2e4f1f5STimothy McDaniel 
3640a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3641a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3642a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3643a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3644a2e4f1f5STimothy McDaniel 
3645a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(
3646a2e4f1f5STimothy McDaniel 		ev_port->stats.queue[events[0].queue_id].
3647a2e4f1f5STimothy McDaniel 			qid_depth[qes[0].qid_depth],
3648a2e4f1f5STimothy McDaniel 		1);
3649a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(
3650a2e4f1f5STimothy McDaniel 		ev_port->stats.queue[events[1].queue_id].
3651a2e4f1f5STimothy McDaniel 			qid_depth[qes[1].qid_depth],
3652a2e4f1f5STimothy McDaniel 		1);
3653a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(
3654a2e4f1f5STimothy McDaniel 		ev_port->stats.queue[events[2].queue_id].
3655a2e4f1f5STimothy McDaniel 			qid_depth[qes[2].qid_depth],
3656a2e4f1f5STimothy McDaniel 		1);
3657a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(
3658a2e4f1f5STimothy McDaniel 		ev_port->stats.queue[events[3].queue_id].
3659a2e4f1f5STimothy McDaniel 			qid_depth[qes[3].qid_depth],
3660a2e4f1f5STimothy McDaniel 		1);
3661a2e4f1f5STimothy McDaniel 
3662a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3663a2e4f1f5STimothy McDaniel 
3664a2e4f1f5STimothy McDaniel 	return num_events;
3665a2e4f1f5STimothy McDaniel }
3666a2e4f1f5STimothy McDaniel 
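/*
 * Read the next four QEs from a sparse CQ (one QE per cache line, hence the
 * stride of four CQ slots). The QEs are loaded in reverse order with compiler
 * barriers between loads so that a valid QE[N] implies QEs[0..N-1] are valid
 * too, and the number of valid QEs is derived from their gen bits.
 */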
3667a2e4f1f5STimothy McDaniel static __rte_always_inline int
3668a2e4f1f5STimothy McDaniel dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3669a2e4f1f5STimothy McDaniel {
3670a2e4f1f5STimothy McDaniel 	volatile struct dlb2_dequeue_qe *cq_addr;
3671a2e4f1f5STimothy McDaniel 	uint8_t xor_mask[2] = {0x0F, 0x00};
3672a2e4f1f5STimothy McDaniel 	const uint8_t and_mask = 0x0F;
3673a2e4f1f5STimothy McDaniel 	__m128i *qes = (__m128i *)qe;
3674a2e4f1f5STimothy McDaniel 	uint8_t gen_bits, gen_bit;
3675a2e4f1f5STimothy McDaniel 	uintptr_t addr[4];
3676a2e4f1f5STimothy McDaniel 	uint16_t idx;
3677a2e4f1f5STimothy McDaniel 
3678a2e4f1f5STimothy McDaniel 	cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3679a2e4f1f5STimothy McDaniel 
3680000a7b8eSTimothy McDaniel 	idx = qm_port->cq_idx_unmasked & qm_port->cq_depth_mask;
3681a2e4f1f5STimothy McDaniel 	/* Load the next 4 QEs */
3682a2e4f1f5STimothy McDaniel 	addr[0] = (uintptr_t)&cq_addr[idx];
3683a2e4f1f5STimothy McDaniel 	addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
3684a2e4f1f5STimothy McDaniel 	addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
3685a2e4f1f5STimothy McDaniel 	addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3686a2e4f1f5STimothy McDaniel 
3687a2e4f1f5STimothy McDaniel 	/* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
3688a2e4f1f5STimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3689a2e4f1f5STimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3690a2e4f1f5STimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3691a2e4f1f5STimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3692a2e4f1f5STimothy McDaniel 
3693a2e4f1f5STimothy McDaniel 	/* Correct the xor_mask for wrap-around QEs */
3694a2e4f1f5STimothy McDaniel 	gen_bit = qm_port->gen_bit;
3695a2e4f1f5STimothy McDaniel 	xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
3696a2e4f1f5STimothy McDaniel 	xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
3697a2e4f1f5STimothy McDaniel 	xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3698a2e4f1f5STimothy McDaniel 
3699a2e4f1f5STimothy McDaniel 	/* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3700a2e4f1f5STimothy McDaniel 	 * valid, then QEs[0:N-1] are too.
3701a2e4f1f5STimothy McDaniel 	 */
3702a2e4f1f5STimothy McDaniel 	qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3703a2e4f1f5STimothy McDaniel 	rte_compiler_barrier();
3704a2e4f1f5STimothy McDaniel 	qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3705a2e4f1f5STimothy McDaniel 	rte_compiler_barrier();
3706a2e4f1f5STimothy McDaniel 	qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3707a2e4f1f5STimothy McDaniel 	rte_compiler_barrier();
3708a2e4f1f5STimothy McDaniel 	qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3709a2e4f1f5STimothy McDaniel 
3710a2e4f1f5STimothy McDaniel 	/* Extract and combine the gen bits */
3711a2e4f1f5STimothy McDaniel 	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3712a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3713a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3714a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3715a2e4f1f5STimothy McDaniel 
3716a2e4f1f5STimothy McDaniel 	/* XOR the combined bits such that a 1 represents a valid QE */
3717a2e4f1f5STimothy McDaniel 	gen_bits ^= xor_mask[gen_bit];
3718a2e4f1f5STimothy McDaniel 
3719a2e4f1f5STimothy McDaniel 	/* Mask off gen bits we don't care about */
3720a2e4f1f5STimothy McDaniel 	gen_bits &= and_mask;
3721a2e4f1f5STimothy McDaniel 
37223d4e27fdSDavid Marchand 	return rte_popcount32(gen_bits);
3723a2e4f1f5STimothy McDaniel }
3724a2e4f1f5STimothy McDaniel 
3725a2e4f1f5STimothy McDaniel static inline void
3726000a7b8eSTimothy McDaniel _process_deq_qes_vec_impl(struct dlb2_port *qm_port,
3727000a7b8eSTimothy McDaniel 			  struct rte_event *events,
3728000a7b8eSTimothy McDaniel 			  __m128i v_qe_3,
3729000a7b8eSTimothy McDaniel 			  __m128i v_qe_2,
3730000a7b8eSTimothy McDaniel 			  __m128i v_qe_1,
3731000a7b8eSTimothy McDaniel 			  __m128i v_qe_0,
3732000a7b8eSTimothy McDaniel 			  __m128i v_qe_meta,
3733000a7b8eSTimothy McDaniel 			  __m128i v_qe_status,
3734000a7b8eSTimothy McDaniel 			  uint32_t valid_events)
3735000a7b8eSTimothy McDaniel {
3736000a7b8eSTimothy McDaniel 	/* Look up the event QIDs, using the hardware QIDs to index the
3737000a7b8eSTimothy McDaniel 	 * port's QID mapping.
3738000a7b8eSTimothy McDaniel 	 *
3739000a7b8eSTimothy McDaniel 	 * Each v_qe_[0-3] is just a 16-byte load of the whole QE. It is
3740000a7b8eSTimothy McDaniel 	 * passed along in registers as the QE data is required later.
3741000a7b8eSTimothy McDaniel 	 *
3742000a7b8eSTimothy McDaniel 	 * v_qe_meta is a u32 unpack of all 4x QEs, i.e. it contains one
3743000a7b8eSTimothy McDaniel 	 * 32-bit slice of each QE, so makes up a full SSE register. This
3744000a7b8eSTimothy McDaniel 	 * allows parallel processing of 4x QEs in a single register.
3745000a7b8eSTimothy McDaniel 	 */
3746000a7b8eSTimothy McDaniel 
3747000a7b8eSTimothy McDaniel 	__m128i v_qid_done = {0};
3748000a7b8eSTimothy McDaniel 	int hw_qid0 = _mm_extract_epi8(v_qe_meta, 2);
3749000a7b8eSTimothy McDaniel 	int hw_qid1 = _mm_extract_epi8(v_qe_meta, 6);
3750000a7b8eSTimothy McDaniel 	int hw_qid2 = _mm_extract_epi8(v_qe_meta, 10);
3751000a7b8eSTimothy McDaniel 	int hw_qid3 = _mm_extract_epi8(v_qe_meta, 14);
3752000a7b8eSTimothy McDaniel 
3753000a7b8eSTimothy McDaniel 	int ev_qid0 = qm_port->qid_mappings[hw_qid0];
3754000a7b8eSTimothy McDaniel 	int ev_qid1 = qm_port->qid_mappings[hw_qid1];
3755000a7b8eSTimothy McDaniel 	int ev_qid2 = qm_port->qid_mappings[hw_qid2];
3756000a7b8eSTimothy McDaniel 	int ev_qid3 = qm_port->qid_mappings[hw_qid3];
3757000a7b8eSTimothy McDaniel 
3758d05072fcSTimothy McDaniel 	int hw_sched0 = _mm_extract_epi8(v_qe_meta, 3) & 3ul;
3759d05072fcSTimothy McDaniel 	int hw_sched1 = _mm_extract_epi8(v_qe_meta, 7) & 3ul;
3760d05072fcSTimothy McDaniel 	int hw_sched2 = _mm_extract_epi8(v_qe_meta, 11) & 3ul;
3761d05072fcSTimothy McDaniel 	int hw_sched3 = _mm_extract_epi8(v_qe_meta, 15) & 3ul;
3762d05072fcSTimothy McDaniel 
3763000a7b8eSTimothy McDaniel 	v_qid_done = _mm_insert_epi8(v_qid_done, ev_qid0, 2);
3764000a7b8eSTimothy McDaniel 	v_qid_done = _mm_insert_epi8(v_qid_done, ev_qid1, 6);
3765000a7b8eSTimothy McDaniel 	v_qid_done = _mm_insert_epi8(v_qid_done, ev_qid2, 10);
3766000a7b8eSTimothy McDaniel 	v_qid_done = _mm_insert_epi8(v_qid_done, ev_qid3, 14);
3767000a7b8eSTimothy McDaniel 
3768000a7b8eSTimothy McDaniel 	/* Schedule field remapping using byte shuffle
3769000a7b8eSTimothy McDaniel 	 * - Full byte containing sched field handled here (op, rsvd are zero)
3770000a7b8eSTimothy McDaniel 	 * - Note sanitizing the register requires two masking ANDs:
3771000a7b8eSTimothy McDaniel 	 *   1) to strip prio/msg_type from byte for correct shuffle lookup
3772000a7b8eSTimothy McDaniel 	 *   2) to strip any non-sched-field lanes from any results to OR later
3773000a7b8eSTimothy McDaniel 	 * - Final byte result is >> 10 to another byte-lane inside the u32.
3774000a7b8eSTimothy McDaniel 	 *   This makes the final combination OR easier to make the rte_event.
3775000a7b8eSTimothy McDaniel 	 */
3776000a7b8eSTimothy McDaniel 	__m128i v_sched_done;
3777000a7b8eSTimothy McDaniel 	__m128i v_sched_bits;
3778000a7b8eSTimothy McDaniel 	{
3779000a7b8eSTimothy McDaniel 		static const uint8_t sched_type_map[16] = {
3780000a7b8eSTimothy McDaniel 			[DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
3781000a7b8eSTimothy McDaniel 			[DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
3782000a7b8eSTimothy McDaniel 			[DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
3783000a7b8eSTimothy McDaniel 			[DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
3784000a7b8eSTimothy McDaniel 		};
3785000a7b8eSTimothy McDaniel 		static const uint8_t sched_and_mask[16] = {
3786000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x03,
3787000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x03,
3788000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x03,
3789000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x03,
3790000a7b8eSTimothy McDaniel 		};
37916e2e98d6SAbdullah Sevincer 
37926e2e98d6SAbdullah Sevincer 		static const uint8_t qid_depth_mask[16] = {
37936e2e98d6SAbdullah Sevincer 			0x00, 0x00, 0x00, 0x06,
37946e2e98d6SAbdullah Sevincer 			0x00, 0x00, 0x00, 0x06,
37956e2e98d6SAbdullah Sevincer 			0x00, 0x00, 0x00, 0x06,
37966e2e98d6SAbdullah Sevincer 			0x00, 0x00, 0x00, 0x06,
37976e2e98d6SAbdullah Sevincer 		};
37986e2e98d6SAbdullah Sevincer 		const __m128i v_qid_depth_mask  = _mm_loadu_si128(
37996e2e98d6SAbdullah Sevincer 						  (const __m128i *)qid_depth_mask);
3800000a7b8eSTimothy McDaniel 		const __m128i v_sched_map = _mm_loadu_si128(
3801000a7b8eSTimothy McDaniel 					     (const __m128i *)sched_type_map);
3802000a7b8eSTimothy McDaniel 		__m128i v_sched_mask = _mm_loadu_si128(
3803000a7b8eSTimothy McDaniel 					     (const __m128i *)&sched_and_mask);
3804000a7b8eSTimothy McDaniel 		v_sched_bits = _mm_and_si128(v_qe_meta, v_sched_mask);
3805000a7b8eSTimothy McDaniel 		__m128i v_sched_remapped = _mm_shuffle_epi8(v_sched_map,
3806000a7b8eSTimothy McDaniel 							    v_sched_bits);
3807000a7b8eSTimothy McDaniel 		__m128i v_preshift = _mm_and_si128(v_sched_remapped,
3808000a7b8eSTimothy McDaniel 						   v_sched_mask);
3809000a7b8eSTimothy McDaniel 		v_sched_done = _mm_srli_epi32(v_preshift, 10);
38106e2e98d6SAbdullah Sevincer 		__m128i v_qid_depth =  _mm_and_si128(v_qe_status, v_qid_depth_mask);
38116e2e98d6SAbdullah Sevincer 		v_qid_depth = _mm_srli_epi32(v_qid_depth, 15);
38126e2e98d6SAbdullah Sevincer 		v_sched_done = _mm_or_si128(v_sched_done, v_qid_depth);
3813000a7b8eSTimothy McDaniel 	}
3814000a7b8eSTimothy McDaniel 
3815000a7b8eSTimothy McDaniel 	/* Priority handling
3816000a7b8eSTimothy McDaniel 	 * - QE provides 3 bits of priority
3817000a7b8eSTimothy McDaniel 	 * - Shift << 3 to move to MSBs for byte-prio in rte_event
3818000a7b8eSTimothy McDaniel 	 * - Mask bits to avoid pollution, leaving only 3 prio MSBs in reg
3819000a7b8eSTimothy McDaniel 	 */
3820000a7b8eSTimothy McDaniel 	__m128i v_prio_done;
3821000a7b8eSTimothy McDaniel 	{
3822000a7b8eSTimothy McDaniel 		static const uint8_t prio_mask[16] = {
3823000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x07 << 5,
3824000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x07 << 5,
3825000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x07 << 5,
3826000a7b8eSTimothy McDaniel 			0x00, 0x00, 0x00, 0x07 << 5,
3827000a7b8eSTimothy McDaniel 		};
3828000a7b8eSTimothy McDaniel 		__m128i v_prio_mask  = _mm_loadu_si128(
3829000a7b8eSTimothy McDaniel 						(const __m128i *)prio_mask);
3830000a7b8eSTimothy McDaniel 		__m128i v_prio_shifted = _mm_slli_epi32(v_qe_meta, 3);
3831000a7b8eSTimothy McDaniel 		v_prio_done = _mm_and_si128(v_prio_shifted, v_prio_mask);
3832000a7b8eSTimothy McDaniel 	}
3833000a7b8eSTimothy McDaniel 
3834000a7b8eSTimothy McDaniel 	/* Event Sub/Type handling:
3835000a7b8eSTimothy McDaniel 	 * we want to keep the lower 12 bits of each QE. Shift up by 20 bits
3836000a7b8eSTimothy McDaniel 	 * to get the sub/ev type data into rte_event location, clearing the
3837000a7b8eSTimothy McDaniel 	 * lower 20 bits in the process.
3838000a7b8eSTimothy McDaniel 	 */
3839000a7b8eSTimothy McDaniel 	__m128i v_types_done;
3840000a7b8eSTimothy McDaniel 	{
3841000a7b8eSTimothy McDaniel 		static const uint8_t event_mask[16] = {
3842000a7b8eSTimothy McDaniel 			0x0f, 0x00, 0x00, 0x00,
3843000a7b8eSTimothy McDaniel 			0x0f, 0x00, 0x00, 0x00,
3844000a7b8eSTimothy McDaniel 			0x0f, 0x00, 0x00, 0x00,
3845000a7b8eSTimothy McDaniel 			0x0f, 0x00, 0x00, 0x00,
3846000a7b8eSTimothy McDaniel 		};
3847000a7b8eSTimothy McDaniel 		static const uint8_t sub_event_mask[16] = {
3848000a7b8eSTimothy McDaniel 			0xff, 0x00, 0x00, 0x00,
3849000a7b8eSTimothy McDaniel 			0xff, 0x00, 0x00, 0x00,
3850000a7b8eSTimothy McDaniel 			0xff, 0x00, 0x00, 0x00,
3851000a7b8eSTimothy McDaniel 			0xff, 0x00, 0x00, 0x00,
3852000a7b8eSTimothy McDaniel 		};
3853000a7b8eSTimothy McDaniel 		static const uint8_t flow_mask[16] = {
3854000a7b8eSTimothy McDaniel 			0xff, 0xff, 0x00, 0x00,
3855000a7b8eSTimothy McDaniel 			0xff, 0xff, 0x00, 0x00,
3856000a7b8eSTimothy McDaniel 			0xff, 0xff, 0x00, 0x00,
3857000a7b8eSTimothy McDaniel 			0xff, 0xff, 0x00, 0x00,
3858000a7b8eSTimothy McDaniel 		};
3859000a7b8eSTimothy McDaniel 		__m128i v_event_mask  = _mm_loadu_si128(
3860000a7b8eSTimothy McDaniel 					(const __m128i *)event_mask);
3861000a7b8eSTimothy McDaniel 		__m128i v_sub_event_mask  = _mm_loadu_si128(
3862000a7b8eSTimothy McDaniel 					(const __m128i *)sub_event_mask);
3863000a7b8eSTimothy McDaniel 		__m128i v_flow_mask  = _mm_loadu_si128(
3864000a7b8eSTimothy McDaniel 				       (const __m128i *)flow_mask);
38656e2e98d6SAbdullah Sevincer 		__m128i v_sub = _mm_srli_epi32(v_qe_meta, 4);
3866000a7b8eSTimothy McDaniel 		v_sub = _mm_and_si128(v_sub, v_sub_event_mask);
38676e2e98d6SAbdullah Sevincer 		__m128i v_type = _mm_srli_epi32(v_qe_meta, 12);
38686e2e98d6SAbdullah Sevincer 		v_type = _mm_and_si128(v_type, v_event_mask);
3869000a7b8eSTimothy McDaniel 		v_type = _mm_slli_epi32(v_type, 8);
3870000a7b8eSTimothy McDaniel 		v_types_done = _mm_or_si128(v_type, v_sub);
3871000a7b8eSTimothy McDaniel 		v_types_done = _mm_slli_epi32(v_types_done, 20);
3872000a7b8eSTimothy McDaniel 		__m128i v_flow = _mm_and_si128(v_qe_status, v_flow_mask);
3873000a7b8eSTimothy McDaniel 		v_types_done = _mm_or_si128(v_types_done, v_flow);
3874000a7b8eSTimothy McDaniel 	}
3875000a7b8eSTimothy McDaniel 
3876000a7b8eSTimothy McDaniel 	/* Combine QID, Sched and Prio fields, then Shift >> 8 bits to align
3877000a7b8eSTimothy McDaniel 	 * with the rte_event, allowing unpacks to move/blend with payload.
3878000a7b8eSTimothy McDaniel 	 */
3879000a7b8eSTimothy McDaniel 	__m128i v_q_s_p_done;
3880000a7b8eSTimothy McDaniel 	{
3881000a7b8eSTimothy McDaniel 		__m128i v_qid_sched = _mm_or_si128(v_qid_done, v_sched_done);
3882000a7b8eSTimothy McDaniel 		__m128i v_q_s_prio = _mm_or_si128(v_qid_sched, v_prio_done);
3883000a7b8eSTimothy McDaniel 		v_q_s_p_done = _mm_srli_epi32(v_q_s_prio, 8);
3884000a7b8eSTimothy McDaniel 	}
3885000a7b8eSTimothy McDaniel 
3886000a7b8eSTimothy McDaniel 	__m128i v_unpk_ev_23, v_unpk_ev_01, v_ev_2, v_ev_3, v_ev_0, v_ev_1;
3887000a7b8eSTimothy McDaniel 
3888000a7b8eSTimothy McDaniel 	/* Unpack evs into u64 metadata, then indiv events */
3889000a7b8eSTimothy McDaniel 	v_unpk_ev_23 = _mm_unpackhi_epi32(v_types_done, v_q_s_p_done);
3890000a7b8eSTimothy McDaniel 	v_unpk_ev_01 = _mm_unpacklo_epi32(v_types_done, v_q_s_p_done);
3891000a7b8eSTimothy McDaniel 
3892000a7b8eSTimothy McDaniel 	switch (valid_events) {
3893000a7b8eSTimothy McDaniel 	case 4:
3894000a7b8eSTimothy McDaniel 		v_ev_3 = _mm_blend_epi16(v_unpk_ev_23, v_qe_3, 0x0F);
3895000a7b8eSTimothy McDaniel 		v_ev_3 = _mm_alignr_epi8(v_ev_3, v_ev_3, 8);
38966e2e98d6SAbdullah Sevincer 		v_ev_3 = _mm_insert_epi8(v_ev_3, qm_port->reorder_id + 3, 7);
3897000a7b8eSTimothy McDaniel 		_mm_storeu_si128((__m128i *)&events[3], v_ev_3);
3898d05072fcSTimothy McDaniel 		DLB2_INC_STAT(qm_port->ev_port->stats.rx_sched_cnt[hw_sched3],
3899d05072fcSTimothy McDaniel 			      1);
3900000a7b8eSTimothy McDaniel 		/* fallthrough */
3901000a7b8eSTimothy McDaniel 	case 3:
3902000a7b8eSTimothy McDaniel 		v_ev_2 = _mm_unpacklo_epi64(v_unpk_ev_23, v_qe_2);
39036e2e98d6SAbdullah Sevincer 		v_ev_2 = _mm_insert_epi8(v_ev_2, qm_port->reorder_id + 2, 7);
3904000a7b8eSTimothy McDaniel 		_mm_storeu_si128((__m128i *)&events[2], v_ev_2);
3905d05072fcSTimothy McDaniel 		DLB2_INC_STAT(qm_port->ev_port->stats.rx_sched_cnt[hw_sched2],
3906d05072fcSTimothy McDaniel 			      1);
3907000a7b8eSTimothy McDaniel 		/* fallthrough */
3908000a7b8eSTimothy McDaniel 	case 2:
3909000a7b8eSTimothy McDaniel 		v_ev_1 = _mm_blend_epi16(v_unpk_ev_01, v_qe_1, 0x0F);
3910000a7b8eSTimothy McDaniel 		v_ev_1 = _mm_alignr_epi8(v_ev_1, v_ev_1, 8);
39116e2e98d6SAbdullah Sevincer 		v_ev_1 = _mm_insert_epi8(v_ev_1, qm_port->reorder_id + 1, 7);
3912000a7b8eSTimothy McDaniel 		_mm_storeu_si128((__m128i *)&events[1], v_ev_1);
3913d05072fcSTimothy McDaniel 		DLB2_INC_STAT(qm_port->ev_port->stats.rx_sched_cnt[hw_sched1],
3914d05072fcSTimothy McDaniel 			      1);
3915000a7b8eSTimothy McDaniel 		/* fallthrough */
3916000a7b8eSTimothy McDaniel 	case 1:
3917000a7b8eSTimothy McDaniel 		v_ev_0 = _mm_unpacklo_epi64(v_unpk_ev_01, v_qe_0);
39186e2e98d6SAbdullah Sevincer 		v_ev_0 = _mm_insert_epi8(v_ev_0, qm_port->reorder_id, 7);
3919000a7b8eSTimothy McDaniel 		_mm_storeu_si128((__m128i *)&events[0], v_ev_0);
3920d05072fcSTimothy McDaniel 		DLB2_INC_STAT(qm_port->ev_port->stats.rx_sched_cnt[hw_sched0],
3921d05072fcSTimothy McDaniel 			      1);
3922000a7b8eSTimothy McDaniel 	}
39236e2e98d6SAbdullah Sevincer 	qm_port->reorder_id += valid_events;
3924000a7b8eSTimothy McDaniel }
3925000a7b8eSTimothy McDaniel 
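/*
 * Vectorized sparse-CQ receive: load four QEs, use the rolling gen-bit mask
 * to count how many are new, rotate the 128-bit rolling mask by that count,
 * prefetch the following cache lines, and convert the valid QEs directly
 * into rte_events via _process_deq_qes_vec_impl().
 */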
3926000a7b8eSTimothy McDaniel static __rte_always_inline int
3927000a7b8eSTimothy McDaniel dlb2_recv_qe_sparse_vec(struct dlb2_port *qm_port, void *events,
3928000a7b8eSTimothy McDaniel 			uint32_t max_events)
3929000a7b8eSTimothy McDaniel {
3930000a7b8eSTimothy McDaniel 	/* Using unmasked idx for perf, and masking manually */
3931000a7b8eSTimothy McDaniel 	uint16_t idx = qm_port->cq_idx_unmasked;
3932000a7b8eSTimothy McDaniel 	volatile struct dlb2_dequeue_qe *cq_addr;
3933000a7b8eSTimothy McDaniel 
3934000a7b8eSTimothy McDaniel 	cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3935000a7b8eSTimothy McDaniel 
3936000a7b8eSTimothy McDaniel 	uintptr_t qe_ptr_3 = (uintptr_t)&cq_addr[(idx + 12) &
3937000a7b8eSTimothy McDaniel 						 qm_port->cq_depth_mask];
3938000a7b8eSTimothy McDaniel 	uintptr_t qe_ptr_2 = (uintptr_t)&cq_addr[(idx +  8) &
3939000a7b8eSTimothy McDaniel 						 qm_port->cq_depth_mask];
3940000a7b8eSTimothy McDaniel 	uintptr_t qe_ptr_1 = (uintptr_t)&cq_addr[(idx +  4) &
3941000a7b8eSTimothy McDaniel 						 qm_port->cq_depth_mask];
3942000a7b8eSTimothy McDaniel 	uintptr_t qe_ptr_0 = (uintptr_t)&cq_addr[(idx +  0) &
3943000a7b8eSTimothy McDaniel 						 qm_port->cq_depth_mask];
3944000a7b8eSTimothy McDaniel 
3945000a7b8eSTimothy McDaniel 	/* Load QEs from CQ: use compiler barriers to avoid load reordering */
3946000a7b8eSTimothy McDaniel 	__m128i v_qe_3 = _mm_loadu_si128((const __m128i *)qe_ptr_3);
3947000a7b8eSTimothy McDaniel 	rte_compiler_barrier();
3948000a7b8eSTimothy McDaniel 	__m128i v_qe_2 = _mm_loadu_si128((const __m128i *)qe_ptr_2);
3949000a7b8eSTimothy McDaniel 	rte_compiler_barrier();
3950000a7b8eSTimothy McDaniel 	__m128i v_qe_1 = _mm_loadu_si128((const __m128i *)qe_ptr_1);
3951000a7b8eSTimothy McDaniel 	rte_compiler_barrier();
3952000a7b8eSTimothy McDaniel 	__m128i v_qe_0 = _mm_loadu_si128((const __m128i *)qe_ptr_0);
3953000a7b8eSTimothy McDaniel 
3954000a7b8eSTimothy McDaniel 	/* Generate the pkt_shuffle mask;
3955000a7b8eSTimothy McDaniel 	 * - Avoids load in otherwise load-heavy section of code
3956000a7b8eSTimothy McDaniel 	 * - Moves bytes 3,7,11,15 (gen bit bytes) to LSB bytes in XMM
3957000a7b8eSTimothy McDaniel 	 */
3958000a7b8eSTimothy McDaniel 	const uint32_t stat_shuf_bytes = (15 << 24) | (11 << 16) | (7 << 8) | 3;
3959000a7b8eSTimothy McDaniel 	__m128i v_zeros = _mm_setzero_si128();
3960000a7b8eSTimothy McDaniel 	__m128i v_ffff = _mm_cmpeq_epi8(v_zeros, v_zeros);
3961000a7b8eSTimothy McDaniel 	__m128i v_stat_shuf_mask = _mm_insert_epi32(v_ffff, stat_shuf_bytes, 0);
3962000a7b8eSTimothy McDaniel 
3963000a7b8eSTimothy McDaniel 	/* Extract u32 components required from the QE
3964000a7b8eSTimothy McDaniel 	 * - QE[64 to 95 ] for metadata (qid, sched, prio, event type, ...)
3965000a7b8eSTimothy McDaniel 	 * - QE[96 to 127] for status (cq gen bit, error)
3966000a7b8eSTimothy McDaniel 	 *
3967000a7b8eSTimothy McDaniel 	 * Note that stage 1 of the unpacking is re-used for both u32 extracts
3968000a7b8eSTimothy McDaniel 	 */
3969000a7b8eSTimothy McDaniel 	__m128i v_qe_02 = _mm_unpackhi_epi32(v_qe_0, v_qe_2);
3970000a7b8eSTimothy McDaniel 	__m128i v_qe_13 = _mm_unpackhi_epi32(v_qe_1, v_qe_3);
3971000a7b8eSTimothy McDaniel 	__m128i v_qe_status = _mm_unpackhi_epi32(v_qe_02, v_qe_13);
3972000a7b8eSTimothy McDaniel 	__m128i v_qe_meta   = _mm_unpacklo_epi32(v_qe_02, v_qe_13);
3973000a7b8eSTimothy McDaniel 
3974000a7b8eSTimothy McDaniel 	/* Status byte (gen_bit, error) handling:
3975000a7b8eSTimothy McDaniel 	 * - Shuffle to lanes 0,1,2,3, clear all others
3976000a7b8eSTimothy McDaniel 	 * - Shift right by 7 for gen bit to MSB, movemask to scalar
3977000a7b8eSTimothy McDaniel 	 * - Shift right by 2 for error bit to MSB, movemask to scalar
3978000a7b8eSTimothy McDaniel 	 */
3979000a7b8eSTimothy McDaniel 	__m128i v_qe_shuffled = _mm_shuffle_epi8(v_qe_status, v_stat_shuf_mask);
3980000a7b8eSTimothy McDaniel 	__m128i v_qes_shift_gen_bit = _mm_slli_epi32(v_qe_shuffled, 7);
3981000a7b8eSTimothy McDaniel 	int32_t qe_gen_bits = _mm_movemask_epi8(v_qes_shift_gen_bit) & 0xf;
3982000a7b8eSTimothy McDaniel 
3983000a7b8eSTimothy McDaniel 	/* Expected vs Reality of QE Gen bits
3984000a7b8eSTimothy McDaniel 	 * - cq_rolling_mask provides expected bits
3985000a7b8eSTimothy McDaniel 	 * - QE loads, unpacks/shuffle and movemask provides reality
3986000a7b8eSTimothy McDaniel 	 * - XOR of the two gives bitmask of new packets
3987000a7b8eSTimothy McDaniel 	 * - POPCNT to get the number of new events
3988000a7b8eSTimothy McDaniel 	 */
3989000a7b8eSTimothy McDaniel 	uint64_t rolling = qm_port->cq_rolling_mask & 0xF;
3990000a7b8eSTimothy McDaniel 	uint64_t qe_xor_bits = (qe_gen_bits ^ rolling);
39913d4e27fdSDavid Marchand 	uint32_t count_new = rte_popcount32(qe_xor_bits);
3992000a7b8eSTimothy McDaniel 	count_new = RTE_MIN(count_new, max_events);
3993000a7b8eSTimothy McDaniel 	if (!count_new)
3994000a7b8eSTimothy McDaniel 		return 0;
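	/* Worked example (illustrative): if the low nibble of cq_rolling_mask is
	 * 0b0000 and the gen bits read back are 0b0011, the XOR marks QE0 and QE1
	 * as new and rte_popcount32() yields count_new = 2.  New QEs always fill
	 * from the lowest slot, so the set bits are contiguous from bit 0.
	 */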
3995000a7b8eSTimothy McDaniel 
3996000a7b8eSTimothy McDaniel 	/* emulate a 128 bit rotate using 2x 64-bit numbers and bit-shifts */
3997000a7b8eSTimothy McDaniel 
3998000a7b8eSTimothy McDaniel 	uint64_t m_rshift = qm_port->cq_rolling_mask >> count_new;
3999000a7b8eSTimothy McDaniel 	uint64_t m_lshift = qm_port->cq_rolling_mask << (64 - count_new);
4000000a7b8eSTimothy McDaniel 	uint64_t m2_rshift = qm_port->cq_rolling_mask_2 >> count_new;
4001000a7b8eSTimothy McDaniel 	uint64_t m2_lshift = qm_port->cq_rolling_mask_2 << (64 - count_new);
4002000a7b8eSTimothy McDaniel 
4003000a7b8eSTimothy McDaniel 	/* shifted out of m2 into MSB of m */
4004000a7b8eSTimothy McDaniel 	qm_port->cq_rolling_mask = (m_rshift | m2_lshift);
4005000a7b8eSTimothy McDaniel 
4006000a7b8eSTimothy McDaniel 	/* shifted out of m "looped back" into MSB of m2 */
4007000a7b8eSTimothy McDaniel 	qm_port->cq_rolling_mask_2 = (m2_rshift | m_lshift);
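	/* A minimal sketch (not driver code) of the 128-bit right-rotate formed
	 * above from the {cq_rolling_mask_2 : cq_rolling_mask} pair; valid for
	 * 0 < n < 64, which holds here because count_new is between 1 and 4:
	 *
	 *	static inline void rot128_right(uint64_t *lo, uint64_t *hi, unsigned int n)
	 *	{
	 *		uint64_t lo_r = *lo >> n, lo_l = *lo << (64 - n);
	 *		uint64_t hi_r = *hi >> n, hi_l = *hi << (64 - n);
	 *
	 *		*lo = lo_r | hi_l; // bits leaving hi enter the MSBs of lo
	 *		*hi = hi_r | lo_l; // bits leaving lo wrap into the MSBs of hi
	 *	}
	 */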
4008000a7b8eSTimothy McDaniel 
4009000a7b8eSTimothy McDaniel 	/* Prefetch the next QEs - overlaps with the work above (extra IPC) rather than costing stall cycles */
4010000a7b8eSTimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
4011000a7b8eSTimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
4012000a7b8eSTimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
4013000a7b8eSTimothy McDaniel 	rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
4014000a7b8eSTimothy McDaniel 
4015000a7b8eSTimothy McDaniel 	/* Convert QEs from XMM regs to events and store events directly */
4016000a7b8eSTimothy McDaniel 	_process_deq_qes_vec_impl(qm_port, events, v_qe_3, v_qe_2, v_qe_1,
4017000a7b8eSTimothy McDaniel 				  v_qe_0, v_qe_meta, v_qe_status, count_new);
4018000a7b8eSTimothy McDaniel 
4019000a7b8eSTimothy McDaniel 	return count_new;
4020000a7b8eSTimothy McDaniel }
4021000a7b8eSTimothy McDaniel 
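/* Illustrative note: cq_idx_unmasked counts consumed QEs, cq_idx is that count
 * modulo the CQ depth, and gen_bit toggles on every wrap of the ring.  For
 * example, assuming cq_depth = 8 (so gen_bit_shift = 3), indexes 0-7 give
 * gen_bit = 1, 8-15 give gen_bit = 0, 16-23 give gen_bit = 1, and so on.
 */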
4022000a7b8eSTimothy McDaniel static inline void
4023a2e4f1f5STimothy McDaniel dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
4024a2e4f1f5STimothy McDaniel {
4025a2e4f1f5STimothy McDaniel 	uint16_t idx = qm_port->cq_idx_unmasked + cnt;
4026a2e4f1f5STimothy McDaniel 
4027a2e4f1f5STimothy McDaniel 	qm_port->cq_idx_unmasked = idx;
4028a2e4f1f5STimothy McDaniel 	qm_port->cq_idx = idx & qm_port->cq_depth_mask;
4029a2e4f1f5STimothy McDaniel 	qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
4030a2e4f1f5STimothy McDaniel }
4031a2e4f1f5STimothy McDaniel 
4032a2e4f1f5STimothy McDaniel static inline int16_t
4033a2e4f1f5STimothy McDaniel dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
4034a2e4f1f5STimothy McDaniel 		       struct dlb2_eventdev_port *ev_port,
4035a2e4f1f5STimothy McDaniel 		       struct rte_event *events,
4036a2e4f1f5STimothy McDaniel 		       uint16_t max_num,
4037a2e4f1f5STimothy McDaniel 		       uint64_t dequeue_timeout_ticks)
4038a2e4f1f5STimothy McDaniel {
4039a2e4f1f5STimothy McDaniel 	uint64_t start_ticks = 0ULL;
4040a2e4f1f5STimothy McDaniel 	struct dlb2_port *qm_port;
4041a2e4f1f5STimothy McDaniel 	int num = 0;
4042000a7b8eSTimothy McDaniel 	bool use_scalar;
4043000a7b8eSTimothy McDaniel 	uint64_t timeout;
4044a2e4f1f5STimothy McDaniel 
4045a2e4f1f5STimothy McDaniel 	qm_port = &ev_port->qm_port;
4046000a7b8eSTimothy McDaniel 	use_scalar = qm_port->use_scalar;
4047a2e4f1f5STimothy McDaniel 
4048a2e4f1f5STimothy McDaniel 	if (!dlb2->global_dequeue_wait)
4049a2e4f1f5STimothy McDaniel 		timeout = dequeue_timeout_ticks;
4050a2e4f1f5STimothy McDaniel 	else
4051a2e4f1f5STimothy McDaniel 		timeout = dlb2->global_dequeue_wait_ticks;
4052a2e4f1f5STimothy McDaniel 
40534a3b9b02SBruce Richardson 	if (timeout != 0)
4054a2e4f1f5STimothy McDaniel 		start_ticks = rte_get_timer_cycles();
4055a2e4f1f5STimothy McDaniel 
4056000a7b8eSTimothy McDaniel 	use_scalar = use_scalar || (max_num & 0x3);
4057000a7b8eSTimothy McDaniel 
4058a2e4f1f5STimothy McDaniel 	while (num < max_num) {
4059a2e4f1f5STimothy McDaniel 		struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
4060a2e4f1f5STimothy McDaniel 		int num_avail;
4061e697f35dSTimothy McDaniel 
4062000a7b8eSTimothy McDaniel 		if (use_scalar) {
4063e697f35dSTimothy McDaniel 			int n_iter = 0;
4064e697f35dSTimothy McDaniel 			uint64_t m_rshift, m_lshift, m2_rshift, m2_lshift;
4065e697f35dSTimothy McDaniel 
4066a2e4f1f5STimothy McDaniel 			num_avail = dlb2_recv_qe_sparse(qm_port, qes);
4067a2e4f1f5STimothy McDaniel 			num_avail = RTE_MIN(num_avail, max_num - num);
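			/* Sparse CQ mode leaves one QE per cache line (4 QE
			 * slots), so advance the CQ index by 4 per received QE.
			 */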
4068a2e4f1f5STimothy McDaniel 			dlb2_inc_cq_idx(qm_port, num_avail << 2);
4069a2e4f1f5STimothy McDaniel 			if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
4070e697f35dSTimothy McDaniel 				n_iter = dlb2_process_dequeue_four_qes(ev_port,
4071a2e4f1f5STimothy McDaniel 								qm_port,
4072a2e4f1f5STimothy McDaniel 								&events[num],
4073a2e4f1f5STimothy McDaniel 								&qes[0]);
4074a2e4f1f5STimothy McDaniel 			else if (num_avail)
4075e697f35dSTimothy McDaniel 				n_iter = dlb2_process_dequeue_qes(ev_port,
4076a2e4f1f5STimothy McDaniel 								qm_port,
4077a2e4f1f5STimothy McDaniel 								&events[num],
4078a2e4f1f5STimothy McDaniel 								&qes[0],
4079a2e4f1f5STimothy McDaniel 								num_avail);
40808eb6881cSTimothy McDaniel 			if (n_iter != 0) {
4081e697f35dSTimothy McDaniel 				num += n_iter;
4082e697f35dSTimothy McDaniel 				/* update rolling_mask for vector code support */
4083e697f35dSTimothy McDaniel 				m_rshift = qm_port->cq_rolling_mask >> n_iter;
4084e697f35dSTimothy McDaniel 				m_lshift = qm_port->cq_rolling_mask << (64 - n_iter);
4085e697f35dSTimothy McDaniel 				m2_rshift = qm_port->cq_rolling_mask_2 >> n_iter;
4086e697f35dSTimothy McDaniel 				m2_lshift = qm_port->cq_rolling_mask_2 <<
4087e697f35dSTimothy McDaniel 					(64 - n_iter);
4088e697f35dSTimothy McDaniel 				qm_port->cq_rolling_mask = (m_rshift | m2_lshift);
4089e697f35dSTimothy McDaniel 				qm_port->cq_rolling_mask_2 = (m2_rshift | m_lshift);
40908eb6881cSTimothy McDaniel 			}
4091000a7b8eSTimothy McDaniel 		} else { /* !use_scalar */
4092000a7b8eSTimothy McDaniel 			num_avail = dlb2_recv_qe_sparse_vec(qm_port,
4093000a7b8eSTimothy McDaniel 							    &events[num],
4094000a7b8eSTimothy McDaniel 							    max_num - num);
4095000a7b8eSTimothy McDaniel 			dlb2_inc_cq_idx(qm_port, num_avail << 2);
4096e697f35dSTimothy McDaniel 			num += num_avail;
4097000a7b8eSTimothy McDaniel 			DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_avail);
4098000a7b8eSTimothy McDaniel 		}
4099000a7b8eSTimothy McDaniel 		if (!num_avail) {
4100e697f35dSTimothy McDaniel 			if ((timeout == 0) || (num > 0))
4101e697f35dSTimothy McDaniel 				/* Not waiting in any form, or 1+ events received */
4102a2e4f1f5STimothy McDaniel 				break;
4103a2e4f1f5STimothy McDaniel 			else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
4104a2e4f1f5STimothy McDaniel 						   timeout, start_ticks))
4105a2e4f1f5STimothy McDaniel 				break;
4106a2e4f1f5STimothy McDaniel 		}
4107000a7b8eSTimothy McDaniel 	}
4108a2e4f1f5STimothy McDaniel 
4109a2e4f1f5STimothy McDaniel 	qm_port->owed_tokens += num;
4110a2e4f1f5STimothy McDaniel 
4111a2e4f1f5STimothy McDaniel 	if (num) {
4112c667583dSTimothy McDaniel 		if (qm_port->token_pop_mode == AUTO_POP)
4113a2e4f1f5STimothy McDaniel 			dlb2_consume_qe_immediate(qm_port, num);
4114a2e4f1f5STimothy McDaniel 
4115a2e4f1f5STimothy McDaniel 		ev_port->outstanding_releases += num;
4116a2e4f1f5STimothy McDaniel 
4117a2e4f1f5STimothy McDaniel 		dlb2_port_credits_inc(qm_port, num);
4118a2e4f1f5STimothy McDaniel 	}
4119a2e4f1f5STimothy McDaniel 
4120a2e4f1f5STimothy McDaniel 	return num;
4121a2e4f1f5STimothy McDaniel }
4122a2e4f1f5STimothy McDaniel 
4123a2e4f1f5STimothy McDaniel static __rte_always_inline int
4124a2e4f1f5STimothy McDaniel dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
4125a2e4f1f5STimothy McDaniel 	     uint8_t *offset)
4126a2e4f1f5STimothy McDaniel {
4127a2e4f1f5STimothy McDaniel 	uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
4128a2e4f1f5STimothy McDaniel 				   {0x00, 0x01, 0x03, 0x07} };
4129a2e4f1f5STimothy McDaniel 	uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
4130a2e4f1f5STimothy McDaniel 	volatile struct dlb2_dequeue_qe *cq_addr;
4131a2e4f1f5STimothy McDaniel 	__m128i *qes = (__m128i *)qe;
4132a2e4f1f5STimothy McDaniel 	uint64_t *cache_line_base;
4133a2e4f1f5STimothy McDaniel 	uint8_t gen_bits;
4134a2e4f1f5STimothy McDaniel 
4135a2e4f1f5STimothy McDaniel 	cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
4136a2e4f1f5STimothy McDaniel 	cq_addr = &cq_addr[qm_port->cq_idx];
4137a2e4f1f5STimothy McDaniel 
4138a2e4f1f5STimothy McDaniel 	cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
4139a2e4f1f5STimothy McDaniel 	*offset = ((uintptr_t)cq_addr & 0x30) >> 4;
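	/* Example (assuming the CQ base is cache-line aligned): a cq_idx of 6
	 * points 0x60 bytes into the ring of 16 B QEs, cache_line_base masks that
	 * down to offset 0x40, and *offset = 2, i.e. the first valid QE is the
	 * third slot of the cache line loaded below.
	 */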
4140a2e4f1f5STimothy McDaniel 
4141a2e4f1f5STimothy McDaniel 	/* Load the next CQ cache line from memory. Pack these reads as tight
4142a2e4f1f5STimothy McDaniel 	 * as possible to reduce the chance that DLB invalidates the line while
4143a2e4f1f5STimothy McDaniel 	 * the CPU is reading it. Read the cache line backwards to ensure that
4144a2e4f1f5STimothy McDaniel 	 * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
4145a2e4f1f5STimothy McDaniel 	 *
4146a2e4f1f5STimothy McDaniel 	 * (Valid QEs start at &qe[offset])
4147a2e4f1f5STimothy McDaniel 	 */
4148a2e4f1f5STimothy McDaniel 	qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
4149a2e4f1f5STimothy McDaniel 	qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
4150a2e4f1f5STimothy McDaniel 	qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
4151a2e4f1f5STimothy McDaniel 	qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
4152a2e4f1f5STimothy McDaniel 
4153a2e4f1f5STimothy McDaniel 	/* Evict the cache line ASAP */
4154a2e4f1f5STimothy McDaniel 	rte_cldemote(cache_line_base);
4155a2e4f1f5STimothy McDaniel 
4156a2e4f1f5STimothy McDaniel 	/* Extract and combine the gen bits */
4157a2e4f1f5STimothy McDaniel 	gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
4158a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
4159a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
4160a2e4f1f5STimothy McDaniel 		   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
4161a2e4f1f5STimothy McDaniel 
4162a2e4f1f5STimothy McDaniel 	/* XOR the combined bits such that a 1 represents a valid QE */
4163a2e4f1f5STimothy McDaniel 	gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
4164a2e4f1f5STimothy McDaniel 
4165a2e4f1f5STimothy McDaniel 	/* Mask off gen bits we don't care about */
4166a2e4f1f5STimothy McDaniel 	gen_bits &= and_mask[*offset];
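	/* Worked example (illustrative): with gen_bit == 1 and *offset == 1,
	 * suppose slots 1 and 2 hold new QEs (gen bit 1) and slot 3 is stale
	 * (gen bit 0), so the extracted bits are 0b011X.  XOR with
	 * xor_mask[1][1] = 0x01 and AND with and_mask[1] = 0x0E leave 0b0110,
	 * and the popcount reports 2 valid QEs regardless of stale bit X.
	 */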
4167a2e4f1f5STimothy McDaniel 
41683d4e27fdSDavid Marchand 	return rte_popcount32(gen_bits);
4169a2e4f1f5STimothy McDaniel }
4170a2e4f1f5STimothy McDaniel 
4171a2e4f1f5STimothy McDaniel static inline int16_t
4172a2e4f1f5STimothy McDaniel dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
4173a2e4f1f5STimothy McDaniel 		struct dlb2_eventdev_port *ev_port,
4174a2e4f1f5STimothy McDaniel 		struct rte_event *events,
4175a2e4f1f5STimothy McDaniel 		uint16_t max_num,
4176a2e4f1f5STimothy McDaniel 		uint64_t dequeue_timeout_ticks)
4177a2e4f1f5STimothy McDaniel {
4178a2e4f1f5STimothy McDaniel 	uint64_t timeout;
4179a2e4f1f5STimothy McDaniel 	uint64_t start_ticks = 0ULL;
4180a2e4f1f5STimothy McDaniel 	struct dlb2_port *qm_port;
4181a2e4f1f5STimothy McDaniel 	int num = 0;
4182a2e4f1f5STimothy McDaniel 
4183a2e4f1f5STimothy McDaniel 	qm_port = &ev_port->qm_port;
4184a2e4f1f5STimothy McDaniel 
4185a2e4f1f5STimothy McDaniel 	/* We have a special implementation for waiting. Wait can be:
4186a2e4f1f5STimothy McDaniel 	 * 1) no waiting at all
4187a2e4f1f5STimothy McDaniel 	 * 2) busy poll only
4188a2e4f1f5STimothy McDaniel 	 * 3) wait for interrupt. If wakeup and poll time
4189a2e4f1f5STimothy McDaniel 	 * has expired, then return to caller
4190a2e4f1f5STimothy McDaniel 	 * 4) umonitor/umwait repeatedly up to poll time
4191a2e4f1f5STimothy McDaniel 	 */
4192a2e4f1f5STimothy McDaniel 
4193a2e4f1f5STimothy McDaniel 	/* If configured for per dequeue wait, then use wait value provided
4194a2e4f1f5STimothy McDaniel 	 * to this API. Otherwise we must use the global
4195a2e4f1f5STimothy McDaniel 	 * value from eventdev config time.
4196a2e4f1f5STimothy McDaniel 	 */
4197a2e4f1f5STimothy McDaniel 	if (!dlb2->global_dequeue_wait)
4198a2e4f1f5STimothy McDaniel 		timeout = dequeue_timeout_ticks;
4199a2e4f1f5STimothy McDaniel 	else
4200a2e4f1f5STimothy McDaniel 		timeout = dlb2->global_dequeue_wait_ticks;
4201a2e4f1f5STimothy McDaniel 
42024a3b9b02SBruce Richardson 	if (timeout != 0)
4203a2e4f1f5STimothy McDaniel 		start_ticks = rte_get_timer_cycles();
4204a2e4f1f5STimothy McDaniel 
4205a2e4f1f5STimothy McDaniel 	while (num < max_num) {
4206a2e4f1f5STimothy McDaniel 		struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
4207a2e4f1f5STimothy McDaniel 		uint8_t offset;
4208a2e4f1f5STimothy McDaniel 		int num_avail;
4209a2e4f1f5STimothy McDaniel 
4210a2e4f1f5STimothy McDaniel 		/* Copy up to 4 QEs from the current cache line into qes */
4211a2e4f1f5STimothy McDaniel 		num_avail = dlb2_recv_qe(qm_port, qes, &offset);
4212a2e4f1f5STimothy McDaniel 
4213a2e4f1f5STimothy McDaniel 		/* But don't process more than the user requested */
4214a2e4f1f5STimothy McDaniel 		num_avail = RTE_MIN(num_avail, max_num - num);
4215a2e4f1f5STimothy McDaniel 
4216a2e4f1f5STimothy McDaniel 		dlb2_inc_cq_idx(qm_port, num_avail);
4217a2e4f1f5STimothy McDaniel 
4218a2e4f1f5STimothy McDaniel 		if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
4219a2e4f1f5STimothy McDaniel 			num += dlb2_process_dequeue_four_qes(ev_port,
4220a2e4f1f5STimothy McDaniel 							     qm_port,
4221a2e4f1f5STimothy McDaniel 							     &events[num],
4222a2e4f1f5STimothy McDaniel 							     &qes[offset]);
4223a2e4f1f5STimothy McDaniel 		else if (num_avail)
4224a2e4f1f5STimothy McDaniel 			num += dlb2_process_dequeue_qes(ev_port,
4225a2e4f1f5STimothy McDaniel 							qm_port,
4226a2e4f1f5STimothy McDaniel 							&events[num],
4227a2e4f1f5STimothy McDaniel 							&qes[offset],
4228a2e4f1f5STimothy McDaniel 							num_avail);
4229a2e4f1f5STimothy McDaniel 		else if ((timeout == 0) || (num > 0))
4230a2e4f1f5STimothy McDaniel 			/* Not waiting in any form, or 1+ events received? */
4231a2e4f1f5STimothy McDaniel 			break;
4232a2e4f1f5STimothy McDaniel 		else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
4233a2e4f1f5STimothy McDaniel 					   timeout, start_ticks))
4234a2e4f1f5STimothy McDaniel 			break;
4235a2e4f1f5STimothy McDaniel 	}
4236a2e4f1f5STimothy McDaniel 
4237a2e4f1f5STimothy McDaniel 	qm_port->owed_tokens += num;
4238a2e4f1f5STimothy McDaniel 
4239a2e4f1f5STimothy McDaniel 	if (num) {
4240c667583dSTimothy McDaniel 		if (qm_port->token_pop_mode == AUTO_POP)
4241a2e4f1f5STimothy McDaniel 			dlb2_consume_qe_immediate(qm_port, num);
4242a2e4f1f5STimothy McDaniel 
4243a2e4f1f5STimothy McDaniel 		ev_port->outstanding_releases += num;
4244a2e4f1f5STimothy McDaniel 
4245a2e4f1f5STimothy McDaniel 		dlb2_port_credits_inc(qm_port, num);
4246a2e4f1f5STimothy McDaniel 	}
4247a2e4f1f5STimothy McDaniel 
4248a2e4f1f5STimothy McDaniel 	return num;
4249a2e4f1f5STimothy McDaniel }
4250a2e4f1f5STimothy McDaniel 
4251a2e4f1f5STimothy McDaniel static uint16_t
4252a2e4f1f5STimothy McDaniel dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
4253a2e4f1f5STimothy McDaniel 			 uint64_t wait)
4254a2e4f1f5STimothy McDaniel {
4255a2e4f1f5STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
4256c667583dSTimothy McDaniel 	struct dlb2_port *qm_port = &ev_port->qm_port;
4257a2e4f1f5STimothy McDaniel 	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
42586e2e98d6SAbdullah Sevincer 	struct dlb2_reorder *order = qm_port->order;
4259a2e4f1f5STimothy McDaniel 	uint16_t cnt;
4260a2e4f1f5STimothy McDaniel 
4261a2e4f1f5STimothy McDaniel 	RTE_ASSERT(ev_port->setup_done);
4262a2e4f1f5STimothy McDaniel 	RTE_ASSERT(ev != NULL);
4263a2e4f1f5STimothy McDaniel 
4264a2e4f1f5STimothy McDaniel 	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
4265a2e4f1f5STimothy McDaniel 		uint16_t out_rels = ev_port->outstanding_releases;
42666e2e98d6SAbdullah Sevincer 		if (qm_port->reorder_en) {
42676e2e98d6SAbdullah Sevincer 			/* for directed, no-op command-byte = 0, but set dsi field */
42686e2e98d6SAbdullah Sevincer 			/* for load-balanced, set COMP */
42696e2e98d6SAbdullah Sevincer 			uint64_t release_u64 =
42706e2e98d6SAbdullah Sevincer 			    qm_port->is_directed ? 0xFF : (uint64_t)DLB2_COMP_CMD_BYTE << 56;
4271a2e4f1f5STimothy McDaniel 
42726e2e98d6SAbdullah Sevincer 			for (uint8_t i = order->next_to_enqueue; i != qm_port->reorder_id; i++)
42736e2e98d6SAbdullah Sevincer 				if (order->enq_reorder[i].u64[1] == 0)
42746e2e98d6SAbdullah Sevincer 					order->enq_reorder[i].u64[1] = release_u64;
42756e2e98d6SAbdullah Sevincer 
42766e2e98d6SAbdullah Sevincer 			__dlb2_event_enqueue_burst_reorder(event_port, NULL, 0,
42776e2e98d6SAbdullah Sevincer 						   qm_port->token_pop_mode == DELAYED_POP);
42786e2e98d6SAbdullah Sevincer 		} else {
427907d55c41STimothy McDaniel 			dlb2_event_release(dlb2, ev_port->id, out_rels);
42806e2e98d6SAbdullah Sevincer 		}
4281a2e4f1f5STimothy McDaniel 
4282a2e4f1f5STimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
4283a2e4f1f5STimothy McDaniel 	}
4284a2e4f1f5STimothy McDaniel 
4285c667583dSTimothy McDaniel 	if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
4286c667583dSTimothy McDaniel 		dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
4287c667583dSTimothy McDaniel 
4288a2e4f1f5STimothy McDaniel 	cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
4289a2e4f1f5STimothy McDaniel 
4290a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
4291a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
4292a2e4f1f5STimothy McDaniel 
4293a2e4f1f5STimothy McDaniel 	return cnt;
4294a2e4f1f5STimothy McDaniel }
4295a2e4f1f5STimothy McDaniel 
4296a2e4f1f5STimothy McDaniel static uint16_t
4297a2e4f1f5STimothy McDaniel dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
4298a2e4f1f5STimothy McDaniel 				uint16_t num, uint64_t wait)
4299a2e4f1f5STimothy McDaniel {
4300a2e4f1f5STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = event_port;
4301c667583dSTimothy McDaniel 	struct dlb2_port *qm_port = &ev_port->qm_port;
4302a2e4f1f5STimothy McDaniel 	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
43036e2e98d6SAbdullah Sevincer 	struct dlb2_reorder *order = qm_port->order;
4304a2e4f1f5STimothy McDaniel 	uint16_t cnt;
4305a2e4f1f5STimothy McDaniel 
4306a2e4f1f5STimothy McDaniel 	RTE_ASSERT(ev_port->setup_done);
4307a2e4f1f5STimothy McDaniel 	RTE_ASSERT(ev != NULL);
4308a2e4f1f5STimothy McDaniel 
4309a2e4f1f5STimothy McDaniel 	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
4310a2e4f1f5STimothy McDaniel 		uint16_t out_rels = ev_port->outstanding_releases;
43116e2e98d6SAbdullah Sevincer 		if (qm_port->reorder_en) {
43126e2e98d6SAbdullah Sevincer 			struct rte_event release_burst[8];
43136e2e98d6SAbdullah Sevincer 			int num_releases = 0;
4314a2e4f1f5STimothy McDaniel 
43156e2e98d6SAbdullah Sevincer 			/* go through reorder buffer looking for missing releases. */
43166e2e98d6SAbdullah Sevincer 			for (uint8_t i = order->next_to_enqueue; i != qm_port->reorder_id; i++) {
43176e2e98d6SAbdullah Sevincer 				if (order->enq_reorder[i].u64[1] == 0) {
43186e2e98d6SAbdullah Sevincer 					release_burst[num_releases++] = (struct rte_event){
43196e2e98d6SAbdullah Sevincer 						.op = RTE_EVENT_OP_RELEASE,
43206e2e98d6SAbdullah Sevincer 						.impl_opaque = i,
43216e2e98d6SAbdullah Sevincer 					};
43226e2e98d6SAbdullah Sevincer 
43236e2e98d6SAbdullah Sevincer 					if (num_releases == RTE_DIM(release_burst)) {
43246e2e98d6SAbdullah Sevincer 						__dlb2_event_enqueue_burst_reorder(event_port,
43256e2e98d6SAbdullah Sevincer 							release_burst, RTE_DIM(release_burst),
43266e2e98d6SAbdullah Sevincer 							qm_port->token_pop_mode == DELAYED_POP);
43276e2e98d6SAbdullah Sevincer 						num_releases = 0;
43286e2e98d6SAbdullah Sevincer 					}
43296e2e98d6SAbdullah Sevincer 				}
43306e2e98d6SAbdullah Sevincer 			}
43316e2e98d6SAbdullah Sevincer 
43326e2e98d6SAbdullah Sevincer 			if (num_releases)
43336e2e98d6SAbdullah Sevincer 				__dlb2_event_enqueue_burst_reorder(event_port, release_burst,
43346e2e98d6SAbdullah Sevincer 					num_releases, qm_port->token_pop_mode == DELAYED_POP);
43356e2e98d6SAbdullah Sevincer 		} else {
433607d55c41STimothy McDaniel 			dlb2_event_release(dlb2, ev_port->id, out_rels);
43376e2e98d6SAbdullah Sevincer 		}
4338a2e4f1f5STimothy McDaniel 
43396e2e98d6SAbdullah Sevincer 		RTE_ASSERT(ev_port->outstanding_releases == 0);
4340a2e4f1f5STimothy McDaniel 		DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
4341a2e4f1f5STimothy McDaniel 	}
4342a2e4f1f5STimothy McDaniel 
4343c667583dSTimothy McDaniel 	if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
4344c667583dSTimothy McDaniel 		dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
4345c667583dSTimothy McDaniel 
4346a2e4f1f5STimothy McDaniel 	cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
4347a2e4f1f5STimothy McDaniel 
4348a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
4349a2e4f1f5STimothy McDaniel 	DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
4350a2e4f1f5STimothy McDaniel 	return cnt;
4351a2e4f1f5STimothy McDaniel }
4352a2e4f1f5STimothy McDaniel 
435399f66f33STimothy McDaniel static void
435418991548STimothy McDaniel dlb2_flush_port(struct rte_eventdev *dev, int port_id)
435518991548STimothy McDaniel {
435618991548STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
43576e2e98d6SAbdullah Sevincer 	struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[port_id];
43586e2e98d6SAbdullah Sevincer 	struct dlb2_reorder *order = ev_port->qm_port.order;
435918991548STimothy McDaniel 	eventdev_stop_flush_t flush;
436018991548STimothy McDaniel 	struct rte_event ev;
436118991548STimothy McDaniel 	uint8_t dev_id;
436218991548STimothy McDaniel 	void *arg;
436318991548STimothy McDaniel 	int i;
436418991548STimothy McDaniel 
436518991548STimothy McDaniel 	flush = dev->dev_ops->dev_stop_flush;
436618991548STimothy McDaniel 	dev_id = dev->data->dev_id;
436718991548STimothy McDaniel 	arg = dev->data->dev_stop_flush_arg;
436818991548STimothy McDaniel 
436918991548STimothy McDaniel 	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
437018991548STimothy McDaniel 		if (flush)
437118991548STimothy McDaniel 			flush(dev_id, ev, arg);
437218991548STimothy McDaniel 
437318991548STimothy McDaniel 		if (dlb2->ev_ports[port_id].qm_port.is_directed)
437418991548STimothy McDaniel 			continue;
437518991548STimothy McDaniel 
437618991548STimothy McDaniel 		ev.op = RTE_EVENT_OP_RELEASE;
437718991548STimothy McDaniel 
437818991548STimothy McDaniel 		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
437918991548STimothy McDaniel 	}
438018991548STimothy McDaniel 
438118991548STimothy McDaniel 	/* Enqueue any additional outstanding releases */
438218991548STimothy McDaniel 	ev.op = RTE_EVENT_OP_RELEASE;
438318991548STimothy McDaniel 
43846e2e98d6SAbdullah Sevincer 	for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--) {
43856e2e98d6SAbdullah Sevincer 		ev.impl_opaque = order ? order->next_to_enqueue : 0;
438618991548STimothy McDaniel 		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
438718991548STimothy McDaniel 	}
43886e2e98d6SAbdullah Sevincer }
438918991548STimothy McDaniel 
439018991548STimothy McDaniel static uint32_t
439118991548STimothy McDaniel dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
439218991548STimothy McDaniel 			 struct dlb2_eventdev_queue *queue)
439318991548STimothy McDaniel {
439418991548STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
439518991548STimothy McDaniel 	struct dlb2_get_ldb_queue_depth_args cfg;
439618991548STimothy McDaniel 	int ret;
439718991548STimothy McDaniel 
439818991548STimothy McDaniel 	cfg.queue_id = queue->qm_queue.id;
439918991548STimothy McDaniel 
440018991548STimothy McDaniel 	ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
440118991548STimothy McDaniel 	if (ret < 0) {
4402f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)",
440318991548STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
440418991548STimothy McDaniel 		return ret;
440518991548STimothy McDaniel 	}
440618991548STimothy McDaniel 
440718991548STimothy McDaniel 	return cfg.response.id;
440818991548STimothy McDaniel }
440918991548STimothy McDaniel 
441018991548STimothy McDaniel static uint32_t
441118991548STimothy McDaniel dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
441218991548STimothy McDaniel 			 struct dlb2_eventdev_queue *queue)
441318991548STimothy McDaniel {
441418991548STimothy McDaniel 	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
441518991548STimothy McDaniel 	struct dlb2_get_dir_queue_depth_args cfg;
441618991548STimothy McDaniel 	int ret;
441718991548STimothy McDaniel 
441818991548STimothy McDaniel 	cfg.queue_id = queue->qm_queue.id;
441918991548STimothy McDaniel 
442018991548STimothy McDaniel 	ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
442118991548STimothy McDaniel 	if (ret < 0) {
4422f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)",
442318991548STimothy McDaniel 			     ret, dlb2_error_strings[cfg.response.status]);
442418991548STimothy McDaniel 		return ret;
442518991548STimothy McDaniel 	}
442618991548STimothy McDaniel 
442718991548STimothy McDaniel 	return cfg.response.id;
442818991548STimothy McDaniel }
442918991548STimothy McDaniel 
443018991548STimothy McDaniel uint32_t
443118991548STimothy McDaniel dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
443218991548STimothy McDaniel 		     struct dlb2_eventdev_queue *queue)
443318991548STimothy McDaniel {
443418991548STimothy McDaniel 	if (queue->qm_queue.is_directed)
443518991548STimothy McDaniel 		return dlb2_get_dir_queue_depth(dlb2, queue);
443618991548STimothy McDaniel 	else
443718991548STimothy McDaniel 		return dlb2_get_ldb_queue_depth(dlb2, queue);
443818991548STimothy McDaniel }
443918991548STimothy McDaniel 
444018991548STimothy McDaniel static bool
444118991548STimothy McDaniel dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
444218991548STimothy McDaniel 		    struct dlb2_eventdev_queue *queue)
444318991548STimothy McDaniel {
444418991548STimothy McDaniel 	return dlb2_get_queue_depth(dlb2, queue) == 0;
444518991548STimothy McDaniel }
444618991548STimothy McDaniel 
444718991548STimothy McDaniel static bool
444818991548STimothy McDaniel dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
444918991548STimothy McDaniel {
445018991548STimothy McDaniel 	int i;
445118991548STimothy McDaniel 
445218991548STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
445318991548STimothy McDaniel 		if (dlb2->ev_queues[i].num_links == 0)
445418991548STimothy McDaniel 			continue;
445518991548STimothy McDaniel 		if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
445618991548STimothy McDaniel 			return false;
445718991548STimothy McDaniel 	}
445818991548STimothy McDaniel 
445918991548STimothy McDaniel 	return true;
446018991548STimothy McDaniel }
446118991548STimothy McDaniel 
446218991548STimothy McDaniel static bool
446318991548STimothy McDaniel dlb2_queues_empty(struct dlb2_eventdev *dlb2)
446418991548STimothy McDaniel {
446518991548STimothy McDaniel 	int i;
446618991548STimothy McDaniel 
446718991548STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
446818991548STimothy McDaniel 		if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
446918991548STimothy McDaniel 			return false;
447018991548STimothy McDaniel 	}
447118991548STimothy McDaniel 
447218991548STimothy McDaniel 	return true;
447318991548STimothy McDaniel }
447418991548STimothy McDaniel 
447518991548STimothy McDaniel static void
447618991548STimothy McDaniel dlb2_drain(struct rte_eventdev *dev)
447718991548STimothy McDaniel {
447818991548STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
447918991548STimothy McDaniel 	struct dlb2_eventdev_port *ev_port = NULL;
448018991548STimothy McDaniel 	uint8_t dev_id;
448118991548STimothy McDaniel 	int i;
448218991548STimothy McDaniel 
448318991548STimothy McDaniel 	dev_id = dev->data->dev_id;
448418991548STimothy McDaniel 
448518991548STimothy McDaniel 	while (!dlb2_linked_queues_empty(dlb2)) {
448618991548STimothy McDaniel 		/* Flush all the ev_ports, which will drain all their connected
448718991548STimothy McDaniel 		 * queues.
448818991548STimothy McDaniel 		 */
448918991548STimothy McDaniel 		for (i = 0; i < dlb2->num_ports; i++)
449018991548STimothy McDaniel 			dlb2_flush_port(dev, i);
449118991548STimothy McDaniel 	}
449218991548STimothy McDaniel 
449318991548STimothy McDaniel 	/* The queues are empty, but there may be events left in the ports. */
449418991548STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++)
449518991548STimothy McDaniel 		dlb2_flush_port(dev, i);
449618991548STimothy McDaniel 
449718991548STimothy McDaniel 	/* If the domain's queues are empty, we're done. */
449818991548STimothy McDaniel 	if (dlb2_queues_empty(dlb2))
449918991548STimothy McDaniel 		return;
450018991548STimothy McDaniel 
450118991548STimothy McDaniel 	/* Else, there must be at least one unlinked load-balanced queue.
450218991548STimothy McDaniel 	 * Select a load-balanced port with which to drain the unlinked
450318991548STimothy McDaniel 	 * queue(s).
450418991548STimothy McDaniel 	 */
450518991548STimothy McDaniel 	for (i = 0; i < dlb2->num_ports; i++) {
450618991548STimothy McDaniel 		ev_port = &dlb2->ev_ports[i];
450718991548STimothy McDaniel 
450818991548STimothy McDaniel 		if (!ev_port->qm_port.is_directed)
450918991548STimothy McDaniel 			break;
451018991548STimothy McDaniel 	}
451118991548STimothy McDaniel 
451218991548STimothy McDaniel 	if (i == dlb2->num_ports) {
4513f665790aSDavid Marchand 		DLB2_LOG_ERR("internal error: no LDB ev_ports");
451418991548STimothy McDaniel 		return;
451518991548STimothy McDaniel 	}
451618991548STimothy McDaniel 
451718991548STimothy McDaniel 	rte_errno = 0;
451818991548STimothy McDaniel 	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
451918991548STimothy McDaniel 
452018991548STimothy McDaniel 	if (rte_errno) {
4521f665790aSDavid Marchand 		DLB2_LOG_ERR("internal error: failed to unlink ev_port %d",
452218991548STimothy McDaniel 			     ev_port->id);
452318991548STimothy McDaniel 		return;
452418991548STimothy McDaniel 	}
452518991548STimothy McDaniel 
452618991548STimothy McDaniel 	for (i = 0; i < dlb2->num_queues; i++) {
452718991548STimothy McDaniel 		uint8_t qid, prio;
452818991548STimothy McDaniel 		int ret;
452918991548STimothy McDaniel 
453018991548STimothy McDaniel 		if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
453118991548STimothy McDaniel 			continue;
453218991548STimothy McDaniel 
453318991548STimothy McDaniel 		qid = i;
453418991548STimothy McDaniel 		prio = 0;
453518991548STimothy McDaniel 
453618991548STimothy McDaniel 		/* Link the ev_port to the queue */
453718991548STimothy McDaniel 		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
453818991548STimothy McDaniel 		if (ret != 1) {
4539f665790aSDavid Marchand 			DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d",
454018991548STimothy McDaniel 				     ev_port->id, qid);
454118991548STimothy McDaniel 			return;
454218991548STimothy McDaniel 		}
454318991548STimothy McDaniel 
454418991548STimothy McDaniel 		/* Flush the queue */
454518991548STimothy McDaniel 		while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
454618991548STimothy McDaniel 			dlb2_flush_port(dev, ev_port->id);
454718991548STimothy McDaniel 
454818991548STimothy McDaniel 		/* Drain any extant events in the ev_port. */
454918991548STimothy McDaniel 		dlb2_flush_port(dev, ev_port->id);
455018991548STimothy McDaniel 
455118991548STimothy McDaniel 		/* Unlink the ev_port from the queue */
455218991548STimothy McDaniel 		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
455318991548STimothy McDaniel 		if (ret != 1) {
4554f665790aSDavid Marchand 			DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d",
455518991548STimothy McDaniel 				     ev_port->id, qid);
455618991548STimothy McDaniel 			return;
455718991548STimothy McDaniel 		}
455818991548STimothy McDaniel 	}
455918991548STimothy McDaniel }
456018991548STimothy McDaniel 
456118991548STimothy McDaniel static void
456218991548STimothy McDaniel dlb2_eventdev_stop(struct rte_eventdev *dev)
456318991548STimothy McDaniel {
456418991548STimothy McDaniel 	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
456518991548STimothy McDaniel 
456618991548STimothy McDaniel 	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
456718991548STimothy McDaniel 
456818991548STimothy McDaniel 	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
4569e99981afSDavid Marchand 		DLB2_LOG_LINE_DBG("Internal error: already stopped");
457018991548STimothy McDaniel 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
457118991548STimothy McDaniel 		return;
457218991548STimothy McDaniel 	} else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
4573f665790aSDavid Marchand 		DLB2_LOG_ERR("Internal error: bad state %d for dev_stop",
457418991548STimothy McDaniel 			     (int)dlb2->run_state);
457518991548STimothy McDaniel 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
457618991548STimothy McDaniel 		return;
457718991548STimothy McDaniel 	}
457818991548STimothy McDaniel 
457918991548STimothy McDaniel 	dlb2->run_state = DLB2_RUN_STATE_STOPPING;
458018991548STimothy McDaniel 
458118991548STimothy McDaniel 	rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
458218991548STimothy McDaniel 
458318991548STimothy McDaniel 	dlb2_drain(dev);
458418991548STimothy McDaniel 
458518991548STimothy McDaniel 	dlb2->run_state = DLB2_RUN_STATE_STOPPED;
458618991548STimothy McDaniel }
458718991548STimothy McDaniel 
458818991548STimothy McDaniel static int
458918991548STimothy McDaniel dlb2_eventdev_close(struct rte_eventdev *dev)
459018991548STimothy McDaniel {
459118991548STimothy McDaniel 	dlb2_hw_reset_sched_domain(dev, false);
459218991548STimothy McDaniel 
459318991548STimothy McDaniel 	return 0;
459418991548STimothy McDaniel }
459518991548STimothy McDaniel 
459618991548STimothy McDaniel static void
459727328fedSTimothy McDaniel dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
459827328fedSTimothy McDaniel {
459927328fedSTimothy McDaniel 	RTE_SET_USED(dev);
460027328fedSTimothy McDaniel 	RTE_SET_USED(id);
460127328fedSTimothy McDaniel 
460227328fedSTimothy McDaniel 	/* This function intentionally left blank. */
460327328fedSTimothy McDaniel }
460427328fedSTimothy McDaniel 
460527328fedSTimothy McDaniel static void
460627328fedSTimothy McDaniel dlb2_eventdev_port_release(void *port)
460727328fedSTimothy McDaniel {
460827328fedSTimothy McDaniel 	struct dlb2_eventdev_port *ev_port = port;
460927328fedSTimothy McDaniel 	struct dlb2_port *qm_port;
461027328fedSTimothy McDaniel 
461127328fedSTimothy McDaniel 	if (ev_port) {
461227328fedSTimothy McDaniel 		qm_port = &ev_port->qm_port;
461327328fedSTimothy McDaniel 		if (qm_port->config_state == DLB2_CONFIGURED)
461427328fedSTimothy McDaniel 			dlb2_free_qe_mem(qm_port);
461527328fedSTimothy McDaniel 	}
461627328fedSTimothy McDaniel }
461727328fedSTimothy McDaniel 
4618c105e9b3STimothy McDaniel static int
4619c105e9b3STimothy McDaniel dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
4620c105e9b3STimothy McDaniel 			    uint64_t *timeout_ticks)
4621c105e9b3STimothy McDaniel {
4622c105e9b3STimothy McDaniel 	RTE_SET_USED(dev);
4623c105e9b3STimothy McDaniel 	uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
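	/* Example: a 2.0 GHz TSC gives cycles_per_ns = 2, so a 1000 ns timeout
	 * becomes 2000 ticks.  The conversion truncates: a 2.5 GHz clock also
	 * yields cycles_per_ns = 2, under-estimating the requested time by 20%.
	 */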
4624c105e9b3STimothy McDaniel 
4625c105e9b3STimothy McDaniel 	*timeout_ticks = ns * cycles_per_ns;
4626c105e9b3STimothy McDaniel 
4627c105e9b3STimothy McDaniel 	return 0;
4628c105e9b3STimothy McDaniel }
4629c105e9b3STimothy McDaniel 
463027328fedSTimothy McDaniel static void
4631e7c9971aSTimothy McDaniel dlb2_entry_points_init(struct rte_eventdev *dev)
4632e7c9971aSTimothy McDaniel {
4633a2e4f1f5STimothy McDaniel 	struct dlb2_eventdev *dlb2;
4634a2e4f1f5STimothy McDaniel 
4635e88753dcSTimothy McDaniel 	/* Expose PMD's eventdev interface */
463623d06e37SPavan Nikhilesh 	static struct eventdev_ops dlb2_eventdev_entry_ops = {
4637f3cad285STimothy McDaniel 		.dev_infos_get    = dlb2_eventdev_info_get,
4638f3cad285STimothy McDaniel 		.dev_configure    = dlb2_eventdev_configure,
463959e1a966STimothy McDaniel 		.dev_start        = dlb2_eventdev_start,
464018991548STimothy McDaniel 		.dev_stop         = dlb2_eventdev_stop,
464118991548STimothy McDaniel 		.dev_close        = dlb2_eventdev_close,
464299f66f33STimothy McDaniel 		.queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
46437e668e57STimothy McDaniel 		.queue_setup      = dlb2_eventdev_queue_setup,
464427328fedSTimothy McDaniel 		.queue_release    = dlb2_eventdev_queue_release,
464599f66f33STimothy McDaniel 		.port_def_conf    = dlb2_eventdev_port_default_conf_get,
46463a6d0c04STimothy McDaniel 		.port_setup       = dlb2_eventdev_port_setup,
464727328fedSTimothy McDaniel 		.port_release     = dlb2_eventdev_port_release,
46481acd82c0STimothy McDaniel 		.port_link        = dlb2_eventdev_port_link,
4649a29248b5STimothy McDaniel 		.port_unlink      = dlb2_eventdev_port_unlink,
4650a29248b5STimothy McDaniel 		.port_unlinks_in_progress =
4651a29248b5STimothy McDaniel 				    dlb2_eventdev_port_unlinks_in_progress,
4652c105e9b3STimothy McDaniel 		.timeout_ticks    = dlb2_eventdev_timeout_ticks,
4653e88753dcSTimothy McDaniel 		.dump             = dlb2_eventdev_dump,
4654e88753dcSTimothy McDaniel 		.xstats_get       = dlb2_eventdev_xstats_get,
4655e88753dcSTimothy McDaniel 		.xstats_get_names = dlb2_eventdev_xstats_get_names,
4656e88753dcSTimothy McDaniel 		.xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
4657e88753dcSTimothy McDaniel 		.xstats_reset	    = dlb2_eventdev_xstats_reset,
46586f1b8288STimothy McDaniel 		.dev_selftest     = test_dlb2_eventdev,
4659e88753dcSTimothy McDaniel 	};
4660e7c9971aSTimothy McDaniel 
4663e88753dcSTimothy McDaniel 	dev->dev_ops = &dlb2_eventdev_entry_ops;
4664f7cc194bSTimothy McDaniel 	dev->enqueue_burst = dlb2_event_enqueue_burst;
4665f7cc194bSTimothy McDaniel 	dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
4666f7cc194bSTimothy McDaniel 	dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
4667a2e4f1f5STimothy McDaniel 
4668a2e4f1f5STimothy McDaniel 	dlb2 = dev->data->dev_private;
4669*e20e2148SMattias Rönnblom 	if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
4670a2e4f1f5STimothy McDaniel 		dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
4671*e20e2148SMattias Rönnblom 	else
4672a2e4f1f5STimothy McDaniel 		dev->dequeue_burst = dlb2_event_dequeue_burst;
4673a2e4f1f5STimothy McDaniel }
4674e7c9971aSTimothy McDaniel 
46755433956dSTimothy McDaniel int
46765433956dSTimothy McDaniel dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
46775433956dSTimothy McDaniel 			    const char *name,
46785433956dSTimothy McDaniel 			    struct dlb2_devargs *dlb2_args)
46795433956dSTimothy McDaniel {
4680e7c9971aSTimothy McDaniel 	struct dlb2_eventdev *dlb2;
4681c667583dSTimothy McDaniel 	int err, i;
4682e7c9971aSTimothy McDaniel 
4683e7c9971aSTimothy McDaniel 	dlb2 = dev->data->dev_private;
4684e7c9971aSTimothy McDaniel 
4685e7c9971aSTimothy McDaniel 	dlb2->event_dev = dev; /* backlink */
4686e7c9971aSTimothy McDaniel 
4687e7c9971aSTimothy McDaniel 	evdev_dlb2_default_info.driver_name = name;
4688e7c9971aSTimothy McDaniel 
4689e7c9971aSTimothy McDaniel 	dlb2->max_num_events_override = dlb2_args->max_num_events;
4690e7c9971aSTimothy McDaniel 	dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
46917be66a3bSTimothy McDaniel 	dlb2->poll_interval = dlb2_args->poll_interval;
46927be66a3bSTimothy McDaniel 	dlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta;
4693e4869c0bSPravin Pathak 	dlb2->hw_credit_quanta = dlb2_args->hw_credit_quanta;
46947be66a3bSTimothy McDaniel 	dlb2->default_depth_thresh = dlb2_args->default_depth_thresh;
4695fcc5489cSTimothy McDaniel 	dlb2->vector_opts_enabled = dlb2_args->vector_opts_enabled;
4696b977a659SAbdullah Sevincer 	dlb2->enable_cq_weight = dlb2_args->enable_cq_weight;
4697c7369702STimothy McDaniel 
4698c7369702STimothy McDaniel 
4699c7369702STimothy McDaniel 	if (dlb2_args->max_cq_depth != 0)
470086fe66d4STimothy McDaniel 		dlb2->max_cq_depth = dlb2_args->max_cq_depth;
4701c7369702STimothy McDaniel 	else
4702c7369702STimothy McDaniel 		dlb2->max_cq_depth = DLB2_DEFAULT_CQ_DEPTH;
4703c7369702STimothy McDaniel 
4704c7369702STimothy McDaniel 	evdev_dlb2_default_info.max_event_port_dequeue_depth = dlb2->max_cq_depth;
4705e7c9971aSTimothy McDaniel 
47060fc71ad8STimothy McDaniel 	if (dlb2_args->max_enq_depth != 0)
47070fc71ad8STimothy McDaniel 		dlb2->max_enq_depth = dlb2_args->max_enq_depth;
47080fc71ad8STimothy McDaniel 	else
47090fc71ad8STimothy McDaniel 		dlb2->max_enq_depth = DLB2_DEFAULT_CQ_DEPTH;
47100fc71ad8STimothy McDaniel 
47110fc71ad8STimothy McDaniel 	evdev_dlb2_default_info.max_event_port_enqueue_depth =
47120fc71ad8STimothy McDaniel 		dlb2->max_enq_depth;
47130fc71ad8STimothy McDaniel 
471454089151STimothy McDaniel 	dlb2_init_queue_depth_thresholds(dlb2,
471554089151STimothy McDaniel 					 dlb2_args->qid_depth_thresholds.val);
471654089151STimothy McDaniel 
471754089151STimothy McDaniel 	dlb2_init_port_cos(dlb2,
471854089151STimothy McDaniel 			   dlb2_args->port_cos.cos_id);
471954089151STimothy McDaniel 
472054089151STimothy McDaniel 	dlb2_init_cos_bw(dlb2,
472154089151STimothy McDaniel 			 &dlb2_args->cos_bw);
47220fc71ad8STimothy McDaniel 
4723e7c9971aSTimothy McDaniel 	err = dlb2_iface_open(&dlb2->qm_instance, name);
4724e7c9971aSTimothy McDaniel 	if (err < 0) {
4725f665790aSDavid Marchand 		DLB2_LOG_ERR("could not open event hardware device, err=%d",
4726e7c9971aSTimothy McDaniel 			     err);
4727e7c9971aSTimothy McDaniel 		return err;
4728e7c9971aSTimothy McDaniel 	}
4729e7c9971aSTimothy McDaniel 
4730e7c9971aSTimothy McDaniel 	err = dlb2_iface_get_device_version(&dlb2->qm_instance,
4731e7c9971aSTimothy McDaniel 					    &dlb2->revision);
4732e7c9971aSTimothy McDaniel 	if (err < 0) {
4733f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d",
4734e7c9971aSTimothy McDaniel 			     err);
4735e7c9971aSTimothy McDaniel 		return err;
4736e7c9971aSTimothy McDaniel 	}
4737e7c9971aSTimothy McDaniel 
4738e7c9971aSTimothy McDaniel 	err = dlb2_hw_query_resources(dlb2);
4739e7c9971aSTimothy McDaniel 	if (err) {
4740f665790aSDavid Marchand 		DLB2_LOG_ERR("get resources err=%d for %s",
4741e7c9971aSTimothy McDaniel 			     err, name);
4742e7c9971aSTimothy McDaniel 		return err;
4743e7c9971aSTimothy McDaniel 	}
4744e7c9971aSTimothy McDaniel 
4745e7c9971aSTimothy McDaniel 	dlb2_iface_hardware_init(&dlb2->qm_instance);
4746e7c9971aSTimothy McDaniel 
4747bec8901bSTimothy McDaniel 	/* configure class of service */
4748bec8901bSTimothy McDaniel 	{
4749bec8901bSTimothy McDaniel 		struct dlb2_set_cos_bw_args
4750bec8901bSTimothy McDaniel 			set_cos_bw_args = { {0} };
4751bec8901bSTimothy McDaniel 		int id;
4752bec8901bSTimothy McDaniel 		int ret = 0;
4753bec8901bSTimothy McDaniel 
4754bec8901bSTimothy McDaniel 		for (id = 0; id < DLB2_COS_NUM_VALS; id++) {
4755bec8901bSTimothy McDaniel 			set_cos_bw_args.cos_id = id;
47561084c88cSTimothy McDaniel 			set_cos_bw_args.bandwidth = dlb2->cos_bw[id];
4757bec8901bSTimothy McDaniel 			ret = dlb2_iface_set_cos_bw(&dlb2->qm_instance,
4758bec8901bSTimothy McDaniel 						    &set_cos_bw_args);
4759bec8901bSTimothy McDaniel 			if (ret != 0)
4760bec8901bSTimothy McDaniel 				break;
4761bec8901bSTimothy McDaniel 		}
4762bec8901bSTimothy McDaniel 		if (ret) {
4763f665790aSDavid Marchand 			DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d",
4764bec8901bSTimothy McDaniel 				     ret);
4765bec8901bSTimothy McDaniel 			return ret;
4766bec8901bSTimothy McDaniel 		}
4767bec8901bSTimothy McDaniel 	}
4768bec8901bSTimothy McDaniel 
4769e7c9971aSTimothy McDaniel 	err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
4770e7c9971aSTimothy McDaniel 	if (err < 0) {
4771f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d",
4772e7c9971aSTimothy McDaniel 			     err);
4773e7c9971aSTimothy McDaniel 		return err;
4774e7c9971aSTimothy McDaniel 	}
4775e7c9971aSTimothy McDaniel 
4776e88753dcSTimothy McDaniel 	/* Complete xstats runtime initialization */
4777e88753dcSTimothy McDaniel 	err = dlb2_xstats_init(dlb2);
4778e88753dcSTimothy McDaniel 	if (err) {
4779f665790aSDavid Marchand 		DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d", err);
4780e88753dcSTimothy McDaniel 		return err;
4781e88753dcSTimothy McDaniel 	}
4782e88753dcSTimothy McDaniel 
4783c667583dSTimothy McDaniel 	/* Initialize each port's token pop mode */
4784b66a418dSTimothy McDaniel 	for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
4785c667583dSTimothy McDaniel 		dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
4786c667583dSTimothy McDaniel 
4787e7c9971aSTimothy McDaniel 	rte_spinlock_init(&dlb2->qm_instance.resource_lock);
4788e7c9971aSTimothy McDaniel 
4789e7c9971aSTimothy McDaniel 	dlb2_iface_low_level_io_init();
4790e7c9971aSTimothy McDaniel 
4791e7c9971aSTimothy McDaniel 	dlb2_entry_points_init(dev);
4792e7c9971aSTimothy McDaniel 
47935433956dSTimothy McDaniel 	return 0;
47945433956dSTimothy McDaniel }
47955433956dSTimothy McDaniel 
47965433956dSTimothy McDaniel int
47975433956dSTimothy McDaniel dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
47985433956dSTimothy McDaniel 			      const char *name)
47995433956dSTimothy McDaniel {
4800e7c9971aSTimothy McDaniel 	struct dlb2_eventdev *dlb2;
4801e7c9971aSTimothy McDaniel 	int err;
4802e7c9971aSTimothy McDaniel 
4803e7c9971aSTimothy McDaniel 	dlb2 = dev->data->dev_private;
4804e7c9971aSTimothy McDaniel 
4805e7c9971aSTimothy McDaniel 	evdev_dlb2_default_info.driver_name = name;
4806e7c9971aSTimothy McDaniel 
4807e7c9971aSTimothy McDaniel 	err = dlb2_iface_open(&dlb2->qm_instance, name);
4808e7c9971aSTimothy McDaniel 	if (err < 0) {
4809f665790aSDavid Marchand 		DLB2_LOG_ERR("could not open event hardware device, err=%d",
4810e7c9971aSTimothy McDaniel 			     err);
4811e7c9971aSTimothy McDaniel 		return err;
4812e7c9971aSTimothy McDaniel 	}
4813e7c9971aSTimothy McDaniel 
4814e7c9971aSTimothy McDaniel 	err = dlb2_hw_query_resources(dlb2);
4815e7c9971aSTimothy McDaniel 	if (err) {
4816f665790aSDavid Marchand 		DLB2_LOG_ERR("get resources err=%d for %s",
4817e7c9971aSTimothy McDaniel 			     err, name);
4818e7c9971aSTimothy McDaniel 		return err;
4819e7c9971aSTimothy McDaniel 	}
4820e7c9971aSTimothy McDaniel 
4821e7c9971aSTimothy McDaniel 	dlb2_iface_low_level_io_init();
4822e7c9971aSTimothy McDaniel 
4823e7c9971aSTimothy McDaniel 	dlb2_entry_points_init(dev);
48245433956dSTimothy McDaniel 
48255433956dSTimothy McDaniel 	return 0;
48265433956dSTimothy McDaniel }
48275433956dSTimothy McDaniel 
48285433956dSTimothy McDaniel int
48295433956dSTimothy McDaniel dlb2_parse_params(const char *params,
48305433956dSTimothy McDaniel 		  const char *name,
4831b66a418dSTimothy McDaniel 		  struct dlb2_devargs *dlb2_args,
4832b66a418dSTimothy McDaniel 		  uint8_t version)
48335433956dSTimothy McDaniel {
48345433956dSTimothy McDaniel 	int ret = 0;
48355433956dSTimothy McDaniel 	static const char * const args[] = { NUMA_NODE_ARG,
48365433956dSTimothy McDaniel 					     DLB2_MAX_NUM_EVENTS,
48375433956dSTimothy McDaniel 					     DLB2_NUM_DIR_CREDITS,
48385433956dSTimothy McDaniel 					     DEV_ID_ARG,
48395433956dSTimothy McDaniel 					     DLB2_QID_DEPTH_THRESH_ARG,
48407be66a3bSTimothy McDaniel 					     DLB2_POLL_INTERVAL_ARG,
48417be66a3bSTimothy McDaniel 					     DLB2_SW_CREDIT_QUANTA_ARG,
4842e4869c0bSPravin Pathak 					     DLB2_HW_CREDIT_QUANTA_ARG,
48437be66a3bSTimothy McDaniel 					     DLB2_DEPTH_THRESH_ARG,
4844fcc5489cSTimothy McDaniel 					     DLB2_VECTOR_OPTS_ENAB_ARG,
484586fe66d4STimothy McDaniel 					     DLB2_MAX_CQ_DEPTH,
48460fc71ad8STimothy McDaniel 					     DLB2_MAX_ENQ_DEPTH,
4847bec8901bSTimothy McDaniel 					     DLB2_PORT_COS,
4848bec8901bSTimothy McDaniel 					     DLB2_COS_BW,
48498d1d9070SAbdullah Sevincer 					     DLB2_PRODUCER_COREMASK,
48508d1d9070SAbdullah Sevincer 					     DLB2_DEFAULT_LDB_PORT_ALLOCATION_ARG,
4851b977a659SAbdullah Sevincer 					     DLB2_ENABLE_CQ_WEIGHT_ARG,
48525433956dSTimothy McDaniel 					     NULL };
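	/* Hedged usage sketch: each entry in args[] is a devargs key appended to
	 * the device, conceptually
	 *     <device>,<NUMA_NODE_ARG>=1,<DLB2_MAX_NUM_EVENTS>=4096
	 * where the literal key strings are the macro values defined in the
	 * driver headers (not shown in this file).
	 */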
48535433956dSTimothy McDaniel 
48545433956dSTimothy McDaniel 	if (params != NULL && params[0] != '\0') {
48555433956dSTimothy McDaniel 		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
48565433956dSTimothy McDaniel 
48575433956dSTimothy McDaniel 		if (kvlist == NULL) {
4858a247fcd9SStephen Hemminger 			DLB2_LOG_INFO("Ignoring unsupported parameters when creating device '%s'",
48595433956dSTimothy McDaniel 				      name);
48605433956dSTimothy McDaniel 		} else {
48615433956dSTimothy McDaniel 			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
48625433956dSTimothy McDaniel 						     set_numa_node,
48635433956dSTimothy McDaniel 						     &dlb2_args->socket_id);
48645433956dSTimothy McDaniel 			if (ret != 0) {
48655433956dSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing numa node parameter",
48665433956dSTimothy McDaniel 					     name);
48675433956dSTimothy McDaniel 				rte_kvargs_free(kvlist);
48685433956dSTimothy McDaniel 				return ret;
48695433956dSTimothy McDaniel 			}
48705433956dSTimothy McDaniel 
48715433956dSTimothy McDaniel 			ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
48725433956dSTimothy McDaniel 						 set_max_num_events,
48735433956dSTimothy McDaniel 						 &dlb2_args->max_num_events);
48745433956dSTimothy McDaniel 			if (ret != 0) {
48755433956dSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
48765433956dSTimothy McDaniel 					     name);
48775433956dSTimothy McDaniel 				rte_kvargs_free(kvlist);
48785433956dSTimothy McDaniel 				return ret;
48795433956dSTimothy McDaniel 			}
48805433956dSTimothy McDaniel 
4881b66a418dSTimothy McDaniel 			if (version == DLB2_HW_V2) {
48825433956dSTimothy McDaniel 				ret = rte_kvargs_process(kvlist,
48835433956dSTimothy McDaniel 					DLB2_NUM_DIR_CREDITS,
48845433956dSTimothy McDaniel 					set_num_dir_credits,
48855433956dSTimothy McDaniel 					&dlb2_args->num_dir_credits_override);
48865433956dSTimothy McDaniel 				if (ret != 0) {
48875433956dSTimothy McDaniel 					DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
48885433956dSTimothy McDaniel 						     name);
48895433956dSTimothy McDaniel 					rte_kvargs_free(kvlist);
48905433956dSTimothy McDaniel 					return ret;
48915433956dSTimothy McDaniel 				}
4892b66a418dSTimothy McDaniel 			}
48935433956dSTimothy McDaniel 			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
48945433956dSTimothy McDaniel 						 set_dev_id,
48955433956dSTimothy McDaniel 						 &dlb2_args->dev_id);
48965433956dSTimothy McDaniel 			if (ret != 0) {
48975433956dSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
48985433956dSTimothy McDaniel 					     name);
48995433956dSTimothy McDaniel 				rte_kvargs_free(kvlist);
49005433956dSTimothy McDaniel 				return ret;
49015433956dSTimothy McDaniel 			}
49025433956dSTimothy McDaniel 
4903b66a418dSTimothy McDaniel 			if (version == DLB2_HW_V2) {
49045433956dSTimothy McDaniel 				ret = rte_kvargs_process(
49055433956dSTimothy McDaniel 					kvlist,
49065433956dSTimothy McDaniel 					DLB2_QID_DEPTH_THRESH_ARG,
49075433956dSTimothy McDaniel 					set_qid_depth_thresh,
49085433956dSTimothy McDaniel 					&dlb2_args->qid_depth_thresholds);
4909b66a418dSTimothy McDaniel 			} else {
4910b66a418dSTimothy McDaniel 				ret = rte_kvargs_process(
4911b66a418dSTimothy McDaniel 					kvlist,
4912b66a418dSTimothy McDaniel 					DLB2_QID_DEPTH_THRESH_ARG,
4913b66a418dSTimothy McDaniel 					set_qid_depth_thresh_v2_5,
4914b66a418dSTimothy McDaniel 					&dlb2_args->qid_depth_thresholds);
4915b66a418dSTimothy McDaniel 			}
49165433956dSTimothy McDaniel 			if (ret != 0) {
49175433956dSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
49185433956dSTimothy McDaniel 					     name);
49195433956dSTimothy McDaniel 				rte_kvargs_free(kvlist);
49205433956dSTimothy McDaniel 				return ret;
49215433956dSTimothy McDaniel 			}
49225433956dSTimothy McDaniel 
49237be66a3bSTimothy McDaniel 			ret = rte_kvargs_process(kvlist, DLB2_POLL_INTERVAL_ARG,
49247be66a3bSTimothy McDaniel 						 set_poll_interval,
49257be66a3bSTimothy McDaniel 						 &dlb2_args->poll_interval);
49267be66a3bSTimothy McDaniel 			if (ret != 0) {
49277be66a3bSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing poll interval parameter",
49287be66a3bSTimothy McDaniel 					     name);
49297be66a3bSTimothy McDaniel 				rte_kvargs_free(kvlist);
49307be66a3bSTimothy McDaniel 				return ret;
49317be66a3bSTimothy McDaniel 			}
49327be66a3bSTimothy McDaniel 
49337be66a3bSTimothy McDaniel 			ret = rte_kvargs_process(kvlist,
49347be66a3bSTimothy McDaniel 						 DLB2_SW_CREDIT_QUANTA_ARG,
49357be66a3bSTimothy McDaniel 						 set_sw_credit_quanta,
49367be66a3bSTimothy McDaniel 						 &dlb2_args->sw_credit_quanta);
49377be66a3bSTimothy McDaniel 			if (ret != 0) {
4938e4869c0bSPravin Pathak 				DLB2_LOG_ERR("%s: Error parsing sw credit quanta parameter",
4939e4869c0bSPravin Pathak 					     name);
4940e4869c0bSPravin Pathak 				rte_kvargs_free(kvlist);
4941e4869c0bSPravin Pathak 				return ret;
4942e4869c0bSPravin Pathak 			}
4943e4869c0bSPravin Pathak 
4944e4869c0bSPravin Pathak 			ret = rte_kvargs_process(kvlist,
4945e4869c0bSPravin Pathak 						 DLB2_HW_CREDIT_QUANTA_ARG,
4946e4869c0bSPravin Pathak 						 set_hw_credit_quanta,
4947e4869c0bSPravin Pathak 						 &dlb2_args->hw_credit_quanta);
4948e4869c0bSPravin Pathak 			if (ret != 0) {
4949e4869c0bSPravin Pathak 				DLB2_LOG_ERR("%s: Error parsing hw credit quanta parameter",
49507be66a3bSTimothy McDaniel 					     name);
49517be66a3bSTimothy McDaniel 				rte_kvargs_free(kvlist);
49527be66a3bSTimothy McDaniel 				return ret;
49537be66a3bSTimothy McDaniel 			}
49547be66a3bSTimothy McDaniel 
49557be66a3bSTimothy McDaniel 			ret = rte_kvargs_process(kvlist, DLB2_DEPTH_THRESH_ARG,
49567be66a3bSTimothy McDaniel 					set_default_depth_thresh,
49577be66a3bSTimothy McDaniel 					&dlb2_args->default_depth_thresh);
49587be66a3bSTimothy McDaniel 			if (ret != 0) {
49597be66a3bSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing set depth thresh parameter",
49607be66a3bSTimothy McDaniel 					     name);
49617be66a3bSTimothy McDaniel 				rte_kvargs_free(kvlist);
49627be66a3bSTimothy McDaniel 				return ret;
49637be66a3bSTimothy McDaniel 			}
49647be66a3bSTimothy McDaniel 
4965000a7b8eSTimothy McDaniel 			ret = rte_kvargs_process(kvlist,
4966fcc5489cSTimothy McDaniel 					DLB2_VECTOR_OPTS_ENAB_ARG,
4967fcc5489cSTimothy McDaniel 					set_vector_opts_enab,
4968fcc5489cSTimothy McDaniel 					&dlb2_args->vector_opts_enabled);
4969000a7b8eSTimothy McDaniel 			if (ret != 0) {
4970fcc5489cSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing vector opts enabled",
4971000a7b8eSTimothy McDaniel 					     name);
4972000a7b8eSTimothy McDaniel 				rte_kvargs_free(kvlist);
4973000a7b8eSTimothy McDaniel 				return ret;
4974000a7b8eSTimothy McDaniel 			}
4975000a7b8eSTimothy McDaniel 
497686fe66d4STimothy McDaniel 			ret = rte_kvargs_process(kvlist,
497786fe66d4STimothy McDaniel 					DLB2_MAX_CQ_DEPTH,
497886fe66d4STimothy McDaniel 					set_max_cq_depth,
497986fe66d4STimothy McDaniel 					&dlb2_args->max_cq_depth);
498086fe66d4STimothy McDaniel 			if (ret != 0) {
4981ffa46fc4STimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing max cq depth",
4982ffa46fc4STimothy McDaniel 					     name);
4983ffa46fc4STimothy McDaniel 				rte_kvargs_free(kvlist);
4984ffa46fc4STimothy McDaniel 				return ret;
4985ffa46fc4STimothy McDaniel 			}
4986ffa46fc4STimothy McDaniel 
4987ffa46fc4STimothy McDaniel 			ret = rte_kvargs_process(kvlist,
49880fc71ad8STimothy McDaniel 						 DLB2_MAX_ENQ_DEPTH,
49890fc71ad8STimothy McDaniel 						 set_max_enq_depth,
49900fc71ad8STimothy McDaniel 						 &dlb2_args->max_enq_depth);
49910fc71ad8STimothy McDaniel 			if (ret != 0) {
49920fc71ad8STimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing max enq depth",
49930fc71ad8STimothy McDaniel 					     name);
49940fc71ad8STimothy McDaniel 				rte_kvargs_free(kvlist);
49950fc71ad8STimothy McDaniel 				return ret;
49960fc71ad8STimothy McDaniel 			}
49970fc71ad8STimothy McDaniel 
49980fc71ad8STimothy McDaniel 			ret = rte_kvargs_process(kvlist,
4999bec8901bSTimothy McDaniel 					DLB2_PORT_COS,
5000bec8901bSTimothy McDaniel 					set_port_cos,
5001bec8901bSTimothy McDaniel 					&dlb2_args->port_cos);
5002bec8901bSTimothy McDaniel 			if (ret != 0) {
5003bec8901bSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing port cos",
5004bec8901bSTimothy McDaniel 					     name);
5005bec8901bSTimothy McDaniel 				rte_kvargs_free(kvlist);
5006bec8901bSTimothy McDaniel 				return ret;
5007bec8901bSTimothy McDaniel 			}
5008bec8901bSTimothy McDaniel 
5009bec8901bSTimothy McDaniel 			ret = rte_kvargs_process(kvlist,
5010bec8901bSTimothy McDaniel 					DLB2_COS_BW,
5011bec8901bSTimothy McDaniel 					set_cos_bw,
5012bec8901bSTimothy McDaniel 					&dlb2_args->cos_bw);
5013bec8901bSTimothy McDaniel 			if (ret != 0) {
5014bec8901bSTimothy McDaniel 				DLB2_LOG_ERR("%s: Error parsing cos_bw",
5015bec8901bSTimothy McDaniel 					     name);
5016bec8901bSTimothy McDaniel 				rte_kvargs_free(kvlist);
5017bec8901bSTimothy McDaniel 				return ret;
5018bec8901bSTimothy McDaniel 			}
5019bec8901bSTimothy McDaniel 
50218d1d9070SAbdullah Sevincer 			ret = rte_kvargs_process(kvlist,
50228d1d9070SAbdullah Sevincer 						 DLB2_PRODUCER_COREMASK,
50238d1d9070SAbdullah Sevincer 						 set_producer_coremask,
50248d1d9070SAbdullah Sevincer 						 &dlb2_args->producer_coremask);
50258d1d9070SAbdullah Sevincer 			if (ret != 0) {
50268d1d9070SAbdullah Sevincer 				DLB2_LOG_ERR(
50278d1d9070SAbdullah Sevincer 					"%s: Error parsing producer coremask",
50288d1d9070SAbdullah Sevincer 					name);
50298d1d9070SAbdullah Sevincer 				rte_kvargs_free(kvlist);
50308d1d9070SAbdullah Sevincer 				return ret;
50318d1d9070SAbdullah Sevincer 			}
50328d1d9070SAbdullah Sevincer 
50338d1d9070SAbdullah Sevincer 			ret = rte_kvargs_process(kvlist,
50348d1d9070SAbdullah Sevincer 						 DLB2_DEFAULT_LDB_PORT_ALLOCATION_ARG,
50358d1d9070SAbdullah Sevincer 						 set_default_ldb_port_allocation,
50368d1d9070SAbdullah Sevincer 						 &dlb2_args->default_ldb_port_allocation);
50378d1d9070SAbdullah Sevincer 			if (ret != 0) {
50388d1d9070SAbdullah Sevincer 				DLB2_LOG_ERR("%s: Error parsing ldb default port allocation arg",
50398d1d9070SAbdullah Sevincer 					     name);
50408d1d9070SAbdullah Sevincer 				rte_kvargs_free(kvlist);
50418d1d9070SAbdullah Sevincer 				return ret;
50428d1d9070SAbdullah Sevincer 			}
50438d1d9070SAbdullah Sevincer 
5044b977a659SAbdullah Sevincer 			ret = rte_kvargs_process(kvlist,
5045b977a659SAbdullah Sevincer 						 DLB2_ENABLE_CQ_WEIGHT_ARG,
5046b977a659SAbdullah Sevincer 						 set_enable_cq_weight,
5047b977a659SAbdullah Sevincer 						 &dlb2_args->enable_cq_weight);
5048b977a659SAbdullah Sevincer 			if (ret != 0) {
5049b977a659SAbdullah Sevincer 				DLB2_LOG_ERR("%s: Error parsing enable_cq_weight arg",
5050b977a659SAbdullah Sevincer 					     name);
5051b977a659SAbdullah Sevincer 				rte_kvargs_free(kvlist);
5052b977a659SAbdullah Sevincer 				return ret;
5053b977a659SAbdullah Sevincer 			}
50546e2e98d6SAbdullah Sevincer 			if (version == DLB2_HW_V2 && dlb2_args->enable_cq_weight)
50556e2e98d6SAbdullah Sevincer 				DLB2_LOG_INFO("Ignoring 'enable_cq_weight=y'. Only supported for 2.5 HW onwards");
5056b977a659SAbdullah Sevincer 
50575433956dSTimothy McDaniel 			rte_kvargs_free(kvlist);
50585433956dSTimothy McDaniel 		}
50595433956dSTimothy McDaniel 	}
50605433956dSTimothy McDaniel 	return ret;
50615433956dSTimothy McDaniel }
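
/*
 * Illustrative usage note (an assumption, not taken from this file): the
 * kvargs keys handled above are the literal strings behind the DLB2_*_ARG
 * macros defined in dlb2_priv.h. Assuming the documented key names, the
 * parameters would typically be appended to the device's EAL devargs, e.g.:
 *
 *   -a <pci_bdf>,poll_interval=1000,sw_credit_quanta=64
 *
 * rte_kvargs_process() invokes the matching set_* callback for each key that
 * is present, storing the parsed value into the dlb2_args structure; keys
 * that are absent are simply skipped, leaving the corresponding defaults
 * untouched.
 */
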
5062eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(eventdev_dlb2_log_level, NOTICE);
5063