xref: /dpdk/drivers/event/dlb2/pf/base/dlb2_resource.c (revision 6a41e6070ea6f251987f81f9d610190b9adfb978)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4 
5 #include "dlb2_user.h"
6 
7 #include "dlb2_hw_types.h"
8 #include "dlb2_osdep.h"
9 #include "dlb2_osdep_bitmap.h"
10 #include "dlb2_osdep_types.h"
11 #include "dlb2_regs.h"
12 #include "dlb2_resource.h"
13 
14 #include "../../dlb2_priv.h"
15 #include "../../dlb2_inline_fns.h"
16 
17 #define DLB2_DOM_LIST_HEAD(head, type) \
18 	DLB2_LIST_HEAD((head), type, domain_list)
19 
20 #define DLB2_FUNC_LIST_HEAD(head, type) \
21 	DLB2_LIST_HEAD((head), type, func_list)
22 
23 #define DLB2_DOM_LIST_FOR(head, ptr, iter) \
24 	DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
25 
26 #define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
27 	DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
28 
29 #define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
30 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
31 
32 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
33 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
34 
35 /*
36  * The PF driver cannot assume that a register write will affect subsequent HCW
37  * writes. To ensure a write completes, the driver must read back a CSR. This
38  * function need only be called for configuration that can occur after the
39  * domain has started; prior to starting, applications can't send HCWs.
40  */
41 static inline void dlb2_flush_csr(struct dlb2_hw *hw)
42 {
43 	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
44 }
45 
46 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
47 {
48 	int i;
49 
50 	dlb2_list_init_head(&domain->used_ldb_queues);
51 	dlb2_list_init_head(&domain->used_dir_pq_pairs);
52 	dlb2_list_init_head(&domain->avail_ldb_queues);
53 	dlb2_list_init_head(&domain->avail_dir_pq_pairs);
54 	dlb2_list_init_head(&domain->rsvd_dir_pq_pairs);
55 
56 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
57 		dlb2_list_init_head(&domain->used_ldb_ports[i]);
58 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
59 		dlb2_list_init_head(&domain->avail_ldb_ports[i]);
60 }
61 
62 static void dlb2_init_fn_rsrc_lists(struct dlb2_function_resources *rsrc)
63 {
64 	int i;
65 	dlb2_list_init_head(&rsrc->avail_domains);
66 	dlb2_list_init_head(&rsrc->used_domains);
67 	dlb2_list_init_head(&rsrc->avail_ldb_queues);
68 	dlb2_list_init_head(&rsrc->avail_dir_pq_pairs);
69 
70 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
71 		dlb2_list_init_head(&rsrc->avail_ldb_ports[i]);
72 }
73 
74 /**
75  * dlb2_resource_free() - free device state memory
76  * @hw: dlb2_hw handle for a particular device.
77  *
78  * This function frees software state pointed to by dlb2_hw. This function
79  * should be called when resetting the device or unloading the driver.
80  */
81 void dlb2_resource_free(struct dlb2_hw *hw)
82 {
83 	int i;
84 
85 	if (hw->pf.avail_hist_list_entries)
86 		dlb2_bitmap_free(hw->pf.avail_hist_list_entries);
87 
88 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
89 		if (hw->vdev[i].avail_hist_list_entries)
90 			dlb2_bitmap_free(hw->vdev[i].avail_hist_list_entries);
91 	}
92 }
93 
94 /**
95  * dlb2_resource_init() - initialize the device
96  * @hw: pointer to struct dlb2_hw.
97  * @ver: device version.
98  *
99  * This function initializes the device's software state (pointed to by the hw
100  * argument) and programs global scheduling QoS registers. This function should
101  * be called during driver initialization, and the dlb2_hw structure should
102  * be zero-initialized before calling the function.
103  *
104  * The dlb2_hw struct must be unique per DLB 2.0 device and persist until the
105  * device is reset.
106  *
107  * Return:
108  * Returns 0 upon success, <0 otherwise.
109  */
110 int dlb2_resource_init(struct dlb2_hw *hw, enum dlb2_hw_ver ver, const void *probe_args)
111 {
112 	const struct dlb2_devargs *args = (const struct dlb2_devargs *)probe_args;
113 	bool ldb_port_default = args ? args->default_ldb_port_allocation : false;
114 	struct dlb2_list_entry *list;
115 	unsigned int i;
116 	int ret;
117 
118 	/*
119 	 * For optimal load-balancing, ports that map to one or more QIDs in
120 	 * common should not be in numerical sequence. The port->QID mapping is
121 	 * application dependent, but the driver interleaves port IDs as much
122 	 * as possible to reduce the likelihood of sequential ports mapping to
123 	 * the same QID(s). This initial allocation of port IDs maximizes the
124 	 * average distance between an ID and its immediate neighbors (i.e.
125 	 * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
126 	 * 3, etc.).
127 	 */
128 
129 	const u8 init_ldb_port_allocation[DLB2_MAX_NUM_LDB_PORTS] = {
130 		0,  7,  14,  5, 12,  3, 10,  1,  8, 15,  6, 13,  4, 11,  2,  9,
131 		16, 23, 30, 21, 28, 19, 26, 17, 24, 31, 22, 29, 20, 27, 18, 25,
132 		32, 39, 46, 37, 44, 35, 42, 33, 40, 47, 38, 45, 36, 43, 34, 41,
133 		48, 55, 62, 53, 60, 51, 58, 49, 56, 63, 54, 61, 52, 59, 50, 57,
134 	};
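
	/*
	 * For illustration: with the table above, port IDs 0, 1 and 2 are
	 * handed out 1st, 8th and 15th, so numerically adjacent port IDs are
	 * allocated at least seven slots apart.
	 */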
135 
136 	hw->ver = ver;
137 
138 	dlb2_init_fn_rsrc_lists(&hw->pf);
139 
140 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++)
141 		dlb2_init_fn_rsrc_lists(&hw->vdev[i]);
142 
143 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
144 		dlb2_init_domain_rsrc_lists(&hw->domains[i]);
145 		hw->domains[i].parent_func = &hw->pf;
146 	}
147 
148 	/* Give all resources to the PF driver */
149 	hw->pf.num_avail_domains = DLB2_MAX_NUM_DOMAINS;
150 	for (i = 0; i < hw->pf.num_avail_domains; i++) {
151 		list = &hw->domains[i].func_list;
152 
153 		dlb2_list_add(&hw->pf.avail_domains, list);
154 	}
155 
156 	hw->pf.num_avail_ldb_queues = DLB2_MAX_NUM_LDB_QUEUES;
157 	for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
158 		list = &hw->rsrcs.ldb_queues[i].func_list;
159 
160 		dlb2_list_add(&hw->pf.avail_ldb_queues, list);
161 	}
162 
163 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
164 		hw->pf.num_avail_ldb_ports[i] =
165 			DLB2_MAX_NUM_LDB_PORTS / DLB2_NUM_COS_DOMAINS;
166 
167 	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
168 		int cos_id = i >> DLB2_NUM_COS_DOMAINS;
169 		struct dlb2_ldb_port *port;
170 
171 		if (ldb_port_default)
172 			port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
173 		else
174 			port = &hw->rsrcs.ldb_ports[hw->ldb_pp_allocations[i]];
175 
176 		dlb2_list_add(&hw->pf.avail_ldb_ports[cos_id],
177 			      &port->func_list);
178 	}
179 
180 	hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
181 	for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
182 		int index = hw->dir_pp_allocations[i];
183 		list = &hw->rsrcs.dir_pq_pairs[index].func_list;
184 
185 		dlb2_list_add(&hw->pf.avail_dir_pq_pairs, list);
186 	}
187 
188 	if (hw->ver == DLB2_HW_V2) {
189 		hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS;
190 		hw->pf.num_avail_dqed_entries =
191 			DLB2_MAX_NUM_DIR_CREDITS(hw->ver);
192 	} else {
193 		hw->pf.num_avail_entries = DLB2_MAX_NUM_CREDITS(hw->ver);
194 	}
195 
196 	hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES;
197 
198 	ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries,
199 				DLB2_MAX_NUM_HIST_LIST_ENTRIES);
200 	if (ret)
201 		goto unwind;
202 
203 	ret = dlb2_bitmap_fill(hw->pf.avail_hist_list_entries);
204 	if (ret)
205 		goto unwind;
206 
207 	for (i = 0; i < DLB2_MAX_NUM_VDEVS; i++) {
208 		ret = dlb2_bitmap_alloc(&hw->vdev[i].avail_hist_list_entries,
209 					DLB2_MAX_NUM_HIST_LIST_ENTRIES);
210 		if (ret)
211 			goto unwind;
212 
213 		ret = dlb2_bitmap_zero(hw->vdev[i].avail_hist_list_entries);
214 		if (ret)
215 			goto unwind;
216 	}
217 
218 	/* Initialize the hardware resource IDs */
219 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
220 		hw->domains[i].id.phys_id = i;
221 		hw->domains[i].id.vdev_owned = false;
222 	}
223 
224 	for (i = 0; i < DLB2_MAX_NUM_LDB_QUEUES; i++) {
225 		hw->rsrcs.ldb_queues[i].id.phys_id = i;
226 		hw->rsrcs.ldb_queues[i].id.vdev_owned = false;
227 	}
228 
229 	for (i = 0; i < DLB2_MAX_NUM_LDB_PORTS; i++) {
230 		hw->rsrcs.ldb_ports[i].id.phys_id = i;
231 		hw->rsrcs.ldb_ports[i].id.vdev_owned = false;
232 	}
233 
234 	for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) {
235 		hw->rsrcs.dir_pq_pairs[i].id.phys_id = i;
236 		hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false;
237 	}
238 
239 	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
240 		hw->rsrcs.sn_groups[i].id = i;
241 		/* Default mode (0) is 64 sequence numbers per queue */
242 		hw->rsrcs.sn_groups[i].mode = 0;
243 		hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 64;
244 		hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
245 	}
246 
247 	return 0;
248 
249 unwind:
250 	dlb2_resource_free(hw);
251 
252 	return ret;
253 }
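
/*
 * A rough sketch of the expected bring-up ordering (assumed, based on the
 * data dependencies in this file): dlb2_resource_probe() fills the
 * hw->ldb_pp_allocations[] and hw->dir_pp_allocations[] tables that
 * dlb2_resource_init() reads above when handing ports to the PF, so the
 * probe step must run first:
 *
 *	dlb2_clr_pmcsr_disable(hw, ver);
 *	dlb2_resource_probe(hw, probe_args);
 *	dlb2_resource_init(hw, ver, probe_args);
 */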
254 
255 /**
256  * dlb2_clr_pmcsr_disable() - power on bulk of DLB 2.0 logic
257  * @hw: dlb2_hw handle for a particular device.
258  * @ver: device version.
259  *
260  * Clearing the PMCSR must be done at initialization to make the device fully
261  * operational.
262  */
263 void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw, enum dlb2_hw_ver ver)
264 {
265 	u32 pmcsr_dis;
266 
267 	pmcsr_dis = DLB2_CSR_RD(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver));
268 
269 	DLB2_BITS_CLR(pmcsr_dis, DLB2_CM_CFG_PM_PMCSR_DISABLE_DISABLE);
270 
271 	DLB2_CSR_WR(hw, DLB2_CM_CFG_PM_PMCSR_DISABLE(ver), pmcsr_dis);
272 }
273 
274 /**
275  * dlb2_hw_get_num_resources() - query the PCI function's available resources
276  * @hw: dlb2_hw handle for a particular device.
277  * @arg: pointer to resource counts.
278  * @vdev_req: indicates whether this request came from a vdev.
279  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
280  *
281  * This function returns the number of available resources for the PF or for a
282  * VF.
283  *
284  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
285  * device.
286  *
287  * Return:
288  * Returns 0 upon success, -EINVAL if vdev_req is true and vdev_id is
289  * invalid.
290  */
291 int dlb2_hw_get_num_resources(struct dlb2_hw *hw,
292 			      struct dlb2_get_num_resources_args *arg,
293 			      bool vdev_req,
294 			      unsigned int vdev_id)
295 {
296 	struct dlb2_function_resources *rsrcs;
297 	struct dlb2_bitmap *map;
298 	int i;
299 
300 	if (vdev_req && vdev_id >= DLB2_MAX_NUM_VDEVS)
301 		return -EINVAL;
302 
303 	if (vdev_req)
304 		rsrcs = &hw->vdev[vdev_id];
305 	else
306 		rsrcs = &hw->pf;
307 
308 	arg->num_sched_domains = rsrcs->num_avail_domains;
309 
310 	arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
311 
312 	arg->num_ldb_ports = 0;
313 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
314 		arg->num_ldb_ports += rsrcs->num_avail_ldb_ports[i];
315 
316 	arg->num_cos_ldb_ports[0] = rsrcs->num_avail_ldb_ports[0];
317 	arg->num_cos_ldb_ports[1] = rsrcs->num_avail_ldb_ports[1];
318 	arg->num_cos_ldb_ports[2] = rsrcs->num_avail_ldb_ports[2];
319 	arg->num_cos_ldb_ports[3] = rsrcs->num_avail_ldb_ports[3];
320 
321 	arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
322 
323 	arg->num_atomic_inflights = rsrcs->num_avail_aqed_entries;
324 
325 	map = rsrcs->avail_hist_list_entries;
326 
327 	arg->num_hist_list_entries = dlb2_bitmap_count(map);
328 
329 	arg->max_contiguous_hist_list_entries =
330 		dlb2_bitmap_longest_set_range(map);
331 
332 	if (hw->ver == DLB2_HW_V2) {
333 		arg->num_ldb_credits = rsrcs->num_avail_qed_entries;
334 		arg->num_dir_credits = rsrcs->num_avail_dqed_entries;
335 	} else {
336 		arg->num_credits = rsrcs->num_avail_entries;
337 	}
338 	return 0;
339 }
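
/*
 * Illustrative caller sketch (hypothetical values): querying what is left
 * for the PF.
 *
 *	struct dlb2_get_num_resources_args rsrcs;
 *
 *	if (dlb2_hw_get_num_resources(hw, &rsrcs, false, 0) == 0) {
 *		// rsrcs.num_sched_domains, rsrcs.num_ldb_queues,
 *		// rsrcs.num_ldb_ports, rsrcs.num_dir_ports, etc. are valid
 *	}
 */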
340 
341 static void dlb2_configure_domain_credits_v2_5(struct dlb2_hw *hw,
342 					       struct dlb2_hw_domain *domain)
343 {
344 	u32 reg = 0;
345 
346 	DLB2_BITS_SET(reg, domain->num_credits, DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
347 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id), reg);
348 }
349 
350 static void dlb2_configure_domain_credits_v2(struct dlb2_hw *hw,
351 					     struct dlb2_hw_domain *domain)
352 {
353 	u32 reg = 0;
354 
355 	DLB2_BITS_SET(reg, domain->num_ldb_credits,
356 		      DLB2_CHP_CFG_LDB_VAS_CRD_COUNT);
357 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), reg);
358 
359 	reg = 0;
360 	DLB2_BITS_SET(reg, domain->num_dir_credits,
361 		      DLB2_CHP_CFG_DIR_VAS_CRD_COUNT);
362 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), reg);
363 }
364 
365 static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
366 					  struct dlb2_hw_domain *domain)
367 {
368 	if (hw->ver == DLB2_HW_V2)
369 		dlb2_configure_domain_credits_v2(hw, domain);
370 	else
371 		dlb2_configure_domain_credits_v2_5(hw, domain);
372 }
373 
374 static int dlb2_attach_credits(struct dlb2_function_resources *rsrcs,
375 			       struct dlb2_hw_domain *domain,
376 			       u32 num_credits,
377 			       struct dlb2_cmd_response *resp)
378 {
379 	if (rsrcs->num_avail_entries < num_credits) {
380 		resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
381 		return -EINVAL;
382 	}
383 
384 	rsrcs->num_avail_entries -= num_credits;
385 	domain->num_credits += num_credits;
386 	return 0;
387 }
388 
389 static struct dlb2_ldb_port *
390 dlb2_get_next_ldb_port(struct dlb2_hw *hw,
391 		       struct dlb2_function_resources *rsrcs,
392 		       u32 domain_id,
393 		       u32 cos_id)
394 {
395 	struct dlb2_list_entry *iter;
396 	struct dlb2_ldb_port *port;
397 	RTE_SET_USED(iter);
398 
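	/*
	 * Port selection below proceeds in passes, from most to least
	 * preferred:
	 *   1. a port whose two numeric neighbors are both owned by other
	 *      domains,
	 *   2. a port with one neighbor owned by another domain and the
	 *      other unallocated,
	 *   3. a port with both neighbors unallocated,
	 *   4. failing all of the above, the head of the available list.
	 */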
399 	/*
400 	 * To reduce the odds of consecutive load-balanced ports mapping to the
401 	 * same queue(s), the driver attempts to allocate ports whose neighbors
402 	 * are owned by a different domain.
403 	 */
404 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
405 		u32 next, prev;
406 		u32 phys_id;
407 
408 		phys_id = port->id.phys_id;
409 		next = phys_id + 1;
410 		prev = phys_id - 1;
411 
412 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
413 			next = 0;
414 		if (phys_id == 0)
415 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
416 
417 		if (!hw->rsrcs.ldb_ports[next].owned ||
418 		    hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
419 			continue;
420 
421 		if (!hw->rsrcs.ldb_ports[prev].owned ||
422 		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
423 			continue;
424 
425 		return port;
426 	}
427 
428 	/*
429 	 * Failing that, the driver looks for a port with one neighbor owned by
430 	 * a different domain and the other unallocated.
431 	 */
432 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
433 		u32 next, prev;
434 		u32 phys_id;
435 
436 		phys_id = port->id.phys_id;
437 		next = phys_id + 1;
438 		prev = phys_id - 1;
439 
440 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
441 			next = 0;
442 		if (phys_id == 0)
443 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
444 
445 		if (!hw->rsrcs.ldb_ports[prev].owned &&
446 		    hw->rsrcs.ldb_ports[next].owned &&
447 		    hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
448 			return port;
449 
450 		if (!hw->rsrcs.ldb_ports[next].owned &&
451 		    hw->rsrcs.ldb_ports[prev].owned &&
452 		    hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
453 			return port;
454 	}
455 
456 	/*
457 	 * Failing that, the driver looks for a port with both neighbors
458 	 * unallocated.
459 	 */
460 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
461 		u32 next, prev;
462 		u32 phys_id;
463 
464 		phys_id = port->id.phys_id;
465 		next = phys_id + 1;
466 		prev = phys_id - 1;
467 
468 		if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
469 			next = 0;
470 		if (phys_id == 0)
471 			prev = DLB2_MAX_NUM_LDB_PORTS - 1;
472 
473 		if (!hw->rsrcs.ldb_ports[prev].owned &&
474 		    !hw->rsrcs.ldb_ports[next].owned)
475 			return port;
476 	}
477 
478 	/* If all else fails, the driver returns the next available port. */
479 	return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
480 				   typeof(*port));
481 }
482 
483 static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
484 				   struct dlb2_function_resources *rsrcs,
485 				   struct dlb2_hw_domain *domain,
486 				   u32 num_ports,
487 				   u32 cos_id,
488 				   struct dlb2_cmd_response *resp)
489 {
490 	unsigned int i;
491 
492 	if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
493 		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
494 		return -EINVAL;
495 	}
496 
497 	for (i = 0; i < num_ports; i++) {
498 		struct dlb2_ldb_port *port;
499 
500 		port = dlb2_get_next_ldb_port(hw, rsrcs,
501 					      domain->id.phys_id, cos_id);
502 		if (port == NULL) {
503 			DLB2_HW_ERR(hw,
504 				    "[%s()] Internal error: domain validation failed\n",
505 				    __func__);
506 			return -EFAULT;
507 		}
508 
509 		dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
510 			      &port->func_list);
511 
512 		port->domain_id = domain->id;
513 		port->owned = true;
514 
515 		dlb2_list_add(&domain->avail_ldb_ports[cos_id],
516 			      &port->domain_list);
517 	}
518 
519 	rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
520 
521 	return 0;
522 }
523 
524 
525 static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
526 				 struct dlb2_function_resources *rsrcs,
527 				 struct dlb2_hw_domain *domain,
528 				 struct dlb2_create_sched_domain_args *args,
529 				 struct dlb2_cmd_response *resp)
530 {
531 	unsigned int i, j;
532 	int ret;
533 
534 	if (args->cos_strict) {
535 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
536 			u32 num = args->num_cos_ldb_ports[i];
537 
538 			/* Allocate ports from specific classes-of-service */
539 			ret = __dlb2_attach_ldb_ports(hw,
540 						      rsrcs,
541 						      domain,
542 						      num,
543 						      i,
544 						      resp);
545 			if (ret)
546 				return ret;
547 		}
548 	} else {
549 		unsigned int k;
550 		u32 cos_id;
551 
552 		/*
553 		 * Attempt to allocate from a specific class-of-service, but
554 		 * fall back to the other classes if that fails.
555 		 */
556 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
557 			for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
558 				for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
559 					cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
560 
561 					ret = __dlb2_attach_ldb_ports(hw,
562 								      rsrcs,
563 								      domain,
564 								      1,
565 								      cos_id,
566 								      resp);
567 					if (ret == 0)
568 						break;
569 				}
570 
571 				if (ret)
572 					return ret;
573 			}
574 		}
575 	}
576 
577 	/* Allocate num_ldb_ports from any class-of-service */
578 	for (i = 0; i < args->num_ldb_ports; i++) {
579 		for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
580 			/* Allocate from best performing cos */
581 			u32 cos_idx = j + DLB2_MAX_NUM_LDB_PORTS;
582 			u32 cos_id = hw->ldb_pp_allocations[cos_idx];
583 			ret = __dlb2_attach_ldb_ports(hw,
584 						      rsrcs,
585 						      domain,
586 						      1,
587 						      cos_id,
588 						      resp);
589 			if (ret == 0)
590 				break;
591 		}
592 
593 		if (ret)
594 			return ret;
595 	}
596 
597 	return 0;
598 }
599 
600 static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
601 				 struct dlb2_function_resources *rsrcs,
602 				 struct dlb2_hw_domain *domain,
603 				 u32 num_ports,
604 				 struct dlb2_cmd_response *resp)
605 {
606 	int num_res = hw->num_prod_cores;
607 	unsigned int i;
608 
609 	if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
610 		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
611 		return -EINVAL;
612 	}
613 
614 	for (i = 0; i < num_ports; i++) {
615 		struct dlb2_dir_pq_pair *port;
616 
617 		port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
618 					   typeof(*port));
619 		if (port == NULL) {
620 			DLB2_HW_ERR(hw,
621 				    "[%s()] Internal error: domain validation failed\n",
622 				    __func__);
623 			return -EFAULT;
624 		}
625 
626 		if (num_res) {
627 			dlb2_list_add(&domain->rsvd_dir_pq_pairs,
628 				      &port->domain_list);
629 			num_res--;
630 		} else {
631 			dlb2_list_add(&domain->avail_dir_pq_pairs,
632 			dlb2_list_add(&domain->avail_dir_pq_pairs,
633 				      &port->domain_list);
634 
635 		dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
636 
637 		port->domain_id = domain->id;
638 		port->owned = true;
639 	}
640 
641 	rsrcs->num_avail_dir_pq_pairs -= num_ports;
642 
643 	return 0;
644 }
645 
646 static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
647 				   struct dlb2_hw_domain *domain,
648 				   u32 num_credits,
649 				   struct dlb2_cmd_response *resp)
650 {
651 	if (rsrcs->num_avail_qed_entries < num_credits) {
652 		resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
653 		return -EINVAL;
654 	}
655 
656 	rsrcs->num_avail_qed_entries -= num_credits;
657 	domain->num_ldb_credits += num_credits;
658 	return 0;
659 }
660 
661 static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
662 				   struct dlb2_hw_domain *domain,
663 				   u32 num_credits,
664 				   struct dlb2_cmd_response *resp)
665 {
666 	if (rsrcs->num_avail_dqed_entries < num_credits) {
667 		resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
668 		return -EINVAL;
669 	}
670 
671 	rsrcs->num_avail_dqed_entries -= num_credits;
672 	domain->num_dir_credits += num_credits;
673 	return 0;
674 }
675 
676 
677 static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
678 					struct dlb2_hw_domain *domain,
679 					u32 num_atomic_inflights,
680 					struct dlb2_cmd_response *resp)
681 {
682 	if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
683 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
684 		return -EINVAL;
685 	}
686 
687 	rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
688 	domain->num_avail_aqed_entries += num_atomic_inflights;
689 	return 0;
690 }
691 
692 static int
693 dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
694 				     struct dlb2_hw_domain *domain,
695 				     u32 num_hist_list_entries,
696 				     struct dlb2_cmd_response *resp)
697 {
698 	struct dlb2_bitmap *bitmap;
699 	int base;
700 
701 	if (num_hist_list_entries) {
702 		bitmap = rsrcs->avail_hist_list_entries;
703 
704 		base = dlb2_bitmap_find_set_bit_range(bitmap,
705 						      num_hist_list_entries);
706 		if (base < 0)
707 			goto error;
708 
709 		domain->total_hist_list_entries = num_hist_list_entries;
710 		domain->avail_hist_list_entries = num_hist_list_entries;
711 		domain->hist_list_entry_base = base;
712 		domain->hist_list_entry_offset = 0;
713 
714 		dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
715 	}
716 	return 0;
717 
718 error:
719 	resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
720 	return -EINVAL;
721 }
722 
723 static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
724 				  struct dlb2_function_resources *rsrcs,
725 				  struct dlb2_hw_domain *domain,
726 				  u32 num_queues,
727 				  struct dlb2_cmd_response *resp)
728 {
729 	unsigned int i;
730 
731 	if (rsrcs->num_avail_ldb_queues < num_queues) {
732 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
733 		return -EINVAL;
734 	}
735 
736 	for (i = 0; i < num_queues; i++) {
737 		struct dlb2_ldb_queue *queue;
738 
739 		queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
740 					    typeof(*queue));
741 		if (queue == NULL) {
742 			DLB2_HW_ERR(hw,
743 				    "[%s()] Internal error: domain validation failed\n",
744 				    __func__);
745 			return -EFAULT;
746 		}
747 
748 		dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
749 
750 		queue->domain_id = domain->id;
751 		queue->owned = true;
752 
753 		dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
754 	}
755 
756 	rsrcs->num_avail_ldb_queues -= num_queues;
757 
758 	return 0;
759 }
760 
761 static int
762 dlb2_pp_profile(struct dlb2_hw *hw, int port, bool is_ldb)
763 {
764 	u64 cycle_start = 0ULL, cycle_end = 0ULL;
765 	struct dlb2_hcw hcw_mem[DLB2_HCW_MEM_SIZE], *hcw;
766 	void __iomem *pp_addr;
767 	int i;
768 
769 	pp_addr = os_map_producer_port(hw, port, is_ldb);
770 
771 	/* Point hcw to a 64B-aligned location */
772 	hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[DLB2_HCW_64B_OFF] &
773 	      ~DLB2_HCW_ALIGN_MASK);
774 
775 	/*
776 	 * Program the first HCW for a completion and token return and
777 	 * the other HCWs as NOOPS
778 	 */
779 
780 	memset(hcw, 0, (DLB2_HCW_MEM_SIZE - DLB2_HCW_64B_OFF) * sizeof(*hcw));
781 	hcw->qe_comp = 1;
782 	hcw->cq_token = 1;
783 	hcw->lock_id = 1;
784 
785 	cycle_start = rte_get_tsc_cycles();
786 	for (i = 0; i < DLB2_NUM_PROBE_ENQS; i++)
787 		dlb2_movdir64b(pp_addr, hcw);
788 
789 	cycle_end = rte_get_tsc_cycles();
790 
791 	os_unmap_producer_port(hw, pp_addr);
792 	return (int)(cycle_end - cycle_start);
793 }
794 
795 static uint32_t
796 dlb2_pp_profile_func(void *data)
797 {
798 	struct dlb2_pp_thread_data *thread_data = data;
799 
800 	thread_data->cycles = dlb2_pp_profile(thread_data->hw,
801 			thread_data->pp, thread_data->is_ldb);
802 
803 	return 0;
804 }
805 
806 static int dlb2_pp_cycle_comp(const void *a, const void *b)
807 {
808 	const struct dlb2_pp_thread_data *x = a;
809 	const struct dlb2_pp_thread_data *y = b;
810 
811 	return x->cycles - y->cycles;
812 }
813 
814 
815 /* Probe producer ports from different CPU cores */
816 static void
817 dlb2_get_pp_allocation(struct dlb2_hw *hw, int cpu, int port_type)
818 {
819 	struct dlb2_pp_thread_data dlb2_thread_data[DLB2_MAX_NUM_DIR_PORTS_V2_5];
820 	struct dlb2_dev *dlb2_dev = container_of(hw, struct dlb2_dev, hw);
821 	struct dlb2_pp_thread_data cos_cycles[DLB2_NUM_COS_DOMAINS];
822 	int ver = DLB2_HW_DEVICE_FROM_PCI_ID(dlb2_dev->pdev);
823 	int num_ports_per_sort, num_ports, num_sort, i, err;
824 	bool is_ldb = (port_type == DLB2_LDB_PORT);
825 	int *port_allocations;
826 	rte_thread_t thread;
827 	rte_thread_attr_t th_attr;
828 	char th_name[RTE_THREAD_INTERNAL_NAME_SIZE];
829 
830 	if (is_ldb) {
831 		port_allocations = hw->ldb_pp_allocations;
832 		num_ports = DLB2_MAX_NUM_LDB_PORTS;
833 		num_sort = DLB2_NUM_COS_DOMAINS;
834 	} else {
835 		port_allocations = hw->dir_pp_allocations;
836 		num_ports = DLB2_MAX_NUM_DIR_PORTS(ver);
837 		num_sort = 1;
838 	}
839 
840 	num_ports_per_sort = num_ports / num_sort;
841 
842 	dlb2_dev->enqueue_four = dlb2_movdir64b;
843 
844 	DLB2_HW_INFO(hw, "CPU core used in %s pp profiling: %d\n",
845 		      is_ldb ? "LDB" : "DIR", cpu);
846 
847 	memset(cos_cycles, 0, num_sort * sizeof(struct dlb2_pp_thread_data));
848 	for (i = 0; i < num_ports; i++) {
849 		int cos = (i >> DLB2_NUM_COS_DOMAINS) % DLB2_NUM_COS_DOMAINS;
850 		dlb2_thread_data[i].is_ldb = is_ldb;
851 		dlb2_thread_data[i].pp = i;
852 		dlb2_thread_data[i].cycles = 0;
853 		dlb2_thread_data[i].hw = hw;
854 
855 		err = rte_thread_attr_init(&th_attr);
856 		if (err != 0) {
857 			DLB2_HW_ERR(hw, ": thread attribute failed! err=%d\n", err);
858 			return;
859 		}
860 		CPU_SET(cpu, &th_attr.cpuset);
861 
862 		err = rte_thread_create(&thread, &th_attr,
863 				&dlb2_pp_profile_func, &dlb2_thread_data[i]);
864 		if (err) {
865 			DLB2_HW_ERR(hw, ": thread creation failed! err=%d\n", err);
866 			return;
867 		}
868 
869 		snprintf(th_name, sizeof(th_name), "dlb2-pp%d", cpu);
870 		rte_thread_set_prefixed_name(thread, th_name);
871 
872 		err = rte_thread_join(thread, NULL);
873 		if (err) {
874 			DLB2_HW_ERR(hw, ": thread join failed! err=%d\n", err);
875 			return;
876 		}
877 
878 		if (is_ldb)
879 			cos_cycles[cos].cycles += dlb2_thread_data[i].cycles;
880 
881 		if ((i + 1) % num_ports_per_sort == 0) {
882 			int index = 0;
883 
884 			if (is_ldb) {
885 				cos_cycles[cos].pp = cos;
886 				index = cos * num_ports_per_sort;
887 			}
888 			/*
889 			 * For LDB ports, first sort within a CoS. Later, sort
890 			 * the classes of service by the total cycles measured
891 			 * for each CoS. For DIR ports, there is a single sort
892 			 * across all ports.
893 			 */
894 			qsort(&dlb2_thread_data[index], num_ports_per_sort,
895 			      sizeof(struct dlb2_pp_thread_data),
896 			      dlb2_pp_cycle_comp);
897 		}
898 	}
899 
900 	/*
901 	 * Sort the classes of service by their cycle counts aggregated over
902 	 * all ports in each CoS. Note: entries at index DLB2_MAX_NUM_LDB_PORTS
903 	 * and beyond hold sorted CoS IDs, so 'pp' is a cos_id, not a port ID.
904 	 */
905 	if (is_ldb) {
906 		qsort(cos_cycles, num_sort, sizeof(struct dlb2_pp_thread_data),
907 		      dlb2_pp_cycle_comp);
908 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
909 			port_allocations[i + DLB2_MAX_NUM_LDB_PORTS] = cos_cycles[i].pp;
910 	}
911 
912 	for (i = 0; i < num_ports; i++) {
913 		port_allocations[i] = dlb2_thread_data[i].pp;
914 		DLB2_HW_INFO(hw, ": pp %d cycles %d\n", port_allocations[i],
915 			      dlb2_thread_data[i].cycles);
916 	}
917 
918 }
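
/*
 * Layout of the allocation tables produced above:
 *   - ldb_pp_allocations[0 .. DLB2_MAX_NUM_LDB_PORTS - 1]: LDB port IDs,
 *     sorted by probe cycles within each class of service;
 *   - ldb_pp_allocations[DLB2_MAX_NUM_LDB_PORTS .. DLB2_MAX_NUM_LDB_PORTS +
 *     DLB2_NUM_COS_DOMAINS - 1]: CoS IDs, best (fewest aggregate cycles)
 *     first;
 *   - dir_pp_allocations[0 .. DLB2_MAX_NUM_DIR_PORTS(ver) - 1]: DIR port
 *     IDs in a single sorted order.
 * dlb2_resource_init() and dlb2_attach_ldb_ports() consume these tables.
 */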
919 
920 int
921 dlb2_resource_probe(struct dlb2_hw *hw, const void *probe_args)
922 {
923 	const struct dlb2_devargs *args = (const struct dlb2_devargs *)probe_args;
924 	const char *mask = args ? args->producer_coremask : NULL;
925 	int cpu = 0, cnt = 0, cores[RTE_MAX_LCORE], i;
926 
931 	if (mask && rte_eal_parse_coremask(mask, cores)) {
932 		DLB2_HW_ERR(hw, ": Invalid producer coremask=%s\n", mask);
933 		return -1;
934 	}
935 
936 	hw->num_prod_cores = 0;
937 	for (i = 0; i < RTE_MAX_LCORE; i++) {
938 		bool is_pcore = (mask && cores[i] != -1);
939 
940 		if (rte_lcore_is_enabled(i)) {
941 			if (is_pcore) {
942 				/*
943 				 * Populate the producer cores from parsed
944 				 * coremask
945 				 */
946 				hw->prod_core_list[cores[i]] = i;
947 				hw->num_prod_cores++;
948 
949 			} else if ((++cnt == DLB2_EAL_PROBE_CORE ||
950 			   rte_lcore_count() < DLB2_EAL_PROBE_CORE)) {
951 				/*
952 				 * If no producer coremask is provided, use the
953 				 * second EAL core to probe
954 				 */
955 				cpu = i;
956 				break;
957 			}
958 		} else if (is_pcore) {
959 			DLB2_HW_ERR(hw, "Producer coremask (%s) must be a subset of EAL coremask\n",
960 				     mask);
961 			return -1;
962 		}
963 
964 	}
965 	/* Use the first core in producer coremask to probe */
966 	if (hw->num_prod_cores)
967 		cpu = hw->prod_core_list[0];
968 
969 	dlb2_get_pp_allocation(hw, cpu, DLB2_LDB_PORT);
970 	dlb2_get_pp_allocation(hw, cpu, DLB2_DIR_PORT);
971 
972 	return 0;
973 }
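
/*
 * Illustrative example (hypothetical devargs): with a producer coremask of
 * "0xC" and EAL lcores 0-3 enabled, lcores 2 and 3 become producer cores,
 * prod_core_list[] holds {2, 3}, num_prod_cores is 2, and lcore 2 (the
 * first core in the mask) runs the producer-port probing above.
 */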
974 
975 static int
976 dlb2_domain_attach_resources(struct dlb2_hw *hw,
977 			     struct dlb2_function_resources *rsrcs,
978 			     struct dlb2_hw_domain *domain,
979 			     struct dlb2_create_sched_domain_args *args,
980 			     struct dlb2_cmd_response *resp)
981 {
982 	int ret;
983 
984 	ret = dlb2_attach_ldb_queues(hw,
985 				     rsrcs,
986 				     domain,
987 				     args->num_ldb_queues,
988 				     resp);
989 	if (ret)
990 		return ret;
991 
992 	ret = dlb2_attach_ldb_ports(hw,
993 				    rsrcs,
994 				    domain,
995 				    args,
996 				    resp);
997 	if (ret)
998 		return ret;
999 
1000 	ret = dlb2_attach_dir_ports(hw,
1001 				    rsrcs,
1002 				    domain,
1003 				    args->num_dir_ports,
1004 				    resp);
1005 	if (ret)
1006 		return ret;
1007 
1008 	if (hw->ver == DLB2_HW_V2) {
1009 		ret = dlb2_attach_ldb_credits(rsrcs,
1010 					      domain,
1011 					      args->num_ldb_credits,
1012 					      resp);
1013 		if (ret)
1014 			return ret;
1015 
1016 		ret = dlb2_attach_dir_credits(rsrcs,
1017 					      domain,
1018 					      args->num_dir_credits,
1019 					      resp);
1020 		if (ret)
1021 			return ret;
1022 	} else {  /* DLB 2.5 */
1023 		ret = dlb2_attach_credits(rsrcs,
1024 					  domain,
1025 					  args->num_credits,
1026 					  resp);
1027 		if (ret)
1028 			return ret;
1029 	}
1030 
1031 	ret = dlb2_attach_domain_hist_list_entries(rsrcs,
1032 						   domain,
1033 						   args->num_hist_list_entries,
1034 						   resp);
1035 	if (ret)
1036 		return ret;
1037 
1038 	ret = dlb2_attach_atomic_inflights(rsrcs,
1039 					   domain,
1040 					   args->num_atomic_inflights,
1041 					   resp);
1042 	if (ret)
1043 		return ret;
1044 
1045 	dlb2_configure_domain_credits(hw, domain);
1046 
1047 	domain->configured = true;
1048 
1049 	domain->started = false;
1050 
1051 	rsrcs->num_avail_domains--;
1052 
1053 	return 0;
1054 }
1055 
1056 static int
1057 dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
1058 				  struct dlb2_create_sched_domain_args *args,
1059 				  struct dlb2_cmd_response *resp,
1060 				  struct dlb2_hw *hw,
1061 				  struct dlb2_hw_domain **out_domain)
1062 {
1063 	u32 num_avail_ldb_ports, req_ldb_ports;
1064 	struct dlb2_bitmap *avail_hl_entries;
1065 	unsigned int max_contig_hl_range;
1066 	struct dlb2_hw_domain *domain;
1067 	int i;
1068 
1069 	avail_hl_entries = rsrcs->avail_hist_list_entries;
1070 
1071 	max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
1072 
1073 	num_avail_ldb_ports = 0;
1074 	req_ldb_ports = 0;
1075 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1076 		num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
1077 
1078 		req_ldb_ports += args->num_cos_ldb_ports[i];
1079 	}
1080 
1081 	req_ldb_ports += args->num_ldb_ports;
1082 
1083 	if (rsrcs->num_avail_domains < 1) {
1084 		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
1085 		return -EINVAL;
1086 	}
1087 
1088 	domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
1089 	if (domain == NULL) {
1090 		resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
1091 		return -EFAULT;
1092 	}
1093 
1094 	if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
1095 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
1096 		return -EINVAL;
1097 	}
1098 
1099 	if (req_ldb_ports > num_avail_ldb_ports) {
1100 		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
1101 		return -EINVAL;
1102 	}
1103 
1104 	for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
1105 		if (args->num_cos_ldb_ports[i] >
1106 		    rsrcs->num_avail_ldb_ports[i]) {
1107 			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
1108 			return -EINVAL;
1109 		}
1110 	}
1111 
1112 	if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
1113 		resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
1114 		return -EINVAL;
1115 	}
1116 
1117 	if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
1118 		resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
1119 		return -EINVAL;
1120 	}
1121 	if (hw->ver == DLB2_HW_V2_5) {
1122 		if (rsrcs->num_avail_entries < args->num_credits) {
1123 			resp->status = DLB2_ST_CREDITS_UNAVAILABLE;
1124 			return -EINVAL;
1125 		}
1126 	} else {
1127 		if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
1128 			resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
1129 			return -EINVAL;
1130 		}
1131 		if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
1132 			resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
1133 			return -EINVAL;
1134 		}
1135 	}
1136 
1137 	if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
1138 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
1139 		return -EINVAL;
1140 	}
1141 
1142 	if (max_contig_hl_range < args->num_hist_list_entries) {
1143 		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
1144 		return -EINVAL;
1145 	}
1146 
1147 	*out_domain = domain;
1148 
1149 	return 0;
1150 }
1151 
1152 static void
1153 dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
1154 				  struct dlb2_create_sched_domain_args *args,
1155 				  bool vdev_req,
1156 				  unsigned int vdev_id)
1157 {
1158 	DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
1159 	if (vdev_req)
1160 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
1161 	DLB2_HW_DBG(hw, "\tNumber of LDB queues:          %d\n",
1162 		    args->num_ldb_queues);
1163 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
1164 		    args->num_ldb_ports);
1165 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0):   %d\n",
1166 		    args->num_cos_ldb_ports[0]);
1167 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1):   %d\n",
1168 		    args->num_cos_ldb_ports[1]);
1169 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2):   %d\n",
1170 		    args->num_cos_ldb_ports[2]);
1171 	DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3):   %d\n",
1172 		    args->num_cos_ldb_ports[3]);
1173 	DLB2_HW_DBG(hw, "\tStrict CoS allocation:         %d\n",
1174 		    args->cos_strict);
1175 	DLB2_HW_DBG(hw, "\tNumber of DIR ports:           %d\n",
1176 		    args->num_dir_ports);
1177 	DLB2_HW_DBG(hw, "\tNumber of ATM inflights:       %d\n",
1178 		    args->num_atomic_inflights);
1179 	DLB2_HW_DBG(hw, "\tNumber of hist list entries:   %d\n",
1180 		    args->num_hist_list_entries);
1181 	if (hw->ver == DLB2_HW_V2) {
1182 		DLB2_HW_DBG(hw, "\tNumber of LDB credits:         %d\n",
1183 			    args->num_ldb_credits);
1184 		DLB2_HW_DBG(hw, "\tNumber of DIR credits:         %d\n",
1185 			    args->num_dir_credits);
1186 	} else {
1187 		DLB2_HW_DBG(hw, "\tNumber of credits:         %d\n",
1188 			    args->num_credits);
1189 	}
1190 }
1191 
1192 /**
1193  * dlb2_hw_create_sched_domain() - create a scheduling domain
1194  * @hw: dlb2_hw handle for a particular device.
1195  * @args: scheduling domain creation arguments.
1196  * @resp: response structure.
1197  * @vdev_req: indicates whether this request came from a vdev.
1198  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
1199  *
1200  * This function creates a scheduling domain containing the resources specified
1201  * in args. The individual resources (queues, ports, credits) can be configured
1202  * after creating a scheduling domain.
1203  *
1204  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
1205  * device.
1206  *
1207  * Return:
1208  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
1209  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
1210  * contains the domain ID.
1211  *
1212  * resp->id contains a virtual ID if vdev_req is true.
1213  *
1214  * Errors:
1215  * EINVAL - A requested resource is unavailable, or the requested domain name
1216  *	    is already in use.
1217  * EFAULT - Internal error (resp->status not set).
1218  */
1219 int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
1220 				struct dlb2_create_sched_domain_args *args,
1221 				struct dlb2_cmd_response *resp,
1222 				bool vdev_req,
1223 				unsigned int vdev_id)
1224 {
1225 	struct dlb2_function_resources *rsrcs;
1226 	struct dlb2_hw_domain *domain;
1227 	int ret;
1228 
1229 	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1230 
1231 	dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
1232 
1233 	/*
1234 	 * Verify that hardware resources are available before attempting to
1235 	 * satisfy the request. This simplifies the error unwinding code.
1236 	 */
1237 	ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp, hw, &domain);
1238 	if (ret)
1239 		return ret;
1240 
1241 	dlb2_init_domain_rsrc_lists(domain);
1242 
1243 	ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
1244 	if (ret) {
1245 		DLB2_HW_ERR(hw,
1246 			    "[%s()] Internal error: failed to verify args.\n",
1247 			    __func__);
1248 
1249 		return ret;
1250 	}
1251 
1252 	dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
1253 
1254 	dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
1255 
1256 	resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
1257 	resp->status = 0;
1258 
1259 	return 0;
1260 }
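
/*
 * Minimal usage sketch (illustrative values only):
 *
 *	struct dlb2_create_sched_domain_args args = {0};
 *	struct dlb2_cmd_response resp = {0};
 *
 *	args.num_ldb_queues = 2;
 *	args.num_ldb_ports = 4;		// from any class of service
 *	args.num_dir_ports = 2;
 *	args.num_atomic_inflights = 64;
 *	args.num_hist_list_entries = 128;
 *	args.num_ldb_credits = 1024;	// DLB 2.0; use num_credits on 2.5
 *	args.num_dir_credits = 256;
 *
 *	if (dlb2_hw_create_sched_domain(hw, &args, &resp, false, 0) == 0)
 *		;	// resp.id holds the new domain's ID
 */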
1261 
1262 static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
1263 				     struct dlb2_dir_pq_pair *port)
1264 {
1265 	u32 reg = 0;
1266 
1267 	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
1268 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1269 
1270 	dlb2_flush_csr(hw);
1271 }
1272 
1273 static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
1274 				   struct dlb2_dir_pq_pair *port)
1275 {
1276 	u32 cnt;
1277 
1278 	cnt = DLB2_CSR_RD(hw,
1279 			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));
1280 
1281 	/*
1282 	 * Account for the initial token count, which is used in order to
1283 	 * provide a CQ with depth less than 8.
1284 	 */
1285 
1286 	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
1287 	       port->init_tkn_cnt;
1288 }
1289 
1290 static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
1291 			      struct dlb2_dir_pq_pair *port)
1292 {
1293 	unsigned int port_id = port->id.phys_id;
1294 	u32 cnt;
1295 
1296 	/* Return any outstanding tokens */
1297 	cnt = dlb2_dir_cq_token_count(hw, port);
1298 
1299 	if (cnt != 0) {
1300 		struct dlb2_hcw hcw_mem[8], *hcw;
1301 		void __iomem *pp_addr;
1302 
1303 		pp_addr = os_map_producer_port(hw, port_id, false);
1304 
1305 		/* Point hcw to a 64B-aligned location */
1306 		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1307 
1308 		/*
1309 		 * Program the first HCW for a batch token return and
1310 		 * the rest as NOOPS
1311 		 */
1312 		memset(hcw, 0, 4 * sizeof(*hcw));
1313 		hcw->cq_token = 1;
1314 		hcw->lock_id = cnt - 1;
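		/*
		 * For a batch token return, lock_id encodes the batch size
		 * minus one, so this single HCW returns all 'cnt'
		 * outstanding tokens.
		 */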
1315 
1316 		dlb2_movdir64b(pp_addr, hcw);
1317 
1318 		os_fence_hcw(hw, pp_addr);
1319 
1320 		os_unmap_producer_port(hw, pp_addr);
1321 	}
1322 
1323 	return cnt;
1324 }
1325 
1326 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
1327 				    struct dlb2_dir_pq_pair *port)
1328 {
1329 	u32 reg = 0;
1330 
1331 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
1332 
1333 	dlb2_flush_csr(hw);
1334 }
1335 
1336 static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
1337 				     struct dlb2_hw_domain *domain,
1338 				     bool toggle_port)
1339 {
1340 	struct dlb2_list_entry *iter;
1341 	struct dlb2_dir_pq_pair *port;
1342 	int drain_cnt = 0;
1343 	RTE_SET_USED(iter);
1344 
1345 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
1346 		/*
1347 		 * Can't drain a port if it's not configured, and there's
1348 		 * nothing to drain if its queue is unconfigured.
1349 		 */
1350 		if (!port->port_configured || !port->queue_configured)
1351 			continue;
1352 
1353 		if (toggle_port)
1354 			dlb2_dir_port_cq_disable(hw, port);
1355 
1356 		drain_cnt = dlb2_drain_dir_cq(hw, port);
1357 
1358 		if (toggle_port)
1359 			dlb2_dir_port_cq_enable(hw, port);
1360 	}
1361 
1362 	return drain_cnt;
1363 }
1364 
1365 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
1366 				struct dlb2_dir_pq_pair *queue)
1367 {
1368 	u32 cnt;
1369 
1370 	cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
1371 						      queue->id.phys_id));
1372 
1373 	return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
1374 }
1375 
1376 static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
1377 				    struct dlb2_dir_pq_pair *queue)
1378 {
1379 	return dlb2_dir_queue_depth(hw, queue) == 0;
1380 }
1381 
1382 static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
1383 					 struct dlb2_hw_domain *domain)
1384 {
1385 	struct dlb2_list_entry *iter;
1386 	struct dlb2_dir_pq_pair *queue;
1387 	RTE_SET_USED(iter);
1388 
1389 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
1390 		if (!dlb2_dir_queue_is_empty(hw, queue))
1391 			return false;
1392 	}
1393 
1394 	return true;
1395 }
1396 static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
1397 					struct dlb2_hw_domain *domain)
1398 {
1399 	int i;
1400 
1401 	/* If the domain hasn't been started, there's no traffic to drain */
1402 	if (!domain->started)
1403 		return 0;
1404 
1405 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1406 		int drain_cnt;
1407 
1408 		drain_cnt = dlb2_domain_drain_dir_cqs(hw, domain, false);
1409 
1410 		if (dlb2_domain_dir_queues_empty(hw, domain))
1411 			break;
1412 
1413 		/*
1414 		 * Allow time for DLB to schedule QEs before draining
1415 		 * the CQs again.
1416 		 */
1417 		if (!drain_cnt)
1418 			rte_delay_us(1);
1419 
1420 	}
1421 
1422 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1423 		DLB2_HW_ERR(hw,
1424 			    "[%s()] Internal error: failed to empty queues\n",
1425 			    __func__);
1426 		return -EFAULT;
1427 	}
1428 
1429 	/*
1430 	 * Drain the CQs one more time. For the queues to have gone empty, the
1431 	 * device must have scheduled one or more QEs into the CQs.
1432 	 */
1433 	dlb2_domain_drain_dir_cqs(hw, domain, true);
1434 
1435 	return 0;
1436 }
1437 
1438 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
1439 				    struct dlb2_ldb_port *port)
1440 {
1441 	u32 reg = 0;
1442 
1443 	/*
1444 	 * Don't re-enable the port if a removal is pending. The caller should
1445 	 * mark this port as enabled (if it isn't already), and when the
1446 	 * removal completes the port will be enabled.
1447 	 */
1448 	if (port->num_pending_removals)
1449 		return;
1450 
1451 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1452 
1453 	dlb2_flush_csr(hw);
1454 }
1455 
1456 static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
1457 				     struct dlb2_ldb_port *port)
1458 {
1459 	u32 reg = 0;
1460 
1461 	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
1462 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
1463 
1464 	dlb2_flush_csr(hw);
1465 }
1466 
1467 static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
1468 				      struct dlb2_ldb_port *port)
1469 {
1470 	u32 cnt;
1471 
1472 	cnt = DLB2_CSR_RD(hw,
1473 			  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));
1474 
1475 	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
1476 }
1477 
1478 static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
1479 				   struct dlb2_ldb_port *port)
1480 {
1481 	u32 cnt;
1482 
1483 	cnt = DLB2_CSR_RD(hw,
1484 			  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));
1485 
1486 	/*
1487 	 * Account for the initial token count, which is used in order to
1488 	 * provide a CQ with depth less than 8.
1489 	 */
1490 
1491 	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
1492 		port->init_tkn_cnt;
1493 }
1494 
1495 static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
1496 {
1497 	u32 infl_cnt, tkn_cnt;
1498 	unsigned int i;
1499 
1500 	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
1501 	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
1502 
1503 	if (infl_cnt || tkn_cnt) {
1504 		struct dlb2_hcw hcw_mem[8], *hcw;
1505 		void __iomem *pp_addr;
1506 
1507 		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
1508 
1509 		/* Point hcw to a 64B-aligned location */
1510 		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
1511 
1512 		/*
1513 		 * Program the first HCW for a completion and token return and
1514 		 * the other HCWs as NOOPS
1515 		 */
1516 
1517 		memset(hcw, 0, 4 * sizeof(*hcw));
1518 		hcw->qe_comp = (infl_cnt > 0);
1519 		hcw->cq_token = (tkn_cnt > 0);
1520 		hcw->lock_id = tkn_cnt - 1;
1521 
1522 		/* Return tokens in the first HCW */
1523 		dlb2_movdir64b(pp_addr, hcw);
1524 
1525 		hcw->cq_token = 0;
1526 
1527 		/* Issue remaining completions (if any) */
1528 		for (i = 1; i < infl_cnt; i++)
1529 			dlb2_movdir64b(pp_addr, hcw);
1530 
1531 		os_fence_hcw(hw, pp_addr);
1532 
1533 		os_unmap_producer_port(hw, pp_addr);
1534 	}
1535 
1536 	return tkn_cnt;
1537 }
1538 
1539 static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
1540 				      struct dlb2_hw_domain *domain,
1541 				      bool toggle_port)
1542 {
1543 	struct dlb2_list_entry *iter;
1544 	struct dlb2_ldb_port *port;
1545 	int drain_cnt = 0;
1546 	int i;
1547 	RTE_SET_USED(iter);
1548 
1549 	/* If the domain hasn't been started, there's no traffic to drain */
1550 	if (!domain->started)
1551 		return 0;
1552 
1553 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1554 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1555 			if (toggle_port)
1556 				dlb2_ldb_port_cq_disable(hw, port);
1557 
1558 			drain_cnt = dlb2_drain_ldb_cq(hw, port);
1559 
1560 			if (toggle_port)
1561 				dlb2_ldb_port_cq_enable(hw, port);
1562 		}
1563 	}
1564 
1565 	return drain_cnt;
1566 }
1567 
1568 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
1569 				struct dlb2_ldb_queue *queue)
1570 {
1571 	u32 aqed, ldb, atm;
1572 
1573 	aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
1574 						       queue->id.phys_id));
1575 	ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
1576 						      queue->id.phys_id));
1577 	atm = DLB2_CSR_RD(hw,
1578 			  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));
1579 
1580 	return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
1581 	       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
1582 	       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
1583 }
1584 
1585 static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
1586 				    struct dlb2_ldb_queue *queue)
1587 {
1588 	return dlb2_ldb_queue_depth(hw, queue) == 0;
1589 }
1590 
1591 static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
1592 					    struct dlb2_hw_domain *domain)
1593 {
1594 	struct dlb2_list_entry *iter;
1595 	struct dlb2_ldb_queue *queue;
1596 	RTE_SET_USED(iter);
1597 
1598 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
1599 		if (queue->num_mappings == 0)
1600 			continue;
1601 
1602 		if (!dlb2_ldb_queue_is_empty(hw, queue))
1603 			return false;
1604 	}
1605 
1606 	return true;
1607 }
1608 
1609 static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
1610 					   struct dlb2_hw_domain *domain)
1611 {
1612 	int i;
1613 
1614 	/* If the domain hasn't been started, there's no traffic to drain */
1615 	if (!domain->started)
1616 		return 0;
1617 
1618 	if (domain->num_pending_removals > 0) {
1619 		DLB2_HW_ERR(hw,
1620 			    "[%s()] Internal error: failed to unmap domain queues\n",
1621 			    __func__);
1622 		return -EFAULT;
1623 	}
1624 
1625 	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
1626 		int drain_cnt;
1627 
1628 		drain_cnt = dlb2_domain_drain_ldb_cqs(hw, domain, false);
1629 
1630 		if (dlb2_domain_mapped_queues_empty(hw, domain))
1631 			break;
1632 
1633 		/*
1634 		 * Allow time for DLB to schedule QEs before draining
1635 		 * the CQs again.
1636 		 */
1637 		if (!drain_cnt)
1638 			rte_delay_us(1);
1639 	}
1640 
1641 	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
1642 		DLB2_HW_ERR(hw,
1643 			    "[%s()] Internal error: failed to empty queues\n",
1644 			    __func__);
1645 		return -EFAULT;
1646 	}
1647 
1648 	/*
1649 	 * Drain the CQs one more time. For the queues to have gone empty, the
1650 	 * device must have scheduled one or more QEs into the CQs.
1651 	 */
1652 	dlb2_domain_drain_ldb_cqs(hw, domain, true);
1653 
1654 	return 0;
1655 }
1656 
1657 static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
1658 				       struct dlb2_hw_domain *domain)
1659 {
1660 	struct dlb2_list_entry *iter;
1661 	struct dlb2_ldb_port *port;
1662 	int i;
1663 	RTE_SET_USED(iter);
1664 
1665 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1666 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1667 			port->enabled = true;
1668 
1669 			dlb2_ldb_port_cq_enable(hw, port);
1670 		}
1671 	}
1672 }
1673 
1674 static struct dlb2_ldb_queue *
1675 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
1676 			   u32 id,
1677 			   bool vdev_req,
1678 			   unsigned int vdev_id)
1679 {
1680 	struct dlb2_list_entry *iter1;
1681 	struct dlb2_list_entry *iter2;
1682 	struct dlb2_function_resources *rsrcs;
1683 	struct dlb2_hw_domain *domain;
1684 	struct dlb2_ldb_queue *queue;
1685 	RTE_SET_USED(iter1);
1686 	RTE_SET_USED(iter2);
1687 
1688 	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
1689 		return NULL;
1690 
1691 	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
1692 
1693 	if (!vdev_req)
1694 		return &hw->rsrcs.ldb_queues[id];
1695 
1696 	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
1697 		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
1698 			if (queue->id.virt_id == id)
1699 				return queue;
1700 		}
1701 	}
1702 
1703 	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
1704 		if (queue->id.virt_id == id)
1705 			return queue;
1706 	}
1707 
1708 	return NULL;
1709 }
1710 
1711 static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
1712 						      u32 id,
1713 						      bool vdev_req,
1714 						      unsigned int vdev_id)
1715 {
1716 	struct dlb2_list_entry *iteration;
1717 	struct dlb2_function_resources *rsrcs;
1718 	struct dlb2_hw_domain *domain;
1719 	RTE_SET_USED(iteration);
1720 
1721 	if (id >= DLB2_MAX_NUM_DOMAINS)
1722 		return NULL;
1723 
1724 	if (!vdev_req)
1725 		return &hw->domains[id];
1726 
1727 	rsrcs = &hw->vdev[vdev_id];
1728 
1729 	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
1730 		if (domain->id.virt_id == id)
1731 			return domain;
1732 	}
1733 
1734 	return NULL;
1735 }
1736 
1737 static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
1738 					   struct dlb2_ldb_port *port,
1739 					   struct dlb2_ldb_queue *queue,
1740 					   int slot,
1741 					   enum dlb2_qid_map_state new_state)
1742 {
1743 	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
1744 	struct dlb2_hw_domain *domain;
1745 	int domain_id;
1746 
1747 	domain_id = port->domain_id.phys_id;
1748 
1749 	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
1750 	if (domain == NULL) {
1751 		DLB2_HW_ERR(hw,
1752 			    "[%s()] Internal error: unable to find domain %d\n",
1753 			    __func__, domain_id);
1754 		return -EINVAL;
1755 	}
1756 
1757 	switch (curr_state) {
1758 	case DLB2_QUEUE_UNMAPPED:
1759 		switch (new_state) {
1760 		case DLB2_QUEUE_MAPPED:
1761 			queue->num_mappings++;
1762 			port->num_mappings++;
1763 			break;
1764 		case DLB2_QUEUE_MAP_IN_PROG:
1765 			queue->num_pending_additions++;
1766 			domain->num_pending_additions++;
1767 			break;
1768 		default:
1769 			goto error;
1770 		}
1771 		break;
1772 	case DLB2_QUEUE_MAPPED:
1773 		switch (new_state) {
1774 		case DLB2_QUEUE_UNMAPPED:
1775 			queue->num_mappings--;
1776 			port->num_mappings--;
1777 			break;
1778 		case DLB2_QUEUE_UNMAP_IN_PROG:
1779 			port->num_pending_removals++;
1780 			domain->num_pending_removals++;
1781 			break;
1782 		case DLB2_QUEUE_MAPPED:
1783 			/* Priority change, nothing to update */
1784 			break;
1785 		default:
1786 			goto error;
1787 		}
1788 		break;
1789 	case DLB2_QUEUE_MAP_IN_PROG:
1790 		switch (new_state) {
1791 		case DLB2_QUEUE_UNMAPPED:
1792 			queue->num_pending_additions--;
1793 			domain->num_pending_additions--;
1794 			break;
1795 		case DLB2_QUEUE_MAPPED:
1796 			queue->num_mappings++;
1797 			port->num_mappings++;
1798 			queue->num_pending_additions--;
1799 			domain->num_pending_additions--;
1800 			break;
1801 		default:
1802 			goto error;
1803 		}
1804 		break;
1805 	case DLB2_QUEUE_UNMAP_IN_PROG:
1806 		switch (new_state) {
1807 		case DLB2_QUEUE_UNMAPPED:
1808 			port->num_pending_removals--;
1809 			domain->num_pending_removals--;
1810 			queue->num_mappings--;
1811 			port->num_mappings--;
1812 			break;
1813 		case DLB2_QUEUE_MAPPED:
1814 			port->num_pending_removals--;
1815 			domain->num_pending_removals--;
1816 			break;
1817 		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1818 			/* Nothing to update */
1819 			break;
1820 		default:
1821 			goto error;
1822 		}
1823 		break;
1824 	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
1825 		switch (new_state) {
1826 		case DLB2_QUEUE_UNMAP_IN_PROG:
1827 			/* Nothing to update */
1828 			break;
1829 		case DLB2_QUEUE_UNMAPPED:
1830 			/*
1831 			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
1832 			 * becomes UNMAPPED before it transitions to
1833 			 * MAP_IN_PROG.
1834 			 */
1835 			queue->num_mappings--;
1836 			port->num_mappings--;
1837 			port->num_pending_removals--;
1838 			domain->num_pending_removals--;
1839 			break;
1840 		default:
1841 			goto error;
1842 		}
1843 		break;
1844 	default:
1845 		goto error;
1846 	}
1847 
1848 	port->qid_map[slot].state = new_state;
1849 
1850 	DLB2_HW_DBG(hw,
1851 		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
1852 		    __func__, queue->id.phys_id, port->id.phys_id,
1853 		    curr_state, new_state);
1854 	return 0;
1855 
1856 error:
1857 	DLB2_HW_ERR(hw,
1858 		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
1859 		    __func__, queue->id.phys_id, port->id.phys_id,
1860 		    curr_state, new_state);
1861 	return -EFAULT;
1862 }
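
/*
 * Summary of the valid slot-state transitions handled above (DLB2_QUEUE_
 * prefixes omitted); anything else is reported as an internal error:
 *   UNMAPPED                  -> MAPPED, MAP_IN_PROG
 *   MAPPED                    -> UNMAPPED, UNMAP_IN_PROG, MAPPED (prio change)
 *   MAP_IN_PROG               -> UNMAPPED, MAPPED
 *   UNMAP_IN_PROG             -> UNMAPPED, MAPPED, UNMAP_IN_PROG_PENDING_MAP
 *   UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG, UNMAPPED
 */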
1863 
1864 static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
1865 				enum dlb2_qid_map_state state,
1866 				int *slot)
1867 {
1868 	int i;
1869 
1870 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1871 		if (port->qid_map[i].state == state)
1872 			break;
1873 	}
1874 
1875 	*slot = i;
1876 
1877 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1878 }
1879 
1880 static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
1881 				      enum dlb2_qid_map_state state,
1882 				      struct dlb2_ldb_queue *queue,
1883 				      int *slot)
1884 {
1885 	int i;
1886 
1887 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1888 		if (port->qid_map[i].state == state &&
1889 		    port->qid_map[i].qid == queue->id.phys_id)
1890 			break;
1891 	}
1892 
1893 	*slot = i;
1894 
1895 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
1896 }
1897 
1898 /*
1899  * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
1900  * function names imply (they only touch CQs whose ports are software-enabled),
1901  * and should only be called by the dynamic CQ mapping code.
1902  */
1903 static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
1904 					      struct dlb2_hw_domain *domain,
1905 					      struct dlb2_ldb_queue *queue)
1906 {
1907 	struct dlb2_list_entry *iter;
1908 	struct dlb2_ldb_port *port;
1909 	int slot, i;
1910 	RTE_SET_USED(iter);
1911 
1912 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1913 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1914 			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1915 
1916 			if (!dlb2_port_find_slot_queue(port, state,
1917 						       queue, &slot))
1918 				continue;
1919 
1920 			if (port->enabled)
1921 				dlb2_ldb_port_cq_disable(hw, port);
1922 		}
1923 	}
1924 }
1925 
1926 static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
1927 					     struct dlb2_hw_domain *domain,
1928 					     struct dlb2_ldb_queue *queue)
1929 {
1930 	struct dlb2_list_entry *iter;
1931 	struct dlb2_ldb_port *port;
1932 	int slot, i;
1933 	RTE_SET_USED(iter);
1934 
1935 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
1936 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
1937 			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
1938 
1939 			if (!dlb2_port_find_slot_queue(port, state,
1940 						       queue, &slot))
1941 				continue;
1942 
1943 			if (port->enabled)
1944 				dlb2_ldb_port_cq_enable(hw, port);
1945 		}
1946 	}
1947 }
1948 
1949 static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
1950 						struct dlb2_ldb_port *port,
1951 						int slot)
1952 {
1953 	u32 ctrl = 0;
1954 
1955 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1956 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1957 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1958 
1959 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1960 
1961 	dlb2_flush_csr(hw);
1962 }
1963 
1964 static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
1965 					      struct dlb2_ldb_port *port,
1966 					      int slot)
1967 {
1968 	u32 ctrl = 0;
1969 
1970 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
1971 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
1972 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
1973 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
1974 
1975 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
1976 
1977 	dlb2_flush_csr(hw);
1978 }
1979 
1980 static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
1981 					struct dlb2_ldb_port *p,
1982 					struct dlb2_ldb_queue *q,
1983 					u8 priority)
1984 {
1985 	enum dlb2_qid_map_state state;
1986 	u32 lsp_qid2cq2;
1987 	u32 lsp_qid2cq;
1988 	u32 atm_qid2cq;
1989 	u32 cq2priov;
1990 	u32 cq2qid;
1991 	int i;
1992 
1993 	/* Look for a pending or already mapped slot, else an unused slot */
1994 	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
1995 	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
1996 	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
1997 		DLB2_HW_ERR(hw,
1998 			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
1999 			    __func__, __LINE__);
2000 		return -EFAULT;
2001 	}
2002 
2003 	/* Read-modify-write the priority and valid bit register */
2004 	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
2005 
2006 	cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
2007 	cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
2008 		    & DLB2_LSP_CQ2PRIOV_PRIO;
2009 
2010 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
2011 
2012 	/* Read-modify-write the QID map register */
2013 	if (i < 4)
2014 		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
2015 							  p->id.phys_id));
2016 	else
2017 		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
2018 							  p->id.phys_id));
2019 
2020 	if (i == 0 || i == 4)
2021 		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
2022 	if (i == 1 || i == 5)
2023 		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
2024 	if (i == 2 || i == 6)
2025 		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
2026 	if (i == 3 || i == 7)
2027 		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
2028 
2029 	if (i < 4)
2030 		DLB2_CSR_WR(hw,
2031 			    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
2032 	else
2033 		DLB2_CSR_WR(hw,
2034 			    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
2035 
2036 	atm_qid2cq = DLB2_CSR_RD(hw,
2037 				 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
2038 						p->id.phys_id / 4));
2039 
2040 	lsp_qid2cq = DLB2_CSR_RD(hw,
2041 				 DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
2042 						p->id.phys_id / 4));
2043 
2044 	lsp_qid2cq2 = DLB2_CSR_RD(hw,
2045 				  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
2046 						  p->id.phys_id / 4));
2047 
2048 	switch (p->id.phys_id % 4) {
2049 	case 0:
2050 		DLB2_BIT_SET(atm_qid2cq,
2051 			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2052 		DLB2_BIT_SET(lsp_qid2cq,
2053 			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2054 		DLB2_BIT_SET(lsp_qid2cq2,
2055 			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2056 		break;
2057 
2058 	case 1:
2059 		DLB2_BIT_SET(atm_qid2cq,
2060 			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2061 		DLB2_BIT_SET(lsp_qid2cq,
2062 			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2063 		DLB2_BIT_SET(lsp_qid2cq2,
2064 			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2065 		break;
2066 
2067 	case 2:
2068 		DLB2_BIT_SET(atm_qid2cq,
2069 			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2070 		DLB2_BIT_SET(lsp_qid2cq,
2071 			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2072 		DLB2_BIT_SET(lsp_qid2cq2,
2073 			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2074 		break;
2075 
2076 	case 3:
2077 		DLB2_BIT_SET(atm_qid2cq,
2078 			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2079 		DLB2_BIT_SET(lsp_qid2cq,
2080 			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2081 		DLB2_BIT_SET(lsp_qid2cq2,
2082 			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2083 		break;
2084 	}
2085 
2086 	DLB2_CSR_WR(hw,
2087 		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
2088 		    atm_qid2cq);
2089 
2090 	DLB2_CSR_WR(hw,
2091 		    DLB2_LSP_QID2CQIDIX(hw->ver,
2092 					q->id.phys_id, p->id.phys_id / 4),
2093 		    lsp_qid2cq);
2094 
2095 	DLB2_CSR_WR(hw,
2096 		    DLB2_LSP_QID2CQIDIX2(hw->ver,
2097 					 q->id.phys_id, p->id.phys_id / 4),
2098 		    lsp_qid2cq2);
2099 
2100 	dlb2_flush_csr(hw);
2101 
2102 	p->qid_map[i].qid = q->id.phys_id;
2103 	p->qid_map[i].priority = priority;
2104 
2105 	state = DLB2_QUEUE_MAPPED;
2106 
2107 	return dlb2_port_slot_state_transition(hw, p, q, i, state);
2108 }
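/*
 * Illustrative example (editorial note): each LDB CQ has 8 QID slots. Slots
 * 0-3 live in the CQ2QID0 register (fields QID_P0-QID_P3) and slots 4-7 in
 * CQ2QID1, which is why the code above selects the register with "i < 4" and
 * the field with, effectively, "i % 4". Each {ATM,LSP}_QID2CQIDIX{,2}
 * register likewise covers four CQs, so mapping slot 5 of port 7, for
 * instance, sets the CQ2QID1 QID_P1 field and the slot-5 bit of the CQ_P3
 * field in QID2CQIDIX(queue, 7 / 4 = 1).
 */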
2109 
2110 static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
2111 					   struct dlb2_ldb_port *port,
2112 					   struct dlb2_ldb_queue *queue,
2113 					   int slot)
2114 {
2115 	u32 ctrl = 0;
2116 	u32 active;
2117 	u32 enq;
2118 
2119 	/* Set the atomic scheduling haswork bit */
2120 	active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
2121 							 queue->id.phys_id));
2122 
2123 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
2124 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
2125 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
2126 	DLB2_BITS_SET(ctrl,
2127 		      DLB2_BITS_GET(active,
2128 				    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
2129 		      DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
2130 
2131 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
2132 
2133 	/* Set the non-atomic scheduling haswork bit */
2134 	enq = DLB2_CSR_RD(hw,
2135 			  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
2136 						       queue->id.phys_id));
2137 
2138 	memset(&ctrl, 0, sizeof(ctrl));
2139 
2140 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
2141 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
2142 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
2143 	DLB2_BITS_SET(ctrl,
2144 		      DLB2_BITS_GET(enq,
2145 				    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
2146 		      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
2147 
2148 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
2149 
2150 	dlb2_flush_csr(hw);
2151 
2152 	return 0;
2153 }
2154 
2155 static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
2156 					      struct dlb2_ldb_port *port,
2157 					      u8 slot)
2158 {
2159 	u32 ctrl = 0;
2160 
2161 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
2162 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
2163 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
2164 
2165 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
2166 
2167 	memset(&ctrl, 0, sizeof(ctrl));
2168 
2169 	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
2170 	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
2171 	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
2172 
2173 	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
2174 
2175 	dlb2_flush_csr(hw);
2176 }
2177 
2178 
2179 static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
2180 					      struct dlb2_ldb_queue *queue)
2181 {
2182 	u32 infl_lim = 0;
2183 
2184 	DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
2185 		 DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
2186 
2187 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
2188 		    infl_lim);
2189 }
2190 
2191 static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
2192 						struct dlb2_ldb_queue *queue)
2193 {
2194 	DLB2_CSR_WR(hw,
2195 		    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
2196 		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
2197 }
2198 
2199 static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
2200 						struct dlb2_hw_domain *domain,
2201 						struct dlb2_ldb_port *port,
2202 						struct dlb2_ldb_queue *queue)
2203 {
2204 	struct dlb2_list_entry *iter;
2205 	enum dlb2_qid_map_state state;
2206 	int slot, ret, i;
2207 	u32 infl_cnt;
2208 	u8 prio;
2209 	RTE_SET_USED(iter);
2210 
2211 	infl_cnt = DLB2_CSR_RD(hw,
2212 			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2213 						    queue->id.phys_id));
2214 
2215 	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2216 		DLB2_HW_ERR(hw,
2217 			    "[%s()] Internal error: non-zero QID inflight count\n",
2218 			    __func__);
2219 		return -EINVAL;
2220 	}
2221 
2222 	/*
2223 	 * Statically map the port and set its corresponding has_work bits.
2224 	 */
2225 	state = DLB2_QUEUE_MAP_IN_PROG;
2226 	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
2227 		return -EINVAL;
2228 
2229 	prio = port->qid_map[slot].priority;
2230 
2231 	/*
2232 	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
2233 	 * the port's qid_map state.
2234 	 */
2235 	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2236 	if (ret)
2237 		return ret;
2238 
2239 	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
2240 	if (ret)
2241 		return ret;
2242 
2243 	/*
2244 	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
2245 	 * prevent spurious schedules from causing the queue's inflight
2246 	 * count to increase.
2247 	 */
2248 	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
2249 
2250 	/* Reset the queue's inflight status */
2251 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2252 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2253 			state = DLB2_QUEUE_MAPPED;
2254 			if (!dlb2_port_find_slot_queue(port, state,
2255 						       queue, &slot))
2256 				continue;
2257 
2258 			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2259 		}
2260 	}
2261 
2262 	dlb2_ldb_queue_set_inflight_limit(hw, queue);
2263 
2264 	/* Re-enable CQs mapped to this queue */
2265 	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2266 
2267 	/* If this queue has other mappings pending, clear its inflight limit */
2268 	if (queue->num_pending_additions > 0)
2269 		dlb2_ldb_queue_clear_inflight_limit(hw, queue);
2270 
2271 	return 0;
2272 }
2273 
2274 /**
2275  * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
2276  * @hw: dlb2_hw handle for a particular device.
2277  * @port: load-balanced port
2278  * @queue: load-balanced queue
2279  * @priority: queue servicing priority
2280  *
2281  * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
2282  * at a later point, and <0 if an error occurred.
2283  */
2284 static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
2285 					 struct dlb2_ldb_port *port,
2286 					 struct dlb2_ldb_queue *queue,
2287 					 u8 priority)
2288 {
2289 	enum dlb2_qid_map_state state;
2290 	struct dlb2_hw_domain *domain;
2291 	int domain_id, slot, ret;
2292 	u32 infl_cnt;
2293 
2294 	domain_id = port->domain_id.phys_id;
2295 
2296 	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
2297 	if (domain == NULL) {
2298 		DLB2_HW_ERR(hw,
2299 			    "[%s()] Internal error: unable to find domain %d\n",
2300 			    __func__, port->domain_id.phys_id);
2301 		return -EINVAL;
2302 	}
2303 
2304 	/*
2305 	 * Set the QID inflight limit to 0 to prevent further scheduling of the
2306 	 * queue.
2307 	 */
2308 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
2309 						  queue->id.phys_id), 0);
2310 
2311 	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
2312 		DLB2_HW_ERR(hw,
2313 			    "Internal error: No available unmapped slots\n");
2314 		return -EFAULT;
2315 	}
2316 
2317 	port->qid_map[slot].qid = queue->id.phys_id;
2318 	port->qid_map[slot].priority = priority;
2319 
2320 	state = DLB2_QUEUE_MAP_IN_PROG;
2321 	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
2322 	if (ret)
2323 		return ret;
2324 
2325 	infl_cnt = DLB2_CSR_RD(hw,
2326 			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2327 						    queue->id.phys_id));
2328 
2329 	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2330 		/*
2331 		 * The queue is owed completions so it's not safe to map it
2332 		 * yet. Schedule a worker thread to complete the mapping later,
2333 		 * once software has completed all the queue's inflight events.
2334 		 */
2335 		if (!os_worker_active(hw))
2336 			os_schedule_work(hw);
2337 
2338 		return 1;
2339 	}
2340 
2341 	/*
2342 	 * Disable the affected CQ, and the CQs already mapped to the QID,
2343 	 * before reading the QID's inflight count a second time. There is an
2344 	 * unlikely race in which the QID may schedule one more QE after we
2345 	 * read an inflight count of 0, and disabling the CQs guarantees that
2346 	 * the race will not occur after a re-read of the inflight count
2347 	 * register.
2348 	 */
2349 	if (port->enabled)
2350 		dlb2_ldb_port_cq_disable(hw, port);
2351 
2352 	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2353 
2354 	infl_cnt = DLB2_CSR_RD(hw,
2355 			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
2356 						    queue->id.phys_id));
2357 
2358 	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2359 		if (port->enabled)
2360 			dlb2_ldb_port_cq_enable(hw, port);
2361 
2362 		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2363 
2364 		/*
2365 		 * The queue is owed completions so it's not safe to map it
2366 		 * yet. Schedule a worker thread to complete the mapping later,
2367 		 * once software has completed all the queue's inflight events.
2368 		 */
2369 		if (!os_worker_active(hw))
2370 			os_schedule_work(hw);
2371 
2372 		return 1;
2373 	}
2374 
2375 	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2376 }
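/*
 * Usage sketch (editorial note, not a prescribed flow): callers of the
 * dynamic map typically handle the three return classes separately, e.g.:
 *
 *	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *	if (ret < 0)
 *		return ret;	<-- hardware/bookkeeping error
 *	if (ret == 1)
 *		return 0;	<-- deferred: the scheduled worker completes
 *				    the map once the QID's inflight count
 *				    drains (dlb2_ldb_port_finish_map_qid_dynamic())
 *	<-- ret == 0: the slot is MAPPED and immediately schedulable
 */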
2377 
2378 static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
2379 					struct dlb2_hw_domain *domain,
2380 					struct dlb2_ldb_port *port)
2381 {
2382 	int i;
2383 
2384 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2385 		u32 infl_cnt;
2386 		struct dlb2_ldb_queue *queue;
2387 		int qid;
2388 
2389 		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
2390 			continue;
2391 
2392 		qid = port->qid_map[i].qid;
2393 
2394 		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
2395 
2396 		if (queue == NULL) {
2397 			DLB2_HW_ERR(hw,
2398 				    "[%s()] Internal error: unable to find queue %d\n",
2399 				    __func__, qid);
2400 			continue;
2401 		}
2402 
2403 		infl_cnt = DLB2_CSR_RD(hw,
2404 				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2405 
2406 		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
2407 			continue;
2408 
2409 		/*
2410 		 * Disable the affected CQ, and the CQs already mapped to the
2411 		 * QID, before reading the QID's inflight count a second time.
2412 		 * There is an unlikely race in which the QID may schedule one
2413 		 * more QE after we read an inflight count of 0, and disabling
2414 		 * the CQs guarantees that the race will not occur after a
2415 		 * re-read of the inflight count register.
2416 		 */
2417 		if (port->enabled)
2418 			dlb2_ldb_port_cq_disable(hw, port);
2419 
2420 		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
2421 
2422 		infl_cnt = DLB2_CSR_RD(hw,
2423 				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
2424 
2425 		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
2426 			if (port->enabled)
2427 				dlb2_ldb_port_cq_enable(hw, port);
2428 
2429 			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
2430 
2431 			continue;
2432 		}
2433 
2434 		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
2435 	}
2436 }
2437 
2438 static unsigned int
2439 dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
2440 				      struct dlb2_hw_domain *domain)
2441 {
2442 	struct dlb2_list_entry *iter;
2443 	struct dlb2_ldb_port *port;
2444 	int i;
2445 	RTE_SET_USED(iter);
2446 
2447 	if (!domain->configured || domain->num_pending_additions == 0)
2448 		return 0;
2449 
2450 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2451 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2452 			dlb2_domain_finish_map_port(hw, domain, port);
2453 	}
2454 
2455 	return domain->num_pending_additions;
2456 }
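/*
 * Editorial note: the pending-addition count returned above is what allows a
 * deferred worker (scheduled via os_schedule_work()) to keep retrying until
 * every in-progress map has landed, e.g. a sketch assuming a periodic worker:
 *
 *	if (dlb2_domain_finish_map_qid_procedures(hw, domain) != 0)
 *		re-arm the worker and retry later
 *
 * dlb2_domain_finish_unmap_qid_procedures() below plays the same role for
 * pending removals.
 */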
2457 
2458 static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
2459 				   struct dlb2_ldb_port *port,
2460 				   struct dlb2_ldb_queue *queue)
2461 {
2462 	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
2463 	u32 lsp_qid2cq2;
2464 	u32 lsp_qid2cq;
2465 	u32 atm_qid2cq;
2466 	u32 cq2priov;
2467 	u32 queue_id;
2468 	u32 port_id;
2469 	int i;
2470 
2471 	/* Find the queue's slot */
2472 	mapped = DLB2_QUEUE_MAPPED;
2473 	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
2474 	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
2475 
2476 	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
2477 	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
2478 	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
2479 		DLB2_HW_ERR(hw,
2480 			    "[%s():%d] Internal error: QID %d isn't mapped\n",
2481 			    __func__, __LINE__, queue->id.phys_id);
2482 		return -EFAULT;
2483 	}
2484 
2485 	port_id = port->id.phys_id;
2486 	queue_id = queue->id.phys_id;
2487 
2488 	/* Read-modify-write the priority and valid bit register */
2489 	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
2490 
2491 	cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
2492 
2493 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
2494 
2495 	atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
2496 							 port_id / 4));
2497 
2498 	lsp_qid2cq = DLB2_CSR_RD(hw,
2499 				 DLB2_LSP_QID2CQIDIX(hw->ver,
2500 						queue_id, port_id / 4));
2501 
2502 	lsp_qid2cq2 = DLB2_CSR_RD(hw,
2503 				  DLB2_LSP_QID2CQIDIX2(hw->ver,
2504 						  queue_id, port_id / 4));
2505 
2506 	switch (port_id % 4) {
2507 	case 0:
2508 		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
2509 		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
2510 		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
2511 		break;
2512 
2513 	case 1:
2514 		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
2515 		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
2516 		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
2517 		break;
2518 
2519 	case 2:
2520 		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
2521 		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
2522 		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
2523 		break;
2524 
2525 	case 3:
2526 		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
2527 		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
2528 		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
2529 		break;
2530 	}
2531 
2532 	DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
2533 
2534 	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
2535 		    lsp_qid2cq);
2536 
2537 	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
2538 		    lsp_qid2cq2);
2539 
2540 	dlb2_flush_csr(hw);
2541 
2542 	unmapped = DLB2_QUEUE_UNMAPPED;
2543 
2544 	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
2545 }
2546 
2547 static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
2548 				 struct dlb2_hw_domain *domain,
2549 				 struct dlb2_ldb_port *port,
2550 				 struct dlb2_ldb_queue *queue,
2551 				 u8 prio)
2552 {
2553 	if (domain->started)
2554 		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
2555 	else
2556 		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
2557 }
2558 
2559 static void
2560 dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
2561 				   struct dlb2_hw_domain *domain,
2562 				   struct dlb2_ldb_port *port,
2563 				   int slot)
2564 {
2565 	enum dlb2_qid_map_state state;
2566 	struct dlb2_ldb_queue *queue;
2567 
2568 	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
2569 
2570 	state = port->qid_map[slot].state;
2571 
2572 	/* Update the QID2CQIDX and CQ2QID vectors */
2573 	dlb2_ldb_port_unmap_qid(hw, port, queue);
2574 
2575 	/*
2576 	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
2577 	 * the has_work bits
2578 	 */
2579 	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
2580 
2581 	/* Reset the {CQ, slot} to its default state */
2582 	dlb2_ldb_port_set_queue_if_status(hw, port, slot);
2583 
2584 	/* Re-enable the CQ if it was not manually disabled by the user */
2585 	if (port->enabled)
2586 		dlb2_ldb_port_cq_enable(hw, port);
2587 
2588 	/*
2589 	 * If there is a mapping that is pending this slot's removal, perform
2590 	 * the mapping now.
2591 	 */
2592 	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
2593 		struct dlb2_ldb_port_qid_map *map;
2594 		struct dlb2_ldb_queue *map_queue;
2595 		u8 prio;
2596 
2597 		map = &port->qid_map[slot];
2598 
2599 		map->qid = map->pending_qid;
2600 		map->priority = map->pending_priority;
2601 
2602 		map_queue = &hw->rsrcs.ldb_queues[map->qid];
2603 		prio = map->priority;
2604 
2605 		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
2606 	}
2607 }
2608 
2609 
2610 static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
2611 					  struct dlb2_hw_domain *domain,
2612 					  struct dlb2_ldb_port *port)
2613 {
2614 	u32 infl_cnt;
2615 	int i;
2616 	const int max_iters = 1000;
2617 	const int iter_poll_us = 100;
2618 
2619 	if (port->num_pending_removals == 0)
2620 		return false;
2621 
2622 	/*
2623 	 * The unmap requires all the CQ's outstanding inflights to be
2624 	 * completed. Poll for up to 100 ms (max_iters * iter_poll_us).
2625 	 */
2626 	for (i = 0; i < max_iters; i++) {
2627 		infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
2628 						       port->id.phys_id));
2629 
2630 		if (DLB2_BITS_GET(infl_cnt,
2631 				  DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) == 0)
2632 			break;
2633 		rte_delay_us_sleep(iter_poll_us);
2634 	}
2635 
2636 	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
2637 		return false;
2638 
2639 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
2640 		struct dlb2_ldb_port_qid_map *map;
2641 
2642 		map = &port->qid_map[i];
2643 
2644 		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
2645 		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
2646 			continue;
2647 
2648 		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
2649 	}
2650 
2651 	return true;
2652 }
2653 
2654 static unsigned int
2655 dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
2656 					struct dlb2_hw_domain *domain)
2657 {
2658 	struct dlb2_list_entry *iter;
2659 	struct dlb2_ldb_port *port;
2660 	int i;
2661 	RTE_SET_USED(iter);
2662 
2663 	if (!domain->configured || domain->num_pending_removals == 0)
2664 		return 0;
2665 
2666 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2667 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
2668 			dlb2_domain_finish_unmap_port(hw, domain, port);
2669 	}
2670 
2671 	return domain->num_pending_removals;
2672 }
2673 
2674 static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
2675 					struct dlb2_hw_domain *domain)
2676 {
2677 	struct dlb2_list_entry *iter;
2678 	struct dlb2_ldb_port *port;
2679 	int i;
2680 	RTE_SET_USED(iter);
2681 
2682 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2683 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2684 			port->enabled = false;
2685 
2686 			dlb2_ldb_port_cq_disable(hw, port);
2687 		}
2688 	}
2689 }
2690 
2691 
2692 static void dlb2_log_reset_domain(struct dlb2_hw *hw,
2693 				  u32 domain_id,
2694 				  bool vdev_req,
2695 				  unsigned int vdev_id)
2696 {
2697 	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
2698 	if (vdev_req)
2699 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
2700 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
2701 }
2702 
2703 static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
2704 					 struct dlb2_hw_domain *domain,
2705 					 unsigned int vdev_id)
2706 {
2707 	struct dlb2_list_entry *iter;
2708 	struct dlb2_dir_pq_pair *port;
2709 	u32 vpp_v = 0;
2710 	RTE_SET_USED(iter);
2711 
2712 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2713 		unsigned int offs;
2714 		u32 virt_id;
2715 
2716 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
2717 			virt_id = port->id.virt_id;
2718 		else
2719 			virt_id = port->id.phys_id;
2720 
2721 		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
2722 
2723 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
2724 	}
2725 }
2726 
2727 static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
2728 					 struct dlb2_hw_domain *domain,
2729 					 unsigned int vdev_id)
2730 {
2731 	struct dlb2_list_entry *iter;
2732 	struct dlb2_ldb_port *port;
2733 	u32 vpp_v = 0;
2734 	int i;
2735 	RTE_SET_USED(iter);
2736 
2737 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2738 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2739 			unsigned int offs;
2740 			u32 virt_id;
2741 
2742 			if (hw->virt_mode == DLB2_VIRT_SRIOV)
2743 				virt_id = port->id.virt_id;
2744 			else
2745 				virt_id = port->id.phys_id;
2746 
2747 			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
2748 
2749 			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
2750 		}
2751 	}
2752 }
2753 
2754 static void
2755 dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
2756 					struct dlb2_hw_domain *domain)
2757 {
2758 	struct dlb2_list_entry *iter;
2759 	struct dlb2_ldb_port *port;
2760 	u32 int_en = 0;
2761 	u32 wd_en = 0;
2762 	int i;
2763 	RTE_SET_USED(iter);
2764 
2765 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2766 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2767 			DLB2_CSR_WR(hw,
2768 				    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
2769 						       port->id.phys_id),
2770 				    int_en);
2771 
2772 			DLB2_CSR_WR(hw,
2773 				    DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
2774 						      port->id.phys_id),
2775 				    wd_en);
2776 		}
2777 	}
2778 }
2779 
2780 static void
2781 dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
2782 					struct dlb2_hw_domain *domain)
2783 {
2784 	struct dlb2_list_entry *iter;
2785 	struct dlb2_dir_pq_pair *port;
2786 	u32 int_en = 0;
2787 	u32 wd_en = 0;
2788 	RTE_SET_USED(iter);
2789 
2790 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2791 		DLB2_CSR_WR(hw,
2792 			    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
2793 			    int_en);
2794 
2795 		DLB2_CSR_WR(hw,
2796 			    DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
2797 			    wd_en);
2798 	}
2799 }
2800 
2801 static void
2802 dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
2803 					  struct dlb2_hw_domain *domain)
2804 {
2805 	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
2806 	struct dlb2_list_entry *iter;
2807 	struct dlb2_ldb_queue *queue;
2808 	RTE_SET_USED(iter);
2809 
2810 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2811 		int idx = domain_offset + queue->id.phys_id;
2812 
2813 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
2814 
2815 		if (queue->id.vdev_owned) {
2816 			DLB2_CSR_WR(hw,
2817 				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
2818 				    0);
2819 
2820 			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
2821 				queue->id.virt_id;
2822 
2823 			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
2824 
2825 			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
2826 		}
2827 	}
2828 }
2829 
2830 static void
2831 dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
2832 					  struct dlb2_hw_domain *domain)
2833 {
2834 	struct dlb2_list_entry *iter;
2835 	struct dlb2_dir_pq_pair *queue;
2836 	unsigned long max_ports;
2837 	int domain_offset;
2838 	RTE_SET_USED(iter);
2839 
2840 	max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
2841 
2842 	domain_offset = domain->id.phys_id * max_ports;
2843 
2844 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
2845 		int idx = domain_offset + queue->id.phys_id;
2846 
2847 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
2848 
2849 		if (queue->id.vdev_owned) {
2850 			idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
2851 
2852 			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
2853 
2854 			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
2855 		}
2856 	}
2857 }
2858 
2859 static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
2860 					       struct dlb2_hw_domain *domain)
2861 {
2862 	struct dlb2_list_entry *iter;
2863 	struct dlb2_ldb_port *port;
2864 	u32 chk_en = 0;
2865 	int i;
2866 	RTE_SET_USED(iter);
2867 
2868 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2869 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2870 			DLB2_CSR_WR(hw,
2871 				    DLB2_CHP_SN_CHK_ENBL(hw->ver,
2872 							 port->id.phys_id),
2873 				    chk_en);
2874 		}
2875 	}
2876 }
2877 
2878 static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
2879 						 struct dlb2_hw_domain *domain)
2880 {
2881 	struct dlb2_list_entry *iter;
2882 	struct dlb2_ldb_port *port;
2883 	int i;
2884 	RTE_SET_USED(iter);
2885 
2886 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2887 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2888 			int j;
2889 
2890 			for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
2891 				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
2892 					break;
2893 			}
2894 
2895 			if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
2896 				DLB2_HW_ERR(hw,
2897 					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
2898 					    __func__, port->id.phys_id);
2899 				return -EFAULT;
2900 			}
2901 		}
2902 	}
2903 
2904 	return 0;
2905 }
2906 
2907 static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
2908 					struct dlb2_hw_domain *domain)
2909 {
2910 	struct dlb2_list_entry *iter;
2911 	struct dlb2_dir_pq_pair *port;
2912 	RTE_SET_USED(iter);
2913 
2914 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2915 		port->enabled = false;
2916 
2917 		dlb2_dir_port_cq_disable(hw, port);
2918 	}
2919 }
2920 
2921 static void
2922 dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
2923 				       struct dlb2_hw_domain *domain)
2924 {
2925 	struct dlb2_list_entry *iter;
2926 	struct dlb2_dir_pq_pair *port;
2927 	u32 pp_v = 0;
2928 	RTE_SET_USED(iter);
2929 
2930 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
2931 		DLB2_CSR_WR(hw,
2932 			    DLB2_SYS_DIR_PP_V(port->id.phys_id),
2933 			    pp_v);
2934 	}
2935 }
2936 
2937 static void
2938 dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
2939 				       struct dlb2_hw_domain *domain)
2940 {
2941 	struct dlb2_list_entry *iter;
2942 	struct dlb2_ldb_port *port;
2943 	u32 pp_v = 0;
2944 	int i;
2945 	RTE_SET_USED(iter);
2946 
2947 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2948 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
2949 			DLB2_CSR_WR(hw,
2950 				    DLB2_SYS_LDB_PP_V(port->id.phys_id),
2951 				    pp_v);
2952 		}
2953 	}
2954 }
2955 
2956 static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
2957 					    struct dlb2_hw_domain *domain)
2958 {
2959 	struct dlb2_list_entry *iter;
2960 	struct dlb2_dir_pq_pair *dir_port;
2961 	struct dlb2_ldb_port *ldb_port;
2962 	struct dlb2_ldb_queue *queue;
2963 	int i;
2964 	RTE_SET_USED(iter);
2965 
2966 	/*
2967 	 * Confirm that all the domain's queues' inflight counts and AQED
2968 	 * active counts are 0.
2969 	 */
2970 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
2971 		if (!dlb2_ldb_queue_is_empty(hw, queue)) {
2972 			DLB2_HW_ERR(hw,
2973 				    "[%s()] Internal error: failed to empty ldb queue %d\n",
2974 				    __func__, queue->id.phys_id);
2975 			return -EFAULT;
2976 		}
2977 	}
2978 
2979 	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
2980 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
2981 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
2982 			if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
2983 			    dlb2_ldb_cq_token_count(hw, ldb_port)) {
2984 				DLB2_HW_ERR(hw,
2985 					    "[%s()] Internal error: failed to empty ldb port %d\n",
2986 					    __func__, ldb_port->id.phys_id);
2987 				return -EFAULT;
2988 			}
2989 		}
2990 	}
2991 
2992 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
2993 		if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
2994 			DLB2_HW_ERR(hw,
2995 				    "[%s()] Internal error: failed to empty dir queue %d\n",
2996 				    __func__, dir_port->id.phys_id);
2997 			return -EFAULT;
2998 		}
2999 
3000 		if (dlb2_dir_cq_token_count(hw, dir_port)) {
3001 			DLB2_HW_ERR(hw,
3002 				    "[%s()] Internal error: failed to empty dir port %d\n",
3003 				    __func__, dir_port->id.phys_id);
3004 			return -EFAULT;
3005 		}
3006 	}
3007 
3008 	return 0;
3009 }
3010 
3011 static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
3012 						   struct dlb2_ldb_port *port)
3013 {
3014 	DLB2_CSR_WR(hw,
3015 		    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
3016 		    DLB2_SYS_LDB_PP2VAS_RST);
3017 
3018 	DLB2_CSR_WR(hw,
3019 		    DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
3020 		    DLB2_CHP_LDB_CQ2VAS_RST);
3021 
3022 	DLB2_CSR_WR(hw,
3023 		    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
3024 		    DLB2_SYS_LDB_PP2VDEV_RST);
3025 
3026 	if (port->id.vdev_owned) {
3027 		unsigned int offs;
3028 		u32 virt_id;
3029 
3030 		/*
3031 		 * DLB uses producer port address bits 17:12 to determine the
3032 		 * producer port ID. In Scalable IOV mode, PP accesses come
3033 		 * through the PF MMIO window for the physical producer port,
3034 		 * so for translation purposes the virtual and physical port
3035 		 * IDs are equal.
3036 		 */
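		/*
		 * Worked example (editorial note): with bits 17:12 carrying
		 * the PP ID, each producer port owns one 4 KiB page of PP
		 * MMIO space, i.e. PP n sits at offset n << 12 (PP 5 ->
		 * 0x5000). Under SR-IOV the guest writes its own VF BAR, so
		 * the virtual ID is programmed here; under Scalable IOV the
		 * access goes through the PF BAR page of the physical port,
		 * so the physical ID is used instead.
		 */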
3037 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
3038 			virt_id = port->id.virt_id;
3039 		else
3040 			virt_id = port->id.phys_id;
3041 
3042 		offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
3043 
3044 		DLB2_CSR_WR(hw,
3045 			    DLB2_SYS_VF_LDB_VPP2PP(offs),
3046 			    DLB2_SYS_VF_LDB_VPP2PP_RST);
3047 
3048 		DLB2_CSR_WR(hw,
3049 			    DLB2_SYS_VF_LDB_VPP_V(offs),
3050 			    DLB2_SYS_VF_LDB_VPP_V_RST);
3051 	}
3052 
3053 	DLB2_CSR_WR(hw,
3054 		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
3055 		    DLB2_SYS_LDB_PP_V_RST);
3056 
3057 	DLB2_CSR_WR(hw,
3058 		    DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
3059 		    DLB2_LSP_CQ_LDB_DSBL_RST);
3060 
3061 	DLB2_CSR_WR(hw,
3062 		    DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
3063 		    DLB2_CHP_LDB_CQ_DEPTH_RST);
3064 
3065 	if (hw->ver != DLB2_HW_V2)
3066 		DLB2_CSR_WR(hw,
3067 			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
3068 			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
3069 
3070 	DLB2_CSR_WR(hw,
3071 		    DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
3072 		    DLB2_LSP_CQ_LDB_INFL_LIM_RST);
3073 
3074 	DLB2_CSR_WR(hw,
3075 		    DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
3076 		    DLB2_CHP_HIST_LIST_LIM_RST);
3077 
3078 	DLB2_CSR_WR(hw,
3079 		    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
3080 		    DLB2_CHP_HIST_LIST_BASE_RST);
3081 
3082 	DLB2_CSR_WR(hw,
3083 		    DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
3084 		    DLB2_CHP_HIST_LIST_POP_PTR_RST);
3085 
3086 	DLB2_CSR_WR(hw,
3087 		    DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
3088 		    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
3089 
3090 	DLB2_CSR_WR(hw,
3091 		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
3092 		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
3093 
3094 	DLB2_CSR_WR(hw,
3095 		    DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
3096 		    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
3097 
3098 	DLB2_CSR_WR(hw,
3099 		    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
3100 		    DLB2_CHP_LDB_CQ_INT_ENB_RST);
3101 
3102 	DLB2_CSR_WR(hw,
3103 		    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
3104 		    DLB2_SYS_LDB_CQ_ISR_RST);
3105 
3106 	DLB2_CSR_WR(hw,
3107 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
3108 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
3109 
3110 	DLB2_CSR_WR(hw,
3111 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
3112 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
3113 
3114 	DLB2_CSR_WR(hw,
3115 		    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
3116 		    DLB2_CHP_LDB_CQ_WPTR_RST);
3117 
3118 	DLB2_CSR_WR(hw,
3119 		    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
3120 		    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
3121 
3122 	DLB2_CSR_WR(hw,
3123 		    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
3124 		    DLB2_SYS_LDB_CQ_ADDR_L_RST);
3125 
3126 	DLB2_CSR_WR(hw,
3127 		    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
3128 		    DLB2_SYS_LDB_CQ_ADDR_U_RST);
3129 
3130 	if (hw->ver == DLB2_HW_V2)
3131 		DLB2_CSR_WR(hw,
3132 			    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
3133 			    DLB2_SYS_LDB_CQ_AT_RST);
3134 
3135 	DLB2_CSR_WR(hw,
3136 		    DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
3137 		    DLB2_SYS_LDB_CQ_PASID_RST);
3138 
3139 	DLB2_CSR_WR(hw,
3140 		    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
3141 		    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
3142 
3143 	DLB2_CSR_WR(hw,
3144 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3145 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
3146 
3147 	DLB2_CSR_WR(hw,
3148 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3149 		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
3150 
3151 	DLB2_CSR_WR(hw,
3152 		    DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
3153 		    DLB2_LSP_CQ2QID0_RST);
3154 
3155 	DLB2_CSR_WR(hw,
3156 		    DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
3157 		    DLB2_LSP_CQ2QID1_RST);
3158 
3159 	DLB2_CSR_WR(hw,
3160 		    DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
3161 		    DLB2_LSP_CQ2PRIOV_RST);
3162 }
3163 
3164 static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
3165 						 struct dlb2_hw_domain *domain)
3166 {
3167 	struct dlb2_list_entry *iter;
3168 	struct dlb2_ldb_port *port;
3169 	int i;
3170 	RTE_SET_USED(iter);
3171 
3172 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3173 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
3174 			__dlb2_domain_reset_ldb_port_registers(hw, port);
3175 	}
3176 }
3177 
3178 static void
3179 __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3180 				       struct dlb2_dir_pq_pair *port)
3181 {
3182 	u32 reg = 0;
3183 
3184 	DLB2_CSR_WR(hw,
3185 		    DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
3186 		    DLB2_CHP_DIR_CQ2VAS_RST);
3187 
3188 	DLB2_CSR_WR(hw,
3189 		    DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
3190 		    DLB2_LSP_CQ_DIR_DSBL_RST);
3191 
3192 	DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
3193 
3194 	if (hw->ver == DLB2_HW_V2)
3195 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
3196 	else
3197 		DLB2_CSR_WR(hw,
3198 			    DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
3199 
3200 	DLB2_CSR_WR(hw,
3201 		    DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
3202 		    DLB2_CHP_DIR_CQ_DEPTH_RST);
3203 
3204 	DLB2_CSR_WR(hw,
3205 		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
3206 		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
3207 
3208 	DLB2_CSR_WR(hw,
3209 		    DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
3210 		    DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
3211 
3212 	DLB2_CSR_WR(hw,
3213 		    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
3214 		    DLB2_CHP_DIR_CQ_INT_ENB_RST);
3215 
3216 	DLB2_CSR_WR(hw,
3217 		    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
3218 		    DLB2_SYS_DIR_CQ_ISR_RST);
3219 
3220 	DLB2_CSR_WR(hw,
3221 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
3222 						      port->id.phys_id),
3223 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
3224 
3225 	DLB2_CSR_WR(hw,
3226 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
3227 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
3228 
3229 	DLB2_CSR_WR(hw,
3230 		    DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
3231 		    DLB2_CHP_DIR_CQ_WPTR_RST);
3232 
3233 	DLB2_CSR_WR(hw,
3234 		    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
3235 		    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
3236 
3237 	DLB2_CSR_WR(hw,
3238 		    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
3239 		    DLB2_SYS_DIR_CQ_ADDR_L_RST);
3240 
3241 	DLB2_CSR_WR(hw,
3242 		    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
3243 		    DLB2_SYS_DIR_CQ_ADDR_U_RST);
3244 
3245 	DLB2_CSR_WR(hw,
3246 		    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
3247 		    DLB2_SYS_DIR_CQ_AT_RST);
3253 
3254 	DLB2_CSR_WR(hw,
3255 		    DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
3256 		    DLB2_SYS_DIR_CQ_PASID_RST);
3257 
3258 	DLB2_CSR_WR(hw,
3259 		    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
3260 		    DLB2_SYS_DIR_CQ_FMT_RST);
3261 
3262 	DLB2_CSR_WR(hw,
3263 		    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
3264 		    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
3265 
3266 	DLB2_CSR_WR(hw,
3267 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
3268 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
3269 
3270 	DLB2_CSR_WR(hw,
3271 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
3272 		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
3273 
3274 	DLB2_CSR_WR(hw,
3275 		    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
3276 		    DLB2_SYS_DIR_PP2VAS_RST);
3277 
3282 	DLB2_CSR_WR(hw,
3283 		    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
3284 		    DLB2_SYS_DIR_PP2VDEV_RST);
3285 
3286 	if (port->id.vdev_owned) {
3287 		unsigned int offs;
3288 		u32 virt_id;
3289 
3290 		/*
3291 		 * DLB uses producer port address bits 17:12 to determine the
3292 		 * producer port ID. In Scalable IOV mode, PP accesses come
3293 		 * through the PF MMIO window for the physical producer port,
3294 		 * so for translation purposes the virtual and physical port
3295 		 * IDs are equal.
3296 		 */
3297 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
3298 			virt_id = port->id.virt_id;
3299 		else
3300 			virt_id = port->id.phys_id;
3301 
3302 		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
3303 			virt_id;
3304 
3305 		DLB2_CSR_WR(hw,
3306 			    DLB2_SYS_VF_DIR_VPP2PP(offs),
3307 			    DLB2_SYS_VF_DIR_VPP2PP_RST);
3308 
3309 		DLB2_CSR_WR(hw,
3310 			    DLB2_SYS_VF_DIR_VPP_V(offs),
3311 			    DLB2_SYS_VF_DIR_VPP_V_RST);
3312 	}
3313 
3314 	DLB2_CSR_WR(hw,
3315 		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
3316 		    DLB2_SYS_DIR_PP_V_RST);
3317 }
3318 
3319 static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
3320 						 struct dlb2_hw_domain *domain)
3321 {
3322 	struct dlb2_list_entry *iter;
3323 	struct dlb2_dir_pq_pair *port;
3324 	RTE_SET_USED(iter);
3325 
3326 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
3327 		__dlb2_domain_reset_dir_port_registers(hw, port);
3328 }
3329 
3330 static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
3331 						  struct dlb2_hw_domain *domain)
3332 {
3333 	struct dlb2_list_entry *iter;
3334 	struct dlb2_ldb_queue *queue;
3335 	RTE_SET_USED(iter);
3336 
3337 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3338 		unsigned int queue_id = queue->id.phys_id;
3339 		int i;
3340 
3341 		DLB2_CSR_WR(hw,
3342 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
3343 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
3344 
3345 		DLB2_CSR_WR(hw,
3346 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
3347 			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
3348 
3349 		DLB2_CSR_WR(hw,
3350 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
3351 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
3352 
3353 		DLB2_CSR_WR(hw,
3354 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
3355 			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
3356 
3357 		DLB2_CSR_WR(hw,
3358 			    DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
3359 			    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
3360 
3361 		DLB2_CSR_WR(hw,
3362 			    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
3363 			    DLB2_LSP_QID_LDB_INFL_LIM_RST);
3364 
3365 		DLB2_CSR_WR(hw,
3366 			    DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
3367 			    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
3368 
3369 		DLB2_CSR_WR(hw,
3370 			    DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
3371 			    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
3372 
3373 		DLB2_CSR_WR(hw,
3374 			    DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
3375 			    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
3376 
3377 		DLB2_CSR_WR(hw,
3378 			    DLB2_SYS_LDB_QID_ITS(queue_id),
3379 			    DLB2_SYS_LDB_QID_ITS_RST);
3380 
3381 		DLB2_CSR_WR(hw,
3382 			    DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
3383 			    DLB2_CHP_ORD_QID_SN_RST);
3384 
3385 		DLB2_CSR_WR(hw,
3386 			    DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
3387 			    DLB2_CHP_ORD_QID_SN_MAP_RST);
3388 
3389 		DLB2_CSR_WR(hw,
3390 			    DLB2_SYS_LDB_QID_V(queue_id),
3391 			    DLB2_SYS_LDB_QID_V_RST);
3392 
3393 		DLB2_CSR_WR(hw,
3394 			    DLB2_SYS_LDB_QID_CFG_V(queue_id),
3395 			    DLB2_SYS_LDB_QID_CFG_V_RST);
3396 
3397 		if (queue->sn_cfg_valid) {
3398 			u32 offs[2];
3399 
3400 			offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
3401 							 queue->sn_slot);
3402 			offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
3403 							 queue->sn_slot);
3404 
3405 			DLB2_CSR_WR(hw,
3406 				    offs[queue->sn_group],
3407 				    DLB2_RO_GRP_0_SLT_SHFT_RST);
3408 		}
3409 
3410 		for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
3411 			DLB2_CSR_WR(hw,
3412 				    DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
3413 				    DLB2_LSP_QID2CQIDIX_00_RST);
3414 
3415 			DLB2_CSR_WR(hw,
3416 				    DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
3417 				    DLB2_LSP_QID2CQIDIX2_00_RST);
3418 
3419 			DLB2_CSR_WR(hw,
3420 				    DLB2_ATM_QID2CQIDIX(queue_id, i),
3421 				    DLB2_ATM_QID2CQIDIX_00_RST);
3422 		}
3423 	}
3424 }
3425 
3426 static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
3427 						  struct dlb2_hw_domain *domain)
3428 {
3429 	struct dlb2_list_entry *iter;
3430 	struct dlb2_dir_pq_pair *queue;
3431 	RTE_SET_USED(iter);
3432 
3433 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
3434 		DLB2_CSR_WR(hw,
3435 			    DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
3436 						       queue->id.phys_id),
3437 			    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
3438 
3439 		DLB2_CSR_WR(hw,
3440 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
3441 							  queue->id.phys_id),
3442 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
3443 
3444 		DLB2_CSR_WR(hw,
3445 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
3446 							  queue->id.phys_id),
3447 			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
3448 
3449 		DLB2_CSR_WR(hw,
3450 			    DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
3451 							 queue->id.phys_id),
3452 			    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
3453 
3454 		DLB2_CSR_WR(hw,
3455 			    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
3456 			    DLB2_SYS_DIR_QID_ITS_RST);
3457 
3458 		DLB2_CSR_WR(hw,
3459 			    DLB2_SYS_DIR_QID_V(queue->id.phys_id),
3460 			    DLB2_SYS_DIR_QID_V_RST);
3461 	}
3462 }
3463 
3468 static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
3469 					struct dlb2_hw_domain *domain)
3470 {
3471 	dlb2_domain_reset_ldb_port_registers(hw, domain);
3472 
3473 	dlb2_domain_reset_dir_port_registers(hw, domain);
3474 
3475 	dlb2_domain_reset_ldb_queue_registers(hw, domain);
3476 
3477 	dlb2_domain_reset_dir_queue_registers(hw, domain);
3478 
3479 	if (hw->ver == DLB2_HW_V2) {
3480 		DLB2_CSR_WR(hw,
3481 			    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
3482 			    DLB2_CHP_CFG_LDB_VAS_CRD_RST);
3483 
3484 		DLB2_CSR_WR(hw,
3485 			    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
3486 			    DLB2_CHP_CFG_DIR_VAS_CRD_RST);
3487 	} else
3488 		DLB2_CSR_WR(hw,
3489 			    DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
3490 			    DLB2_CHP_CFG_VAS_CRD_RST);
3491 }
3492 
3493 static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
3494 					    struct dlb2_hw_domain *domain)
3495 {
3496 	struct dlb2_dir_pq_pair *tmp_dir_port;
3497 	struct dlb2_ldb_queue *tmp_ldb_queue;
3498 	struct dlb2_ldb_port *tmp_ldb_port;
3499 	struct dlb2_list_entry *iter1;
3500 	struct dlb2_list_entry *iter2;
3501 	struct dlb2_function_resources *rsrcs;
3502 	struct dlb2_dir_pq_pair *dir_port;
3503 	struct dlb2_ldb_queue *ldb_queue;
3504 	struct dlb2_ldb_port *ldb_port;
3505 	struct dlb2_list_head *list;
3506 	int ret, i;
3507 	RTE_SET_USED(tmp_dir_port);
3508 	RTE_SET_USED(tmp_ldb_queue);
3509 	RTE_SET_USED(tmp_ldb_port);
3510 	RTE_SET_USED(iter1);
3511 	RTE_SET_USED(iter2);
3512 
3513 	rsrcs = domain->parent_func;
3514 
3515 	/* Move the domain's ldb queues to the function's avail list */
3516 	list = &domain->used_ldb_queues;
3517 	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3518 		if (ldb_queue->sn_cfg_valid) {
3519 			struct dlb2_sn_group *grp;
3520 
3521 			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
3522 
3523 			dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
3524 			ldb_queue->sn_cfg_valid = false;
3525 		}
3526 
3527 		ldb_queue->owned = false;
3528 		ldb_queue->num_mappings = 0;
3529 		ldb_queue->num_pending_additions = 0;
3530 
3531 		dlb2_list_del(&domain->used_ldb_queues,
3532 			      &ldb_queue->domain_list);
3533 		dlb2_list_add(&rsrcs->avail_ldb_queues,
3534 			      &ldb_queue->func_list);
3535 		rsrcs->num_avail_ldb_queues++;
3536 	}
3537 
3538 	list = &domain->avail_ldb_queues;
3539 	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
3540 		ldb_queue->owned = false;
3541 
3542 		dlb2_list_del(&domain->avail_ldb_queues,
3543 			      &ldb_queue->domain_list);
3544 		dlb2_list_add(&rsrcs->avail_ldb_queues,
3545 			      &ldb_queue->func_list);
3546 		rsrcs->num_avail_ldb_queues++;
3547 	}
3548 
3549 	/* Move the domain's ldb ports to the function's avail list */
3550 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3551 		list = &domain->used_ldb_ports[i];
3552 		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3553 				       iter1, iter2) {
3554 			int j;
3555 
3556 			ldb_port->owned = false;
3557 			ldb_port->configured = false;
3558 			ldb_port->num_pending_removals = 0;
3559 			ldb_port->num_mappings = 0;
3560 			ldb_port->init_tkn_cnt = 0;
3561 			ldb_port->cq_depth = 0;
3562 			for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
3563 				ldb_port->qid_map[j].state =
3564 					DLB2_QUEUE_UNMAPPED;
3565 
3566 			dlb2_list_del(&domain->used_ldb_ports[i],
3567 				      &ldb_port->domain_list);
3568 			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3569 				      &ldb_port->func_list);
3570 			rsrcs->num_avail_ldb_ports[i]++;
3571 		}
3572 
3573 		list = &domain->avail_ldb_ports[i];
3574 		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
3575 				       iter1, iter2) {
3576 			ldb_port->owned = false;
3577 
3578 			dlb2_list_del(&domain->avail_ldb_ports[i],
3579 				      &ldb_port->domain_list);
3580 			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
3581 				      &ldb_port->func_list);
3582 			rsrcs->num_avail_ldb_ports[i]++;
3583 		}
3584 	}
3585 
3586 	/* Move the domain's dir ports to the function's avail list */
3587 	list = &domain->used_dir_pq_pairs;
3588 	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3589 		dir_port->owned = false;
3590 		dir_port->port_configured = false;
3591 		dir_port->init_tkn_cnt = 0;
3592 
3593 		dlb2_list_del(&domain->used_dir_pq_pairs,
3594 			      &dir_port->domain_list);
3595 
3596 		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3597 			      &dir_port->func_list);
3598 		rsrcs->num_avail_dir_pq_pairs++;
3599 	}
3600 
3601 	list = &domain->avail_dir_pq_pairs;
3602 	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
3603 		dir_port->owned = false;
3604 
3605 		dlb2_list_del(&domain->avail_dir_pq_pairs,
3606 			      &dir_port->domain_list);
3607 
3608 		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
3609 			      &dir_port->func_list);
3610 		rsrcs->num_avail_dir_pq_pairs++;
3611 	}
3612 
3613 	/* Return hist list entries to the function */
3614 	ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
3615 				    domain->hist_list_entry_base,
3616 				    domain->total_hist_list_entries);
3617 	if (ret) {
3618 		DLB2_HW_ERR(hw,
3619 			    "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
3620 			    __func__);
3621 		return ret;
3622 	}
3623 
3624 	domain->total_hist_list_entries = 0;
3625 	domain->avail_hist_list_entries = 0;
3626 	domain->hist_list_entry_base = 0;
3627 	domain->hist_list_entry_offset = 0;
3628 
3629 	if (hw->ver == DLB2_HW_V2_5) {
3630 		rsrcs->num_avail_entries += domain->num_credits;
3631 		domain->num_credits = 0;
3632 	} else {
3633 		rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
3634 		domain->num_ldb_credits = 0;
3635 
3636 		rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
3637 		domain->num_dir_credits = 0;
3638 	}
3639 	rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
3640 	rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
3641 	domain->num_avail_aqed_entries = 0;
3642 	domain->num_used_aqed_entries = 0;
3643 
3644 	domain->num_pending_removals = 0;
3645 	domain->num_pending_additions = 0;
3646 	domain->configured = false;
3647 	domain->started = false;
3648 
3649 	/*
3650 	 * Move the domain out of the used_domains list and back to the
3651 	 * function's avail_domains list.
3652 	 */
3653 	dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
3654 	dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
3655 	rsrcs->num_avail_domains++;
3656 
3657 	return 0;
3658 }
3659 
3660 static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
3661 					    struct dlb2_hw_domain *domain,
3662 					    struct dlb2_ldb_queue *queue)
3663 {
3664 	struct dlb2_ldb_port *port = NULL;
3665 	int ret, i;
3666 
3667 	/* If a domain has LDB queues, it must have LDB ports */
3668 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
3669 		port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
3670 					  typeof(*port));
3671 		if (port)
3672 			break;
3673 	}
3674 
3675 	if (port == NULL) {
3676 		DLB2_HW_ERR(hw,
3677 			    "[%s()] Internal error: No configured LDB ports\n",
3678 			    __func__);
3679 		return -EFAULT;
3680 	}
3681 
3682 	/* If necessary, free up a QID slot in this CQ */
3683 	if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
3684 		struct dlb2_ldb_queue *mapped_queue;
3685 
3686 		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
3687 
3688 		ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
3689 		if (ret)
3690 			return ret;
3691 	}
3692 
3693 	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
3694 	if (ret)
3695 		return ret;
3696 
3697 	return dlb2_domain_drain_mapped_queues(hw, domain);
3698 }
3699 
3700 static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
3701 					     struct dlb2_hw_domain *domain)
3702 {
3703 	struct dlb2_list_entry *iter;
3704 	struct dlb2_ldb_queue *queue;
3705 	int ret;
3706 	RTE_SET_USED(iter);
3707 
3708 	/* If the domain hasn't been started, there's no traffic to drain */
3709 	if (!domain->started)
3710 		return 0;
3711 
3712 	/*
3713 	 * Pre-condition: the unattached queue must not have any outstanding
3714 	 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
3715 	 * prior to this in dlb2_domain_drain_mapped_queues().
3716 	 */
3717 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
3718 		if (queue->num_mappings != 0 ||
3719 		    dlb2_ldb_queue_is_empty(hw, queue))
3720 			continue;
3721 
3722 		ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
3723 		if (ret)
3724 			return ret;
3725 	}
3726 
3727 	return 0;
3728 }
3729 
3730 /**
3731  * dlb2_reset_domain() - reset a scheduling domain
3732  * @hw: dlb2_hw handle for a particular device.
3733  * @domain_id: domain ID.
3734  * @vdev_req: indicates whether this request came from a vdev.
3735  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
3736  *
3737  * This function resets and frees a DLB 2.0 scheduling domain and its associated
3738  * resources.
3739  *
3740  * Pre-condition: the driver must ensure software has stopped sending QEs
3741  * through this domain's producer ports before invoking this function, or
3742  * undefined behavior will result.
3743  *
3744  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
3745  * device.
3746  *
3747  * Return:
3748  * Returns 0 upon success, -1 otherwise.
3749  *
3750  * EINVAL - Invalid domain ID, or the domain is not configured.
3751  * EFAULT - Internal error. (Possibly caused if software did not meet the
3752  *	    pre-condition.)
3753  * ETIMEDOUT - Hardware component didn't reset in the expected time.
3754  */
3755 int dlb2_reset_domain(struct dlb2_hw *hw,
3756 		      u32 domain_id,
3757 		      bool vdev_req,
3758 		      unsigned int vdev_id)
3759 {
3760 	struct dlb2_hw_domain *domain;
3761 	int ret;
3762 
3763 	dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
3764 
3765 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3766 
3767 	if (domain == NULL || !domain->configured)
3768 		return -EINVAL;
3769 
3770 	/* Disable VPPs */
3771 	if (vdev_req) {
3772 		dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
3773 
3774 		dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
3775 	}
3776 
3777 	/* Disable CQ interrupts */
3778 	dlb2_domain_disable_dir_port_interrupts(hw, domain);
3779 
3780 	dlb2_domain_disable_ldb_port_interrupts(hw, domain);
3781 
3782 	/*
3783 	 * For each queue owned by this domain, disable its write permissions to
3784 	 * cause any traffic sent to it to be dropped. Well-behaved software
3785 	 * should not be sending QEs at this point.
3786 	 */
3787 	dlb2_domain_disable_dir_queue_write_perms(hw, domain);
3788 
3789 	dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
3790 
3791 	/* Turn off completion tracking on all the domain's PPs. */
3792 	dlb2_domain_disable_ldb_seq_checks(hw, domain);
3793 
3794 	/*
3795 	 * Disable the LDB CQs and drain them in order to complete the map and
3796 	 * unmap procedures, which require zero CQ inflights and zero QID
3797 	 * inflights respectively.
3798 	 */
3799 	dlb2_domain_disable_ldb_cqs(hw, domain);
3800 
3801 	dlb2_domain_drain_ldb_cqs(hw, domain, false);
3802 
3803 	ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
3804 	if (ret)
3805 		return ret;
3806 
3807 	ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
3808 	if (ret)
3809 		return ret;
3810 
3811 	ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
3812 	if (ret)
3813 		return ret;
3814 
3815 	/* Re-enable the CQs in order to drain the mapped queues. */
3816 	dlb2_domain_enable_ldb_cqs(hw, domain);
3817 
3818 	ret = dlb2_domain_drain_mapped_queues(hw, domain);
3819 	if (ret)
3820 		return ret;
3821 
3822 	ret = dlb2_domain_drain_unmapped_queues(hw, domain);
3823 	if (ret)
3824 		return ret;
3825 
3826 	/* Done draining LDB QEs, so disable the CQs. */
3827 	dlb2_domain_disable_ldb_cqs(hw, domain);
3828 
3829 	dlb2_domain_drain_dir_queues(hw, domain);
3830 
3831 	/* Done draining DIR QEs, so disable the CQs. */
3832 	dlb2_domain_disable_dir_cqs(hw, domain);
3833 
3834 	/* Disable PPs */
3835 	dlb2_domain_disable_dir_producer_ports(hw, domain);
3836 
3837 	dlb2_domain_disable_ldb_producer_ports(hw, domain);
3838 
3839 	ret = dlb2_domain_verify_reset_success(hw, domain);
3840 	if (ret)
3841 		return ret;
3842 
3843 	/* Reset the QID and port state. */
3844 	dlb2_domain_reset_registers(hw, domain);
3845 
3846 	/* Hardware reset complete. Reset the domain's software state */
3847 	return dlb2_domain_reset_software_state(hw, domain);
3848 }
3849 
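/*
 * Illustrative sketch (editor's example, compiled out; not part of the
 * driver): how a PF-level teardown path might invoke dlb2_reset_domain() for
 * a domain it owns. The function name below is hypothetical, and the caller
 * is assumed to have already quiesced the domain's producer ports per the
 * pre-condition above.
 */
#if 0
static int dlb2_example_teardown_domain(struct dlb2_hw *hw, u32 domain_id)
{
	int ret;

	/* PF-originated request: vdev_req == false, so vdev_id is ignored. */
	ret = dlb2_reset_domain(hw, domain_id, false, 0);

	/*
	 * -EINVAL:    unknown domain ID, or the domain was never configured.
	 * -ETIMEDOUT: a hardware unit did not quiesce in time; the device
	 *             likely needs a broader reset before the domain's
	 *             resources can be reused.
	 */
	return ret;
}
#endif
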
3850 static void
3851 dlb2_log_create_ldb_queue_args(struct dlb2_hw *hw,
3852 			       u32 domain_id,
3853 			       struct dlb2_create_ldb_queue_args *args,
3854 			       bool vdev_req,
3855 			       unsigned int vdev_id)
3856 {
3857 	DLB2_HW_DBG(hw, "DLB2 create load-balanced queue arguments:\n");
3858 	if (vdev_req)
3859 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
3860 	DLB2_HW_DBG(hw, "\tDomain ID:                  %d\n",
3861 		    domain_id);
3862 	DLB2_HW_DBG(hw, "\tNumber of sequence numbers: %d\n",
3863 		    args->num_sequence_numbers);
3864 	DLB2_HW_DBG(hw, "\tNumber of QID inflights:    %d\n",
3865 		    args->num_qid_inflights);
3866 	DLB2_HW_DBG(hw, "\tNumber of ATM inflights:    %d\n",
3867 		    args->num_atomic_inflights);
3868 }
3869 
3870 static int
3871 dlb2_ldb_queue_attach_to_sn_group(struct dlb2_hw *hw,
3872 				  struct dlb2_ldb_queue *queue,
3873 				  struct dlb2_create_ldb_queue_args *args)
3874 {
3875 	int slot = -1;
3876 	int i;
3877 
3878 	queue->sn_cfg_valid = false;
3879 
3880 	if (args->num_sequence_numbers == 0)
3881 		return 0;
3882 
3883 	for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3884 		struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3885 
3886 		if (group->sequence_numbers_per_queue ==
3887 		    args->num_sequence_numbers &&
3888 		    !dlb2_sn_group_full(group)) {
3889 			slot = dlb2_sn_group_alloc_slot(group);
3890 			if (slot >= 0)
3891 				break;
3892 		}
3893 	}
3894 
3895 	if (slot == -1) {
3896 		DLB2_HW_ERR(hw,
3897 			    "[%s():%d] Internal error: no sequence number slots available\n",
3898 			    __func__, __LINE__);
3899 		return -EFAULT;
3900 	}
3901 
3902 	queue->sn_cfg_valid = true;
3903 	queue->sn_group = i;
3904 	queue->sn_slot = slot;
3905 	return 0;
3906 }
3907 
3908 static int
3909 dlb2_verify_create_ldb_queue_args(struct dlb2_hw *hw,
3910 				  u32 domain_id,
3911 				  struct dlb2_create_ldb_queue_args *args,
3912 				  struct dlb2_cmd_response *resp,
3913 				  bool vdev_req,
3914 				  unsigned int vdev_id,
3915 				  struct dlb2_hw_domain **out_domain,
3916 				  struct dlb2_ldb_queue **out_queue)
3917 {
3918 	struct dlb2_hw_domain *domain;
3919 	struct dlb2_ldb_queue *queue;
3920 	int i;
3921 
3922 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
3923 
3924 	if (!domain) {
3925 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
3926 		return -EINVAL;
3927 	}
3928 
3929 	if (!domain->configured) {
3930 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
3931 		return -EINVAL;
3932 	}
3933 
3934 	if (domain->started) {
3935 		resp->status = DLB2_ST_DOMAIN_STARTED;
3936 		return -EINVAL;
3937 	}
3938 
3939 	queue = DLB2_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
3940 	if (!queue) {
3941 		resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
3942 		return -EINVAL;
3943 	}
3944 
3945 	if (args->num_sequence_numbers) {
3946 		for (i = 0; i < DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
3947 			struct dlb2_sn_group *group = &hw->rsrcs.sn_groups[i];
3948 
3949 			if (group->sequence_numbers_per_queue ==
3950 			    args->num_sequence_numbers &&
3951 			    !dlb2_sn_group_full(group))
3952 				break;
3953 		}
3954 
3955 		if (i == DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
3956 			resp->status = DLB2_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
3957 			return -EINVAL;
3958 		}
3959 	}
3960 
3961 	if (args->num_qid_inflights < 1 || args->num_qid_inflights > 2048) {
3962 		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3963 		return -EINVAL;
3964 	}
3965 
3966 	/* Inflights must be <= number of sequence numbers if ordered */
3967 	if (args->num_sequence_numbers != 0 &&
3968 	    args->num_qid_inflights > args->num_sequence_numbers) {
3969 		resp->status = DLB2_ST_INVALID_QID_INFLIGHT_ALLOCATION;
3970 		return -EINVAL;
3971 	}
3972 
3973 	if (domain->num_avail_aqed_entries < args->num_atomic_inflights) {
3974 		resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
3975 		return -EINVAL;
3976 	}
3977 
3978 	if (args->num_atomic_inflights &&
3979 	    args->lock_id_comp_level != 0 &&
3980 	    args->lock_id_comp_level != 64 &&
3981 	    args->lock_id_comp_level != 128 &&
3982 	    args->lock_id_comp_level != 256 &&
3983 	    args->lock_id_comp_level != 512 &&
3984 	    args->lock_id_comp_level != 1024 &&
3985 	    args->lock_id_comp_level != 2048 &&
3986 	    args->lock_id_comp_level != 4096 &&
3987 	    args->lock_id_comp_level != 65536) {
3988 		resp->status = DLB2_ST_INVALID_LOCK_ID_COMP_LEVEL;
3989 		return -EINVAL;
3990 	}
3991 
3992 	*out_domain = domain;
3993 	*out_queue = queue;
3994 
3995 	return 0;
3996 }
3997 
3998 static int
3999 dlb2_ldb_queue_attach_resources(struct dlb2_hw *hw,
4000 				struct dlb2_hw_domain *domain,
4001 				struct dlb2_ldb_queue *queue,
4002 				struct dlb2_create_ldb_queue_args *args)
4003 {
4004 	int ret;
4005 	ret = dlb2_ldb_queue_attach_to_sn_group(hw, queue, args);
4006 	if (ret)
4007 		return ret;
4008 
4009 	/* Attach QID inflights */
4010 	queue->num_qid_inflights = args->num_qid_inflights;
4011 
4012 	/* Attach atomic inflights */
4013 	queue->aqed_limit = args->num_atomic_inflights;
4014 
4015 	domain->num_avail_aqed_entries -= args->num_atomic_inflights;
4016 	domain->num_used_aqed_entries += args->num_atomic_inflights;
4017 
4018 	return 0;
4019 }
4020 
4021 static void dlb2_configure_ldb_queue(struct dlb2_hw *hw,
4022 				     struct dlb2_hw_domain *domain,
4023 				     struct dlb2_ldb_queue *queue,
4024 				     struct dlb2_create_ldb_queue_args *args,
4025 				     bool vdev_req,
4026 				     unsigned int vdev_id)
4027 {
4028 	struct dlb2_sn_group *sn_group;
4029 	unsigned int offs;
4030 	u32 reg = 0;
4031 	u32 alimit;
4032 
4033 	/* QID write permissions are turned on when the domain is started */
4034 	offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.phys_id;
4035 
4036 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), reg);
4037 
4038 	/*
4039 	 * Unordered QIDs get 4K inflights, ordered get as many as the number
4040 	 * of sequence numbers.
4041 	 */
4042 	DLB2_BITS_SET(reg, args->num_qid_inflights,
4043 		      DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
4044 	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
4045 						  queue->id.phys_id), reg);
4046 
4047 	alimit = queue->aqed_limit;
4048 
4049 	if (alimit > DLB2_MAX_NUM_AQED_ENTRIES)
4050 		alimit = DLB2_MAX_NUM_AQED_ENTRIES;
4051 
4052 	reg = 0;
4053 	DLB2_BITS_SET(reg, alimit, DLB2_LSP_QID_AQED_ACTIVE_LIM_LIMIT);
4054 	DLB2_CSR_WR(hw,
4055 		    DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver,
4056 						 queue->id.phys_id), reg);
4057 
4058 	reg = 0;
4059 	switch (args->lock_id_comp_level) {
4060 	case 64:
4061 		DLB2_BITS_SET(reg, 1, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4062 		break;
4063 	case 128:
4064 		DLB2_BITS_SET(reg, 2, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4065 		break;
4066 	case 256:
4067 		DLB2_BITS_SET(reg, 3, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4068 		break;
4069 	case 512:
4070 		DLB2_BITS_SET(reg, 4, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4071 		break;
4072 	case 1024:
4073 		DLB2_BITS_SET(reg, 5, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4074 		break;
4075 	case 2048:
4076 		DLB2_BITS_SET(reg, 6, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4077 		break;
4078 	case 4096:
4079 		DLB2_BITS_SET(reg, 7, DLB2_AQED_QID_HID_WIDTH_COMPRESS_CODE);
4080 		break;
4081 	default:
4082 		/* No compression by default */
4083 		break;
4084 	}
4085 
4086 	DLB2_CSR_WR(hw, DLB2_AQED_QID_HID_WIDTH(queue->id.phys_id), reg);
4087 
4088 	reg = 0;
4089 	/* Don't timestamp QEs that pass through this queue */
4090 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_ITS(queue->id.phys_id), reg);
4091 
4092 	DLB2_BITS_SET(reg, args->depth_threshold,
4093 		      DLB2_LSP_QID_ATM_DEPTH_THRSH_THRESH);
4094 	DLB2_CSR_WR(hw,
4095 		    DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver,
4096 						 queue->id.phys_id), reg);
4097 
4098 	reg = 0;
4099 	DLB2_BITS_SET(reg, args->depth_threshold,
4100 		      DLB2_LSP_QID_NALDB_DEPTH_THRSH_THRESH);
4101 	DLB2_CSR_WR(hw,
4102 		    DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue->id.phys_id),
4103 		    reg);
4104 
4105 	/*
4106 	 * This register limits the number of inflight flows a queue can have
4107 	 * at one time.  It has an upper bound of 2048, but can be
4108 	 * over-subscribed. 512 is chosen so that a single queue does not use
4109 	 * the entire atomic storage, but can use a substantial portion if
4110 	 * needed.
4111 	 */
4112 	reg = 0;
4113 	DLB2_BITS_SET(reg, 512, DLB2_AQED_QID_FID_LIM_QID_FID_LIMIT);
4114 	DLB2_CSR_WR(hw, DLB2_AQED_QID_FID_LIM(queue->id.phys_id), reg);
4115 
4116 	/* Configure SNs */
4117 	reg = 0;
4118 	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
4119 	DLB2_BITS_SET(reg, sn_group->mode, DLB2_CHP_ORD_QID_SN_MAP_MODE);
4120 	DLB2_BITS_SET(reg, queue->sn_slot, DLB2_CHP_ORD_QID_SN_MAP_SLOT);
4121 	DLB2_BITS_SET(reg, sn_group->id, DLB2_CHP_ORD_QID_SN_MAP_GRP);
4122 
4123 	DLB2_CSR_WR(hw,
4124 		    DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue->id.phys_id), reg);
4125 
4126 	reg = 0;
4127 	DLB2_BITS_SET(reg, (args->num_sequence_numbers != 0),
4128 		 DLB2_SYS_LDB_QID_CFG_V_SN_CFG_V);
4129 	DLB2_BITS_SET(reg, (args->num_atomic_inflights != 0),
4130 		 DLB2_SYS_LDB_QID_CFG_V_FID_CFG_V);
4131 
4132 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_CFG_V(queue->id.phys_id), reg);
4133 
4134 	if (vdev_req) {
4135 		offs = vdev_id * DLB2_MAX_NUM_LDB_QUEUES + queue->id.virt_id;
4136 
4137 		reg = 0;
4138 		DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VQID_V_VQID_V);
4139 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(offs), reg);
4140 
4141 		reg = 0;
4142 		DLB2_BITS_SET(reg, queue->id.phys_id,
4143 			      DLB2_SYS_VF_LDB_VQID2QID_QID);
4144 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(offs), reg);
4145 
4146 		reg = 0;
4147 		DLB2_BITS_SET(reg, queue->id.virt_id,
4148 			      DLB2_SYS_LDB_QID2VQID_VQID);
4149 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID2VQID(queue->id.phys_id), reg);
4150 	}
4151 
4152 	reg = 0;
4153 	DLB2_BIT_SET(reg, DLB2_SYS_LDB_QID_V_QID_V);
4154 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_QID_V(queue->id.phys_id), reg);
4155 }
4156 
4157 /**
4158  * dlb2_hw_create_ldb_queue() - create a load-balanced queue
4159  * @hw: dlb2_hw handle for a particular device.
4160  * @domain_id: domain ID.
4161  * @args: queue creation arguments.
4162  * @resp: response structure.
4163  * @vdev_req: indicates whether this request came from a vdev.
4164  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4165  *
4166  * This function creates a load-balanced queue.
4167  *
4168  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4169  * device.
4170  *
4171  * Return:
4172  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4173  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4174  * contains the queue ID.
4175  *
4176  * resp->id contains a virtual ID if vdev_req is true.
4177  *
4178  * Errors:
4179  * EINVAL - A requested resource is unavailable, the domain is not configured,
4180  *	    the domain has already been started, or a queue configuration
4181  *	    argument is invalid.
4182  * EFAULT - Internal error (resp->status not set).
4183  */
4184 int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
4185 			     u32 domain_id,
4186 			     struct dlb2_create_ldb_queue_args *args,
4187 			     struct dlb2_cmd_response *resp,
4188 			     bool vdev_req,
4189 			     unsigned int vdev_id)
4190 {
4191 	struct dlb2_hw_domain *domain;
4192 	struct dlb2_ldb_queue *queue;
4193 	int ret;
4194 
4195 	dlb2_log_create_ldb_queue_args(hw, domain_id, args, vdev_req, vdev_id);
4196 
4197 	/*
4198 	 * Verify that hardware resources are available before attempting to
4199 	 * satisfy the request. This simplifies the error unwinding code.
4200 	 */
4201 	ret = dlb2_verify_create_ldb_queue_args(hw,
4202 						domain_id,
4203 						args,
4204 						resp,
4205 						vdev_req,
4206 						vdev_id,
4207 						&domain,
4208 						&queue);
4209 	if (ret)
4210 		return ret;
4211 
4212 	ret = dlb2_ldb_queue_attach_resources(hw, domain, queue, args);
4213 
4214 	if (ret) {
4215 		DLB2_HW_ERR(hw,
4216 			    "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
4217 			    __func__, __LINE__);
4218 		return ret;
4219 	}
4220 
4221 	dlb2_configure_ldb_queue(hw, domain, queue, args, vdev_req, vdev_id);
4222 
4223 	queue->num_mappings = 0;
4224 
4225 	queue->configured = true;
4226 
4227 	/*
4228 	 * Configuration succeeded, so move the resource from the 'avail' to
4229 	 * the 'used' list.
4230 	 */
4231 	dlb2_list_del(&domain->avail_ldb_queues, &queue->domain_list);
4232 
4233 	dlb2_list_add(&domain->used_ldb_queues, &queue->domain_list);
4234 
4235 	resp->status = 0;
4236 	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
4237 
4238 	return 0;
4239 }
4240 
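/*
 * Illustrative sketch (editor's example, compiled out): creating an ordered
 * load-balanced queue from the PF, within the constraints checked by
 * dlb2_verify_create_ldb_queue_args() above. The function name and argument
 * values are assumptions for illustration, not recommendations; 64 sequence
 * numbers assumes an SN group is configured for 64 SNs per queue.
 */
#if 0
static int dlb2_example_create_ordered_ldb_queue(struct dlb2_hw *hw,
						 u32 domain_id)
{
	struct dlb2_create_ldb_queue_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.num_sequence_numbers = 64;  /* ordered queue: SNs != 0 */
	args.num_qid_inflights = 64;     /* must be 1..2048 and <= SNs here */
	args.num_atomic_inflights = 0;   /* no atomic scheduling */
	args.lock_id_comp_level = 0;     /* only relevant with atomic inflights */
	args.depth_threshold = 256;      /* queue depth threshold */

	ret = dlb2_hw_create_ldb_queue(hw, domain_id, &args, &resp,
				       false /* vdev_req */, 0 /* vdev_id */);
	if (ret)
		return ret;              /* resp.status holds the detail */

	return (int)resp.id;             /* physical queue ID for a PF request */
}
#endif
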
4241 static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
4242 				       struct dlb2_hw_domain *domain,
4243 				       struct dlb2_ldb_port *port,
4244 				       bool vdev_req,
4245 				       unsigned int vdev_id)
4246 {
4247 	u32 reg = 0;
4248 
4249 	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
4250 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
4251 
4252 	if (vdev_req) {
4253 		unsigned int offs;
4254 		u32 virt_id;
4255 
4256 		/*
4257 		 * DLB uses producer port address bits 17:12 to determine the
4258 		 * producer port ID. In Scalable IOV mode, PP accesses come
4259 		 * through the PF MMIO window for the physical producer port,
4260 		 * so for translation purposes the virtual and physical port
4261 		 * IDs are equal.
4262 		 */
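		/*
		 * For illustration: bits 17:12 imply a 4 KiB stride per
		 * producer port, i.e. (hypothetically) LDB PP k sits at
		 * PP region base + k * 0x1000, and hardware recovers k as
		 * (address >> 12) & 0x3f.
		 */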
4263 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
4264 			virt_id = port->id.virt_id;
4265 		else
4266 			virt_id = port->id.phys_id;
4267 
4268 		reg = 0;
4269 		DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
4270 		offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
4271 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
4272 
4273 		reg = 0;
4274 		DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
4275 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
4276 
4277 		reg = 0;
4278 		DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
4279 		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
4280 	}
4281 
4282 	reg = 0;
4283 	DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
4284 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
4285 }
4286 
4287 static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
4288 				      struct dlb2_hw_domain *domain,
4289 				      struct dlb2_ldb_port *port,
4290 				      uintptr_t cq_dma_base,
4291 				      struct dlb2_create_ldb_port_args *args,
4292 				      bool vdev_req,
4293 				      unsigned int vdev_id)
4294 {
4295 	u32 hl_base = 0;
4296 	u32 reg = 0;
4297 	u32 ds = 0;
4298 
4299 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4300 	DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
4301 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
4302 
4303 	reg = cq_dma_base >> 32;
4304 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
4305 
4306 	/*
4307 	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4308 	 * cache lines out-of-order (but QEs within a cache line are always
4309 	 * updated in-order).
4310 	 */
4311 	reg = 0;
4312 	DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
4313 	DLB2_BITS_SET(reg,
4314 		 !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4315 		 DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
4316 	DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
4317 
4318 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
4319 
4320 	port->cq_depth = args->cq_depth;
4321 
4322 	if (args->cq_depth <= 8) {
4323 		ds = 1;
4324 	} else if (args->cq_depth == 16) {
4325 		ds = 2;
4326 	} else if (args->cq_depth == 32) {
4327 		ds = 3;
4328 	} else if (args->cq_depth == 64) {
4329 		ds = 4;
4330 	} else if (args->cq_depth == 128) {
4331 		ds = 5;
4332 	} else if (args->cq_depth == 256) {
4333 		ds = 6;
4334 	} else if (args->cq_depth == 512) {
4335 		ds = 7;
4336 	} else if (args->cq_depth == 1024) {
4337 		ds = 8;
4338 	} else {
4339 		DLB2_HW_ERR(hw,
4340 			    "[%s():%d] Internal error: invalid CQ depth\n",
4341 			    __func__, __LINE__);
4342 		return -EFAULT;
4343 	}
4344 
4345 	reg = 0;
4346 	DLB2_BITS_SET(reg, ds,
4347 		      DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4348 	DLB2_CSR_WR(hw,
4349 		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4350 		    reg);
4351 
4352 	/*
4353 	 * To support CQs with depth less than 8, program the token count
4354 	 * register with a non-zero initial value. Operations such as domain
4355 	 * reset must take this initial value into account when quiescing the
4356 	 * CQ.
4357 	 */
4358 	port->init_tkn_cnt = 0;
4359 
4360 	if (args->cq_depth < 8) {
4361 		reg = 0;
4362 		port->init_tkn_cnt = 8 - args->cq_depth;
4363 
4364 		DLB2_BITS_SET(reg,
4365 			      port->init_tkn_cnt,
4366 			      DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
4367 		DLB2_CSR_WR(hw,
4368 			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4369 			    reg);
4370 	} else {
4371 		DLB2_CSR_WR(hw,
4372 			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
4373 			    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
4374 	}
4375 
4376 	reg = 0;
4377 	DLB2_BITS_SET(reg, ds,
4378 		      DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
4379 	DLB2_CSR_WR(hw,
4380 		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4381 		    reg);
4382 
4383 	/* Reset the CQ write pointer */
4384 	DLB2_CSR_WR(hw,
4385 		    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
4386 		    DLB2_CHP_LDB_CQ_WPTR_RST);
4387 
4388 	reg = 0;
4389 	DLB2_BITS_SET(reg,
4390 		      port->hist_list_entry_limit - 1,
4391 		      DLB2_CHP_HIST_LIST_LIM_LIMIT);
4392 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
4393 
4394 	DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
4395 		      DLB2_CHP_HIST_LIST_BASE_BASE);
4396 	DLB2_CSR_WR(hw,
4397 		    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
4398 		    hl_base);
4399 
4400 	/*
4401 	 * The inflight limit sets a cap on the number of QEs for which this CQ
4402 	 * can owe completions at one time.
4403 	 */
4404 	reg = 0;
4405 	DLB2_BITS_SET(reg, args->cq_history_list_size,
4406 		      DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
4407 	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
4408 		    reg);
4409 
4410 	reg = 0;
4411 	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4412 		      DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
4413 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
4414 		    reg);
4415 
4416 	reg = 0;
4417 	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
4418 		      DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
4419 	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
4420 		    reg);
4421 
4422 	/*
4423 	 * Address translation (AT) settings: 0: untranslated, 2: translated
4424 	 * (see ATS spec regarding Address Type field for more details)
4425 	 */
4426 
4427 	if (hw->ver == DLB2_HW_V2) {
4428 		reg = 0;
4429 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
4430 	}
4431 
4432 	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
4433 		reg = 0;
4434 		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
4435 			      DLB2_SYS_LDB_CQ_PASID_PASID);
4436 		DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
4437 	}
4438 
4439 	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
4440 
4441 	reg = 0;
4442 	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
4443 	DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
4444 
4445 	/* Disable the port's QID mappings */
4446 	reg = 0;
4447 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
4448 
4449 	return 0;
4450 }
4451 
4452 static bool
4453 dlb2_cq_depth_is_valid(u32 depth)
4454 {
4455 	if (depth != 1 && depth != 2 &&
4456 	    depth != 4 && depth != 8 &&
4457 	    depth != 16 && depth != 32 &&
4458 	    depth != 64 && depth != 128 &&
4459 	    depth != 256 && depth != 512 &&
4460 	    depth != 1024)
4461 		return false;
4462 
4463 	return true;
4464 }
4465 
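/*
 * Illustrative sketch (editor's example, compiled out): the LDB and DIR CQ
 * configuration routines in this file translate the CQ depth into a
 * token-depth-select code via an if/else ladder. For the depths accepted by
 * dlb2_cq_depth_is_valid(), that ladder is equivalent to the hypothetical
 * helper below: depths of 8 or less encode to 1, and each doubling above 8
 * adds one (16 -> 2, 32 -> 3, ..., 1024 -> 8).
 */
#if 0
static u32 dlb2_example_cq_tkn_depth_sel(u32 cq_depth)
{
	u32 ds = 1;

	while (cq_depth > 8) {
		cq_depth >>= 1;
		ds++;
	}

	return ds;
}
#endif
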
4466 static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
4467 				   struct dlb2_hw_domain *domain,
4468 				   struct dlb2_ldb_port *port,
4469 				   uintptr_t cq_dma_base,
4470 				   struct dlb2_create_ldb_port_args *args,
4471 				   bool vdev_req,
4472 				   unsigned int vdev_id)
4473 {
4474 	int ret, i;
4475 
4476 	port->hist_list_entry_base = domain->hist_list_entry_base +
4477 				     domain->hist_list_entry_offset;
4478 	port->hist_list_entry_limit = port->hist_list_entry_base +
4479 				      args->cq_history_list_size;
4480 
4481 	domain->hist_list_entry_offset += args->cq_history_list_size;
4482 	domain->avail_hist_list_entries -= args->cq_history_list_size;
4483 
4484 	ret = dlb2_ldb_port_configure_cq(hw,
4485 					 domain,
4486 					 port,
4487 					 cq_dma_base,
4488 					 args,
4489 					 vdev_req,
4490 					 vdev_id);
4491 	if (ret)
4492 		return ret;
4493 
4494 	dlb2_ldb_port_configure_pp(hw,
4495 				   domain,
4496 				   port,
4497 				   vdev_req,
4498 				   vdev_id);
4499 
4500 	dlb2_ldb_port_cq_enable(hw, port);
4501 
4502 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
4503 		port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
4504 	port->num_mappings = 0;
4505 
4506 	port->enabled = true;
4507 
4508 	port->configured = true;
4509 
4510 	return 0;
4511 }
4512 
4513 static void
4514 dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
4515 			      u32 domain_id,
4516 			      uintptr_t cq_dma_base,
4517 			      struct dlb2_create_ldb_port_args *args,
4518 			      bool vdev_req,
4519 			      unsigned int vdev_id)
4520 {
4521 	DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
4522 	if (vdev_req)
4523 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4524 	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4525 		    domain_id);
4526 	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4527 		    args->cq_depth);
4528 	DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
4529 		    args->cq_history_list_size);
4530 	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4531 		    cq_dma_base);
4532 	DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
4533 	DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
4534 		    args->cos_strict);
4535 }
4536 
4537 static int
4538 dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
4539 				 u32 domain_id,
4540 				 uintptr_t cq_dma_base,
4541 				 struct dlb2_create_ldb_port_args *args,
4542 				 struct dlb2_cmd_response *resp,
4543 				 bool vdev_req,
4544 				 unsigned int vdev_id,
4545 				 struct dlb2_hw_domain **out_domain,
4546 				 struct dlb2_ldb_port **out_port,
4547 				 int *out_cos_id)
4548 {
4549 	struct dlb2_hw_domain *domain;
4550 	struct dlb2_ldb_port *port;
4551 	int i, id;
4552 
4553 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4554 
4555 	if (!domain) {
4556 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4557 		return -EINVAL;
4558 	}
4559 
4560 	if (!domain->configured) {
4561 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4562 		return -EINVAL;
4563 	}
4564 
4565 	if (domain->started) {
4566 		resp->status = DLB2_ST_DOMAIN_STARTED;
4567 		return -EINVAL;
4568 	}
4569 
4570 	if (args->cos_id >= DLB2_NUM_COS_DOMAINS &&
4571 	    (args->cos_id != DLB2_COS_DEFAULT || args->cos_strict)) {
4572 		resp->status = DLB2_ST_INVALID_COS_ID;
4573 		return -EINVAL;
4574 	}
4575 
4576 	if (args->cos_strict) {
4577 		id = args->cos_id;
4578 		port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4579 					  typeof(*port));
4580 	} else {
4581 		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
4582 			if (args->cos_id == DLB2_COS_DEFAULT) {
4583 				/* Allocate from best performing cos */
4584 				u32 cos_idx = i + DLB2_MAX_NUM_LDB_PORTS;
4585 				id = hw->ldb_pp_allocations[cos_idx];
4586 			} else {
4587 				id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
4588 			}
4589 
4590 			port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
4591 						  typeof(*port));
4592 			if (port)
4593 				break;
4594 		}
4595 	}
4596 
4597 	if (!port) {
4598 		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
4599 		return -EINVAL;
4600 	}
4601 
4602 	DLB2_HW_INFO(hw, ": LDB: cos=%d port:%d\n", id, port->id.phys_id);
4603 
4604 	/* Check cache-line alignment */
4605 	if ((cq_dma_base & 0x3F) != 0) {
4606 		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4607 		return -EINVAL;
4608 	}
4609 
4610 	if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4611 		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4612 		return -EINVAL;
4613 	}
4614 
4615 	/* The history list size must be >= 1 */
4616 	if (!args->cq_history_list_size) {
4617 		resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
4618 		return -EINVAL;
4619 	}
4620 
4621 	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
4622 		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
4623 		return -EINVAL;
4624 	}
4625 
4626 	*out_domain = domain;
4627 	*out_port = port;
4628 	*out_cos_id = id;
4629 
4630 	return 0;
4631 }
4632 
4633 /**
4634  * dlb2_hw_create_ldb_port() - create a load-balanced port
4635  * @hw: dlb2_hw handle for a particular device.
4636  * @domain_id: domain ID.
4637  * @args: port creation arguments.
4638  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
4639  * @resp: response structure.
4640  * @vdev_req: indicates whether this request came from a vdev.
4641  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
4642  *
4643  * This function creates a load-balanced port.
4644  *
4645  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
4646  * device.
4647  *
4648  * Return:
4649  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
4650  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
4651  * contains the port ID.
4652  *
4653  * resp->id contains a virtual ID if vdev_req is true.
4654  *
4655  * Errors:
4656  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
4657  *	    pointer address is not properly aligned, the domain is not
4658  *	    configured, or the domain has already been started.
4659  * EFAULT - Internal error (resp->status not set).
4660  */
4661 int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
4662 			    u32 domain_id,
4663 			    struct dlb2_create_ldb_port_args *args,
4664 			    uintptr_t cq_dma_base,
4665 			    struct dlb2_cmd_response *resp,
4666 			    bool vdev_req,
4667 			    unsigned int vdev_id)
4668 {
4669 	struct dlb2_hw_domain *domain;
4670 	struct dlb2_ldb_port *port;
4671 	int ret, cos_id;
4672 
4673 	dlb2_log_create_ldb_port_args(hw,
4674 				      domain_id,
4675 				      cq_dma_base,
4676 				      args,
4677 				      vdev_req,
4678 				      vdev_id);
4679 
4680 	/*
4681 	 * Verify that hardware resources are available before attempting to
4682 	 * satisfy the request. This simplifies the error unwinding code.
4683 	 */
4684 	ret = dlb2_verify_create_ldb_port_args(hw,
4685 					       domain_id,
4686 					       cq_dma_base,
4687 					       args,
4688 					       resp,
4689 					       vdev_req,
4690 					       vdev_id,
4691 					       &domain,
4692 					       &port,
4693 					       &cos_id);
4694 	if (ret)
4695 		return ret;
4696 
4697 	ret = dlb2_configure_ldb_port(hw,
4698 				      domain,
4699 				      port,
4700 				      cq_dma_base,
4701 				      args,
4702 				      vdev_req,
4703 				      vdev_id);
4704 	if (ret)
4705 		return ret;
4706 
4707 	/*
4708 	 * Configuration succeeded, so move the resource from the 'avail' to
4709 	 * the 'used' list.
4710 	 */
4711 	dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
4712 
4713 	dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
4714 
4715 	resp->status = 0;
4716 	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
4717 
4718 	return 0;
4719 }
4720 
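/*
 * Illustrative sketch (editor's example, compiled out): creating a
 * load-balanced port from the PF. The function name and values are
 * assumptions; in practice the caller supplies a DMA-able, 64B-aligned CQ
 * region sized for the chosen depth.
 */
#if 0
static int dlb2_example_create_ldb_port(struct dlb2_hw *hw, u32 domain_id,
					uintptr_t cq_dma_base)
{
	struct dlb2_create_ldb_port_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.cq_depth = 64;             /* must satisfy dlb2_cq_depth_is_valid() */
	args.cq_history_list_size = 64; /* >= 1 and within the domain's HL budget */
	args.cos_id = DLB2_COS_DEFAULT; /* let the driver pick the class of service */
	args.cos_strict = 0;

	/* cq_dma_base must be 64B aligned (PA or IOVA, per the kernel-doc). */
	ret = dlb2_hw_create_ldb_port(hw, domain_id, &args, cq_dma_base, &resp,
				      false /* vdev_req */, 0 /* vdev_id */);
	if (ret)
		return ret;             /* resp.status has the detailed reason */

	return (int)resp.id;            /* physical port ID for a PF request */
}
#endif
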
4721 static void
4722 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
4723 			      u32 domain_id,
4724 			      uintptr_t cq_dma_base,
4725 			      struct dlb2_create_dir_port_args *args,
4726 			      bool vdev_req,
4727 			      unsigned int vdev_id)
4728 {
4729 	DLB2_HW_DBG(hw, "DLB2 create directed port arguments:\n");
4730 	if (vdev_req)
4731 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
4732 	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
4733 		    domain_id);
4734 	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
4735 		    args->cq_depth);
4736 	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
4737 		    cq_dma_base);
4738 }
4739 
4740 static struct dlb2_dir_pq_pair *
4741 dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw,
4742 			    u32 id,
4743 			    bool vdev_req,
4744 			    struct dlb2_hw_domain *domain)
4745 {
4746 	struct dlb2_list_entry *iter;
4747 	struct dlb2_dir_pq_pair *port;
4748 	RTE_SET_USED(iter);
4749 
4750 	if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver))
4751 		return NULL;
4752 
4753 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
4754 		if ((!vdev_req && port->id.phys_id == id) ||
4755 		    (vdev_req && port->id.virt_id == id))
4756 			return port;
4757 	}
4758 
4759 	return NULL;
4760 }
4761 
4762 static int
4763 dlb2_verify_create_dir_port_args(struct dlb2_hw *hw,
4764 				 u32 domain_id,
4765 				 uintptr_t cq_dma_base,
4766 				 struct dlb2_create_dir_port_args *args,
4767 				 struct dlb2_cmd_response *resp,
4768 				 bool vdev_req,
4769 				 unsigned int vdev_id,
4770 				 struct dlb2_hw_domain **out_domain,
4771 				 struct dlb2_dir_pq_pair **out_port)
4772 {
4773 	struct dlb2_hw_domain *domain;
4774 	struct dlb2_dir_pq_pair *pq;
4775 
4776 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
4777 
4778 	if (!domain) {
4779 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
4780 		return -EINVAL;
4781 	}
4782 
4783 	if (!domain->configured) {
4784 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
4785 		return -EINVAL;
4786 	}
4787 
4788 	if (domain->started) {
4789 		resp->status = DLB2_ST_DOMAIN_STARTED;
4790 		return -EINVAL;
4791 	}
4792 
4793 	if (args->queue_id != -1) {
4794 		/*
4795 		 * If the user claims the queue is already configured, validate
4796 		 * the queue ID, its domain, and whether the queue is
4797 		 * configured.
4798 		 */
4799 		pq = dlb2_get_domain_used_dir_pq(hw,
4800 						 args->queue_id,
4801 						 vdev_req,
4802 						 domain);
4803 
4804 		if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
4805 		    !pq->queue_configured) {
4806 			resp->status = DLB2_ST_INVALID_DIR_QUEUE_ID;
4807 			return -EINVAL;
4808 		}
4809 	} else {
4810 		/*
4811 		 * If the port's queue is not configured, validate that a free
4812 		 * port-queue pair is available.
4813 		 * First try the reserved ('rsvd') list if the port is a producer or
4814 		 * the 'avail' list is empty; otherwise fall back to the 'avail' list.
4815 		 */
4816 		if (!dlb2_list_empty(&domain->rsvd_dir_pq_pairs) &&
4817 		    (args->is_producer ||
4818 		     dlb2_list_empty(&domain->avail_dir_pq_pairs)))
4819 			pq = DLB2_DOM_LIST_HEAD(domain->rsvd_dir_pq_pairs,
4820 						typeof(*pq));
4821 		else
4822 			pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
4823 						typeof(*pq));
4824 
4825 		if (!pq) {
4826 			resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
4827 			return -EINVAL;
4828 		}
4829 		DLB2_HW_INFO(hw, ": DIR: port:%d is_producer=%d\n",
4830 			      pq->id.phys_id, args->is_producer);
4831 
4832 	}
4833 
4834 	/* Check cache-line alignment */
4835 	if ((cq_dma_base & 0x3F) != 0) {
4836 		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
4837 		return -EINVAL;
4838 	}
4839 
4840 	if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
4841 		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
4842 		return -EINVAL;
4843 	}
4844 
4845 	*out_domain = domain;
4846 	*out_port = pq;
4847 
4848 	return 0;
4849 }
4850 
4851 static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw,
4852 				       struct dlb2_hw_domain *domain,
4853 				       struct dlb2_dir_pq_pair *port,
4854 				       bool vdev_req,
4855 				       unsigned int vdev_id)
4856 {
4857 	u32 reg = 0;
4858 
4859 	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_DIR_PP2VAS_VAS);
4860 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VAS(port->id.phys_id), reg);
4861 
4862 	if (vdev_req) {
4863 		unsigned int offs;
4864 		u32 virt_id;
4865 
4866 		/*
4867 		 * DLB uses producer port address bits 17:12 to determine the
4868 		 * producer port ID. In Scalable IOV mode, PP accesses come
4869 		 * through the PF MMIO window for the physical producer port,
4870 		 * so for translation purposes the virtual and physical port
4871 		 * IDs are equal.
4872 		 */
4873 		if (hw->virt_mode == DLB2_VIRT_SRIOV)
4874 			virt_id = port->id.virt_id;
4875 		else
4876 			virt_id = port->id.phys_id;
4877 
4878 		reg = 0;
4879 		DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_DIR_VPP2PP_PP);
4880 		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
4881 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), reg);
4882 
4883 		reg = 0;
4884 		DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_PP2VDEV_VDEV);
4885 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP2VDEV(port->id.phys_id), reg);
4886 
4887 		reg = 0;
4888 		DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VPP_V_VPP_V);
4889 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), reg);
4890 	}
4891 
4892 	reg = 0;
4893 	DLB2_BIT_SET(reg, DLB2_SYS_DIR_PP_V_PP_V);
4894 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_PP_V(port->id.phys_id), reg);
4895 }
4896 
4897 static int dlb2_dir_port_configure_cq(struct dlb2_hw *hw,
4898 				      struct dlb2_hw_domain *domain,
4899 				      struct dlb2_dir_pq_pair *port,
4900 				      uintptr_t cq_dma_base,
4901 				      struct dlb2_create_dir_port_args *args,
4902 				      bool vdev_req,
4903 				      unsigned int vdev_id)
4904 {
4905 	u32 reg = 0;
4906 	u32 ds = 0;
4907 
4908 	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
4909 	DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_DIR_CQ_ADDR_L_ADDR_L);
4910 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id), reg);
4911 
4912 	reg = cq_dma_base >> 32;
4913 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id), reg);
4914 
4915 	/*
4916 	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
4917 	 * cache lines out-of-order (but QEs within a cache line are always
4918 	 * updated in-order).
4919 	 */
4920 	reg = 0;
4921 	DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_DIR_CQ2VF_PF_RO_VF);
4922 	DLB2_BITS_SET(reg, !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
4923 		 DLB2_SYS_DIR_CQ2VF_PF_RO_IS_PF);
4924 	DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ2VF_PF_RO_RO);
4925 
4926 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id), reg);
4927 
4928 	if (args->cq_depth <= 8) {
4929 		ds = 1;
4930 	} else if (args->cq_depth == 16) {
4931 		ds = 2;
4932 	} else if (args->cq_depth == 32) {
4933 		ds = 3;
4934 	} else if (args->cq_depth == 64) {
4935 		ds = 4;
4936 	} else if (args->cq_depth == 128) {
4937 		ds = 5;
4938 	} else if (args->cq_depth == 256) {
4939 		ds = 6;
4940 	} else if (args->cq_depth == 512) {
4941 		ds = 7;
4942 	} else if (args->cq_depth == 1024) {
4943 		ds = 8;
4944 	} else {
4945 		DLB2_HW_ERR(hw,
4946 			    "[%s():%d] Internal error: invalid CQ depth\n",
4947 			    __func__, __LINE__);
4948 		return -EFAULT;
4949 	}
4950 
4951 	reg = 0;
4952 	DLB2_BITS_SET(reg, ds,
4953 		      DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
4954 	DLB2_CSR_WR(hw,
4955 		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
4956 		    reg);
4957 
4958 	/*
4959 	 * To support CQs with depth less than 8, program the token count
4960 	 * register with a non-zero initial value. Operations such as domain
4961 	 * reset must take this initial value into account when quiescing the
4962 	 * CQ.
4963 	 */
4964 	port->init_tkn_cnt = 0;
4965 
4966 	if (args->cq_depth < 8) {
4967 		reg = 0;
4968 		port->init_tkn_cnt = 8 - args->cq_depth;
4969 
4970 		DLB2_BITS_SET(reg, port->init_tkn_cnt,
4971 			      DLB2_LSP_CQ_DIR_TKN_CNT_COUNT);
4972 		DLB2_CSR_WR(hw,
4973 			    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4974 			    reg);
4975 	} else {
4976 		DLB2_CSR_WR(hw,
4977 			    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
4978 			    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
4979 	}
4980 
4981 	reg = 0;
4982 	DLB2_BITS_SET(reg, ds,
4983 		      DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_TOKEN_DEPTH_SELECT_V2);
4984 	DLB2_CSR_WR(hw,
4985 		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
4986 						      port->id.phys_id),
4987 		    reg);
4988 
4989 	/* Reset the CQ write pointer */
4990 	DLB2_CSR_WR(hw,
4991 		    DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
4992 		    DLB2_CHP_DIR_CQ_WPTR_RST);
4993 
4994 	/* Virtualize the PPID */
4995 	reg = 0;
4996 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_FMT(port->id.phys_id), reg);
4997 
4998 	/*
4999 	 * Address translation (AT) settings: 0: untranslated, 2: translated
5000 	 * (see ATS spec regarding Address Type field for more details)
5001 	 */
5002 	if (hw->ver == DLB2_HW_V2) {
5003 		reg = 0;
5004 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_AT(port->id.phys_id), reg);
5005 	}
5006 
5007 	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
5008 		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
5009 			      DLB2_SYS_DIR_CQ_PASID_PASID);
5010 		DLB2_BIT_SET(reg, DLB2_SYS_DIR_CQ_PASID_FMT2);
5011 	}
5012 
5013 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id), reg);
5014 
5015 	reg = 0;
5016 	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_DIR_CQ2VAS_CQ2VAS);
5017 	DLB2_CSR_WR(hw, DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id), reg);
5018 
5019 	return 0;
5020 }
5021 
5022 static int dlb2_configure_dir_port(struct dlb2_hw *hw,
5023 				   struct dlb2_hw_domain *domain,
5024 				   struct dlb2_dir_pq_pair *port,
5025 				   uintptr_t cq_dma_base,
5026 				   struct dlb2_create_dir_port_args *args,
5027 				   bool vdev_req,
5028 				   unsigned int vdev_id)
5029 {
5030 	int ret;
5031 
5032 	ret = dlb2_dir_port_configure_cq(hw,
5033 					 domain,
5034 					 port,
5035 					 cq_dma_base,
5036 					 args,
5037 					 vdev_req,
5038 					 vdev_id);
5039 
5040 	if (ret)
5041 		return ret;
5042 
5043 	dlb2_dir_port_configure_pp(hw,
5044 				   domain,
5045 				   port,
5046 				   vdev_req,
5047 				   vdev_id);
5048 
5049 	dlb2_dir_port_cq_enable(hw, port);
5050 
5051 	port->enabled = true;
5052 
5053 	port->port_configured = true;
5054 
5055 	return 0;
5056 }
5057 
5058 /**
5059  * dlb2_hw_create_dir_port() - create a directed port
5060  * @hw: dlb2_hw handle for a particular device.
5061  * @domain_id: domain ID.
5062  * @args: port creation arguments.
5063  * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
5064  * @resp: response structure.
5065  * @vdev_req: indicates whether this request came from a vdev.
5066  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5067  *
5068  * This function creates a directed port.
5069  *
5070  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5071  * device.
5072  *
5073  * Return:
5074  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5075  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5076  * contains the port ID.
5077  *
5078  * resp->id contains a virtual ID if vdev_req is true.
5079  *
5080  * Errors:
5081  * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
5082  *	    pointer address is not properly aligned, the domain is not
5083  *	    configured, or the domain has already been started.
5084  * EFAULT - Internal error (resp->status not set).
5085  */
5086 int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
5087 			    u32 domain_id,
5088 			    struct dlb2_create_dir_port_args *args,
5089 			    uintptr_t cq_dma_base,
5090 			    struct dlb2_cmd_response *resp,
5091 			    bool vdev_req,
5092 			    unsigned int vdev_id)
5093 {
5094 	struct dlb2_dir_pq_pair *port;
5095 	struct dlb2_hw_domain *domain;
5096 	int ret;
5097 
5098 	dlb2_log_create_dir_port_args(hw,
5099 				      domain_id,
5100 				      cq_dma_base,
5101 				      args,
5102 				      vdev_req,
5103 				      vdev_id);
5104 
5105 	/*
5106 	 * Verify that hardware resources are available before attempting to
5107 	 * satisfy the request. This simplifies the error unwinding code.
5108 	 */
5109 	ret = dlb2_verify_create_dir_port_args(hw,
5110 					       domain_id,
5111 					       cq_dma_base,
5112 					       args,
5113 					       resp,
5114 					       vdev_req,
5115 					       vdev_id,
5116 					       &domain,
5117 					       &port);
5118 	if (ret)
5119 		return ret;
5120 
5121 	ret = dlb2_configure_dir_port(hw,
5122 				      domain,
5123 				      port,
5124 				      cq_dma_base,
5125 				      args,
5126 				      vdev_req,
5127 				      vdev_id);
5128 	if (ret)
5129 		return ret;
5130 
5131 	/*
5132 	 * Configuration succeeded, so move the resource from the 'avail' or
5133 	 * 'res' to the 'used' list (if it's not already there).
5134 	 */
5135 	if (args->queue_id == -1) {
5136 		struct dlb2_list_head *res = &domain->rsvd_dir_pq_pairs;
5137 		struct dlb2_list_head *avail = &domain->avail_dir_pq_pairs;
5138 
5139 		if ((args->is_producer && !dlb2_list_empty(res)) ||
5140 		     dlb2_list_empty(avail))
5141 			dlb2_list_del(res, &port->domain_list);
5142 		else
5143 			dlb2_list_del(avail, &port->domain_list);
5144 
5145 		dlb2_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
5146 	}
5147 
5148 	resp->status = 0;
5149 	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
5150 
5151 	return 0;
5152 }
5153 
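/*
 * Illustrative sketch (editor's example, compiled out): creating a directed
 * port from the PF. The function name and values are assumptions. Passing
 * queue_id == -1 asks the driver to take a free (or, for producer ports,
 * reserved) port/queue pair; passing a configured directed queue ID instead
 * links the new port to that existing queue.
 */
#if 0
static int dlb2_example_create_dir_port(struct dlb2_hw *hw, u32 domain_id,
					uintptr_t cq_dma_base)
{
	struct dlb2_create_dir_port_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.cq_depth = 64;   /* must satisfy dlb2_cq_depth_is_valid() */
	args.queue_id = -1;   /* no pre-existing directed queue to link to */
	args.is_producer = 0; /* not an enqueue-only (producer) port */

	/* cq_dma_base must be 64B aligned (PA or IOVA, per the kernel-doc). */
	ret = dlb2_hw_create_dir_port(hw, domain_id, &args, cq_dma_base, &resp,
				      false /* vdev_req */, 0 /* vdev_id */);
	if (ret)
		return ret;

	return (int)resp.id;  /* physical port ID for a PF request */
}
#endif
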
5154 static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
5155 				     struct dlb2_hw_domain *domain,
5156 				     struct dlb2_dir_pq_pair *queue,
5157 				     struct dlb2_create_dir_queue_args *args,
5158 				     bool vdev_req,
5159 				     unsigned int vdev_id)
5160 {
5161 	unsigned int offs;
5162 	u32 reg = 0;
5163 
5164 	/* QID write permissions are turned on when the domain is started */
5165 	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
5166 		queue->id.phys_id;
5167 
5168 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), reg);
5169 
5170 	/* Don't timestamp QEs that pass through this queue */
5171 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_ITS(queue->id.phys_id), reg);
5172 
5173 	reg = 0;
5174 	DLB2_BITS_SET(reg, args->depth_threshold,
5175 		      DLB2_LSP_QID_DIR_DEPTH_THRSH_THRESH);
5176 	DLB2_CSR_WR(hw,
5177 		    DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver, queue->id.phys_id),
5178 		    reg);
5179 
5180 	if (vdev_req) {
5181 		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) +
5182 			queue->id.virt_id;
5183 
5184 		reg = 0;
5185 		DLB2_BIT_SET(reg, DLB2_SYS_VF_DIR_VQID_V_VQID_V);
5186 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), reg);
5187 
5188 		reg = 0;
5189 		DLB2_BITS_SET(reg, queue->id.phys_id,
5190 			      DLB2_SYS_VF_DIR_VQID2QID_QID);
5191 		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), reg);
5192 	}
5193 
5194 	reg = 0;
5195 	DLB2_BIT_SET(reg, DLB2_SYS_DIR_QID_V_QID_V);
5196 	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), reg);
5197 
5198 	queue->queue_configured = true;
5199 }
5200 
5201 static void
5202 dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
5203 			       u32 domain_id,
5204 			       struct dlb2_create_dir_queue_args *args,
5205 			       bool vdev_req,
5206 			       unsigned int vdev_id)
5207 {
5208 	DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
5209 	if (vdev_req)
5210 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5211 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
5212 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
5213 }
5214 
5215 static int
5216 dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
5217 				  u32 domain_id,
5218 				  struct dlb2_create_dir_queue_args *args,
5219 				  struct dlb2_cmd_response *resp,
5220 				  bool vdev_req,
5221 				  unsigned int vdev_id,
5222 				  struct dlb2_hw_domain **out_domain,
5223 				  struct dlb2_dir_pq_pair **out_queue)
5224 {
5225 	struct dlb2_hw_domain *domain;
5226 	struct dlb2_dir_pq_pair *pq;
5227 
5228 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5229 
5230 	if (!domain) {
5231 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5232 		return -EINVAL;
5233 	}
5234 
5235 	if (!domain->configured) {
5236 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5237 		return -EINVAL;
5238 	}
5239 
5240 	if (domain->started) {
5241 		resp->status = DLB2_ST_DOMAIN_STARTED;
5242 		return -EINVAL;
5243 	}
5244 
5245 	/*
5246 	 * If the user claims the port is already configured, validate the port
5247 	 * ID, its domain, and whether the port is configured.
5248 	 */
5249 	if (args->port_id != -1) {
5250 		pq = dlb2_get_domain_used_dir_pq(hw,
5251 						 args->port_id,
5252 						 vdev_req,
5253 						 domain);
5254 
5255 		if (!pq || pq->domain_id.phys_id != domain->id.phys_id ||
5256 		    !pq->port_configured) {
5257 			resp->status = DLB2_ST_INVALID_PORT_ID;
5258 			return -EINVAL;
5259 		}
5260 	} else {
5261 		/*
5262 		 * If the queue's port is not configured, validate that a free
5263 		 * port-queue pair is available.
5264 		 */
5265 		pq = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
5266 					typeof(*pq));
5267 		if (!pq) {
5268 			resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
5269 			return -EINVAL;
5270 		}
5271 	}
5272 
5273 	*out_domain = domain;
5274 	*out_queue = pq;
5275 
5276 	return 0;
5277 }
5278 
5279 /**
5280  * dlb2_hw_create_dir_queue() - create a directed queue
5281  * @hw: dlb2_hw handle for a particular device.
5282  * @domain_id: domain ID.
5283  * @args: queue creation arguments.
5284  * @resp: response structure.
5285  * @vdev_req: indicates whether this request came from a vdev.
5286  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5287  *
5288  * This function creates a directed queue.
5289  *
5290  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5291  * device.
5292  *
5293  * Return:
5294  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5295  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
5296  * contains the queue ID.
5297  *
5298  * resp->id contains a virtual ID if vdev_req is true.
5299  *
5300  * Errors:
5301  * EINVAL - A requested resource is unavailable, the domain is not configured,
5302  *	    or the domain has already been started.
5303  * EFAULT - Internal error (resp->status not set).
5304  */
5305 int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
5306 			     u32 domain_id,
5307 			     struct dlb2_create_dir_queue_args *args,
5308 			     struct dlb2_cmd_response *resp,
5309 			     bool vdev_req,
5310 			     unsigned int vdev_id)
5311 {
5312 	struct dlb2_dir_pq_pair *queue;
5313 	struct dlb2_hw_domain *domain;
5314 	int ret;
5315 
5316 	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
5317 
5318 	/*
5319 	 * Verify that hardware resources are available before attempting to
5320 	 * satisfy the request. This simplifies the error unwinding code.
5321 	 */
5322 	ret = dlb2_verify_create_dir_queue_args(hw,
5323 						domain_id,
5324 						args,
5325 						resp,
5326 						vdev_req,
5327 						vdev_id,
5328 						&domain,
5329 						&queue);
5330 	if (ret)
5331 		return ret;
5332 
5333 	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
5334 
5335 	/*
5336 	 * Configuration succeeded, so move the resource from the 'avail' to
5337 	 * the 'used' list (if it's not already there).
5338 	 */
5339 	if (args->port_id == -1) {
5340 		dlb2_list_del(&domain->avail_dir_pq_pairs,
5341 			      &queue->domain_list);
5342 
5343 		dlb2_list_add(&domain->used_dir_pq_pairs,
5344 			      &queue->domain_list);
5345 	}
5346 
5347 	resp->status = 0;
5348 
5349 	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
5350 
5351 	return 0;
5352 }
5353 
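/*
 * Illustrative sketch (editor's example, compiled out): creating a directed
 * queue from the PF and pairing it with an already-created directed port.
 * The function name is hypothetical; port_id is assumed to be the ID returned
 * by a prior dlb2_hw_create_dir_port() call, and passing -1 instead would
 * take a free port/queue pair.
 */
#if 0
static int dlb2_example_create_dir_queue(struct dlb2_hw *hw, u32 domain_id,
					 int port_id)
{
	struct dlb2_create_dir_queue_args args = {0};
	struct dlb2_cmd_response resp = {0};
	int ret;

	args.port_id = port_id;
	args.depth_threshold = 256; /* queue depth threshold */

	ret = dlb2_hw_create_dir_queue(hw, domain_id, &args, &resp,
				       false /* vdev_req */, 0 /* vdev_id */);
	if (ret)
		return ret;

	return (int)resp.id;        /* physical queue ID for a PF request */
}
#endif
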
5354 static bool
5355 dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
5356 					   struct dlb2_ldb_queue *queue,
5357 					   int *slot)
5358 {
5359 	int i;
5360 
5361 	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
5362 		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
5363 
5364 		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
5365 		    map->pending_qid == queue->id.phys_id)
5366 			break;
5367 	}
5368 
5369 	*slot = i;
5370 
5371 	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
5372 }
5373 
5374 static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
5375 					      struct dlb2_ldb_queue *queue,
5376 					      struct dlb2_cmd_response *resp)
5377 {
5378 	enum dlb2_qid_map_state state;
5379 	int i;
5380 
5381 	/* Unused slot available? */
5382 	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
5383 		return 0;
5384 
5385 	/*
5386 	 * If the queue is already mapped (from the application's perspective),
5387 	 * this is simply a priority update.
5388 	 */
5389 	state = DLB2_QUEUE_MAPPED;
5390 	if (dlb2_port_find_slot_queue(port, state, queue, &i))
5391 		return 0;
5392 
5393 	state = DLB2_QUEUE_MAP_IN_PROG;
5394 	if (dlb2_port_find_slot_queue(port, state, queue, &i))
5395 		return 0;
5396 
5397 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
5398 		return 0;
5399 
5400 	/*
5401 	 * If the slot contains an unmap in progress, it's considered
5402 	 * available.
5403 	 */
5404 	state = DLB2_QUEUE_UNMAP_IN_PROG;
5405 	if (dlb2_port_find_slot(port, state, &i))
5406 		return 0;
5407 
5408 	state = DLB2_QUEUE_UNMAPPED;
5409 	if (dlb2_port_find_slot(port, state, &i))
5410 		return 0;
5411 
5412 	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
5413 	return -EINVAL;
5414 }
5415 
5416 static struct dlb2_ldb_queue *
5417 dlb2_get_domain_ldb_queue(u32 id,
5418 			  bool vdev_req,
5419 			  struct dlb2_hw_domain *domain)
5420 {
5421 	struct dlb2_list_entry *iter;
5422 	struct dlb2_ldb_queue *queue;
5423 	RTE_SET_USED(iter);
5424 
5425 	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
5426 		return NULL;
5427 
5428 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
5429 		if ((!vdev_req && queue->id.phys_id == id) ||
5430 		    (vdev_req && queue->id.virt_id == id))
5431 			return queue;
5432 	}
5433 
5434 	return NULL;
5435 }
5436 
5437 static struct dlb2_ldb_port *
5438 dlb2_get_domain_used_ldb_port(u32 id,
5439 			      bool vdev_req,
5440 			      struct dlb2_hw_domain *domain)
5441 {
5442 	struct dlb2_list_entry *iter;
5443 	struct dlb2_ldb_port *port;
5444 	int i;
5445 	RTE_SET_USED(iter);
5446 
5447 	if (id >= DLB2_MAX_NUM_LDB_PORTS)
5448 		return NULL;
5449 
5450 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
5451 		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
5452 			if ((!vdev_req && port->id.phys_id == id) ||
5453 			    (vdev_req && port->id.virt_id == id))
5454 				return port;
5455 		}
5456 
5457 		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
5458 			if ((!vdev_req && port->id.phys_id == id) ||
5459 			    (vdev_req && port->id.virt_id == id))
5460 				return port;
5461 		}
5462 	}
5463 
5464 	return NULL;
5465 }
5466 
5467 static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
5468 					      struct dlb2_ldb_port *port,
5469 					      int slot,
5470 					      struct dlb2_map_qid_args *args)
5471 {
5472 	u32 cq2priov;
5473 
5474 	/* Read-modify-write the priority and valid bit register */
5475 	cq2priov = DLB2_CSR_RD(hw,
5476 			       DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id));
5477 
5478 	cq2priov |= (1 << (slot + DLB2_LSP_CQ2PRIOV_V_LOC)) &
5479 		    DLB2_LSP_CQ2PRIOV_V;
5480 	cq2priov |= ((args->priority & 0x7) << slot * 3) &
5481 		    DLB2_LSP_CQ2PRIOV_PRIO;
5482 
5483 	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), cq2priov);
5484 
5485 	dlb2_flush_csr(hw);
5486 
5487 	port->qid_map[slot].priority = args->priority;
5488 }
5489 
5490 static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
5491 				    u32 domain_id,
5492 				    struct dlb2_map_qid_args *args,
5493 				    struct dlb2_cmd_response *resp,
5494 				    bool vdev_req,
5495 				    unsigned int vdev_id,
5496 				    struct dlb2_hw_domain **out_domain,
5497 				    struct dlb2_ldb_port **out_port,
5498 				    struct dlb2_ldb_queue **out_queue)
5499 {
5500 	struct dlb2_hw_domain *domain;
5501 	struct dlb2_ldb_queue *queue;
5502 	struct dlb2_ldb_port *port;
5503 	int id;
5504 
5505 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5506 
5507 	if (!domain) {
5508 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5509 		return -EINVAL;
5510 	}
5511 
5512 	if (!domain->configured) {
5513 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5514 		return -EINVAL;
5515 	}
5516 
5517 	id = args->port_id;
5518 
5519 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5520 
5521 	if (!port || !port->configured) {
5522 		resp->status = DLB2_ST_INVALID_PORT_ID;
5523 		return -EINVAL;
5524 	}
5525 
5526 	if (args->priority >= DLB2_QID_PRIORITIES) {
5527 		resp->status = DLB2_ST_INVALID_PRIORITY;
5528 		return -EINVAL;
5529 	}
5530 
5531 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5532 
5533 	if (!queue || !queue->configured) {
5534 		resp->status = DLB2_ST_INVALID_QID;
5535 		return -EINVAL;
5536 	}
5537 
5538 	if (queue->domain_id.phys_id != domain->id.phys_id) {
5539 		resp->status = DLB2_ST_INVALID_QID;
5540 		return -EINVAL;
5541 	}
5542 
5543 	if (port->domain_id.phys_id != domain->id.phys_id) {
5544 		resp->status = DLB2_ST_INVALID_PORT_ID;
5545 		return -EINVAL;
5546 	}
5547 
5548 	*out_domain = domain;
5549 	*out_queue = queue;
5550 	*out_port = port;
5551 
5552 	return 0;
5553 }
5554 
5555 static void dlb2_log_map_qid(struct dlb2_hw *hw,
5556 			     u32 domain_id,
5557 			     struct dlb2_map_qid_args *args,
5558 			     bool vdev_req,
5559 			     unsigned int vdev_id)
5560 {
5561 	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
5562 	if (vdev_req)
5563 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5564 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5565 		    domain_id);
5566 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5567 		    args->port_id);
5568 	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5569 		    args->qid);
5570 	DLB2_HW_DBG(hw, "\tPriority:  %d\n",
5571 		    args->priority);
5572 }
5573 
5574 /**
5575  * dlb2_hw_map_qid() - map a load-balanced queue to a load-balanced port
5576  * @hw: dlb2_hw handle for a particular device.
5577  * @domain_id: domain ID.
5578  * @args: map QID arguments.
5579  * @resp: response structure.
5580  * @vdev_req: indicates whether this request came from a vdev.
5581  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5582  *
5583  * This function configures the DLB to schedule QEs from the specified queue
5584  * to the specified port. Each load-balanced port can be mapped to up to 8
5585  * queues; each load-balanced queue can potentially map to all the
5586  * load-balanced ports.
5587  *
5588  * A successful return does not necessarily mean the mapping was configured. If
5589  * this function is unable to immediately map the queue to the port, it will
5590  * add the requested operation to a per-port list of pending map/unmap
5591  * operations, and (if it's not already running) launch a kernel thread that
5592  * periodically attempts to process all pending operations. In a sense, this is
5593  * an asynchronous function.
5594  *
5595  * This asynchronicity creates two views of the state of hardware: the actual
5596  * hardware state and the requested state (as if every request completed
5597  * immediately). If there are any pending map/unmap operations, the requested
5598  * state will differ from the actual state. All validation is performed with
5599  * respect to the pending state; for instance, if there are 8 pending map
5600  * operations for port X, a request for a 9th will fail because a load-balanced
5601  * port can only map up to 8 queues.
5602  *
5603  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5604  * device.
5605  *
5606  * Return:
5607  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5608  * assigned a detailed error code from enum dlb2_error.
5609  *
5610  * Errors:
5611  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5612  *	    the domain is not configured.
5613  * EFAULT - Internal error (resp->status not set).
5614  * EBUSY  - The requested port has outstanding detach operations.
5615  */
5616 int dlb2_hw_map_qid(struct dlb2_hw *hw,
5617 		    u32 domain_id,
5618 		    struct dlb2_map_qid_args *args,
5619 		    struct dlb2_cmd_response *resp,
5620 		    bool vdev_req,
5621 		    unsigned int vdev_id)
5622 {
5623 	struct dlb2_hw_domain *domain;
5624 	struct dlb2_ldb_queue *queue;
5625 	enum dlb2_qid_map_state st;
5626 	struct dlb2_ldb_port *port;
5627 	int ret, i;
5628 	u8 prio;
5629 
5630 	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
5631 
5632 	/*
5633 	 * Verify that hardware resources are available before attempting to
5634 	 * satisfy the request. This simplifies the error unwinding code.
5635 	 */
5636 	ret = dlb2_verify_map_qid_args(hw,
5637 				       domain_id,
5638 				       args,
5639 				       resp,
5640 				       vdev_req,
5641 				       vdev_id,
5642 				       &domain,
5643 				       &port,
5644 				       &queue);
5645 	if (ret)
5646 		return ret;
5647 
5648 	prio = args->priority;
5649 
5650 	/*
5651 	 * If there are any outstanding detach operations for this port,
5652 	 * attempt to complete them. This may be necessary to free up a QID
5653 	 * slot for this requested mapping.
5654 	 */
5655 	if (port->num_pending_removals) {
5656 		bool bool_ret;
5657 		bool_ret = dlb2_domain_finish_unmap_port(hw, domain, port);
5658 		if (!bool_ret)
5659 			return -EBUSY;
5660 	}
5661 
5662 	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
5663 	if (ret)
5664 		return ret;
5665 
5666 	/* Hardware requires disabling the CQ before mapping QIDs. */
5667 	if (port->enabled)
5668 		dlb2_ldb_port_cq_disable(hw, port);
5669 
5670 	/*
5671 	 * If this is only a priority change, don't perform the full QID->CQ
5672 	 * mapping procedure
5673 	 */
5674 	st = DLB2_QUEUE_MAPPED;
5675 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5676 		if (prio != port->qid_map[i].priority) {
5677 			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5678 			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5679 		}
5680 
5681 		st = DLB2_QUEUE_MAPPED;
5682 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5683 		if (ret)
5684 			return ret;
5685 
5686 		goto map_qid_done;
5687 	}
5688 
5689 	st = DLB2_QUEUE_UNMAP_IN_PROG;
5690 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5691 		if (prio != port->qid_map[i].priority) {
5692 			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
5693 			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
5694 		}
5695 
5696 		st = DLB2_QUEUE_MAPPED;
5697 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5698 		if (ret)
5699 			return ret;
5700 
5701 		goto map_qid_done;
5702 	}
5703 
5704 	/*
5705 	 * If this is a priority change on an in-progress mapping, don't
5706 	 * perform the full QID->CQ mapping procedure.
5707 	 */
5708 	st = DLB2_QUEUE_MAP_IN_PROG;
5709 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5710 		port->qid_map[i].priority = prio;
5711 
5712 		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5713 
5714 		goto map_qid_done;
5715 	}
5716 
5717 	/*
5718 	 * If this is a priority change on a pending mapping, update the
5719 	 * pending priority
5720 	 */
5721 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5722 		port->qid_map[i].pending_priority = prio;
5723 
5724 		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
5725 
5726 		goto map_qid_done;
5727 	}
5728 
5729 	/*
5730 	 * If all the CQ's slots are in use, then there's an unmap in progress
5731 	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
5732 	 * mapping to pending_map and return. When the removal is completed for
5733 	 * the slot's current occupant, this mapping will be performed.
5734 	 */
5735 	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
5736 		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
5737 			enum dlb2_qid_map_state new_st;
5738 
5739 			port->qid_map[i].pending_qid = queue->id.phys_id;
5740 			port->qid_map[i].pending_priority = prio;
5741 
5742 			new_st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
5743 
5744 			ret = dlb2_port_slot_state_transition(hw, port, queue,
5745 							      i, new_st);
5746 			if (ret)
5747 				return ret;
5748 
5749 			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
5750 
5751 			goto map_qid_done;
5752 		}
5753 	}
5754 
5755 	/*
5756 	 * If the domain has started, a special "dynamic" CQ->queue mapping
5757 	 * procedure is required in order to safely update the CQ<->QID tables.
5758 	 * The "static" procedure cannot be used when traffic is flowing,
5759 	 * because the CQ<->QID tables cannot be updated atomically and the
5760 	 * scheduler won't see the new mapping unless the queue's if_status
5761 	 * changes, which isn't guaranteed.
5762 	 */
5763 	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
5764 
5765 	/* If ret is less than zero, it's due to an internal error */
5766 	if (ret < 0)
5767 		return ret;
5768 
5769 map_qid_done:
5770 	if (port->enabled)
5771 		dlb2_ldb_port_cq_enable(hw, port);
5772 
5773 	resp->status = 0;
5774 
5775 	return 0;
5776 }
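
/*
 * Usage sketch (illustrative, not part of the driver): a minimal PF-context
 * caller of dlb2_hw_map_qid(). The domain, port, queue and priority values
 * are hypothetical placeholders; vdev_req is false because the request does
 * not originate from a vdev.
 */
static __rte_unused int
dlb2_example_map_qid(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_map_qid_args args = {
		.port_id = 0,
		.qid = 0,
		.priority = 0,	/* must be < DLB2_QID_PRIORITIES */
	};
	struct dlb2_cmd_response resp = {0};
	int ret;

	ret = dlb2_hw_map_qid(hw, domain_id, &args, &resp, false, 0);
	if (ret)
		DLB2_HW_ERR(hw, "map_qid failed: ret=%d status=%u\n",
			    ret, resp.status);

	return ret;
}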
5777 
5778 static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
5779 			       u32 domain_id,
5780 			       struct dlb2_unmap_qid_args *args,
5781 			       bool vdev_req,
5782 			       unsigned int vdev_id)
5783 {
5784 	DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
5785 	if (vdev_req)
5786 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
5787 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
5788 		    domain_id);
5789 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
5790 		    args->port_id);
5791 	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
5792 		    args->qid);
5793 	if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
5794 		DLB2_HW_DBG(hw, "\tQueue's num mappings:  %d\n",
5795 			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
5796 }
5797 
5798 static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
5799 				      u32 domain_id,
5800 				      struct dlb2_unmap_qid_args *args,
5801 				      struct dlb2_cmd_response *resp,
5802 				      bool vdev_req,
5803 				      unsigned int vdev_id,
5804 				      struct dlb2_hw_domain **out_domain,
5805 				      struct dlb2_ldb_port **out_port,
5806 				      struct dlb2_ldb_queue **out_queue)
5807 {
5808 	enum dlb2_qid_map_state state;
5809 	struct dlb2_hw_domain *domain;
5810 	struct dlb2_ldb_queue *queue;
5811 	struct dlb2_ldb_port *port;
5812 	int slot;
5813 	int id;
5814 
5815 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
5816 
5817 	if (!domain) {
5818 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
5819 		return -EINVAL;
5820 	}
5821 
5822 	if (!domain->configured) {
5823 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
5824 		return -EINVAL;
5825 	}
5826 
5827 	id = args->port_id;
5828 
5829 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
5830 
5831 	if (!port || !port->configured) {
5832 		resp->status = DLB2_ST_INVALID_PORT_ID;
5833 		return -EINVAL;
5834 	}
5835 
5836 	if (port->domain_id.phys_id != domain->id.phys_id) {
5837 		resp->status = DLB2_ST_INVALID_PORT_ID;
5838 		return -EINVAL;
5839 	}
5840 
5841 	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
5842 
5843 	if (!queue || !queue->configured) {
5844 		DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
5845 			    __func__, args->qid);
5846 		resp->status = DLB2_ST_INVALID_QID;
5847 		return -EINVAL;
5848 	}
5849 
5850 	/*
5851 	 * Verify that the port has the queue mapped. From the application's
5852 	 * perspective a queue is mapped if it is actually mapped, the map is
5853 	 * in progress, or the map is blocked pending an unmap.
5854 	 */
5855 	state = DLB2_QUEUE_MAPPED;
5856 	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5857 		goto done;
5858 
5859 	state = DLB2_QUEUE_MAP_IN_PROG;
5860 	if (dlb2_port_find_slot_queue(port, state, queue, &slot))
5861 		goto done;
5862 
5863 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
5864 		goto done;
5865 
5866 	resp->status = DLB2_ST_INVALID_QID;
5867 	return -EINVAL;
5868 
5869 done:
5870 	*out_domain = domain;
5871 	*out_port = port;
5872 	*out_queue = queue;
5873 
5874 	return 0;
5875 }
5876 
5877 /**
5878  * dlb2_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port
5879  * @hw: dlb2_hw handle for a particular device.
5880  * @domain_id: domain ID.
5881  * @args: unmap QID arguments.
5882  * @resp: response structure.
5883  * @vdev_req: indicates whether this request came from a vdev.
5884  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
5885  *
5886  * This function configures the DLB to stop scheduling QEs from the specified
5887  * queue to the specified port.
5888  *
5889  * A successful return does not necessarily mean the mapping was removed. If
5890  * this function is unable to immediately unmap the queue from the port, it
5891  * will add the requested operation to a per-port list of pending map/unmap
5892  * operations, and (if it's not already running) launch a kernel thread that
5893  * periodically attempts to process all pending operations. See
5894  * dlb2_hw_map_qid() for more details.
5895  *
5896  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
5897  * device.
5898  *
5899  * Return:
5900  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
5901  * assigned a detailed error code from enum dlb2_error.
5902  *
5903  * Errors:
5904  * EINVAL - A requested resource is unavailable, invalid port or queue ID, or
5905  *	    the domain is not configured.
5906  * EFAULT - Internal error (resp->status not set).
5907  */
5908 int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
5909 		      u32 domain_id,
5910 		      struct dlb2_unmap_qid_args *args,
5911 		      struct dlb2_cmd_response *resp,
5912 		      bool vdev_req,
5913 		      unsigned int vdev_id)
5914 {
5915 	struct dlb2_hw_domain *domain;
5916 	struct dlb2_ldb_queue *queue;
5917 	enum dlb2_qid_map_state st;
5918 	struct dlb2_ldb_port *port;
5919 	bool unmap_complete;
5920 	int i, ret;
5921 
5922 	dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
5923 
5924 	/*
5925 	 * Verify that hardware resources are available before attempting to
5926 	 * satisfy the request. This simplifies the error unwinding code.
5927 	 */
5928 	ret = dlb2_verify_unmap_qid_args(hw,
5929 					 domain_id,
5930 					 args,
5931 					 resp,
5932 					 vdev_req,
5933 					 vdev_id,
5934 					 &domain,
5935 					 &port,
5936 					 &queue);
5937 	if (ret)
5938 		return ret;
5939 
5940 	/*
5941 	 * If the queue hasn't been mapped yet, we need to update the slot's
5942 	 * state and re-enable the queue's inflights.
5943 	 */
5944 	st = DLB2_QUEUE_MAP_IN_PROG;
5945 	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
5946 		/*
5947 		 * Since the in-progress map was aborted, re-enable the QID's
5948 		 * inflights.
5949 		 */
5950 		if (queue->num_pending_additions == 0)
5951 			dlb2_ldb_queue_set_inflight_limit(hw, queue);
5952 
5953 		st = DLB2_QUEUE_UNMAPPED;
5954 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5955 		if (ret)
5956 			return ret;
5957 
5958 		goto unmap_qid_done;
5959 	}
5960 
5961 	/*
5962 	 * If the queue mapping is on hold pending an unmap, we simply need to
5963 	 * update the slot's state.
5964 	 */
5965 	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
5966 		st = DLB2_QUEUE_UNMAP_IN_PROG;
5967 		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5968 		if (ret)
5969 			return ret;
5970 
5971 		goto unmap_qid_done;
5972 	}
5973 
5974 	st = DLB2_QUEUE_MAPPED;
5975 	if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
5976 		DLB2_HW_ERR(hw,
5977 			    "[%s()] Internal error: no available CQ slots\n",
5978 			    __func__);
5979 		return -EFAULT;
5980 	}
5981 
5982 	/*
5983 	 * QID->CQ mapping removal is an asynchronous procedure. It requires
5984 	 * stopping the DLB2 from scheduling this CQ, draining all inflights
5985 	 * from the CQ, then unmapping the queue from the CQ. This function
5986 	 * simply marks the port as needing the queue unmapped, and (if
5987 	 * necessary) starts the unmapping worker thread.
5988 	 */
5989 	dlb2_ldb_port_cq_disable(hw, port);
5990 
5991 	st = DLB2_QUEUE_UNMAP_IN_PROG;
5992 	ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
5993 	if (ret)
5994 		return ret;
5995 
5996 	/*
5997 	 * Attempt to finish the unmapping now, in case the port has no
5998 	 * outstanding inflights. If that's not the case, this will fail and
5999 	 * the unmapping will be completed at a later time.
6000 	 */
6001 	unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
6002 
6003 	/*
6004 	 * If the unmapping couldn't complete immediately, launch the worker
6005 	 * thread (if it isn't already launched) to finish it later.
6006 	 */
6007 	if (!unmap_complete && !os_worker_active(hw))
6008 		os_schedule_work(hw);
6009 
6010 unmap_qid_done:
6011 	resp->status = 0;
6012 
6013 	return 0;
6014 }
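
/*
 * Usage sketch (illustrative, not part of the driver): requesting an unmap
 * with dlb2_hw_unmap_qid(). Per the asynchronous model described above, a
 * zero return means the unmap either completed or was queued for later
 * completion. The port and queue IDs are hypothetical placeholders.
 */
static __rte_unused int
dlb2_example_unmap_qid(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_unmap_qid_args args = {
		.port_id = 0,
		.qid = 0,
	};
	struct dlb2_cmd_response resp = {0};

	return dlb2_hw_unmap_qid(hw, domain_id, &args, &resp, false, 0);
}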
6015 
6016 static void
6017 dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
6018 				  struct dlb2_pending_port_unmaps_args *args,
6019 				  bool vdev_req,
6020 				  unsigned int vdev_id)
6021 {
6022 	DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
6023 	if (vdev_req)
6024 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
6025 	DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
6026 }
6027 
6028 /**
6029  * dlb2_hw_pending_port_unmaps() - returns the number of unmap operations in
6030  *	progress.
6031  * @hw: dlb2_hw handle for a particular device.
6032  * @domain_id: domain ID.
6033  * @args: pending port unmaps arguments.
6034  * @resp: response structure.
6035  * @vdev_req: indicates whether this request came from a vdev.
6036  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
6037  *
6038  * Return:
6039  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6040  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6041  * contains the number of unmaps in progress.
6042  *
6043  * Errors:
6044  * EINVAL - Invalid domain ID or port ID.
6045  */
6046 int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
6047 				u32 domain_id,
6048 				struct dlb2_pending_port_unmaps_args *args,
6049 				struct dlb2_cmd_response *resp,
6050 				bool vdev_req,
6051 				unsigned int vdev_id)
6052 {
6053 	struct dlb2_hw_domain *domain;
6054 	struct dlb2_ldb_port *port;
6055 
6056 	dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
6057 
6058 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6059 
6060 	if (!domain) {
6061 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6062 		return -EINVAL;
6063 	}
6064 
6065 	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
6066 	if (!port || !port->configured) {
6067 		resp->status = DLB2_ST_INVALID_PORT_ID;
6068 		return -EINVAL;
6069 	}
6070 
6071 	resp->id = port->num_pending_removals;
6072 
6073 	return 0;
6074 }
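
/*
 * Usage sketch (illustrative, not part of the driver): polling
 * dlb2_hw_pending_port_unmaps() until a port's outstanding unmap operations
 * have drained. The loop has no timeout; a real caller would bound it.
 */
static __rte_unused int
dlb2_example_wait_for_unmaps(struct dlb2_hw *hw, u32 domain_id, u32 port_id)
{
	struct dlb2_pending_port_unmaps_args args = { .port_id = port_id };
	struct dlb2_cmd_response resp = {0};
	int ret;

	do {
		ret = dlb2_hw_pending_port_unmaps(hw, domain_id, &args,
						  &resp, false, 0);
		if (ret)
			return ret;
	} while (resp.id != 0);

	return 0;
}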
6075 
6076 static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
6077 					 u32 domain_id,
6078 					 struct dlb2_cmd_response *resp,
6079 					 bool vdev_req,
6080 					 unsigned int vdev_id,
6081 					 struct dlb2_hw_domain **out_domain)
6082 {
6083 	struct dlb2_hw_domain *domain;
6084 
6085 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6086 
6087 	if (!domain) {
6088 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6089 		return -EINVAL;
6090 	}
6091 
6092 	if (!domain->configured) {
6093 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
6094 		return -EINVAL;
6095 	}
6096 
6097 	if (domain->started) {
6098 		resp->status = DLB2_ST_DOMAIN_STARTED;
6099 		return -EINVAL;
6100 	}
6101 
6102 	*out_domain = domain;
6103 
6104 	return 0;
6105 }
6106 
6107 static void dlb2_log_start_domain(struct dlb2_hw *hw,
6108 				  u32 domain_id,
6109 				  bool vdev_req,
6110 				  unsigned int vdev_id)
6111 {
6112 	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
6113 	if (vdev_req)
6114 		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
6115 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6116 }
6117 
6118 /**
6119  * dlb2_hw_start_domain() - start a scheduling domain
6120  * @hw: dlb2_hw handle for a particular device.
6121  * @domain_id: domain ID.
6122  * @args: start domain arguments.
6123  * @resp: response structure.
6124  * @vdev_req: indicates whether this request came from a vdev.
6125  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
6126  *
6127  * This function starts a scheduling domain, which allows applications to send
6128  * traffic through it. Once a domain is started, its resources can no longer be
6129  * configured (besides QID remapping and port enable/disable).
6130  *
6131  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6132  * device.
6133  *
6134  * Return:
6135  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6136  * assigned a detailed error code from enum dlb2_error.
6137  *
6138  * Errors:
6139  * EINVAL - the domain is not configured, or the domain is already started.
6140  */
6141 int
6142 dlb2_hw_start_domain(struct dlb2_hw *hw,
6143 		     u32 domain_id,
6144 		     struct dlb2_start_domain_args *args,
6145 		     struct dlb2_cmd_response *resp,
6146 		     bool vdev_req,
6147 		     unsigned int vdev_id)
6148 {
6149 	struct dlb2_list_entry *iter;
6150 	struct dlb2_dir_pq_pair *dir_queue;
6151 	struct dlb2_ldb_queue *ldb_queue;
6152 	struct dlb2_hw_domain *domain;
6153 	int ret;
6154 	RTE_SET_USED(args);
6155 	RTE_SET_USED(iter);
6156 
6157 	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
6158 
6159 	ret = dlb2_verify_start_domain_args(hw,
6160 					    domain_id,
6161 					    resp,
6162 					    vdev_req,
6163 					    vdev_id,
6164 					    &domain);
6165 	if (ret)
6166 		return ret;
6167 
6168 	/*
6169 	 * Enable load-balanced and directed queue write permissions for the
6170 	 * queues this domain owns. Without this, the DLB2 will drop all
6171 	 * incoming traffic to those queues.
6172 	 */
6173 	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
6174 		u32 vasqid_v = 0;
6175 		unsigned int offs;
6176 
6177 		DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V);
6178 
6179 		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
6180 			ldb_queue->id.phys_id;
6181 
6182 		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v);
6183 	}
6184 
6185 	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
6186 		u32 vasqid_v = 0;
6187 		unsigned int offs;
6188 
6189 		DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V);
6190 
6191 		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
6192 			dir_queue->id.phys_id;
6193 
6194 		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v);
6195 	}
6196 
6197 	dlb2_flush_csr(hw);
6198 
6199 	domain->started = true;
6200 
6201 	resp->status = 0;
6202 
6203 	return 0;
6204 }
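
/*
 * Usage sketch (illustrative, not part of the driver): starting a domain
 * once its queues and ports are configured. The base code ignores the
 * contents of args (see RTE_SET_USED above), so no fields are set here.
 */
static __rte_unused int
dlb2_example_start_domain(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_start_domain_args args;
	struct dlb2_cmd_response resp = {0};

	return dlb2_hw_start_domain(hw, domain_id, &args, &resp, false, 0);
}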
6205 
6206 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
6207 					 u32 domain_id,
6208 					 u32 queue_id,
6209 					 bool vdev_req,
6210 					 unsigned int vf_id)
6211 {
6212 	DLB2_HW_DBG(hw, "DLB get directed queue depth:\n");
6213 	if (vdev_req)
6214 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
6215 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6216 	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
6217 }
6218 
6219 /**
6220  * dlb2_hw_get_dir_queue_depth() - returns the depth of a directed queue
6221  * @hw: dlb2_hw handle for a particular device.
6222  * @domain_id: domain ID.
6223  * @args: queue depth args
6224  * @resp: response structure.
6225  * @vdev_req: indicates whether this request came from a vdev.
6226  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
6227  *
6228  * This function returns the depth of a directed queue.
6229  *
6230  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6231  * device.
6232  *
6233  * Return:
6234  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6235  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6236  * contains the depth.
6237  *
6238  * Errors:
6239  * EINVAL - Invalid domain ID or queue ID.
6240  */
6241 int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw,
6242 				u32 domain_id,
6243 				struct dlb2_get_dir_queue_depth_args *args,
6244 				struct dlb2_cmd_response *resp,
6245 				bool vdev_req,
6246 				unsigned int vdev_id)
6247 {
6248 	struct dlb2_dir_pq_pair *queue;
6249 	struct dlb2_hw_domain *domain;
6250 	int id;
6251 
6252 	id = domain_id;
6253 
6254 	dlb2_log_get_dir_queue_depth(hw, domain_id, args->queue_id,
6255 				     vdev_req, vdev_id);
6256 
6257 	domain = dlb2_get_domain_from_id(hw, id, vdev_req, vdev_id);
6258 	if (!domain) {
6259 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6260 		return -EINVAL;
6261 	}
6262 
6263 	id = args->queue_id;
6264 
6265 	queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain);
6266 	if (!queue) {
6267 		resp->status = DLB2_ST_INVALID_QID;
6268 		return -EINVAL;
6269 	}
6270 
6271 	resp->id = dlb2_dir_queue_depth(hw, queue);
6272 
6273 	return 0;
6274 }
6275 
6276 static void dlb2_log_get_ldb_queue_depth(struct dlb2_hw *hw,
6277 					 u32 domain_id,
6278 					 u32 queue_id,
6279 					 bool vdev_req,
6280 					 unsigned int vf_id)
6281 {
6282 	DLB2_HW_DBG(hw, "DLB get load-balanced queue depth:\n");
6283 	if (vdev_req)
6284 		DLB2_HW_DBG(hw, "(Request from VF %d)\n", vf_id);
6285 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6286 	DLB2_HW_DBG(hw, "\tQueue ID: %d\n", queue_id);
6287 }
6288 
6289 /**
6290  * dlb2_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue
6291  * @hw: dlb2_hw handle for a particular device.
6292  * @domain_id: domain ID.
6293  * @args: queue depth args
6294  * @resp: response structure.
6295  * @vdev_req: indicates whether this request came from a vdev.
6296  * @vdev_id: If vdev_req is true, this contains the vdev's ID.
6297  *
6298  * This function returns the depth of a load-balanced queue.
6299  *
6300  * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
6301  * device.
6302  *
6303  * Return:
6304  * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
6305  * assigned a detailed error code from enum dlb2_error. If successful, resp->id
6306  * contains the depth.
6307  *
6308  * Errors:
6309  * EINVAL - Invalid domain ID or queue ID.
6310  */
6311 int dlb2_hw_get_ldb_queue_depth(struct dlb2_hw *hw,
6312 				u32 domain_id,
6313 				struct dlb2_get_ldb_queue_depth_args *args,
6314 				struct dlb2_cmd_response *resp,
6315 				bool vdev_req,
6316 				unsigned int vdev_id)
6317 {
6318 	struct dlb2_hw_domain *domain;
6319 	struct dlb2_ldb_queue *queue;
6320 
6321 	dlb2_log_get_ldb_queue_depth(hw, domain_id, args->queue_id,
6322 				     vdev_req, vdev_id);
6323 
6324 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6325 	if (!domain) {
6326 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6327 		return -EINVAL;
6328 	}
6329 
6330 	queue = dlb2_get_domain_ldb_queue(args->queue_id, vdev_req, domain);
6331 	if (!queue) {
6332 		resp->status = DLB2_ST_INVALID_QID;
6333 		return -EINVAL;
6334 	}
6335 
6336 	resp->id = dlb2_ldb_queue_depth(hw, queue);
6337 
6338 	return 0;
6339 }
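
/*
 * Usage sketch (illustrative, not part of the driver): querying the depth of
 * a directed and a load-balanced queue. The queue IDs are hypothetical
 * placeholders; on success the depth is returned in resp.id.
 */
static __rte_unused void
dlb2_example_log_queue_depths(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_get_dir_queue_depth_args dir_args = { .queue_id = 0 };
	struct dlb2_get_ldb_queue_depth_args ldb_args = { .queue_id = 0 };
	struct dlb2_cmd_response resp = {0};

	if (!dlb2_hw_get_dir_queue_depth(hw, domain_id, &dir_args,
					 &resp, false, 0))
		DLB2_HW_DBG(hw, "DIR queue 0 depth: %u\n", resp.id);

	if (!dlb2_hw_get_ldb_queue_depth(hw, domain_id, &ldb_args,
					 &resp, false, 0))
		DLB2_HW_DBG(hw, "LDB queue 0 depth: %u\n", resp.id);
}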
6340 
6341 /**
6342  * dlb2_finish_unmap_qid_procedures() - finish any pending unmap procedures
6343  * @hw: dlb2_hw handle for a particular device.
6344  *
6345  * This function attempts to finish any outstanding unmap procedures.
6346  * This function should be called by the kernel thread responsible for
6347  * finishing map/unmap procedures.
6348  *
6349  * Return:
6350  * Returns the number of procedures that weren't completed.
6351  */
6352 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
6353 {
6354 	int i, num = 0;
6355 
6356 	/* Finish queue unmap jobs for any domain that needs it */
6357 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6358 		struct dlb2_hw_domain *domain = &hw->domains[i];
6359 
6360 		num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
6361 	}
6362 
6363 	return num;
6364 }
6365 
6366 /**
6367  * dlb2_finish_map_qid_procedures() - finish any pending map procedures
6368  * @hw: dlb2_hw handle for a particular device.
6369  *
6370  * This function attempts to finish any outstanding map procedures.
6371  * This function should be called by the kernel thread responsible for
6372  * finishing map/unmap procedures.
6373  *
6374  * Return:
6375  * Returns the number of procedures that weren't completed.
6376  */
6377 unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
6378 {
6379 	int i, num = 0;
6380 
6381 	/* Finish queue map jobs for any domain that needs it */
6382 	for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
6383 		struct dlb2_hw_domain *domain = &hw->domains[i];
6384 
6385 		num += dlb2_domain_finish_map_qid_procedures(hw, domain);
6386 	}
6387 
6388 	return num;
6389 }
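
/*
 * Usage sketch (illustrative, not part of the driver): one pass of a worker
 * body that attempts to retire all pending unmap and map procedures. A
 * nonzero return means another pass is needed later.
 */
static __rte_unused unsigned int
dlb2_example_process_pending_ops(struct dlb2_hw *hw)
{
	unsigned int remaining;

	/* Unmaps are finished first, since they can unblock pending maps. */
	remaining = dlb2_finish_unmap_qid_procedures(hw);
	remaining += dlb2_finish_map_qid_procedures(hw);

	return remaining;
}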
6390 
6391 /**
6392  * dlb2_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports.
6393  * @hw: dlb2_hw handle for a particular device.
6394  *
6395  * This function must be called prior to configuring scheduling domains.
6396  */
6398 void dlb2_hw_enable_sparse_dir_cq_mode(struct dlb2_hw *hw)
6399 {
6400 	u32 ctrl;
6401 
6402 	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6403 
6404 	DLB2_BIT_SET(ctrl,
6405 		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_DIR_CQ_MODE);
6406 
6407 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6408 }
6409 
6410 /**
6411  * dlb2_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced
6412  *	ports.
6413  * @hw: dlb2_hw handle for a particular device.
6414  *
6415  * This function must be called prior to configuring scheduling domains.
6416  */
6417 void dlb2_hw_enable_sparse_ldb_cq_mode(struct dlb2_hw *hw)
6418 {
6419 	u32 ctrl;
6420 
6421 	ctrl = DLB2_CSR_RD(hw, DLB2_CHP_CFG_CHP_CSR_CTRL);
6422 
6423 	DLB2_BIT_SET(ctrl,
6424 		     DLB2_CHP_CFG_CHP_CSR_CTRL_CFG_64BYTES_QE_LDB_CQ_MODE);
6425 
6426 	DLB2_CSR_WR(hw, DLB2_CHP_CFG_CHP_CSR_CTRL, ctrl);
6427 }
6428 
6429 /**
6430  * dlb2_get_group_sequence_numbers() - return a group's number of SNs per queue
6431  * @hw: dlb2_hw handle for a particular device.
6432  * @group_id: sequence number group ID.
6433  *
6434  * This function returns the configured number of sequence numbers per queue
6435  * for the specified group.
6436  *
6437  * Return:
6438  * Returns -EINVAL if group_id is invalid, else the group's SNs per queue.
6439  */
6440 int dlb2_get_group_sequence_numbers(struct dlb2_hw *hw, u32 group_id)
6441 {
6442 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6443 		return -EINVAL;
6444 
6445 	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
6446 }
6447 
6448 /**
6449  * dlb2_get_group_sequence_number_occupancy() - return a group's in-use slots
6450  * @hw: dlb2_hw handle for a particular device.
6451  * @group_id: sequence number group ID.
6452  *
6453  * This function returns the group's number of in-use slots (i.e. load-balanced
6454  * queues using the specified group).
6455  *
6456  * Return:
6457  * Returns -EINVAL if group_id is invalid, else the group's in-use slot count.
6458  */
6459 int dlb2_get_group_sequence_number_occupancy(struct dlb2_hw *hw, u32 group_id)
6460 {
6461 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6462 		return -EINVAL;
6463 
6464 	return dlb2_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
6465 }
6466 
6467 static void dlb2_log_set_group_sequence_numbers(struct dlb2_hw *hw,
6468 						u32 group_id,
6469 						u32 val)
6470 {
6471 	DLB2_HW_DBG(hw, "DLB2 set group sequence numbers:\n");
6472 	DLB2_HW_DBG(hw, "\tGroup ID: %u\n", group_id);
6473 	DLB2_HW_DBG(hw, "\tValue:    %u\n", val);
6474 }
6475 
6476 /**
6477  * dlb2_set_group_sequence_numbers() - assign a group's number of SNs per queue
6478  * @hw: dlb2_hw handle for a particular device.
6479  * @group_id: sequence number group ID.
6480  * @val: requested amount of sequence numbers per queue.
6481  *
6482  * This function configures the group's number of sequence numbers per queue.
6483  * val can be a power-of-two between 64 and 1024, inclusive. This setting can
6484  * be configured until the first ordered load-balanced queue is configured, at
6485  * which point the configuration is locked.
6486  *
6487  * Return:
6488  * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an
6489  * ordered queue is configured.
6490  */
6491 int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
6492 				    u32 group_id,
6493 				    u32 val)
6494 {
6495 	const u32 valid_allocations[] = {64, 128, 256, 512, 1024};
6496 	struct dlb2_sn_group *group;
6497 	u32 sn_mode = 0;
6498 	int mode;
6499 
6500 	if (group_id >= DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
6501 		return -EINVAL;
6502 
6503 	group = &hw->rsrcs.sn_groups[group_id];
6504 
6505 	/*
6506 	 * Once the first load-balanced queue using an SN group is configured,
6507 	 * the group cannot be changed.
6508 	 */
6509 	if (group->slot_use_bitmap != 0)
6510 		return -EPERM;
6511 
6512 	for (mode = 0; mode < DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
6513 		if (val == valid_allocations[mode])
6514 			break;
6515 
6516 	if (mode == DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES)
6517 		return -EINVAL;
6518 
6519 	group->mode = mode;
6520 	group->sequence_numbers_per_queue = val;
6521 
6522 	DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[0].mode,
6523 		 DLB2_RO_GRP_SN_MODE_SN_MODE_0);
6524 	DLB2_BITS_SET(sn_mode, hw->rsrcs.sn_groups[1].mode,
6525 		 DLB2_RO_GRP_SN_MODE_SN_MODE_1);
6526 
6527 	DLB2_CSR_WR(hw, DLB2_RO_GRP_SN_MODE(hw->ver), sn_mode);
6528 
6529 	dlb2_log_set_group_sequence_numbers(hw, group_id, val);
6530 
6531 	return 0;
6532 }
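
/*
 * Usage sketch (illustrative, not part of the driver): selecting 1024
 * sequence numbers per queue for SN group 0. This must happen before the
 * first ordered load-balanced queue using the group is configured,
 * otherwise the call fails with -EPERM.
 */
static __rte_unused int
dlb2_example_config_sn_group(struct dlb2_hw *hw)
{
	return dlb2_set_group_sequence_numbers(hw, 0, 1024);
}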
6533 
6534 /**
6535  * dlb2_hw_set_qe_arbiter_weights() - program QE arbiter weights
6536  * @hw: dlb2_hw handle for a particular device.
6537  * @weight: 8-entry array of arbiter weights.
6538  *
6539  * weight[N] programs priority N's weight. In cases where the 8 priorities are
6540  * reduced to 4 bins, the mapping is:
6541  * - weight[1] programs bin 0
6542  * - weight[3] programs bin 1
6543  * - weight[5] programs bin 2
6544  * - weight[7] programs bin 3
6545  */
6546 void dlb2_hw_set_qe_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
6547 {
6548 	u32 reg = 0;
6549 
6550 	DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN0);
6551 	DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN1);
6552 	DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN2);
6553 	DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN_BIN3);
6554 	DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_RDY_BIN, reg);
6555 
6556 	reg = 0;
6557 	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI0);
6558 	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI1);
6559 	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI2);
6560 	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0_PRI3);
6561 	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_NALB_0(hw->ver), reg);
6562 
6563 	reg = 0;
6564 	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
6565 	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
6566 	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
6567 	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
6568 	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0(hw->ver), reg);
6569 
6570 	reg = 0;
6571 	DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI0);
6572 	DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI1);
6573 	DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI2);
6574 	DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0_PRI3);
6575 	DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_REPLAY_0, reg);
6576 
6577 	reg = 0;
6578 	DLB2_BITS_SET(reg, weight[1], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI0);
6579 	DLB2_BITS_SET(reg, weight[3], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI1);
6580 	DLB2_BITS_SET(reg, weight[5], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI2);
6581 	DLB2_BITS_SET(reg, weight[7], DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0_PRI3);
6582 	DLB2_CSR_WR(hw, DLB2_DP_CFG_ARB_WEIGHTS_TQPRI_DIR_0, reg);
6583 
6584 	reg = 0;
6585 	DLB2_BITS_SET(reg, weight[1], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI0);
6586 	DLB2_BITS_SET(reg, weight[3], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI1);
6587 	DLB2_BITS_SET(reg, weight[5], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI2);
6588 	DLB2_BITS_SET(reg, weight[7], DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0_PRI3);
6589 	DLB2_CSR_WR(hw, DLB2_NALB_CFG_ARB_WEIGHTS_TQPRI_ATQ_0(hw->ver), reg);
6590 
6591 	reg = 0;
6592 	DLB2_BITS_SET(reg, weight[1], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN0);
6593 	DLB2_BITS_SET(reg, weight[3], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN1);
6594 	DLB2_BITS_SET(reg, weight[5], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN2);
6595 	DLB2_BITS_SET(reg, weight[7], DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN_BIN3);
6596 	DLB2_CSR_WR(hw, DLB2_ATM_CFG_ARB_WEIGHTS_SCHED_BIN, reg);
6597 
6598 	reg = 0;
6599 	DLB2_BITS_SET(reg, weight[1], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI0);
6600 	DLB2_BITS_SET(reg, weight[3], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI1);
6601 	DLB2_BITS_SET(reg, weight[5], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI2);
6602 	DLB2_BITS_SET(reg, weight[7], DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0_PRI3);
6603 	DLB2_CSR_WR(hw, DLB2_AQED_CFG_ARB_WEIGHTS_TQPRI_ATM_0, reg);
6604 }
6605 
6606 /**
6607  * dlb2_hw_set_qid_arbiter_weights() - program QID arbiter weights
6608  * @hw: dlb2_hw handle for a particular device.
6609  * @weight: 8-entry array of arbiter weights.
6610  *
6611  * weight[N] programs priority N's weight. In cases where the 8 priorities are
6612  * reduced to 4 bins, the mapping is:
6613  * - weight[1] programs bin 0
6614  * - weight[3] programs bin 1
6615  * - weight[5] programs bin 2
6616  * - weight[7] programs bin 3
6617  */
6618 void dlb2_hw_set_qid_arbiter_weights(struct dlb2_hw *hw, u8 weight[8])
6619 {
6620 	u32 reg = 0;
6621 
6622 	DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI0_WEIGHT);
6623 	DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI1_WEIGHT);
6624 	DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI2_WEIGHT);
6625 	DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0_PRI3_WEIGHT);
6626 	DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_LDB_QID_0(hw->ver), reg);
6627 
6628 	reg = 0;
6629 	DLB2_BITS_SET(reg, weight[1], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI0_WEIGHT);
6630 	DLB2_BITS_SET(reg, weight[3], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI1_WEIGHT);
6631 	DLB2_BITS_SET(reg, weight[5], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI2_WEIGHT);
6632 	DLB2_BITS_SET(reg, weight[7], DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0_PRI3_WEIGHT);
6633 	DLB2_CSR_WR(hw, DLB2_LSP_CFG_ARB_WEIGHT_ATM_NALB_QID_0(hw->ver), reg);
6634 }
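
/*
 * Usage sketch (illustrative, not part of the driver): programming equal QE
 * and QID arbiter weights. Per the mapping above, only entries 1, 3, 5 and 7
 * are consumed when the 8 priorities are reduced to 4 bins; the weight value
 * of 1 is a hypothetical placeholder.
 */
static __rte_unused void
dlb2_example_set_flat_arb_weights(struct dlb2_hw *hw)
{
	u8 weights[8] = {1, 1, 1, 1, 1, 1, 1, 1};

	dlb2_hw_set_qe_arbiter_weights(hw, weights);
	dlb2_hw_set_qid_arbiter_weights(hw, weights);
}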
6635 
6636 static void dlb2_log_enable_cq_weight(struct dlb2_hw *hw,
6637 				      u32 domain_id,
6638 				      struct dlb2_enable_cq_weight_args *args,
6639 				      bool vdev_req,
6640 				      unsigned int vdev_id)
6641 {
6642 	DLB2_HW_DBG(hw, "DLB2 enable CQ weight arguments:\n");
6643 	DLB2_HW_DBG(hw, "\tvdev_req %d, vdev_id %d\n", vdev_req, vdev_id);
6644 	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
6645 	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
6646 	DLB2_HW_DBG(hw, "\tLimit:   %d\n", args->limit);
6647 }
6648 
6649 static int
6650 dlb2_verify_enable_cq_weight_args(struct dlb2_hw *hw,
6651 				  u32 domain_id,
6652 				  struct dlb2_enable_cq_weight_args *args,
6653 				  struct dlb2_cmd_response *resp,
6654 				  bool vdev_req,
6655 				  unsigned int vdev_id)
6656 {
6657 	struct dlb2_hw_domain *domain;
6658 	struct dlb2_ldb_port *port;
6659 
6660 	if (hw->ver == DLB2_HW_V2) {
6661 		DLB2_HW_ERR(hw,
6662 			    "[%s():%d] CQ weight feature requires DLB 2.5 or later\n",
6663 			    __func__, __LINE__);
6664 		resp->status = DLB2_ST_FEATURE_UNAVAILABLE;
6665 		return -EINVAL;
6666 	}
6667 
6668 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6669 
6670 	if (!domain) {
6671 		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
6672 		return -EINVAL;
6673 	}
6674 
6675 	if (!domain->configured) {
6676 		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
6677 		return -EINVAL;
6678 	}
6679 
6680 	if (domain->started) {
6681 		resp->status = DLB2_ST_DOMAIN_STARTED;
6682 		return -EINVAL;
6683 	}
6684 
6685 	port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
6686 	if (!port || !port->configured) {
6687 		resp->status = DLB2_ST_INVALID_PORT_ID;
6688 		return -EINVAL;
6689 	}
6690 
6691 	if (args->limit == 0 || args->limit > port->cq_depth) {
6692 		resp->status = DLB2_ST_INVALID_CQ_WEIGHT_LIMIT;
6693 		return -EINVAL;
6694 	}
6695 
6696 	return 0;
6697 }
6698 
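/**
 * dlb2_hw_enable_cq_weight() - enable the CQ weight feature for an LDB port
 * @hw: dlb2_hw handle for a particular device.
 * @domain_id: domain ID.
 * @args: CQ weight arguments.
 * @resp: response structure.
 * @vdev_req: indicates whether this request came from a vdev.
 * @vdev_id: If vdev_req is true, this contains the vdev's ID.
 *
 * This function enables the CQ weight feature for a load-balanced port and
 * sets the port's weight limit. It is only available on DLB 2.5 and later
 * devices, and must be called before the domain is started.
 *
 * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
 * device.
 *
 * Return:
 * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
 * assigned a detailed error code from enum dlb2_error.
 *
 * Errors:
 * EINVAL - The device does not support CQ weights, the domain is not
 *	    configured or is already started, the port ID is invalid, or the
 *	    limit is zero or exceeds the port's CQ depth.
 * EFAULT - Internal error (resp->status not set).
 */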
6699 int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw,
6700 			     u32 domain_id,
6701 			     struct dlb2_enable_cq_weight_args *args,
6702 			     struct dlb2_cmd_response *resp,
6703 			     bool vdev_req,
6704 			     unsigned int vdev_id)
6705 {
6706 	struct dlb2_hw_domain *domain;
6707 	struct dlb2_ldb_port *port;
6708 	int ret, id;
6709 	u32 reg = 0;
6710 
6711 	dlb2_log_enable_cq_weight(hw, domain_id, args, vdev_req, vdev_id);
6712 
6713 	/*
6714 	 * Verify that hardware resources are available before attempting to
6715 	 * satisfy the request. This simplifies the error unwinding code.
6716 	 */
6717 	ret = dlb2_verify_enable_cq_weight_args(hw,
6718 						domain_id,
6719 						args,
6720 						resp,
6721 						vdev_req,
6722 						vdev_id);
6723 	if (ret)
6724 		return ret;
6725 
6726 	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
6727 	if (!domain) {
6728 		DLB2_HW_ERR(hw,
6729 			    "[%s():%d] Internal error: domain not found\n",
6730 			    __func__, __LINE__);
6731 		return -EFAULT;
6732 	}
6733 
6734 	id = args->port_id;
6735 
6736 	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
6737 	if (!port) {
6738 		DLB2_HW_ERR(hw,
6739 			    "[%s():%d] Internal error: port not found\n",
6740 			    __func__, __LINE__);
6741 		return -EFAULT;
6742 	}
6743 
6744 	DLB2_BIT_SET(reg, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_V);
6745 	DLB2_BITS_SET(reg, args->limit, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_LIMIT);
6746 
6747 	DLB2_CSR_WR(hw, DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id), reg);
6748 
6749 	resp->status = 0;
6750 
6751 	return 0;
6752 }
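
/*
 * Usage sketch (illustrative, not part of the driver): enabling the CQ
 * weight feature on one load-balanced port before its domain is started.
 * The port ID and limit are hypothetical placeholders; the limit must be
 * nonzero and no greater than the port's CQ depth.
 */
static __rte_unused int
dlb2_example_enable_cq_weight(struct dlb2_hw *hw, u32 domain_id)
{
	struct dlb2_enable_cq_weight_args args = {
		.port_id = 0,
		.limit = 8,
	};
	struct dlb2_cmd_response resp = {0};

	return dlb2_hw_enable_cq_weight(hw, domain_id, &args, &resp, false, 0);
}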
6753 
6754 static void dlb2_log_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bw)
6755 {
6756 	DLB2_HW_DBG(hw, "DLB2 set port CoS bandwidth:\n");
6757 	DLB2_HW_DBG(hw, "\tCoS ID:    %u\n", cos_id);
6758 	DLB2_HW_DBG(hw, "\tBandwidth: %u\n", bw);
6759 }
6760 
6761 #define DLB2_MAX_BW_PCT 100
6762 
6763 /**
6764  * dlb2_hw_set_cos_bandwidth() - set a bandwidth allocation percentage for a
6765  *      port class-of-service.
6766  * @hw: dlb2_hw handle for a particular device.
6767  * @cos_id: class-of-service ID.
6768  * @bandwidth: class-of-service bandwidth.
6769  *
6770  * Return:
6771  * Returns 0 upon success, < 0 otherwise.
6772  *
6773  * Errors:
6774  * EINVAL - Invalid cos ID, bandwidth is greater than 100, or bandwidth would
6775  *          cause the total bandwidth across all classes of service to exceed
6776  *          100%.
6777  */
6778 int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth)
6779 {
6780 	unsigned int i;
6781 	u32 reg;
6782 	u8 total;
6783 
6784 	if (cos_id >= DLB2_NUM_COS_DOMAINS)
6785 		return -EINVAL;
6786 
6787 	if (bandwidth > DLB2_MAX_BW_PCT)
6788 		return -EINVAL;
6789 
6790 	total = 0;
6791 
6792 	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++)
6793 		total += (i == cos_id) ? bandwidth : hw->cos_reservation[i];
6794 
6795 	if (total > DLB2_MAX_BW_PCT)
6796 		return -EINVAL;
6797 
6798 	reg = DLB2_CSR_RD(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id));
6799 
6800 	/*
6801 	 * Normalize the bandwidth to a value in the range 0-256. Integer
6802 	 * division may leave unreserved scheduling slots; these will be
6803 	 * divided among the 4 classes of service.
6804 	 */
6805 	DLB2_BITS_SET(reg, (bandwidth * 256) / 100, DLB2_LSP_CFG_SHDW_RANGE_COS_BW_RANGE);
6806 	DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_RANGE_COS(hw->ver, cos_id), reg);
6807 
6808 	reg = 0;
6809 	DLB2_BIT_SET(reg, DLB2_LSP_CFG_SHDW_CTRL_TRANSFER);
6810 	/* Atomically transfer the newly configured service weight */
6811 	DLB2_CSR_WR(hw, DLB2_LSP_CFG_SHDW_CTRL(hw->ver), reg);
6812 
6813 	dlb2_log_set_cos_bandwidth(hw, cos_id, bandwidth);
6814 
6815 	hw->cos_reservation[cos_id] = bandwidth;
6816 
6817 	return 0;
6818 }
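
/*
 * Usage sketch (illustrative, not part of the driver): reserving 25% of
 * load-balanced scheduling bandwidth for class of service 0. The call fails
 * with -EINVAL if the aggregate reservation across all classes would exceed
 * 100%.
 */
static __rte_unused int
dlb2_example_reserve_cos_bandwidth(struct dlb2_hw *hw)
{
	return dlb2_hw_set_cos_bandwidth(hw, 0, 25);
}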
6819