xref: /dpdk/drivers/common/cnxk/roc_nix_queue.c (revision 42a8fc7daa46256d150278fc9a7a846e27945a0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <math.h>
6 
7 #include "roc_api.h"
8 #include "roc_priv.h"
9 
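/*
 * The queue sizes supported by NIX hardware grow in powers of four starting
 * at 16 entries, so nix_qsize_to_val() maps successive nix_q_size enum
 * values to 16, 64, 256, ... descriptors (16 << (2 * qsize)).
 * nix_qsize_clampup() rounds a requested descriptor count up to the nearest
 * supported size and caps it at the largest supported value; e.g. a request
 * for 200 descriptors is clamped up to the 256-entry size.
 */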
10 static inline uint32_t
11 nix_qsize_to_val(enum nix_q_size qsize)
12 {
13 	return (16UL << (qsize * 2));
14 }
15 
16 static inline enum nix_q_size
17 nix_qsize_clampup(uint32_t val)
18 {
19 	int i = nix_q_size_16;
20 
21 	for (; i < nix_q_size_max; i++)
22 		if (val <= nix_qsize_to_val(i))
23 			break;
24 
25 	if (i >= nix_q_size_max)
26 		i = nix_q_size_max - 1;
27 
28 	return i;
29 }
30 
31 void
32 nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval)
33 {
34 	uint64_t wait_ns;
35 
36 	if (!roc_model_is_cn10k())
37 		return;
38 	/* Due to a HW errata, writes to VWQE_FLUSH might hang, so instead
39 	 * wait for the maximum VWQE timeout interval.
40 	 */
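	/*
	 * The wait below amounts to vwqe_wait_tmo ticks of
	 * (vwqe_interval + 1) * 100 ns each; e.g. vwqe_wait_tmo = 10 with
	 * vwqe_interval = 9 waits 10 * 10 * 100 ns = 10 us, rounded up by
	 * one extra microsecond in the conversion below.
	 */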
41 	if (rq->vwqe_ena) {
42 		wait_ns = rq->vwqe_wait_tmo * (vwqe_interval + 1) * 100;
43 		plt_delay_us((wait_ns / 1E3) + 1);
44 	}
45 }
46 
47 int
48 nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
49 {
50 	struct mbox *mbox = dev->mbox;
51 
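	/*
	 * NIX context updates go through the admin queue (AQ): the request
	 * carries a context image (aq->rq) plus a mask (aq->rq_mask), and a
	 * WRITE op updates only the context bits whose mask bits are set.
	 * Setting rq_mask.ena to all ones below therefore flips just the ENA
	 * field and leaves the rest of the RQ context untouched.
	 */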
52 	/* Pkts will be dropped silently if RQ is disabled */
53 	if (roc_model_is_cn9k()) {
54 		struct nix_aq_enq_req *aq;
55 
56 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
57 		if (!aq)
58 			return -ENOSPC;
59 
60 		aq->qidx = rq->qid;
61 		aq->ctype = NIX_AQ_CTYPE_RQ;
62 		aq->op = NIX_AQ_INSTOP_WRITE;
63 
64 		aq->rq.ena = enable;
65 		aq->rq_mask.ena = ~(aq->rq_mask.ena);
66 	} else {
67 		struct nix_cn10k_aq_enq_req *aq;
68 
69 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
70 		if (!aq)
71 			return -ENOSPC;
72 
73 		aq->qidx = rq->qid;
74 		aq->ctype = NIX_AQ_CTYPE_RQ;
75 		aq->op = NIX_AQ_INSTOP_WRITE;
76 
77 		aq->rq.ena = enable;
78 		aq->rq_mask.ena = ~(aq->rq_mask.ena);
79 	}
80 
81 	return mbox_process(mbox);
82 }
83 
84 int
85 roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
86 {
87 	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
88 	int rc;
89 
90 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
91 	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
92 
93 	return rc;
94 }
95 
96 int
97 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
98 		bool cfg, bool ena)
99 {
100 	struct mbox *mbox = dev->mbox;
101 	struct nix_aq_enq_req *aq;
102 
103 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
104 	if (!aq)
105 		return -ENOSPC;
106 
107 	aq->qidx = rq->qid;
108 	aq->ctype = NIX_AQ_CTYPE_RQ;
109 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
110 
111 	if (rq->sso_ena) {
112 		/* SSO mode */
113 		aq->rq.sso_ena = 1;
114 		aq->rq.sso_tt = rq->tt;
115 		aq->rq.sso_grp = rq->hwgrp;
116 		aq->rq.ena_wqwd = 1;
117 		aq->rq.wqe_skip = rq->wqe_skip;
118 		aq->rq.wqe_caching = 1;
119 
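		/*
		 * The 32-bit tag is split into an 8-bit upper tag and a
		 * 24-bit lower tag; e.g. tag_mask = 0x0A00BEEF programs
		 * good_utag = bad_utag = 0x0A and ltag = 0x00BEEF.
		 */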
120 		aq->rq.good_utag = rq->tag_mask >> 24;
121 		aq->rq.bad_utag = rq->tag_mask >> 24;
122 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
123 	} else {
124 		/* CQ mode */
125 		aq->rq.sso_ena = 0;
126 		aq->rq.good_utag = rq->tag_mask >> 24;
127 		aq->rq.bad_utag = rq->tag_mask >> 24;
128 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
129 		aq->rq.cq = rq->qid;
130 	}
131 
132 	if (rq->ipsech_ena)
133 		aq->rq.ipsech_ena = 1;
134 
135 	aq->rq.spb_ena = 0;
136 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
137 
138 	/* Sizes must be aligned to 8 bytes */
139 	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
140 		return -EINVAL;
141 
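	/*
	 * e.g. first_skip = 128 B is programmed as 128 / 8 = 16 dwords and
	 * lpb_size = 2048 B as lpb_sizem1 = 2048 / 8 - 1 = 255.
	 */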
142 	/* Expressed in number of dwords */
143 	aq->rq.first_skip = rq->first_skip / 8;
144 	aq->rq.later_skip = rq->later_skip / 8;
145 	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
146 	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
147 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
148 	aq->rq.ena = ena;
149 	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
150 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
151 	aq->rq.rq_int_ena = 0;
152 	/* Many to one reduction */
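	/* e.g. with qints = 8, RQs 0, 8, 16, ... all map to QINT 0 */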
153 	aq->rq.qint_idx = rq->qid % qints;
154 	aq->rq.xqe_drop_ena = 1;
155 
156 	/* If RED is enabled, program the pass/drop levels for all cases */
157 	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
158 		aq->rq.spb_pool_pass = rq->spb_red_pass;
159 		aq->rq.lpb_pool_pass = rq->red_pass;
160 
161 		aq->rq.spb_pool_drop = rq->spb_red_drop;
162 		aq->rq.lpb_pool_drop = rq->red_drop;
163 	}
164 
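	/*
	 * For a modify (cfg == true, AQ WRITE op), mirror every field
	 * programmed above into rq_mask so that only those fields are
	 * updated in the hardware RQ context.
	 */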
165 	if (cfg) {
166 		if (rq->sso_ena) {
167 			/* SSO mode */
168 			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
169 			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
170 			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
171 			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
172 			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
173 			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
174 			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
175 			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
176 			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
177 		} else {
178 			/* CQ mode */
179 			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
180 			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
181 			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
182 			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
183 			aq->rq_mask.cq = ~aq->rq_mask.cq;
184 		}
185 
186 		if (rq->ipsech_ena)
187 			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
188 
189 		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
190 		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
191 		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
192 		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
193 		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
194 		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
195 		aq->rq_mask.ena = ~aq->rq_mask.ena;
196 		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
197 		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
198 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
199 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
200 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
201 
202 		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
203 			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
204 			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
205 
206 			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
207 			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
208 		}
209 	}
210 
211 	return 0;
212 }
213 
214 int
215 nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
216 	   bool ena)
217 {
218 	struct nix_cn10k_aq_enq_req *aq;
219 	struct mbox *mbox = dev->mbox;
220 
221 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
222 	if (!aq)
223 		return -ENOSPC;
224 
225 	aq->qidx = rq->qid;
226 	aq->ctype = NIX_AQ_CTYPE_RQ;
227 	aq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;
228 
229 	if (rq->sso_ena) {
230 		/* SSO mode */
231 		aq->rq.sso_ena = 1;
232 		aq->rq.sso_tt = rq->tt;
233 		aq->rq.sso_grp = rq->hwgrp;
234 		aq->rq.ena_wqwd = 1;
235 		aq->rq.wqe_skip = rq->wqe_skip;
236 		aq->rq.wqe_caching = 1;
237 
238 		aq->rq.good_utag = rq->tag_mask >> 24;
239 		aq->rq.bad_utag = rq->tag_mask >> 24;
240 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
241 
242 		if (rq->vwqe_ena) {
243 			aq->rq.vwqe_ena = true;
244 			aq->rq.vwqe_skip = rq->vwqe_first_skip;
245 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
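			/*
			 * e.g. vwqe_max_sz_exp = 8 (vectors of up to
			 * 2^8 = 256 packets) is programmed as
			 * max_vsize_exp = 8 - 2 = 6.
			 */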
246 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
247 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
248 			aq->rq.wqe_aura = rq->vwqe_aura_handle;
249 		}
250 	} else {
251 		/* CQ mode */
252 		aq->rq.sso_ena = 0;
253 		aq->rq.good_utag = rq->tag_mask >> 24;
254 		aq->rq.bad_utag = rq->tag_mask >> 24;
255 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
256 		aq->rq.cq = rq->qid;
257 	}
258 
259 	if (rq->ipsech_ena) {
260 		aq->rq.ipsech_ena = 1;
261 		aq->rq.ipsecd_drop_en = 1;
262 	}
263 
264 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
265 
266 	/* Sizes must be aligned to 8 bytes */
267 	if (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)
268 		return -EINVAL;
269 
270 	/* Expressed in number of dwords */
271 	aq->rq.first_skip = rq->first_skip / 8;
272 	aq->rq.later_skip = rq->later_skip / 8;
273 	aq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */
274 	aq->rq.lpb_sizem1 = rq->lpb_size / 8;
275 	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
276 	aq->rq.ena = ena;
277 
278 	if (rq->spb_ena) {
279 		uint32_t spb_sizem1;
280 
281 		aq->rq.spb_ena = 1;
282 		aq->rq.spb_aura =
283 			roc_npa_aura_handle_to_aura(rq->spb_aura_handle);
284 
285 		if (rq->spb_size & 0x7 ||
286 		    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)
287 			return -EINVAL;
288 
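		/*
		 * e.g. spb_size = 1024 B (assuming that is within
		 * NIX_RQ_CN10K_SPB_MAX_SIZE) gives spb_sizem1 = 127, split
		 * below into spb_sizem1 = 0x3F and spb_high_sizem1 = 0x1.
		 */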
289 		spb_sizem1 = rq->spb_size / 8; /* Expressed in no. of dwords */
290 		spb_sizem1 -= 1;	       /* Expressed in size minus one */
291 		aq->rq.spb_sizem1 = spb_sizem1 & 0x3F;
292 		aq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;
293 	} else {
294 		aq->rq.spb_ena = 0;
295 	}
296 
297 	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
298 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
299 	aq->rq.rq_int_ena = 0;
300 	/* Many to one reduction */
301 	aq->rq.qint_idx = rq->qid % qints;
302 	aq->rq.xqe_drop_ena = 0;
303 	aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
304 	aq->rq.spb_drop_ena = rq->spb_drop_ena;
305 
306 	/* If RED is enabled, program the pass/drop levels for all cases */
307 	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
308 		aq->rq.spb_pool_pass = rq->spb_red_pass;
309 		aq->rq.lpb_pool_pass = rq->red_pass;
310 		aq->rq.wqe_pool_pass = rq->red_pass;
311 		aq->rq.xqe_pass = rq->red_pass;
312 
313 		aq->rq.spb_pool_drop = rq->spb_red_drop;
314 		aq->rq.lpb_pool_drop = rq->red_drop;
315 		aq->rq.wqe_pool_drop = rq->red_drop;
316 		aq->rq.xqe_drop = rq->red_drop;
317 	}
318 
319 	if (cfg) {
320 		if (rq->sso_ena) {
321 			/* SSO mode */
322 			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
323 			aq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;
324 			aq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;
325 			aq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;
326 			aq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;
327 			aq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;
328 			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
329 			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
330 			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
331 			if (rq->vwqe_ena) {
332 				aq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;
333 				aq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;
334 				aq->rq_mask.max_vsize_exp =
335 					~aq->rq_mask.max_vsize_exp;
336 				aq->rq_mask.vtime_wait =
337 					~aq->rq_mask.vtime_wait;
338 				aq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;
339 			}
340 		} else {
341 			/* CQ mode */
342 			aq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;
343 			aq->rq_mask.good_utag = ~aq->rq_mask.good_utag;
344 			aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
345 			aq->rq_mask.ltag = ~aq->rq_mask.ltag;
346 			aq->rq_mask.cq = ~aq->rq_mask.cq;
347 		}
348 
349 		if (rq->ipsech_ena)
350 			aq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;
351 
352 		if (rq->spb_ena) {
353 			aq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;
354 			aq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;
355 			aq->rq_mask.spb_high_sizem1 =
356 				~aq->rq_mask.spb_high_sizem1;
357 		}
358 
359 		aq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;
360 		aq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;
361 		aq->rq_mask.first_skip = ~aq->rq_mask.first_skip;
362 		aq->rq_mask.later_skip = ~aq->rq_mask.later_skip;
363 		aq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;
364 		aq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;
365 		aq->rq_mask.ena = ~aq->rq_mask.ena;
366 		aq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;
367 		aq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;
368 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
369 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
370 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
371 		aq->rq_mask.lpb_drop_ena = ~aq->rq_mask.lpb_drop_ena;
372 		aq->rq_mask.spb_drop_ena = ~aq->rq_mask.spb_drop_ena;
373 
374 		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
375 			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
376 			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
377 			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
378 			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
379 
380 			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
381 			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
382 			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
383 			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
384 		}
385 	}
386 
387 	return 0;
388 }
389 
390 int
391 roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
392 {
393 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
394 	struct mbox *mbox = (&nix->dev)->mbox;
395 	bool is_cn9k = roc_model_is_cn9k();
396 	struct dev *dev = &nix->dev;
397 	int rc;
398 
399 	if (roc_nix == NULL || rq == NULL)
400 		return NIX_ERR_PARAM;
401 
402 	if (rq->qid >= nix->nb_rx_queues)
403 		return NIX_ERR_QUEUE_INVALID_RANGE;
404 
405 	rq->roc_nix = roc_nix;
406 
407 	if (is_cn9k)
408 		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
409 	else
410 		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
411 
412 	if (rc)
413 		return rc;
414 
415 	rc = mbox_process(mbox);
416 	if (rc)
417 		return rc;
418 
419 	return nix_tel_node_add_rq(rq);
420 }
421 
422 int
423 roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
424 {
425 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
426 	struct mbox *mbox = (&nix->dev)->mbox;
427 	bool is_cn9k = roc_model_is_cn9k();
428 	struct dev *dev = &nix->dev;
429 	int rc;
430 
431 	if (roc_nix == NULL || rq == NULL)
432 		return NIX_ERR_PARAM;
433 
434 	if (rq->qid >= nix->nb_rx_queues)
435 		return NIX_ERR_QUEUE_INVALID_RANGE;
436 
437 	rq->roc_nix = roc_nix;
438 
439 	if (is_cn9k)
440 		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
441 	else
442 		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
443 
444 	if (rc)
445 		return rc;
446 
447 	rc = mbox_process(mbox);
448 	if (rc)
449 		return rc;
450 
451 	return nix_tel_node_add_rq(rq);
452 }
453 
454 int
455 roc_nix_rq_fini(struct roc_nix_rq *rq)
456 {
457 	/* Disabling RQ is sufficient */
458 	return roc_nix_rq_ena_dis(rq, false);
459 }
460 
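/*
 * Illustrative usage sketch (not part of the driver): a caller typically
 * pairs one CQ and one RQ per Rx queue using the same qid. Field names are
 * those used by this file; `lpb_aura` stands for a caller-owned NPA aura
 * handle and the sizes are example values only.
 *
 *	struct roc_nix_cq cq = { .qid = 0, .nb_desc = 1024 };
 *	struct roc_nix_rq rq = { .qid = 0, .aura_handle = lpb_aura,
 *				 .lpb_size = 2048 };
 *	rc = roc_nix_cq_init(roc_nix, &cq);
 *	if (!rc)
 *		rc = roc_nix_rq_init(roc_nix, &rq, true);
 */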
461 int
462 roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
463 {
464 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
465 	struct mbox *mbox = (&nix->dev)->mbox;
466 	volatile struct nix_cq_ctx_s *cq_ctx;
467 	enum nix_q_size qsize;
468 	size_t desc_sz;
469 	int rc;
470 
471 	if (cq == NULL)
472 		return NIX_ERR_PARAM;
473 
474 	if (cq->qid >= nix->nb_rx_queues)
475 		return NIX_ERR_QUEUE_INVALID_RANGE;
476 
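	/*
	 * Clamp the requested depth to a supported queue size (always a
	 * power of two); e.g. a request for 1000 descriptors becomes 1024
	 * entries, and qmask = 1023 can then be used for ring wrap-around.
	 */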
477 	qsize = nix_qsize_clampup(cq->nb_desc);
478 	cq->nb_desc = nix_qsize_to_val(qsize);
479 	cq->qmask = cq->nb_desc - 1;
480 	cq->door = nix->base + NIX_LF_CQ_OP_DOOR;
481 	cq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
482 	cq->wdata = (uint64_t)cq->qid << 32;
483 	cq->roc_nix = roc_nix;
484 
485 	/* CQE of W16 */
486 	desc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;
487 	cq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);
488 	if (cq->desc_base == NULL) {
489 		rc = NIX_ERR_NO_MEM;
490 		goto fail;
491 	}
492 
493 	if (roc_model_is_cn9k()) {
494 		struct nix_aq_enq_req *aq;
495 
496 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
497 		if (!aq)
498 			return -ENOSPC;
499 
500 		aq->qidx = cq->qid;
501 		aq->ctype = NIX_AQ_CTYPE_CQ;
502 		aq->op = NIX_AQ_INSTOP_INIT;
503 		cq_ctx = &aq->cq;
504 	} else {
505 		struct nix_cn10k_aq_enq_req *aq;
506 
507 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
508 		if (!aq)
509 			return -ENOSPC;
510 
511 		aq->qidx = cq->qid;
512 		aq->ctype = NIX_AQ_CTYPE_CQ;
513 		aq->op = NIX_AQ_INSTOP_INIT;
514 		cq_ctx = &aq->cq;
515 	}
516 
517 	cq_ctx->ena = 1;
518 	cq_ctx->caching = 1;
519 	cq_ctx->qsize = qsize;
520 	cq_ctx->base = (uint64_t)cq->desc_base;
521 	cq_ctx->avg_level = 0xff;
522 	cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
523 	cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
524 
525 	/* Many to one reduction */
526 	cq_ctx->qint_idx = cq->qid % nix->qints;
527 	/* Map CQ0 [RQ0] to CINT0 and so on, up to a maximum of 64 IRQs */
528 	cq_ctx->cint_idx = cq->qid;
529 
530 	if (roc_errata_nix_has_cq_min_size_4k()) {
531 		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
532 		uint16_t min_rx_drop;
533 
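		/*
		 * Program a drop level proportional to the reserved skid
		 * relative to the ring size so the CQ is never driven
		 * completely full on parts with this errata.
		 */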
534 		min_rx_drop = ceil(rx_cq_skid / (float)cq->nb_desc);
535 		cq_ctx->drop = min_rx_drop;
536 		cq_ctx->drop_ena = 1;
537 		cq->drop_thresh = min_rx_drop;
538 	} else {
539 		cq->drop_thresh = NIX_CQ_THRESH_LEVEL;
540 		/* Drop processing or RED drop cannot be enabled due to
541 		 * packets coming for the second pass from CPT.
542 		 */
543 		if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
544 			cq_ctx->drop = cq->drop_thresh;
545 			cq_ctx->drop_ena = 1;
546 		}
547 	}
548 
549 	/* TX pause frames enable flow ctrl on RX side */
550 	if (nix->tx_pause) {
551 		/* Single BPID is allocated for all rx channels for now */
552 		cq_ctx->bpid = nix->bpid[0];
553 		cq_ctx->bp = cq->drop_thresh;
554 		cq_ctx->bp_ena = 1;
555 	}
556 
557 	rc = mbox_process(mbox);
558 	if (rc)
559 		goto free_mem;
560 
561 	return nix_tel_node_add_cq(cq);
562 
563 free_mem:
564 	plt_free(cq->desc_base);
565 fail:
566 	return rc;
567 }
568 
569 int
570 roc_nix_cq_fini(struct roc_nix_cq *cq)
571 {
572 	struct mbox *mbox;
573 	struct nix *nix;
574 	int rc;
575 
576 	if (cq == NULL)
577 		return NIX_ERR_PARAM;
578 
579 	nix = roc_nix_to_nix_priv(cq->roc_nix);
580 	mbox = (&nix->dev)->mbox;
581 
582 	/* Disable CQ */
583 	if (roc_model_is_cn9k()) {
584 		struct nix_aq_enq_req *aq;
585 
586 		aq = mbox_alloc_msg_nix_aq_enq(mbox);
587 		if (!aq)
588 			return -ENOSPC;
589 
590 		aq->qidx = cq->qid;
591 		aq->ctype = NIX_AQ_CTYPE_CQ;
592 		aq->op = NIX_AQ_INSTOP_WRITE;
593 		aq->cq.ena = 0;
594 		aq->cq.bp_ena = 0;
595 		aq->cq_mask.ena = ~aq->cq_mask.ena;
596 		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
597 	} else {
598 		struct nix_cn10k_aq_enq_req *aq;
599 
600 		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
601 		if (!aq)
602 			return -ENOSPC;
603 
604 		aq->qidx = cq->qid;
605 		aq->ctype = NIX_AQ_CTYPE_CQ;
606 		aq->op = NIX_AQ_INSTOP_WRITE;
607 		aq->cq.ena = 0;
608 		aq->cq.bp_ena = 0;
609 		aq->cq_mask.ena = ~aq->cq_mask.ena;
610 		aq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;
611 	}
612 
613 	rc = mbox_process(mbox);
614 	if (rc)
615 		return rc;
616 
617 	plt_free(cq->desc_base);
618 	return 0;
619 }
620 
621 static int
622 sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
623 {
624 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
625 	uint16_t sqes_per_sqb, count, nb_sqb_bufs;
626 	struct npa_pool_s pool;
627 	struct npa_aura_s aura;
628 	uint64_t blk_sz;
629 	uint64_t iova;
630 	int rc;
631 
632 	blk_sz = nix->sqb_size;
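	/*
	 * Each SQB is one NPA buffer of blk_sz bytes holding fixed-size
	 * SQEs: a W16 SQE is 16 dwords (128 B) and a W8 SQE is 8 dwords
	 * (64 B), so e.g. a 4 KB SQB holds 32 W16 SQEs or 64 W8 SQEs.
	 */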
633 	if (sq->max_sqe_sz == roc_nix_maxsqesz_w16)
634 		sqes_per_sqb = (blk_sz / 8) / 16;
635 	else
636 		sqes_per_sqb = (blk_sz / 8) / 8;
637 
638 	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
639 	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
640 	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
641 	/* Clamp up the SQB count */
642 	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
643 			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
644 
645 	sq->nb_sqb_bufs = nb_sqb_bufs;
646 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
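	/*
	 * The adjusted count discounts roughly one SQE slot per SQB (used
	 * to link to the next SQB) and is then scaled down to
	 * NIX_SQB_LOWER_THRESH percent, so the flow-control threshold trips
	 * before the pool is fully exhausted.
	 */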
647 	sq->nb_sqb_bufs_adj =
648 		nb_sqb_bufs -
649 		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
650 	sq->nb_sqb_bufs_adj =
651 		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
652 
653 	/* Explicitly set nat_align alone, as by default the pool is created
654 	 * with both nat_align and buf_offset = 1, which we don't want for SQBs.
655 	 */
656 	memset(&pool, 0, sizeof(struct npa_pool_s));
657 	pool.nat_align = 1;
658 
659 	memset(&aura, 0, sizeof(aura));
660 	aura.fc_ena = 1;
661 	if (roc_model_is_cn9k() || roc_errata_npa_has_no_fc_stype_ststp())
662 		aura.fc_stype = 0x0; /* STF */
663 	else
664 		aura.fc_stype = 0x3; /* STSTP */
665 	aura.fc_addr = (uint64_t)sq->fc;
666 	aura.fc_hyst_bits = 0; /* Store count on all updates */
667 	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
668 				 &pool);
669 	if (rc)
670 		goto fail;
671 
672 	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
673 	if (sq->sqe_mem == NULL) {
674 		rc = NIX_ERR_NO_MEM;
675 		goto nomem;
676 	}
677 
678 	/* Fill the initial buffers */
679 	iova = (uint64_t)sq->sqe_mem;
680 	for (count = 0; count < NIX_MAX_SQB; count++) {
681 		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
682 		iova += blk_sz;
683 	}
684 
685 	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
686 	    NIX_MAX_SQB) {
687 		plt_err("Failed to free all pointers to the pool");
688 		rc = NIX_ERR_NO_MEM;
689 		goto npa_fail;
690 	}
691 
692 	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
693 	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
694 	sq->aura_sqb_bufs = NIX_MAX_SQB;
695 
696 	return rc;
697 npa_fail:
698 	plt_free(sq->sqe_mem);
699 nomem:
700 	roc_npa_pool_destroy(sq->aura_handle);
701 fail:
702 	return rc;
703 }
704 
705 static int
706 sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
707 	     uint16_t smq)
708 {
709 	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
710 	struct mbox *mbox = (&nix->dev)->mbox;
711 	struct nix_aq_enq_req *aq;
712 
713 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
714 	if (!aq)
715 		return -ENOSPC;
716 
717 	aq->qidx = sq->qid;
718 	aq->ctype = NIX_AQ_CTYPE_SQ;
719 	aq->op = NIX_AQ_INSTOP_INIT;
720 	aq->sq.max_sqe_size = sq->max_sqe_sz;
721 
723 	aq->sq.smq = smq;
724 	aq->sq.smq_rr_quantum = rr_quantum;
725 	if (roc_nix_is_sdp(roc_nix))
726 		aq->sq.default_chan =
727 			nix->tx_chan_base + (sq->qid % nix->tx_chan_cnt);
728 	else
729 		aq->sq.default_chan = nix->tx_chan_base;
730 	aq->sq.sqe_stype = NIX_STYPE_STF;
731 	aq->sq.ena = 1;
732 	aq->sq.sso_ena = !!sq->sso_ena;
733 	aq->sq.cq_ena = !!sq->cq_ena;
734 	aq->sq.cq = sq->cqid;
735 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
736 		aq->sq.sqe_stype = NIX_STYPE_STP;
737 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
738 	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
739 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
740 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
741 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
742 
743 	/* Many to one reduction */
744 	/* Assigning QINT 0 to all the SQs; an errata exists where NIXTX can
745 	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
746 	 * which might result in software missing the interrupt.
747 	 */
748 	aq->sq.qint_idx = 0;
749 	return 0;
750 }
751 
752 static int
753 sq_cn9k_fini(struct nix *nix, struct roc_nix_sq *sq)
754 {
755 	struct mbox *mbox = (&nix->dev)->mbox;
756 	struct nix_aq_enq_rsp *rsp;
757 	struct nix_aq_enq_req *aq;
758 	uint16_t sqes_per_sqb;
759 	void *sqb_buf;
760 	int rc, count;
761 
762 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
763 	if (!aq)
764 		return -ENOSPC;
765 
766 	aq->qidx = sq->qid;
767 	aq->ctype = NIX_AQ_CTYPE_SQ;
768 	aq->op = NIX_AQ_INSTOP_READ;
769 	rc = mbox_process_msg(mbox, (void *)&rsp);
770 	if (rc)
771 		return rc;
772 
773 	/* Check if sq is already cleaned up */
774 	if (!rsp->sq.ena)
775 		return 0;
776 
777 	/* Disable sq */
778 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
779 	if (!aq)
780 		return -ENOSPC;
781 
782 	aq->qidx = sq->qid;
783 	aq->ctype = NIX_AQ_CTYPE_SQ;
784 	aq->op = NIX_AQ_INSTOP_WRITE;
785 	aq->sq_mask.ena = ~aq->sq_mask.ena;
786 	aq->sq.ena = 0;
787 	rc = mbox_process(mbox);
788 	if (rc)
789 		return rc;
790 
791 	/* Read SQ context and free SQBs */
792 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
793 	if (!aq)
794 		return -ENOSPC;
795 
796 	aq->qidx = sq->qid;
797 	aq->ctype = NIX_AQ_CTYPE_SQ;
798 	aq->op = NIX_AQ_INSTOP_READ;
799 	rc = mbox_process_msg(mbox, (void *)&rsp);
800 	if (rc)
801 		return rc;
802 
803 	if (aq->sq.smq_pend)
804 		plt_err("SQ has pending SQEs");
805 
806 	count = aq->sq.sqb_count;
807 	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
808 	/* Free SQBs that are in use */
809 	sqb_buf = (void *)rsp->sq.head_sqb;
810 	while (count) {
811 		void *next_sqb;
812 
813 		next_sqb = *(void **)((uintptr_t)sqb_buf +
814 				      (uint32_t)((sqes_per_sqb - 1) *
815 						 sq->max_sqe_sz));
816 		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
817 		sqb_buf = next_sqb;
818 		count--;
819 	}
820 
821 	/* Free the next-to-use SQB */
822 	if (rsp->sq.next_sqb)
823 		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
824 	return 0;
825 }
826 
827 static int
828 sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
829 	uint16_t smq)
830 {
831 	struct mbox *mbox = (&nix->dev)->mbox;
832 	struct nix_cn10k_aq_enq_req *aq;
833 
834 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
835 	if (!aq)
836 		return -ENOSPC;
837 
838 	aq->qidx = sq->qid;
839 	aq->ctype = NIX_AQ_CTYPE_SQ;
840 	aq->op = NIX_AQ_INSTOP_INIT;
841 	aq->sq.max_sqe_size = sq->max_sqe_sz;
842 
844 	aq->sq.smq = smq;
845 	aq->sq.smq_rr_weight = rr_quantum;
846 	aq->sq.default_chan = nix->tx_chan_base;
847 	aq->sq.sqe_stype = NIX_STYPE_STF;
848 	aq->sq.ena = 1;
849 	aq->sq.sso_ena = !!sq->sso_ena;
850 	aq->sq.cq_ena = !!sq->cq_ena;
851 	aq->sq.cq = sq->cqid;
852 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
853 		aq->sq.sqe_stype = NIX_STYPE_STP;
854 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
855 	aq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
856 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
857 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
858 	aq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
859 
860 	/* Assigning QINT 0 to all the SQs; an errata exists where NIXTX can
861 	 * send an incorrect QINT_IDX when reporting a queue interrupt (QINT),
862 	 * which might result in software missing the interrupt.
863 	 */
864 	aq->sq.qint_idx = 0;
865 	return 0;
866 }
867 
868 static int
869 sq_fini(struct nix *nix, struct roc_nix_sq *sq)
870 {
871 	struct mbox *mbox = (&nix->dev)->mbox;
872 	struct nix_cn10k_aq_enq_rsp *rsp;
873 	struct nix_cn10k_aq_enq_req *aq;
874 	uint16_t sqes_per_sqb;
875 	void *sqb_buf;
876 	int rc, count;
877 
878 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
879 	if (!aq)
880 		return -ENOSPC;
881 
882 	aq->qidx = sq->qid;
883 	aq->ctype = NIX_AQ_CTYPE_SQ;
884 	aq->op = NIX_AQ_INSTOP_READ;
885 	rc = mbox_process_msg(mbox, (void *)&rsp);
886 	if (rc)
887 		return rc;
888 
889 	/* Check if sq is already cleaned up */
890 	if (!rsp->sq.ena)
891 		return 0;
892 
893 	/* Disable sq */
894 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
895 	if (!aq)
896 		return -ENOSPC;
897 
898 	aq->qidx = sq->qid;
899 	aq->ctype = NIX_AQ_CTYPE_SQ;
900 	aq->op = NIX_AQ_INSTOP_WRITE;
901 	aq->sq_mask.ena = ~aq->sq_mask.ena;
902 	aq->sq.ena = 0;
903 	rc = mbox_process(mbox);
904 	if (rc)
905 		return rc;
906 
907 	/* Read SQ context and free SQBs */
908 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
909 	if (!aq)
910 		return -ENOSPC;
911 
912 	aq->qidx = sq->qid;
913 	aq->ctype = NIX_AQ_CTYPE_SQ;
914 	aq->op = NIX_AQ_INSTOP_READ;
915 	rc = mbox_process_msg(mbox, (void *)&rsp);
916 	if (rc)
917 		return rc;
918 
919 	if (aq->sq.smq_pend)
920 		plt_err("SQ has pending SQEs");
921 
922 	count = aq->sq.sqb_count;
923 	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
924 	/* Free SQBs that are in use */
925 	sqb_buf = (void *)rsp->sq.head_sqb;
926 	while (count) {
927 		void *next_sqb;
928 
929 		next_sqb = *(void **)((uintptr_t)sqb_buf +
930 				      (uint32_t)((sqes_per_sqb - 1) *
931 						 sq->max_sqe_sz));
932 		roc_npa_aura_op_free(sq->aura_handle, 1, (uint64_t)sqb_buf);
933 		sqb_buf = next_sqb;
934 		count--;
935 	}
936 
937 	/* Free the next-to-use SQB */
938 	if (rsp->sq.next_sqb)
939 		roc_npa_aura_op_free(sq->aura_handle, 1, rsp->sq.next_sqb);
940 	return 0;
941 }
942 
943 int
944 roc_nix_sq_init(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
945 {
946 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
947 	struct mbox *mbox = (&nix->dev)->mbox;
948 	uint16_t qid, smq = UINT16_MAX;
949 	uint32_t rr_quantum = 0;
950 	int rc;
951 
952 	if (sq == NULL)
953 		return NIX_ERR_PARAM;
954 
955 	qid = sq->qid;
956 	if (qid >= nix->nb_tx_queues)
957 		return NIX_ERR_QUEUE_INVALID_RANGE;
958 
959 	sq->roc_nix = roc_nix;
960 	/*
961 	 * Allocate memory for flow control updates from HW.
962 	 * Allocate one cache line so that it fits all FC_STYPE modes.
963 	 */
964 	sq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
965 	if (sq->fc == NULL) {
966 		rc = NIX_ERR_NO_MEM;
967 		goto fail;
968 	}
969 
970 	rc = sqb_pool_populate(roc_nix, sq);
971 	if (rc)
972 		goto nomem;
973 
974 	rc = nix_tm_leaf_data_get(nix, sq->qid, &rr_quantum, &smq);
975 	if (rc) {
976 		rc = NIX_ERR_TM_LEAF_NODE_GET;
977 		goto nomem;
978 	}
979 
980 	/* Init SQ context */
981 	if (roc_model_is_cn9k())
982 		rc = sq_cn9k_init(nix, sq, rr_quantum, smq);
983 	else
984 		rc = sq_init(nix, sq, rr_quantum, smq);
985 
986 	if (rc)
987 		goto nomem;
988 
989 	rc = mbox_process(mbox);
990 	if (rc)
991 		goto nomem;
992 
993 	nix->sqs[qid] = sq;
994 	sq->io_addr = nix->base + NIX_LF_OP_SENDX(0);
995 	/* Evenly distribute LMT slot for each sq */
996 	if (roc_model_is_cn9k()) {
997 		/* Multiple cores/SQs can safely use the same LMTLINE in CN9K */
998 		sq->lmt_addr = (void *)(nix->lmt_base +
999 					((qid & RVU_CN9K_LMT_SLOT_MASK) << 12));
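		/* The << 12 places successive LMT slots at a 4 KB stride
		 * within the LMT region.
		 */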
1000 	}
1001 
1002 	rc = nix_tel_node_add_sq(sq);
1003 	return rc;
1004 nomem:
1005 	plt_free(sq->fc);
1006 fail:
1007 	return rc;
1008 }
1009 
1010 int
1011 roc_nix_sq_fini(struct roc_nix_sq *sq)
1012 {
1013 	struct nix *nix;
1014 	struct mbox *mbox;
1015 	struct ndc_sync_op *ndc_req;
1016 	uint16_t qid;
1017 	int rc = 0;
1018 
1019 	if (sq == NULL)
1020 		return NIX_ERR_PARAM;
1021 
1022 	nix = roc_nix_to_nix_priv(sq->roc_nix);
1023 	mbox = (&nix->dev)->mbox;
1024 
1025 	qid = sq->qid;
1026 
1027 	rc = nix_tm_sq_flush_pre(sq);
1028 
1029 	/* Release SQ context */
1030 	if (roc_model_is_cn9k())
1031 		rc |= sq_cn9k_fini(nix, sq);
1032 	else
1033 		rc |= sq_fini(nix, sq);
1034 
1035 	/* Sync NDC-NIX-TX for LF */
1036 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
1037 	if (ndc_req == NULL)
1038 		return -ENOSPC;
1039 	ndc_req->nix_lf_tx_sync = 1;
1040 	if (mbox_process(mbox))
1041 		rc |= NIX_ERR_NDC_SYNC;
1042 
1043 	rc |= nix_tm_sq_flush_post(sq);
1044 
1045 	/* Restore the limit to the max SQB count that the pool was created
1046 	 * with, so that the aura drain can succeed.
1047 	 */
1048 	roc_npa_aura_limit_modify(sq->aura_handle, NIX_MAX_SQB);
1049 	rc |= roc_npa_pool_destroy(sq->aura_handle);
1050 	plt_free(sq->fc);
1051 	plt_free(sq->sqe_mem);
1052 	nix->sqs[qid] = NULL;
1053 
1054 	return rc;
1055 }
1056 
1057 void
1058 roc_nix_cq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
1059 			 uint32_t *tail)
1060 {
1061 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1062 	uint64_t reg, val;
1063 	int64_t *addr;
1064 
1065 	if (head == NULL || tail == NULL)
1066 		return;
1067 
1068 	reg = (((uint64_t)qid) << 32);
1069 	addr = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);
1070 	val = roc_atomic64_add_nosync(reg, addr);
1071 	if (val &
1072 	    (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) | BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
1073 		val = 0;
1074 
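	/* CQ_OP_STATUS returns the tail pointer in bits [19:0] and the head
	 * pointer in bits [39:20], extracted below.
	 */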
1075 	*tail = (uint32_t)(val & 0xFFFFF);
1076 	*head = (uint32_t)((val >> 20) & 0xFFFFF);
1077 }
1078 
1079 void
1080 roc_nix_sq_head_tail_get(struct roc_nix *roc_nix, uint16_t qid, uint32_t *head,
1081 			 uint32_t *tail)
1082 {
1083 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1084 	struct roc_nix_sq *sq = nix->sqs[qid];
1085 	uint16_t sqes_per_sqb, sqb_cnt;
1086 	uint64_t reg, val;
1087 	int64_t *addr;
1088 
1089 	if (head == NULL || tail == NULL)
1090 		return;
1091 
1092 	reg = (((uint64_t)qid) << 32);
1093 	addr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
1094 	val = roc_atomic64_add_nosync(reg, addr);
1095 	if (val & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR)) {
1096 		val = 0;
1097 		return;
1098 	}
1099 
1100 	*tail = (uint32_t)((val >> 28) & 0x3F);
1101 	*head = (uint32_t)((val >> 20) & 0x3F);
1102 	sqb_cnt = (uint16_t)(val & 0xFFFF);
1103 
1104 	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
1105 
1106 	/* Update tail index as per used sqb count */
1107 	*tail += (sqes_per_sqb * (sqb_cnt - 1));
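	/*
	 * The adjustment adds (sqb_cnt - 1) * sqes_per_sqb, e.g. 2 * 32 = 64
	 * SQEs when three SQBs of 32 SQEs each are in use.
	 */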
1108 }
1109