xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
9 #include <rte_log.h>
10 #include <rte_alarm.h>
11 #include "bnxt.h"
12 #include "bnxt_ulp.h"
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
18 #include "tf_tbl.h"
19 
20 static int
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
22 {
23 	/* Allocate memory */
24 	if (!parms)
25 		return -EINVAL;
26 
27 	parms->mem_va = rte_zmalloc("ulp_fc_info",
28 				    RTE_CACHE_LINE_ROUNDUP(size),
29 				    4096);
30 	if (!parms->mem_va) {
31 		BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
32 		return -ENOMEM;
33 	}
34 
35 	rte_mem_lock_page(parms->mem_va);
36 
37 	parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38 	if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
39 		BNXT_TF_DBG(ERR, "Failed to get physical address for mem_va\n");
40 		return -ENOMEM;
41 	}
42 
43 	return 0;
44 }
45 
46 static void
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
48 {
49 	rte_free(parms->mem_va);
50 }
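
/*
 * Note on the shadow memory managed above: each direction gets a
 * page-aligned (4096 B), pinned buffer holding one 64-bit stat word
 * per flow counter entry (sizeof(uint64_t) * flow_count_db_entries).
 * Its physical address (mem_pa) is what the bulk-get path further
 * below would program as parms.physical_mem_addr so the raw counters
 * can be DMA'd straight into it; that path is currently compiled out
 * (__rte_unused) until the bulk API is resolved.
 */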
51 
52 /*
53  * Allocate and initialize all Flow Counter Manager resources for this ulp
54  * context.
55  *
56  * ctxt [in] The ulp context for the Flow Counter manager.
57  *
58  */
59 int32_t
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
61 {
62 	struct bnxt_ulp_device_params *dparms;
63 	uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64 	struct bnxt_ulp_fc_info *ulp_fc_info;
65 	int i, rc;
66 
67 	if (!ctxt) {
68 		BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
69 		return -EINVAL;
70 	}
71 
72 	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
74 		return -EINVAL;
75 	}
76 
77 	dparms = bnxt_ulp_device_params_get(dev_id);
78 	if (!dparms) {
79 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
80 		return -EINVAL;
81 	}
82 
83 	ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
84 	if (!ulp_fc_info)
85 		goto error;
86 
87 	rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
88 	if (rc) {
89 		PMD_DRV_LOG_LINE(ERR, "Failed to initialize fc mutex");
90 		goto error;
91 	}
92 
93 	/* Add the FC info tbl to the ulp context. */
94 	bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
95 
96 	ulp_fc_info->num_counters = dparms->flow_count_db_entries;
97 	if (!ulp_fc_info->num_counters) {
98 		/* No need for software counters, call fw directly */
99 		BNXT_TF_DBG(DEBUG, "SW flow counter support not enabled\n");
100 		return 0;
101 	}
102 
103 	sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
104 				dparms->flow_count_db_entries;
105 
106 	for (i = 0; i < TF_DIR_MAX; i++) {
107 		ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
108 							 sw_acc_cntr_tbl_sz, 0);
109 		if (!ulp_fc_info->sw_acc_tbl[i])
110 			goto error;
111 	}
112 
113 	hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
114 
115 	for (i = 0; i < TF_DIR_MAX; i++) {
116 		rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
117 						 hw_fc_mem_info_sz);
118 		if (rc)
119 			goto error;
120 	}
121 
122 	return 0;
123 
124 error:
125 	ulp_fc_mgr_deinit(ctxt);
126 	BNXT_TF_DBG(DEBUG,
127 		    "Failed to allocate memory for fc mgr\n");
128 
129 	return -ENOMEM;
130 }
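
/*
 * Rough lifecycle of the flow counter manager, sketched from the APIs
 * in this file (the real call sites live elsewhere in the ULP layer):
 *
 *   ulp_fc_mgr_init(ctxt);                          port/ULP bring-up
 *   ulp_fc_mgr_start_idx_set(ctxt, dir, start_idx); first HW cntr ID
 *   ulp_fc_mgr_cntr_set(ctxt, dir, hw_id, stype);   per counted flow
 *   ulp_fc_mgr_thread_start(ctxt);                  start polling
 *   ulp_fc_mgr_query_count_get(ctxt, fid, &count);  rte_flow_query path
 *   ulp_fc_mgr_cntr_reset(ctxt, dir, hw_id);        on flow destroy
 *   ulp_fc_mgr_deinit(ctxt);                        port/ULP teardown
 */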
131 
132 /*
133  * Release all resources in the Flow Counter Manager for this ulp context
134  *
135  * ctxt [in] The ulp context for the Flow Counter manager
136  *
137  */
138 int32_t
139 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
140 {
141 	struct bnxt_ulp_fc_info *ulp_fc_info;
142 	struct hw_fc_mem_info *shd_info;
143 	int i;
144 
145 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
146 
147 	if (!ulp_fc_info)
148 		return -EINVAL;
149 
150 	ulp_fc_mgr_thread_cancel(ctxt);
151 
152 	pthread_mutex_destroy(&ulp_fc_info->fc_lock);
153 
154 	if (ulp_fc_info->num_counters) {
155 		for (i = 0; i < TF_DIR_MAX; i++)
156 			rte_free(ulp_fc_info->sw_acc_tbl[i]);
157 
158 		for (i = 0; i < TF_DIR_MAX; i++) {
159 			shd_info = &ulp_fc_info->shadow_hw_tbl[i];
160 			ulp_fc_mgr_shadow_mem_free(shd_info);
161 		}
162 	}
163 
164 	rte_free(ulp_fc_info);
165 
166 	/* Safe to ignore on deinit */
167 	(void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
168 
169 	return 0;
170 }
171 
172 /*
173  * Check if the alarm thread that walks through the flows is started
174  *
175  * ctxt [in] The ulp context for the flow counter manager
176  *
177  */
178 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
179 {
180 	struct bnxt_ulp_fc_info *ulp_fc_info;
181 
182 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
183 
184 	if (ulp_fc_info)
185 		return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
186 
187 	return false;
188 }
189 
190 /*
191  * Set up the flow counter timer thread that will fetch/accumulate raw counter
192  * data from the chip's internal flow counters
193  *
194  * ctxt [in] The ulp context for the flow counter manager
195  *
196  */
197 int32_t
198 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
199 {
200 	struct bnxt_ulp_fc_info *ulp_fc_info;
201 
202 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
203 
204 	if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
205 		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
206 				  ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
207 		ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
208 	}
209 
210 	return 0;
211 }
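
/*
 * Note: rte_eal_alarm_set() arms a one-shot alarm, so the "thread"
 * here is really the EAL alarm/interrupt thread invoking
 * ulp_fc_mgr_alarm_cb() every ULP_FC_TIMER seconds; the callback
 * re-arms itself at the end of each pass. ULP_FLAG_FC_THREAD merely
 * guards against arming the alarm twice.
 */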
212 
213 /*
214  * Cancel the alarm handler
215  *
216  * ctxt [in] The ulp context for the flow counter manager
217  *
218  */
219 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
220 {
221 	struct bnxt_ulp_fc_info *ulp_fc_info;
222 
223 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
224 	if (!ulp_fc_info)
225 		return;
226 
227 	ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
228 	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
229 }
230 
231 /*
232  * DMA-in the raw counter data from the HW and accumulate in the
233  * local accumulator table using the TF-Core API
234  *
235  * tfp [in] The TF-Core context
236  *
237  * fc_info [in] The ULP Flow counter info ptr
238  *
239  * dir [in] The direction of the flow
240  *
241  * dparms [in] The device parameters (per-device counter layout)
242  *
243  */
244 __rte_unused static int32_t
245 ulp_bulk_get_flow_stats(struct tf *tfp,
246 			struct bnxt_ulp_fc_info *fc_info,
247 			enum tf_dir dir,
248 			struct bnxt_ulp_device_params *dparms)
249 /* Marked as unused for now to avoid compilation errors until the bulk API is resolved */
250 {
251 	int rc = 0;
252 	struct tf_tbl_get_bulk_parms parms = { 0 };
253 	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
254 	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
255 	uint64_t *stats = NULL;
256 	uint16_t i = 0;
257 
258 	parms.dir = dir;
259 	parms.type = stype;
260 	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
261 	parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
262 	/*
263 	 * TODO:
264 	 * Size of an entry needs to be obtained from the template
265 	 */
266 	parms.entry_sz_in_bytes = sizeof(uint64_t);
267 	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
268 	parms.physical_mem_addr = (uint64_t)
269 		((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));
270 
271 	if (!stats) {
272 		PMD_DRV_LOG_LINE(ERR,
273 			    "BULK: Memory not initialized id:0x%x dir:%d",
274 			    parms.starting_idx, dir);
275 		return -EINVAL;
276 	}
277 
278 	rc = tf_tbl_bulk_get(tfp, &parms);
279 	if (rc) {
280 		PMD_DRV_LOG_LINE(ERR,
281 			    "BULK: Get failed for id:0x%x rc:%d",
282 			    parms.starting_idx, rc);
283 		return rc;
284 	}
285 
286 	for (i = 0; i < parms.num_entries; i++) {
287 		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
288 		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
289 		if (!sw_acc_tbl_entry->valid)
290 			continue;
291 		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
292 							      dparms);
293 		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
294 								dparms);
295 	}
296 
297 	return rc;
298 }
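
/*
 * FLOW_CNTR_PKTS()/FLOW_CNTR_BYTES() split the single 64-bit stat
 * word read from HW into its packet and byte fields using the
 * device-specific shift/mask values carried in dparms, conceptually
 * something like:
 *
 *   pkts  = (stats & pkt_mask)  >> pkt_shift;
 *   bytes = (stats & byte_mask) >> byte_shift;
 *
 * (member names here are illustrative; see the macro definitions and
 * struct bnxt_ulp_device_params for the real ones).
 */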
299 
300 static int32_t
301 ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
302 			struct ulp_flow_db_res_params *res,
303 			struct rte_flow_query_count *qcount)
304 {
305 	struct tf *tfp;
306 	struct bnxt_ulp_device_params *dparms;
307 	struct tf_get_tbl_entry_parms parms = { 0 };
308 	struct tf_set_tbl_entry_parms	sparms = { 0 };
309 	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;
310 	uint64_t stats = 0;
311 	uint32_t dev_id = 0;
312 	int32_t rc = 0;
313 
314 	tfp = bnxt_ulp_cntxt_tfp_get(ctxt,
315 				     ulp_flow_db_shared_session_get(res));
316 	if (!tfp) {
317 		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
318 		return -EINVAL;
319 	}
320 
321 	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
322 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
323 		bnxt_ulp_cntxt_entry_release();
324 		return -EINVAL;
325 	}
326 
327 	dparms = bnxt_ulp_device_params_get(dev_id);
328 	if (!dparms) {
329 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
330 		bnxt_ulp_cntxt_entry_release();
331 		return -EINVAL;
332 	}
333 	parms.dir = res->direction;
334 	parms.type = stype;
335 	parms.idx = res->resource_hndl;
336 	parms.data_sz_in_bytes = sizeof(uint64_t);
337 	parms.data = (uint8_t *)&stats;
338 	rc = tf_get_tbl_entry(tfp, &parms);
339 	if (rc) {
340 		PMD_DRV_LOG_LINE(ERR,
341 			    "Get failed for id:0x%x rc:%d",
342 			    parms.idx, rc);
343 		return rc;
344 	}
345 	qcount->hits = FLOW_CNTR_PKTS(stats, dparms);
346 	if (qcount->hits)
347 		qcount->hits_set = 1;
348 	qcount->bytes = FLOW_CNTR_BYTES(stats, dparms);
349 	if (qcount->bytes)
350 		qcount->bytes_set = 1;
351 
352 	if (qcount->reset) {
353 		stats = 0;
354 		sparms.dir = res->direction;
355 		sparms.type = stype;
356 		sparms.idx = res->resource_hndl;
357 		sparms.data = (uint8_t *)&stats;
358 		sparms.data_sz_in_bytes = sizeof(uint64_t);
359 		rc = tf_set_tbl_entry(tfp, &sparms);
360 		if (rc) {
361 			PMD_DRV_LOG_LINE(ERR, "Set failed for id:0x%x rc:%d",
362 				    sparms.idx, rc);
363 			return rc;
364 		}
365 	}
366 	return rc;
367 }
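
/*
 * The helper above is the no-accumulation path: when the device
 * profile sets flow_count_db_entries to zero (see ulp_fc_mgr_init),
 * rte_flow_query reads the 64-bit stat word straight from HW with
 * tf_get_tbl_entry() and, if the caller requested a reset, writes a
 * zero back with tf_set_tbl_entry().
 */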
368 
369 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
370 				    struct tf *tfp,
371 				    struct bnxt_ulp_fc_info *fc_info,
372 				    enum tf_dir dir,
373 				    uint32_t hw_cntr_id,
374 				    struct bnxt_ulp_device_params *dparms)
375 {
376 	int rc = 0;
377 	struct tf_get_tbl_entry_parms parms = { 0 };
378 	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD:Template? */
379 	struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
380 	uint64_t stats = 0;
381 	uint32_t sw_cntr_indx = 0;
382 
383 	parms.dir = dir;
384 	parms.type = stype;
385 	parms.idx = hw_cntr_id;
386 	/*
387 	 * TODO:
388 	 * Size of an entry needs to be obtained from the template
389 	 */
390 	parms.data_sz_in_bytes = sizeof(uint64_t);
391 	parms.data = (uint8_t *)&stats;
392 	rc = tf_get_tbl_entry(tfp, &parms);
393 	if (rc) {
394 		PMD_DRV_LOG_LINE(ERR,
395 			    "Get failed for id:0x%x rc:%d",
396 			    parms.idx, rc);
397 		return rc;
398 	}
399 
400 	/* PKT/BYTE COUNT SHIFT/MASK are device specific */
401 	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
402 	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
403 
404 	/* Some DPDK applications may accumulate the flow counters themselves
405 	 * while others may not. When the application does the accumulation,
406 	 * the PMD need not accumulate as well, and vice versa, so that the
407 	 * reported flow counters stay correct.
408 	 */
409 	sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
410 	sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
411 
412 	/* Update the parent counters if it is child flow */
413 	/* Update the parent counters if it is a child flow */
414 		uint32_t pc_idx;
415 
416 		/* Update the parent counters */
417 		t_sw = sw_acc_tbl_entry;
418 		pc_idx = t_sw->pc_flow_idx & ~FLOW_CNTR_PC_FLOW_VALID;
419 		if (ulp_flow_db_parent_flow_count_update(ctxt, pc_idx,
420 							 t_sw->pkt_count,
421 							 t_sw->byte_count)) {
422 			PMD_DRV_LOG_LINE(ERR, "Error updating parent counters");
423 		}
424 	}
425 
426 	return rc;
427 }
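
/*
 * pc_flow_idx encoding (set in ulp_fc_mgr_cntr_parent_flow_set below):
 * the FLOW_CNTR_PC_FLOW_VALID bit marks the entry as a child flow and
 * the remaining bits hold the parent-child DB index, so a child's
 * accumulated counts are also folded into its parent on every polling
 * pass (the parent totals are reset at the top of the pass in
 * ulp_fc_mgr_alarm_cb()).
 */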
428 
429 /*
430  * Alarm handler that will issue the TF-Core API to fetch
431  * data from the chip's internal flow counters
432  *
433  * ctxt [in] The ulp context for the flow counter manager
434  *
435  */
436 
437 void
438 ulp_fc_mgr_alarm_cb(void *arg)
439 {
440 	int rc = 0;
441 	unsigned int j;
442 	enum tf_dir i;
443 	struct bnxt_ulp_context *ctxt;
444 	struct bnxt_ulp_fc_info *ulp_fc_info;
445 	struct bnxt_ulp_device_params *dparms;
446 	struct tf *tfp;
447 	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
448 
449 	ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
450 	if (ctxt == NULL) {
451 		BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
452 		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
453 				  ulp_fc_mgr_alarm_cb, arg);
454 		return;
455 	}
456 
457 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
458 	if (!ulp_fc_info) {
459 		bnxt_ulp_cntxt_entry_release();
460 		return;
461 	}
462 
463 	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
464 		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
465 		bnxt_ulp_cntxt_entry_release();
466 		return;
467 	}
468 
469 	dparms = bnxt_ulp_device_params_get(dev_id);
470 	if (!dparms) {
471 		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
472 		bnxt_ulp_cntxt_entry_release();
473 		return;
474 	}
475 
476 	/*
477 	 * Take the fc_lock to ensure no flow is destroyed
478 	 * while the counters are being fetched
479 	 */
480 	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
481 		goto out;
482 
483 	if (!ulp_fc_info->num_entries) {
484 		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
485 		ulp_fc_mgr_thread_cancel(ctxt);
486 		bnxt_ulp_cntxt_entry_release();
487 		return;
488 	}
489 	/*
490 	 * Commented out until GET_BULK is resolved; per-flow gets are
491 	 * used below instead.
492 	 for (i = 0; i < TF_DIR_MAX; i++) {
493 		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
494 					     dparms->flow_count_db_entries);
495 		if (rc)
496 			break;
497 	}
498 	*/
499 
500 	/* Reset the parent accumulation counters, if any, before accumulating */
501 	ulp_flow_db_parent_flow_count_reset(ctxt);
502 
503 	num_entries = dparms->flow_count_db_entries / 2;
504 	for (i = 0; i < TF_DIR_MAX; i++) {
505 		for (j = 0; j < num_entries; j++) {
506 			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
507 				continue;
508 			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
509 			tfp = bnxt_ulp_cntxt_tfp_get(ctxt,
510 						     ulp_fc_info->sw_acc_tbl[i][j].session_type);
511 			if (!tfp) {
512 				BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
513 				pthread_mutex_unlock(&ulp_fc_info->fc_lock);
514 				bnxt_ulp_cntxt_entry_release();
515 				return;
516 			}
517 
518 			rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
519 						      hw_cntr_id, dparms);
520 			if (rc)
521 				break;
522 		}
523 	}
524 
525 	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
526 
527 	/*
528 	 * If the command fails once, there is no need
529 	 * to invoke it again every second
530 	 */
531 
532 	if (rc) {
533 		ulp_fc_mgr_thread_cancel(ctxt);
534 		bnxt_ulp_cntxt_entry_release();
535 		return;
536 	}
537 out:
538 	bnxt_ulp_cntxt_entry_release();
539 	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
540 			  ulp_fc_mgr_alarm_cb, arg);
541 }
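
/*
 * Locking note for the callback above: flow create/destroy paths take
 * fc_lock (ulp_fc_mgr_cntr_set()/_reset()), so the poller only ever
 * trylock()s it; on contention it simply skips the pass and re-arms
 * the alarm. A TF-Core read failure, on the other hand, cancels the
 * polling entirely instead of retrying every second.
 */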
542 
543 /*
544  * Check whether the starting index (the first HW flow
545  * counter ID) has been set for the given direction
546  *
547  * ctxt [in] The ulp context for the flow counter manager
548  *
549  * dir [in] The direction of the flow
550  *
551  * returns true if the start index has been set, false otherwise
552  *
553  */
554 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
555 {
556 	struct bnxt_ulp_fc_info *ulp_fc_info;
557 
558 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
559 
560 	if (ulp_fc_info)
561 		return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
562 
563 	return false;
564 }
565 
566 /*
567  * Set the starting index that indicates the first HW flow
568  * counter ID
569  *
570  * ctxt [in] The ulp context for the flow counter manager
571  *
572  * dir [in] The direction of the flow
573  *
574  * start_idx [in] The HW flow counter ID
575  *
576  */
577 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
578 				 uint32_t start_idx)
579 {
580 	struct bnxt_ulp_fc_info *ulp_fc_info;
581 
582 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
583 
584 	if (!ulp_fc_info)
585 		return -EIO;
586 
587 	if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
588 		ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
589 		ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
590 	}
591 
592 	return 0;
593 }
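
/*
 * start_idx anchors the HW-to-SW counter index mapping used throughout
 * this file:
 *
 *   sw_cntr_idx = hw_cntr_id - shadow_hw_tbl[dir].start_idx;
 *
 * e.g. with start_idx 0x1000, HW counter 0x1005 lands in
 * sw_acc_tbl[dir][5]. Only the first caller per direction sets it.
 */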
594 
595 /*
596  * Set the corresponding SW accumulator table entry based on
597  * the difference between this counter ID and the starting
598  * counter ID. Also, keep track of the number of active
599  * counter-enabled flows.
600  *
601  * ctxt [in] The ulp context for the flow counter manager
602  *
603  * dir [in] The direction of the flow
604  *
605  * hw_cntr_id [in] The HW flow counter ID
606  *
607  */
608 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
609 			    uint32_t hw_cntr_id,
610 			    enum bnxt_ulp_session_type session_type)
611 {
612 	struct bnxt_ulp_fc_info *ulp_fc_info;
613 	uint32_t sw_cntr_idx;
614 
615 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
616 	if (!ulp_fc_info)
617 		return -EIO;
618 
619 	if (!ulp_fc_info->num_counters)
620 		return 0;
621 
622 	pthread_mutex_lock(&ulp_fc_info->fc_lock);
623 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
624 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
625 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
626 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = session_type;
627 	ulp_fc_info->num_entries++;
628 	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
629 
630 	return 0;
631 }
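
/*
 * num_entries (bumped here, dropped in ulp_fc_mgr_cntr_reset()) keeps
 * the poller alive: ulp_fc_mgr_alarm_cb() cancels itself once the
 * count reaches zero, and polling has to be kicked off again with
 * ulp_fc_mgr_thread_start() when counted flows reappear.
 */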
632 
633 /*
634  * Reset the corresponding SW accumulator table entry based on
635  * the difference between this counter ID and the starting
636  * counter ID.
637  *
638  * ctxt [in] The ulp context for the flow counter manager
639  *
640  * dir [in] The direction of the flow
641  *
642  * hw_cntr_id [in] The HW flow counter ID
643  *
644  */
645 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
646 			      uint32_t hw_cntr_id)
647 {
648 	struct bnxt_ulp_fc_info *ulp_fc_info;
649 	uint32_t sw_cntr_idx;
650 
651 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
652 	if (!ulp_fc_info)
653 		return -EIO;
654 
655 	if (!ulp_fc_info->num_counters)
656 		return 0;
657 
658 	pthread_mutex_lock(&ulp_fc_info->fc_lock);
659 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
660 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
661 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
662 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = 0;
663 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
664 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
665 	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
666 	ulp_fc_info->num_entries--;
667 	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
668 
669 	return 0;
670 }
671 
672 /*
673  * Fill the rte_flow_query_count 'data' argument passed
674  * to rte_flow_query() with the values obtained and
675  * accumulated locally.
676  *
677  * ctxt [in] The ulp context for the flow counter manager
678  *
679  * flow_id [in] The HW flow ID
680  *
681  * count [out] The rte_flow_query_count 'data' that is set
682  *
683  */
684 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
685 			       uint32_t flow_id,
686 			       struct rte_flow_query_count *count)
687 {
688 	int rc = 0;
689 	uint32_t nxt_resource_index = 0;
690 	struct bnxt_ulp_fc_info *ulp_fc_info;
691 	struct ulp_flow_db_res_params params;
692 	enum tf_dir dir;
693 	uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
694 	struct sw_acc_counter *sw_acc_tbl_entry;
695 	bool found_cntr_resource = false;
696 	bool found_parent_flow = false;
697 	uint32_t pc_idx = 0;
698 
699 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
700 	if (!ulp_fc_info)
701 		return -ENODEV;
702 
703 	if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
704 		return -EIO;
705 
706 	do {
707 		rc = ulp_flow_db_resource_get(ctxt,
708 					      BNXT_ULP_FDB_TYPE_REGULAR,
709 					      flow_id,
710 					      &nxt_resource_index,
711 					      &params);
712 		if (params.resource_func ==
713 		     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
714 		     (params.resource_sub_type ==
715 		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
716 		      params.resource_sub_type ==
717 		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
718 			found_cntr_resource = true;
719 			break;
720 		}
721 		if (params.resource_func ==
722 		    BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
723 			found_parent_flow = true;
724 			pc_idx = params.resource_hndl;
725 		}
726 
727 	} while (!rc && nxt_resource_index);
728 
729 	bnxt_ulp_cntxt_release_fdb_lock(ctxt);
730 
731 	if (rc || !found_cntr_resource)
732 		return rc;
733 
734 	dir = params.direction;
735 	hw_cntr_id = params.resource_hndl;
736 	if (!found_parent_flow &&
737 	    params.resource_sub_type ==
738 			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
739 		if (!ulp_fc_info->num_counters)
740 			return ulp_fc_tf_flow_stat_get(ctxt, &params, count);
741 
742 		/* TODO:
743 		 * Think about optimizing with try_lock later
744 		 */
745 		pthread_mutex_lock(&ulp_fc_info->fc_lock);
746 		sw_cntr_idx = hw_cntr_id -
747 			ulp_fc_info->shadow_hw_tbl[dir].start_idx;
748 		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
749 		if (sw_acc_tbl_entry->pkt_count) {
750 			count->hits_set = 1;
751 			count->bytes_set = 1;
752 			count->hits = sw_acc_tbl_entry->pkt_count;
753 			count->bytes = sw_acc_tbl_entry->byte_count;
754 		}
755 		if (count->reset) {
756 			sw_acc_tbl_entry->pkt_count = 0;
757 			sw_acc_tbl_entry->byte_count = 0;
758 		}
759 		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
760 	} else if (found_parent_flow &&
761 		   params.resource_sub_type ==
762 			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
763 		/* Get stats from the parent child table */
764 		ulp_flow_db_parent_flow_count_get(ctxt, pc_idx,
765 						  &count->hits, &count->bytes,
766 						  count->reset);
767 		if (count->hits)
768 			count->hits_set = 1;
769 		if (count->bytes)
770 			count->bytes_set = 1;
771 	} else {
772 		/* TBD: Handle External counters */
773 		rc = -EINVAL;
774 	}
775 
776 	return rc;
777 }
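
/*
 * Application-side view (illustrative only): this function ends up
 * serving rte_flow_query() on a flow created with a COUNT action,
 * roughly:
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action act = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *   struct rte_flow_error err;
 *
 *   if (!rte_flow_query(port_id, flow, &act, &qc, &err) && qc.hits_set)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 */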
778 
779 /*
780  * Set the parent flow if it is a SW accumulation counter entry.
781  *
782  * ctxt [in] The ulp context for the flow counter manager
783  *
784  * dir [in] The direction of the flow
785  *
786  * hw_cntr_id [in] The HW flow counter ID
787  *
788  * pc_idx [in] parent child db index
789  *
790  */
791 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
792 					enum tf_dir dir,
793 					uint32_t hw_cntr_id,
794 					uint32_t pc_idx)
795 {
796 	struct bnxt_ulp_fc_info *ulp_fc_info;
797 	uint32_t sw_cntr_idx;
798 	int32_t rc = 0;
799 
800 	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
801 	if (!ulp_fc_info)
802 		return -EIO;
803 
804 	pthread_mutex_lock(&ulp_fc_info->fc_lock);
805 	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
806 	if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
807 		pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
808 		ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
809 	} else {
810 		BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
811 			    hw_cntr_id, pc_idx);
812 		rc = -ENOENT;
813 	}
814 	pthread_mutex_unlock(&ulp_fc_info->fc_lock);
815 
816 	return rc;
817 }
818