/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "bnxt_ulp_tf.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"

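/*
 * Flow Counter (FC) manager overview -- a rough sketch of how the entry
 * points in this file are expected to be used; the exact call sites live
 * elsewhere in the TruFlow/ULP code:
 *
 *	ulp_fc_mgr_init(ctxt);                           on ULP init
 *	ulp_fc_mgr_start_idx_set(ctxt, dir, start_idx);  first counter per dir
 *	ulp_fc_mgr_cntr_set(ctxt, dir, hw_cntr_id, st);  per counted flow
 *	if (!ulp_fc_mgr_thread_isstarted(ctxt))
 *		ulp_fc_mgr_thread_start(ctxt);           arm the poll alarm
 *	ulp_fc_mgr_query_count_get(ctxt, flow_id, &c);   rte_flow_query path
 *	ulp_fc_mgr_cntr_reset(ctxt, dir, hw_cntr_id);    on flow destroy
 *	ulp_fc_mgr_deinit(ctxt);                         on ULP deinit
 */
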
static const struct bnxt_ulp_fc_core_ops *
bnxt_ulp_fc_ops_get(struct bnxt_ulp_context *ctxt)
{
	int32_t rc;
	enum bnxt_ulp_device_id  dev_id;
	const struct bnxt_ulp_fc_core_ops *func_ops;

	rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
	if (rc)
		return NULL;

	switch (dev_id) {
	case BNXT_ULP_DEVICE_ID_THOR2:
		func_ops = &ulp_fc_tfc_core_ops;
		break;
	case BNXT_ULP_DEVICE_ID_THOR:
	case BNXT_ULP_DEVICE_ID_STINGRAY:
	case BNXT_ULP_DEVICE_ID_WH_PLUS:
		func_ops = &ulp_fc_tf_core_ops;
		break;
	default:
		func_ops = NULL;
		break;
	}
	return func_ops;
}

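/*
 * Allocate a page-aligned shadow buffer for the HW flow counter table of
 * one direction. The buffer is locked in memory and its physical address
 * recorded, intended as a DMA-able staging area for bulk counter reads by
 * the per-device counter ops.
 */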
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
	/* Allocate memory */
	if (!parms)
		return -EINVAL;

	parms->mem_va = rte_zmalloc("ulp_fc_info",
				    RTE_CACHE_LINE_ROUNDUP(size),
				    4096);
	if (!parms->mem_va) {
		BNXT_DRV_DBG(ERR, "Failed to allocate mem_va\n");
		return -ENOMEM;
	}

	rte_mem_lock_page(parms->mem_va);

	parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
	if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
		BNXT_DRV_DBG(ERR, "Failed to get physical address of mem_va\n");
		return -ENOMEM;
	}

	return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
	rte_free(parms->mem_va);
}

/*
 * Allocate and initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	const struct bnxt_ulp_fc_core_ops *fc_ops;
	uint32_t flags = 0;
	int i, rc;

	if (!ctxt) {
		BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	/* update the features list */
	if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_SW_AGG)
		flags = ULP_FLAG_FC_SW_AGG_EN;
	if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_PARENT_AGG)
		flags |= ULP_FLAG_FC_PARENT_AGG_EN;

	fc_ops = bnxt_ulp_fc_ops_get(ctxt);
	if (fc_ops == NULL) {
		BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
		return -EINVAL;
	}

	ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
	if (!ulp_fc_info)
		goto error;

	ulp_fc_info->fc_ops = fc_ops;
	ulp_fc_info->flags = flags;

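	/*
	 * The fc_lock serializes the SW accumulator and shadow tables between
	 * the periodic alarm callback and the flow create/destroy/query paths.
	 */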
	pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);

	/* Add the FC info tbl to the ulp context. */
	bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

	ulp_fc_info->num_counters = dparms->flow_count_db_entries;
	if (!ulp_fc_info->num_counters) {
		/* No need for software counters, call fw directly */
		BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
		return 0;
	}

	/* no need to allocate sw aggregation memory if agg is disabled */
	if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN))
		return 0;

	sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
				dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
							 sw_acc_cntr_tbl_sz, 0);
		if (!ulp_fc_info->sw_acc_tbl[i])
			goto error;
	}

	hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
						 hw_fc_mem_info_sz);
		if (rc)
			goto error;
	}

	return 0;

error:
	ulp_fc_mgr_deinit(ctxt);
	BNXT_DRV_DBG(DEBUG, "Failed to allocate memory for fc mgr\n");

	return -ENOMEM;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct hw_fc_mem_info *shd_info;
	int i;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EINVAL;

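	/* Stop the alarm callback before tearing down the lock and tables */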
	if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)
		ulp_fc_mgr_thread_cancel(ctxt);

	pthread_mutex_destroy(&ulp_fc_info->fc_lock);

	if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN) {
		for (i = 0; i < TF_DIR_MAX; i++)
			rte_free(ulp_fc_info->sw_acc_tbl[i]);

		for (i = 0; i < TF_DIR_MAX; i++) {
			shd_info = &ulp_fc_info->shadow_hw_tbl[i];
			ulp_fc_mgr_shadow_mem_free(shd_info);
		}
	}

	rte_free(ulp_fc_info);

	/* Safe to ignore on deinit */
	(void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

	return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info)
		return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);

	return false;
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
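		/*
		 * cfg_data (not the context pointer itself) is passed to the
		 * callback, which re-acquires and validates the ulp context
		 * on every invocation (see ulp_fc_mgr_alarm_cb()).
		 */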
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
		ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
	}

	return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	const struct bnxt_ulp_fc_core_ops *fc_ops;
	struct bnxt_ulp_device_params *dparms;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_context *ctxt;
	uint32_t dev_id;
	int rc = 0;

	ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
	if (ctxt == NULL) {
		BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb, arg);
		return;
	}

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info) {
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	fc_ops = ulp_fc_info->fc_ops;

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
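	/*
	 * If a flow operation currently holds the lock, skip this poll
	 * cycle; the alarm is re-armed at the 'out' label below.
	 */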
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		bnxt_ulp_cntxt_entry_release();
		return;
	}
	/*
	 * Commented for now till GET_BULK is resolved, just get the first flow
	 * stat for now
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
					     dparms->flow_count_db_entries);
		if (rc)
			break;
	}
	*/

	/* reset the parent accumulation counters before accumulation if any */
	ulp_flow_db_parent_flow_count_reset(ctxt);

	rc = fc_ops->ulp_flow_stats_accum_update(ctxt, ulp_fc_info, dparms);

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If the command fails once, there is no need to
	 * invoke it again every second
	 */

	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		bnxt_ulp_cntxt_entry_release();
		return;
	}
out:
	bnxt_ulp_cntxt_entry_release();
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb, arg);
}

/*
 * Check whether the starting index that indicates the first HW flow
 * counter ID has been set for this direction
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, uint8_t dir)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info)
		return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;

	return false;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, uint8_t dir,
				 uint32_t start_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
		ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
		ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
	}

	return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of the number of active
 * counter-enabled flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * session_type [in] The session type of the flow
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			    uint32_t hw_cntr_id,
			    enum bnxt_ulp_session_type session_type)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->num_counters)
		return 0;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = session_type;
	ulp_fc_info->num_entries++;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}
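
/*
 * Index-mapping example (illustrative): if shadow_hw_tbl[dir].start_idx is
 * 0x100 and a flow is assigned hw_cntr_id 0x105, its software accumulator
 * slot is sw_acc_tbl[dir][5]. ulp_fc_mgr_cntr_reset() below clears that
 * slot when the flow's counter is released.
 */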

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, uint8_t dir,
			      uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->num_counters)
		return 0;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
	ulp_fc_info->num_entries--;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The flow ID in the flow database
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	int rc = 0;
	uint32_t nxt_resource_index = 0;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	const struct bnxt_ulp_fc_core_ops *fc_ops;
	struct ulp_flow_db_res_params params;
	uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
	struct sw_acc_counter *sw_acc_tbl_entry;
	bool found_cntr_resource = false;
	bool found_parent_flow = false;
	uint32_t pc_idx = 0;
	uint32_t session_type = 0;
	uint8_t dir;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -ENODEV;

	fc_ops = ulp_fc_info->fc_ops;

	if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
		return -EIO;

	do {
		rc = ulp_flow_db_resource_get(ctxt,
					      BNXT_ULP_FDB_TYPE_REGULAR,
					      flow_id,
					      &nxt_resource_index,
					      &params);
		if (params.resource_func ==
		     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
		     (params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
		      params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
			found_cntr_resource = true;
			break;
		}
		if (params.resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) {
			found_cntr_resource = true;
			break;
		}
		if (params.resource_func ==
		    BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
			found_parent_flow = true;
			pc_idx = params.resource_hndl;
		}

	} while (!rc && nxt_resource_index);

	if (rc || !found_cntr_resource) {
		bnxt_ulp_cntxt_release_fdb_lock(ctxt);
		return rc;
	}

	dir = params.direction;
	session_type = ulp_flow_db_shared_session_get(&params);
	if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)) {
		rc = fc_ops->ulp_flow_stat_get(ctxt, dir, session_type,
					       params.resource_hndl, count);
		bnxt_ulp_cntxt_release_fdb_lock(ctxt);
		return rc;
	}

	if (!found_parent_flow &&
	    params.resource_sub_type ==
			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
		hw_cntr_id = params.resource_hndl;
		if (!ulp_fc_info->num_counters) {
			rc = fc_ops->ulp_flow_stat_get(ctxt, dir, session_type,
						       hw_cntr_id, count);
			bnxt_ulp_cntxt_release_fdb_lock(ctxt);
			return rc;
		}

		/* TODO:
		 * Think about optimizing with try_lock later
		 */
		pthread_mutex_lock(&ulp_fc_info->fc_lock);
		sw_cntr_idx = hw_cntr_id -
			ulp_fc_info->shadow_hw_tbl[dir].start_idx;
		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
		if (sw_acc_tbl_entry->pkt_count) {
			count->hits_set = 1;
			count->bytes_set = 1;
			count->hits = sw_acc_tbl_entry->pkt_count;
			count->bytes = sw_acc_tbl_entry->byte_count;
		}
		if (count->reset) {
			sw_acc_tbl_entry->pkt_count = 0;
			sw_acc_tbl_entry->byte_count = 0;
		}
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
	} else if (found_parent_flow &&
		   params.resource_sub_type ==
			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
		/* Get stats from the parent child table */
		if (ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
						      pc_idx,
						      &count->hits,
						      &count->bytes,
						      count->reset)) {
			bnxt_ulp_cntxt_release_fdb_lock(ctxt);
			return -EIO;
		}
		if (count->hits)
			count->hits_set = 1;
		if (count->bytes)
			count->bytes_set = 1;
	} else {
		rc = -EINVAL;
	}
	bnxt_ulp_cntxt_release_fdb_lock(ctxt);
	return rc;
}
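
/*
 * Usage sketch (illustrative, not part of this driver): an application
 * reaches the accumulated counters above through the generic rte_flow
 * query API on a flow created with a COUNT action. The port_id and flow
 * handle below are assumptions for the example.
 *
 *	struct rte_flow_query_count qc = { .reset = 0 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &action, &qc, &err) == 0 &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */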

/*
 * Set the parent flow on the corresponding SW accumulation counter entry,
 * if that entry is valid.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * pc_idx [in] parent child db index
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
					uint8_t dir,
					uint32_t hw_cntr_id,
					uint32_t pc_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;
	int32_t rc = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
		pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
		ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
	} else {
		BNXT_DRV_DBG(ERR, "Failed to set parent flow id %x:%x\n",
			     hw_cntr_id, pc_idx);
		rc = -ENOENT;
	}
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return rc;
}
669