/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
	/* Allocate memory */
	if (!parms)
		return -EINVAL;

	parms->mem_va = rte_zmalloc("ulp_fc_info",
				    RTE_CACHE_LINE_ROUNDUP(size),
				    4096);
	if (!parms->mem_va) {
		BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
		return -ENOMEM;
	}

	rte_mem_lock_page(parms->mem_va);

	parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
	if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
		BNXT_TF_DBG(ERR, "Failed to get physical address for mem_va\n");
		return -ENOMEM;
	}

	return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
	rte_free(parms->mem_va);
}

/*
 * Allocate and initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	int i, rc;

	if (!ctxt) {
		BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
	if (!ulp_fc_info)
		goto error;

	rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
		rte_free(ulp_fc_info);
		return -ENOMEM;
	}

	/* Add the FC info tbl to the ulp context. */
	bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

	ulp_fc_info->num_counters = dparms->flow_count_db_entries;
	if (!ulp_fc_info->num_counters) {
		/* No need for software counters, call fw directly */
		BNXT_TF_DBG(DEBUG, "Sw flow counter support not enabled\n");
		return 0;
	}

	sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
				dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
							 sw_acc_cntr_tbl_sz, 0);
		if (!ulp_fc_info->sw_acc_tbl[i])
			goto error;
	}

	hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
						 hw_fc_mem_info_sz);
		if (rc)
			goto error;
	}

	return 0;

error:
	ulp_fc_mgr_deinit(ctxt);
	BNXT_TF_DBG(DEBUG,
		    "Failed to allocate memory for fc mgr\n");

	return -ENOMEM;
}
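
/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * pair the Flow Counter Manager APIs above. The ulp_ctx setup is assumed
 * to happen elsewhere in the ULP init path; only the ulp_fc_mgr_* calls
 * below are APIs defined in this file and ulp_fc_mgr.h.
 *
 *	static int32_t ulp_fc_example_setup(struct bnxt_ulp_context *ulp_ctx)
 *	{
 *		int32_t rc;
 *
 *		rc = ulp_fc_mgr_init(ulp_ctx);
 *		if (rc)
 *			return rc;
 *		// The accumulation alarm can be armed later as well, e.g.
 *		// when the first counter-enabled flow is created.
 *		if (!ulp_fc_mgr_thread_isstarted(ulp_ctx))
 *			rc = ulp_fc_mgr_thread_start(ulp_ctx);
 *		return rc;
 *	}
 */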

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct hw_fc_mem_info *shd_info;
	int i;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EINVAL;

	ulp_fc_mgr_thread_cancel(ctxt);

	pthread_mutex_destroy(&ulp_fc_info->fc_lock);

	if (ulp_fc_info->num_counters) {
		for (i = 0; i < TF_DIR_MAX; i++)
			rte_free(ulp_fc_info->sw_acc_tbl[i]);

		for (i = 0; i < TF_DIR_MAX; i++) {
			shd_info = &ulp_fc_info->shadow_hw_tbl[i];
			ulp_fc_mgr_shadow_mem_free(shd_info);
		}
	}

	rte_free(ulp_fc_info);

	/* Safe to ignore on deinit */
	(void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

	return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info)
		return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);

	return false;
}

/*
 * Set up the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
		ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
	}

	return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
}

/*
 * DMA-in the raw counter data from the HW and accumulate in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters
 *
 */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
			struct bnxt_ulp_fc_info *fc_info,
			enum tf_dir dir,
			struct bnxt_ulp_device_params *dparms)
/* Marked unused for now to avoid compilation errors until the API is resolved */
{
	int rc = 0;
	struct tf_tbl_get_bulk_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
	uint64_t *stats = NULL;
	uint16_t i = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
	parms.num_entries = dparms->flow_count_db_entries / 2; /* per direction */
	/*
	 * TODO:
	 * The size of an entry needs to be obtained from the template
	 */
	parms.entry_sz_in_bytes = sizeof(uint64_t);
	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
	parms.physical_mem_addr = (uint64_t)
		((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));

	if (!stats) {
		PMD_DRV_LOG(ERR,
			    "BULK: Memory not initialized id:0x%x dir:%d\n",
			    parms.starting_idx, dir);
		return -EINVAL;
	}

	rc = tf_tbl_bulk_get(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "BULK: Get failed for id:0x%x rc:%d\n",
			    parms.starting_idx, rc);
		return rc;
	}

	for (i = 0; i < parms.num_entries; i++) {
		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
		if (!sw_acc_tbl_entry->valid)
			continue;
		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
							      dparms);
		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
								dparms);
	}

	return rc;
}
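
/*
 * Worked example of the sizing above (illustrative numbers only): with
 * dparms->flow_count_db_entries == 16384, ulp_fc_mgr_init() allocates
 * 16384 * sizeof(uint64_t) == 128 KB of shadow memory per direction,
 * while the bulk get reads flow_count_db_entries / 2 == 8192 64-bit
 * counter words (64 KB) per direction, starting at the HW counter ID
 * recorded in shadow_hw_tbl[dir].start_idx.
 */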

static int32_t
ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
			struct ulp_flow_db_res_params *res,
			struct rte_flow_query_count *qcount)
{
	struct tf *tfp;
	struct bnxt_ulp_device_params *dparms;
	struct tf_get_tbl_entry_parms parms = { 0 };
	struct tf_set_tbl_entry_parms sparms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;
	uint64_t stats = 0;
	uint32_t dev_id = 0;
	int32_t rc = 0;

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		bnxt_ulp_cntxt_entry_release();
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		bnxt_ulp_cntxt_entry_release();
		return -EINVAL;
	}
	parms.dir = res->direction;
	parms.type = stype;
	parms.idx = res->resource_hndl;
	parms.data_sz_in_bytes = sizeof(uint64_t);
	parms.data = (uint8_t *)&stats;
	rc = tf_get_tbl_entry(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get failed for id:0x%x rc:%d\n",
			    parms.idx, rc);
		return rc;
	}
	qcount->hits = FLOW_CNTR_PKTS(stats, dparms);
	if (qcount->hits)
		qcount->hits_set = 1;
	qcount->bytes = FLOW_CNTR_BYTES(stats, dparms);
	if (qcount->bytes)
		qcount->bytes_set = 1;

	if (qcount->reset) {
		stats = 0;
		sparms.dir = res->direction;
		sparms.type = stype;
		sparms.idx = res->resource_hndl;
		sparms.data = (uint8_t *)&stats;
		sparms.data_sz_in_bytes = sizeof(uint64_t);
		rc = tf_set_tbl_entry(tfp, &sparms);
		if (rc) {
			PMD_DRV_LOG(ERR, "Set failed for id:0x%x rc:%d\n",
				    sparms.idx, rc);
			return rc;
		}
	}
	return rc;
}

static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
				    struct tf *tfp,
				    struct bnxt_ulp_fc_info *fc_info,
				    enum tf_dir dir,
				    uint32_t hw_cntr_id,
				    struct bnxt_ulp_device_params *dparms)
{
	int rc = 0;
	struct tf_get_tbl_entry_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
	uint64_t stats = 0;
	uint32_t sw_cntr_indx = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.idx = hw_cntr_id;
	/*
	 * TODO:
	 * The size of an entry needs to be obtained from the template
	 */
	parms.data_sz_in_bytes = sizeof(uint64_t);
	parms.data = (uint8_t *)&stats;
	rc = tf_get_tbl_entry(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get failed for id:0x%x rc:%d\n",
			    parms.idx, rc);
		return rc;
	}

	/* PKT/BYTE COUNT SHIFT/MASK are device specific */
	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];

	/* Some DPDK applications accumulate the flow counters themselves while
	 * others do not. When the application accumulates the counters, the
	 * PMD need not do the accumulation itself, and vice versa, so that the
	 * correct flow counters are reported.
	 */
	sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
	sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);

	/* Update the parent counters if it is a child flow */
	if (sw_acc_tbl_entry->pc_flow_idx & FLOW_CNTR_PC_FLOW_VALID) {
		uint32_t pc_idx;

		/* Update the parent counters */
		t_sw = sw_acc_tbl_entry;
		pc_idx = t_sw->pc_flow_idx & ~FLOW_CNTR_PC_FLOW_VALID;
		if (ulp_flow_db_parent_flow_count_update(ctxt, pc_idx,
							 t_sw->pkt_count,
							 t_sw->byte_count)) {
			PMD_DRV_LOG(ERR, "Error updating parent counters\n");
		}
	}

	return rc;
}
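
/*
 * Illustrative example of the parent-flow encoding used above: a child
 * entry whose parent sits at parent-child DB index 7 stores
 * pc_flow_idx = 7 | FLOW_CNTR_PC_FLOW_VALID (see
 * ulp_fc_mgr_cntr_parent_flow_set() below), and the accumulation path
 * recovers the index with pc_flow_idx & ~FLOW_CNTR_PC_FLOW_VALID before
 * calling ulp_flow_db_parent_flow_count_update().
 */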

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

	ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
	if (ctxt == NULL) {
		BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb, arg);
		return;
	}

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info) {
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		bnxt_ulp_cntxt_entry_release();
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		bnxt_ulp_cntxt_entry_release();
		return;
	}
	/*
	 * Commented out until GET_BULK is resolved; fetch the stats one flow
	 * at a time for now.
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i, dparms);
		if (rc)
			break;
	}
	*/

	/* Reset the parent accumulation counters before accumulation, if any */
	ulp_flow_db_parent_flow_count_reset(ctxt);

	num_entries = dparms->flow_count_db_entries / 2;
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
						      hw_cntr_id, dparms);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If the command fails once, there is no need to
	 * invoke it again every second.
	 */
	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		bnxt_ulp_cntxt_entry_release();
		return;
	}
out:
	bnxt_ulp_cntxt_entry_release();
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb, arg);
}

/*
 * Check whether the starting index, i.e. the first HW flow
 * counter ID, has been set for the given direction
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (ulp_fc_info)
		return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;

	return false;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
				 uint32_t start_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
		ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
		ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
	}

	return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of num of active counter enabled
 * flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			    uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->num_counters)
		return 0;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
	ulp_fc_info->num_entries++;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}
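
/*
 * Worked example (illustrative IDs only): if the first HW counter ID
 * handed out for a direction is 0x2000, ulp_fc_mgr_start_idx_set() records
 * start_idx = 0x2000, and a flow whose HW counter ID is 0x2005 is tracked
 * in sw_acc_tbl[dir][0x2005 - 0x2000], i.e. slot 5.
 */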

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			      uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	if (!ulp_fc_info->num_counters)
		return 0;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
	ulp_fc_info->num_entries--;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * to rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The ULP flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	int rc = 0;
	uint32_t nxt_resource_index = 0;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct ulp_flow_db_res_params params;
	enum tf_dir dir;
	uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
	struct sw_acc_counter *sw_acc_tbl_entry;
	bool found_cntr_resource = false;
	bool found_parent_flow = false;
	uint32_t pc_idx = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -ENODEV;

	if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
		return -EIO;

	do {
		rc = ulp_flow_db_resource_get(ctxt,
					      BNXT_ULP_FDB_TYPE_REGULAR,
					      flow_id,
					      &nxt_resource_index,
					      &params);
		if (params.resource_func ==
		     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
		     (params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
		      params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
			found_cntr_resource = true;
			break;
		}
		if (params.resource_func ==
		    BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
			found_parent_flow = true;
			pc_idx = params.resource_hndl;
		}

	} while (!rc && nxt_resource_index);

	bnxt_ulp_cntxt_release_fdb_lock(ctxt);

	if (rc || !found_cntr_resource)
		return rc;

	dir = params.direction;
	hw_cntr_id = params.resource_hndl;
	if (!found_parent_flow &&
	    params.resource_sub_type ==
			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
		if (!ulp_fc_info->num_counters)
			return ulp_fc_tf_flow_stat_get(ctxt, &params, count);

		/* TODO:
		 * Think about optimizing with try_lock later
		 */
		pthread_mutex_lock(&ulp_fc_info->fc_lock);
		sw_cntr_idx = hw_cntr_id -
			ulp_fc_info->shadow_hw_tbl[dir].start_idx;
		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
		if (sw_acc_tbl_entry->pkt_count) {
			count->hits_set = 1;
			count->bytes_set = 1;
			count->hits = sw_acc_tbl_entry->pkt_count;
			count->bytes = sw_acc_tbl_entry->byte_count;
		}
		if (count->reset) {
			sw_acc_tbl_entry->pkt_count = 0;
			sw_acc_tbl_entry->byte_count = 0;
		}
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
	} else if (found_parent_flow &&
		   params.resource_sub_type ==
			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
		/* Get stats from the parent child table */
		ulp_flow_db_parent_flow_count_get(ctxt, pc_idx,
						  &count->hits, &count->bytes,
						  count->reset);
		if (count->hits)
			count->hits_set = 1;
		if (count->bytes)
			count->bytes_set = 1;
	} else {
		/* TBD: Handle External counters */
		rc = -EINVAL;
	}

	return rc;
}
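
/*
 * Application-side sketch (illustrative only, not part of the driver):
 * the locally accumulated values above are what an rte_flow user sees
 * when querying a COUNT action; port_id, flow and error handling are
 * assumed to come from the application's own flow setup.
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (!rte_flow_query(port_id, flow, &action, &qc, &error) &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */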

/*
 * Set the parent flow if it is a SW accumulation counter entry.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * pc_idx [in] The parent-child DB index
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
					enum tf_dir dir,
					uint32_t hw_cntr_id,
					uint32_t pc_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;
	int32_t rc = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
		pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
		ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
	} else {
		BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
			    hw_cntr_id, pc_idx);
		rc = -ENOENT;
	}
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return rc;
}