/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

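/*
 * Allocate a page-aligned, zeroed buffer that the HW can DMA flow counter
 * data into, and record both its virtual and physical addresses in the
 * shadow table descriptor.
 */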
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
	/* Allocate memory */
	if (parms == NULL)
		return -EINVAL;

	parms->mem_va = rte_zmalloc("ulp_fc_info",
				    RTE_CACHE_LINE_ROUNDUP(size),
				    4096);
	if (parms->mem_va == NULL) {
		BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
		return -ENOMEM;
	}

	rte_mem_lock_page(parms->mem_va);

	parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
	if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
		BNXT_TF_DBG(ERR, "Failed to get physical address\n");
		return -ENOMEM;
	}

	return 0;
}

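/*
 * Free the DMA-able shadow counter buffer allocated by
 * ulp_fc_mgr_shadow_mem_alloc().
 */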
static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
	rte_free(parms->mem_va);
}

/*
 * Allocate and initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_device_params *dparms;
	uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	int i, rc;

	if (!ctxt) {
		BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
	if (!ulp_fc_info)
		goto error;

	rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
		goto error;
	}

	/* Add the FC info tbl to the ulp context. */
	bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

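	/*
	 * Allocate a software accumulator table per direction; each entry
	 * shadows one HW flow counter and holds the accumulated pkt/byte
	 * counts.
	 */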
	sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
				dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
							 sw_acc_cntr_tbl_sz, 0);
		if (!ulp_fc_info->sw_acc_tbl[i])
			goto error;
	}

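	/*
	 * Allocate DMA-able shadow memory per direction, one 64-bit raw
	 * counter per flow counter DB entry, intended as the destination
	 * for bulk counter reads from the HW.
	 */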
	hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

	for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
						 hw_fc_mem_info_sz);
		if (rc)
			goto error;
	}

	return 0;

error:
	ulp_fc_mgr_deinit(ctxt);
	BNXT_TF_DBG(DEBUG, "Failed to allocate memory for fc mgr\n");

	return -ENOMEM;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	int i;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EINVAL;

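	/* Stop the periodic counter poll before tearing down the tables */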
	ulp_fc_mgr_thread_cancel(ctxt);

	pthread_mutex_destroy(&ulp_fc_info->fc_lock);

	for (i = 0; i < TF_DIR_MAX; i++)
		rte_free(ulp_fc_info->sw_acc_tbl[i]);

	for (i = 0; i < TF_DIR_MAX; i++)
		ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);

	rte_free(ulp_fc_info);

	/* Safe to ignore on deinit */
	(void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

	return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return false;

	return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
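		/*
		 * Arm a one-shot EAL alarm; ulp_fc_mgr_alarm_cb() re-arms
		 * itself every ULP_FC_TIMER seconds until the poll is
		 * cancelled or an error occurs.
		 */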
		rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
				  ulp_fc_mgr_alarm_cb,
				  (void *)ctxt);
		ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
	}

	return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
}

/*
 * DMA-in the raw counter data from the HW and accumulate in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters for the flow counters
 *
 */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
			struct bnxt_ulp_fc_info *fc_info,
			enum tf_dir dir,
			struct bnxt_ulp_device_params *dparms)
/* Marked unused for now to avoid compile errors until the API is resolved */
{
	int rc = 0;
	struct tf_tbl_get_bulk_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
	uint64_t *stats = NULL;
	uint16_t i = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
	parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
	/*
	 * TODO:
	 * Size of an entry needs to be obtained from the template
	 */
	parms.entry_sz_in_bytes = sizeof(uint64_t);
	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
	parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;

	if (stats == NULL) {
		PMD_DRV_LOG(ERR,
			    "BULK: Memory not initialized id:0x%x dir:%d\n",
			    parms.starting_idx, dir);
		return -EINVAL;
	}

	rc = tf_tbl_bulk_get(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "BULK: Get failed for id:0x%x rc:%d\n",
			    parms.starting_idx, rc);
		return rc;
	}

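	/*
	 * The bulk get DMA'd one 64-bit raw counter per entry into the
	 * shadow buffer; fold each valid entry into its SW accumulator.
	 */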
	for (i = 0; i < parms.num_entries; i++) {
		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
		if (!sw_acc_tbl_entry->valid)
			continue;
		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i], dparms);
		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
								dparms);
	}

	return rc;
}

static int ulp_get_single_flow_stat(struct tf *tfp,
				    struct bnxt_ulp_fc_info *fc_info,
				    enum tf_dir dir,
				    uint32_t hw_cntr_id,
				    struct bnxt_ulp_device_params *dparms)
{
	int rc = 0;
	struct tf_get_tbl_entry_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
	uint64_t stats = 0;
	uint32_t sw_cntr_indx = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.idx = hw_cntr_id;
	/*
	 * TODO:
	 * Size of an entry needs to be obtained from the template
	 */
	parms.data_sz_in_bytes = sizeof(uint64_t);
	parms.data = (uint8_t *)&stats;
	rc = tf_get_tbl_entry(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get failed for id:0x%x rc:%d\n",
			    parms.idx, rc);
		return rc;
	}

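	/*
	 * HW counter IDs are contiguous per direction starting at start_idx,
	 * so the offset from start_idx is the SW accumulator slot.
	 */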
	/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
	sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
	sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);

	return rc;
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt = arg;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
		return;
	}

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
	/*
	 * Commented out until GET_BULK is resolved; fetch each flow's
	 * stats individually below for now.
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i, dparms);
		if (rc)
			break;
	}
	*/
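	/*
	 * The flow counter table is split evenly between the two directions,
	 * so walk half of the flow counter DB entries per direction.
	 */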
	num_entries = dparms->flow_count_db_entries / 2;
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			rc = ulp_get_single_flow_stat(tfp, ulp_fc_info, i,
						      hw_cntr_id, dparms);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If the command fails once, there is no need to
	 * invoke it again every second
	 */
	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
out:
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb,
			  (void *)ctxt);
}

/*
 * Check whether the starting index (the first HW flow counter ID) has
 * already been set for the given direction
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return false;

	/* Assuming start_idx of 0 is invalid */
	return (ulp_fc_info->shadow_hw_tbl[dir].start_idx != 0);
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The first HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
				 uint32_t start_idx)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

	if (!ulp_fc_info)
		return -EIO;

	/* Assuming that 0 is an invalid counter ID; latch the base only once */
	if (ulp_fc_info->shadow_hw_tbl[dir].start_idx == 0)
		ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;

	return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of the number of active
 * counter-enabled flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			    uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

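	/*
	 * Mark the SW accumulator slot for this HW counter as valid so the
	 * alarm handler starts polling it; the lock serializes against the
	 * poller walking the table.
	 */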
	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
	ulp_fc_info->num_entries++;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
			      uint32_t hw_cntr_id)
{
	struct bnxt_ulp_fc_info *ulp_fc_info;
	uint32_t sw_cntr_idx;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -EIO;

	pthread_mutex_lock(&ulp_fc_info->fc_lock);
	sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
	ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
	ulp_fc_info->num_entries--;
	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The ULP flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	int rc = 0;
	uint32_t nxt_resource_index = 0;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct ulp_flow_db_res_params params;
	enum tf_dir dir;
	uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
	struct sw_acc_counter *sw_acc_tbl_entry;
	bool found_cntr_resource = false;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return -ENODEV;

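	/*
	 * Walk the flow's resources in the flow DB until an internal or
	 * external counter resource is found or the walk fails.
	 */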
	do {
		rc = ulp_flow_db_resource_get(ctxt,
					      BNXT_ULP_REGULAR_FLOW_TABLE,
					      flow_id,
					      &nxt_resource_index,
					      &params);
		if (params.resource_func ==
		     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
		     (params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT ||
		      params.resource_sub_type ==
		      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_EXT_COUNT)) {
			found_cntr_resource = true;
			break;
		}
	} while (!rc);

	if (rc)
		return rc;

	if (found_cntr_resource) {
		dir = params.direction;
		hw_cntr_id = params.resource_hndl;
		sw_cntr_idx = hw_cntr_id -
				ulp_fc_info->shadow_hw_tbl[dir].start_idx;
		sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
		if (params.resource_sub_type ==
			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
			pthread_mutex_lock(&ulp_fc_info->fc_lock);
			if (sw_acc_tbl_entry->pkt_count) {
				count->hits_set = 1;
				count->bytes_set = 1;
				count->hits = sw_acc_tbl_entry->pkt_count;
				count->bytes = sw_acc_tbl_entry->byte_count;
			}
			if (count->reset) {
				sw_acc_tbl_entry->pkt_count = 0;
				sw_acc_tbl_entry->byte_count = 0;
			}
			pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		} else {
			/* TBD: Handle External counters */
			rc = -EINVAL;
		}
	}

	return rc;
}