xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c (revision 67ad40007cd6bb6ce9f0b3eefe2af611848d10dc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
9 #include <rte_log.h>
10 #include "bnxt.h"
11 #include "bnxt_ulp.h"
12 #include "bnxt_ulp_utils.h"
13 #include "bnxt_ulp_tfc.h"
14 #include "bnxt_tf_common.h"
15 #include "ulp_sc_mgr.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_template_db_enum.h"
18 #include "ulp_template_struct.h"
19 #include "tfc.h"
20 #include "tfc_debug.h"
21 #include "tfc_action_handle.h"
22 
23 #define ULP_TFC_CNTR_READ_BYTES 32
24 #define ULP_TFC_CNTR_ALIGN 32
25 #define ULP_TFC_ACT_WORD_SZ 32
26 
27 static const struct bnxt_ulp_sc_core_ops *
28 bnxt_ulp_sc_ops_get(struct bnxt_ulp_context *ctxt)
29 {
30 	int32_t rc;
31 	enum bnxt_ulp_device_id  dev_id;
32 	const struct bnxt_ulp_sc_core_ops *func_ops;
33 
34 	rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
35 	if (rc)
36 		return NULL;
37 
38 	switch (dev_id) {
39 	case BNXT_ULP_DEVICE_ID_THOR2:
40 		func_ops = &ulp_sc_tfc_core_ops;
41 		break;
42 	case BNXT_ULP_DEVICE_ID_THOR:
43 	case BNXT_ULP_DEVICE_ID_STINGRAY:
44 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
45 	default:
46 		func_ops = NULL;
47 		break;
48 	}
49 	return func_ops;
50 }
51 
52 int32_t ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt)
53 {
54 	const struct bnxt_ulp_sc_core_ops *sc_ops;
55 	struct bnxt_ulp_device_params *dparms;
56 	struct bnxt_ulp_sc_info *ulp_sc_info;
57 	uint32_t stats_cache_tbl_sz;
58 	uint32_t dev_id;
59 	int rc;
60 
61 	if (!ctxt) {
62 		BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
63 		return -EINVAL;
64 	}
65 
66 	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
67 		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
68 		return -EINVAL;
69 	}
70 
71 	dparms = bnxt_ulp_device_params_get(dev_id);
72 	if (!dparms) {
73 		BNXT_DRV_DBG(DEBUG, "Failed to device parms\n");
74 		return -EINVAL;
75 	}
76 
77 	sc_ops = bnxt_ulp_sc_ops_get(ctxt);
78 	if (sc_ops == NULL) {
79 		BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
80 		return -EINVAL;
81 	}
82 
83 	ulp_sc_info = rte_zmalloc("ulp_sc_info", sizeof(*ulp_sc_info), 0);
84 	if (!ulp_sc_info) {
85 		rc = -ENOMEM;
86 		goto error;
87 	}
88 
89 	ulp_sc_info->sc_ops = sc_ops;
90 	ulp_sc_info->flags = 0;
91 
92 	/* Add the SC info tbl to the ulp context. */
93 	bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, ulp_sc_info);
94 
95 	ulp_sc_info->num_counters = dparms->ext_flow_db_num_entries;
96 	if (!ulp_sc_info->num_counters) {
97 		/* No need for software counters, call fw directly */
98 		BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
99 		return 0;
100 	}
101 
102 	/*
103 	 * Size is determined by the number of flows + 10% to cover IDs
104 	 * used for resources.
105 	 */
106 	ulp_sc_info->cache_tbl_size = ulp_sc_info->num_counters +
107 		(ulp_sc_info->num_counters / 10);
108 	stats_cache_tbl_sz = sizeof(struct ulp_sc_tfc_stats_cache_entry) *
109 		ulp_sc_info->cache_tbl_size;
110 
111 	ulp_sc_info->stats_cache_tbl = rte_zmalloc("ulp_stats_cache_tbl",
112 						   stats_cache_tbl_sz, 0);
113 	if (!ulp_sc_info->stats_cache_tbl) {
114 		rc = -ENOMEM;
115 		goto error;
116 	}
117 
118 	ulp_sc_info->read_data = rte_zmalloc("ulp_stats_cache_read_data",
119 					     ULP_SC_BATCH_SIZE * ULP_SC_PAGE_SIZE,
120 					     ULP_SC_PAGE_SIZE);
121 	if (!ulp_sc_info->read_data) {
122 		rte_free(ulp_sc_info->stats_cache_tbl);
123 		rc = -ENOMEM;
124 		goto error;
125 	}
126 
127 	rc = ulp_sc_mgr_thread_start(ctxt);
128 	if (rc)
129 		BNXT_DRV_DBG(DEBUG, "Stats counter thread start failed\n");
130 
131  error:
132 	return rc;
133 }
134 
135 /*
136  * Release all resources in the Flow Counter Manager for this ulp context
137  *
138  * ctxt [in] The ulp context for the Flow Counter manager
139  *
140  */
141 int32_t
142 ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt)
143 {
144 	struct bnxt_ulp_sc_info *ulp_sc_info;
145 
146 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
147 
148 	if (!ulp_sc_info)
149 		return -EINVAL;
150 
151 	if (ulp_sc_info->stats_cache_tbl)
152 		rte_free(ulp_sc_info->stats_cache_tbl);
153 
154 	if (ulp_sc_info->read_data)
155 		rte_free(ulp_sc_info->read_data);
156 
157 	rte_free(ulp_sc_info);
158 
159 	/* Safe to ignore on deinit */
160 	(void)bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, NULL);
161 
162 	return 0;
163 }
164 
165 #define ULP_SC_PERIOD_US 256
166 #define ULP_SC_CTX_DELAY 10000
167 
168 static uint32_t ulp_stats_cache_main_loop(void *arg)
169 {
170 	struct ulp_sc_tfc_stats_cache_entry *count;
171 	const struct bnxt_ulp_sc_core_ops *sc_ops = NULL;
172 	struct ulp_sc_tfc_stats_cache_entry *sce;
173 	struct ulp_sc_tfc_stats_cache_entry *sce_end;
174 	struct tfc_mpc_batch_info_t batch_info;
175 	struct bnxt_ulp_sc_info *ulp_sc_info;
176 	struct bnxt_ulp_context *ctxt = NULL;
177 	uint16_t words = (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ;
178 	uint32_t batch_size;
179 	struct tfc *tfcp = NULL;
180 	uint32_t batch, stat_cnt;
181 	uint8_t *data;
182 	int rc;
183 
184 	while (true) {
185 		ctxt = NULL;
186 		while (!ctxt) {
187 			ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
188 
189 			if (ctxt)
190 				break;
191 			/* If there are no more contexts just exit */
192 			if (bnxt_ulp_cntxt_list_count() == 0)
193 				goto terminate;
194 			rte_delay_us_block(ULP_SC_CTX_DELAY);
195 		}
196 
197 		/* get the stats counter info block from ulp context */
198 		ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
199 		if (unlikely(!ulp_sc_info)) {
200 			bnxt_ulp_cntxt_entry_release();
201 			goto terminate;
202 		}
203 
204 		sce = ulp_sc_info->stats_cache_tbl;
205 		sce_end = sce + ulp_sc_info->cache_tbl_size;
206 
207 		if (unlikely(!sc_ops))
208 			sc_ops = ulp_sc_info->sc_ops;
209 
210 		stat_cnt = 0;
211 		while (stat_cnt < ulp_sc_info->num_entries && (sce < sce_end)) {
212 			data = ulp_sc_info->read_data;
213 
214 			if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
215 				break;
216 
217 			rc = tfc_mpc_batch_start(&batch_info);
218 			if (unlikely(rc)) {
219 				PMD_DRV_LOG_LINE(ERR,
220 						 "MPC batch start failed rc:%d", rc);
221 				bnxt_ulp_cntxt_release_fdb_lock(ctxt);
222 				break;
223 			}
224 
225 			for (batch = 0; (batch < ULP_SC_BATCH_SIZE) &&
226 			     (sce < sce_end);) {
227 				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
228 					sce++;
229 					continue;
230 				}
231 				stat_cnt++;
232 				tfcp = bnxt_ulp_cntxt_tfcp_get(sce->ctxt);
233 				if (unlikely(!tfcp)) {
234 					bnxt_ulp_cntxt_release_fdb_lock(ctxt);
235 					bnxt_ulp_cntxt_entry_release();
236 					goto terminate;
237 				}
238 
239 				/* Store the entry pointer to use for counter update */
240 				batch_info.em_hdl[batch_info.count] =
241 					(uint64_t)sce;
242 
243 				rc = sc_ops->ulp_stats_cache_update(tfcp,
244 								    sce->dir,
245 								    data,
246 								    sce->handle,
247 								    &words,
248 								    &batch_info,
249 								    sce->reset);
250 				if (unlikely(rc)) {
251 					/* Abort this batch */
252 					PMD_DRV_LOG_LINE(ERR,
253 							 "read_counter() failed:%d", rc);
254 					break;
255 				}
256 
257 				if (sce->reset)
258 					sce->reset = false;
259 
260 				/* Next */
261 				batch++;
262 				sce++;
263 				data += ULP_SC_PAGE_SIZE;
264 			}
265 
266 			batch_size = batch_info.count;
267 			rc = tfc_mpc_batch_end(tfcp, &batch_info);
268 
269 			bnxt_ulp_cntxt_release_fdb_lock(ctxt);
270 
271 			if (unlikely(rc)) {
272 				PMD_DRV_LOG_LINE(ERR, "MPC batch end failed rc:%d", rc);
273 				batch_info.enabled = false;
274 				break;
275 			}
276 
277 			/* Process counts */
278 			data = ulp_sc_info->read_data;
279 
280 			for (batch = 0; batch < batch_size; batch++) {
281 				/* Check for error in completion */
282 				if (batch_info.result[batch]) {
283 					PMD_DRV_LOG_LINE(ERR, "batch:%d result:%d",
284 							 batch, batch_info.result[batch]);
285 				} else {
286 					count = (struct ulp_sc_tfc_stats_cache_entry *)
287 						((uintptr_t)batch_info.em_hdl[batch]);
288 					memcpy(&count->packet_count, data, ULP_TFC_ACT_WORD_SZ);
289 				}
290 
291 				data += ULP_SC_PAGE_SIZE;
292 			}
293 		}
294 		bnxt_ulp_cntxt_entry_release();
295 		/* Sleep to give any other threads opportunity to access ULP */
296 		rte_delay_us_sleep(ULP_SC_PERIOD_US);
297 	}
298 
299  terminate:
300 	PMD_DRV_LOG_LINE(DEBUG, "Terminating the stats cachce thread");
301 	return 0;
302 }
303 
304 /*
305  * Check if the alarm thread that walks through the flows is started
306  *
307  * ctxt [in] The ulp context for the flow counter manager
308  *
309  */
310 bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
311 {
312 	struct bnxt_ulp_sc_info *ulp_sc_info;
313 
314 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
315 
316 	if (ulp_sc_info)
317 		return !!(ulp_sc_info->flags & ULP_FLAG_SC_THREAD);
318 
319 	return false;
320 }
321 
322 /*
323  * Setup the Flow counter timer thread that will fetch/accumulate raw counter
324  * data from the chip's internal flow counters
325  *
326  * ctxt [in] The ulp context for the flow counter manager
327  *
328  */
329 int32_t
330 ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
331 {
332 	struct bnxt_ulp_sc_info *ulp_sc_info;
333 	int rc;
334 
335 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
336 
337 	if (ulp_sc_info && !(ulp_sc_info->flags & ULP_FLAG_SC_THREAD)) {
338 		rc = rte_thread_create(&ulp_sc_info->tid,
339 				       NULL,
340 				       &ulp_stats_cache_main_loop,
341 				       (void *)ctxt->cfg_data);
342 		if (rc)
343 			return rc;
344 
345 		ulp_sc_info->flags |= ULP_FLAG_SC_THREAD;
346 	}
347 
348 	return 0;
349 }
350 
351 /*
352  * Cancel the alarm handler
353  *
354  * ctxt [in] The ulp context for the flow counter manager
355  *
356  */
357 void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
358 {
359 	struct bnxt_ulp_sc_info *ulp_sc_info;
360 
361 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
362 	if (!ulp_sc_info)
363 		return;
364 
365 	ulp_sc_info->flags &= ~ULP_FLAG_SC_THREAD;
366 }
367 
368 /*
369  * Fill the rte_flow_query_count 'data' argument passed
370  * in the rte_flow_query() with the values obtained and
371  * accumulated locally.
372  *
373  * ctxt [in] The ulp context for the flow counter manager
374  *
375  * flow_id [in] The HW flow ID
376  *
377  * count [out] The rte_flow_query_count 'data' that is set
378  *
379  */
int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct bnxt_ulp_sc_info *ulp_sc_info;
	struct ulp_fdb_parent_info *pc_entry;
	struct bnxt_ulp_flow_db *flow_db;
	uint32_t max_array;
	uint32_t child_fid;
	uint32_t a_idx;
	uint32_t f2_cnt;
	uint64_t *t;
	uint64_t bs;
	int rc = 0;

	/* Get stats cache info */
	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
	if (!ulp_sc_info)
		return -ENODEV;

	/* Index the cache table directly by flow id */
	sce = ulp_sc_info->stats_cache_tbl;
	sce += flow_id;

	/* To handle the parent flow */
	if (sce->flags & ULP_SC_ENTRY_FLAG_PARENT) {
		/* Parent flows report the sum of all their children's counters */
		flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ctxt);
		if (!flow_db) {
			BNXT_DRV_DBG(ERR, "parent child db validation failed\n");
			return -EINVAL;
		}

		/* Validate the arguments and parent child entry */
		pc_entry = ulp_flow_db_pc_db_entry_get(ctxt, sce->pc_idx);
		if (!pc_entry) {
			BNXT_DRV_DBG(ERR, "failed to get the parent child entry\n");
			return -EINVAL;
		}

		/* Walk the child-fid bitset; f2_cnt is the number of set bits
		 * (children) still to visit.
		 */
		t = pc_entry->child_fid_bitset;
		f2_cnt = pc_entry->f2_cnt;
		/* bitset size is in bytes; convert to number of 64-bit words */
		max_array = flow_db->parent_child_db.child_bitset_size * 8 / ULP_INDEX_BITMAP_SIZE;

		/* Iterate all possible child flows */
		for (a_idx = 0; (a_idx < max_array) && f2_cnt; a_idx++) {
			/* If it is zero, then check the next bitset */
			bs = t[a_idx];
			if (!bs)
				continue;

			/* check one bitset */
			do {
				/* get the next child fid
				 * NOTE(review): rte_clz64 gives the MSB-first bit
				 * position, so the bitmap is assumed MSB-ordered and
				 * ULP_INDEX_BITMAP_RESET is assumed to reduce the fid
				 * to a within-word bit — confirm against the macro
				 * definitions.
				 */
				child_fid = (a_idx * ULP_INDEX_BITMAP_SIZE) + rte_clz64(bs);
				sce = ulp_sc_info->stats_cache_tbl;
				sce += child_fid;

				/* clear the bit for this child flow */
				ULP_INDEX_BITMAP_RESET(bs, child_fid);
				f2_cnt--;

				/* no counter action, then ignore flows */
				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
					continue;
				/* Accumulate child counters into the caller's totals */
				count->hits += sce->packet_count;
				count->hits_set = 1;
				count->bytes += sce->byte_count;
				count->bytes_set = 1;
			} while (bs && f2_cnt);
		}
	} else {
		/* To handle regular or child flows */
		/* If entry is not valid return an error */
		if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
			return -EBUSY;

		count->hits = sce->packet_count;
		count->hits_set = 1;
		count->bytes = sce->byte_count;
		count->bytes_set = 1;

		/* Defer the hardware counter reset to the polling thread,
		 * which clears the flag after issuing the reset read.
		 * NOTE(review): the parent-flow path above does not honor
		 * count->reset — verify whether that is intentional.
		 */
		if (count->reset)
			sce->reset = true;
	}
	return rc;
}
466 
467 
468 int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
469 			   uint64_t counter_handle,
470 			   struct bnxt_ulp_mapper_tbl_info *tbl)
471 {
472 	struct ulp_sc_tfc_stats_cache_entry *sce;
473 	struct bnxt_ulp_sc_info *ulp_sc_info;
474 
475 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(parms->ulp_ctx);
476 	if (!ulp_sc_info)
477 		return -ENODEV;
478 
479 	sce = ulp_sc_info->stats_cache_tbl;
480 	sce += parms->flow_id;
481 
482 	/* If entry is not free return an error */
483 	if (sce->flags & ULP_SC_ENTRY_FLAG_VALID) {
484 		BNXT_DRV_DBG(ERR, "Entry is not free, invalid flow id %u\n",
485 			     parms->flow_id);
486 		return -EBUSY;
487 	}
488 
489 	memset(sce, 0, sizeof(*sce));
490 	sce->ctxt = parms->ulp_ctx;
491 	sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
492 	if (parms->parent_flow)
493 		sce->flags |= ULP_SC_ENTRY_FLAG_PARENT;
494 	sce->handle = counter_handle;
495 	sce->dir = tbl->direction;
496 	ulp_sc_info->num_entries++;
497 	return 0;
498 }
499 
500 void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
501 			   uint32_t fid)
502 {
503 	struct ulp_sc_tfc_stats_cache_entry *sce;
504 	struct bnxt_ulp_sc_info *ulp_sc_info;
505 
506 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ulp);
507 	if (!ulp_sc_info)
508 		return;
509 
510 	sce = ulp_sc_info->stats_cache_tbl;
511 	sce += fid;
512 
513 	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
514 		BNXT_DRV_DBG(ERR, "Entry already free, invalid flow id %u\n",
515 			     fid);
516 		return;
517 	}
518 
519 	sce->flags = 0;
520 	ulp_sc_info->num_entries--;
521 }
522 
523 /*
524  * Set pc_idx for the flow if stat cache info is valid
525  *
526  * ctxt [in] The ulp context for the flow counter manager
527  *
528  * flow_id [in] The HW flow ID
529  *
530  * pc_idx [in] The parent flow entry idx
531  *
532  */
533 void ulp_sc_mgr_set_pc_idx(struct bnxt_ulp_context *ctxt,
534 			   uint32_t flow_id,
535 			   uint32_t pc_idx)
536 {
537 	struct ulp_sc_tfc_stats_cache_entry *sce;
538 	struct bnxt_ulp_sc_info *ulp_sc_info;
539 
540 	/* Get stats cache info */
541 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
542 	if (!ulp_sc_info)
543 		return;
544 
545 	sce = ulp_sc_info->stats_cache_tbl;
546 	sce += flow_id;
547 	sce->pc_idx = pc_idx & ULP_SC_PC_IDX_MASK;
548 }
549