xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_sc_mgr.c (revision ca827d42ad72f90d045716e688b539e53e31a7cc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <sched.h>
7 #include <unistd.h>
8 #include <rte_common.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_log.h>
12 #include "bnxt.h"
13 #include "bnxt_ulp.h"
14 #include "bnxt_ulp_utils.h"
15 #include "bnxt_ulp_tfc.h"
16 #include "bnxt_tf_common.h"
17 #include "ulp_sc_mgr.h"
18 #include "ulp_flow_db.h"
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "tfc.h"
22 #include "tfc_debug.h"
23 #include "tfc_action_handle.h"
24 
25 #define ULP_TFC_CNTR_READ_BYTES 32
26 #define ULP_TFC_CNTR_ALIGN 32
27 #define ULP_TFC_ACT_WORD_SZ 32
28 
29 static const struct bnxt_ulp_sc_core_ops *
30 bnxt_ulp_sc_ops_get(struct bnxt_ulp_context *ctxt)
31 {
32 	int32_t rc;
33 	enum bnxt_ulp_device_id  dev_id;
34 	const struct bnxt_ulp_sc_core_ops *func_ops;
35 
36 	rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
37 	if (rc)
38 		return NULL;
39 
40 	switch (dev_id) {
41 	case BNXT_ULP_DEVICE_ID_THOR2:
42 		func_ops = &ulp_sc_tfc_core_ops;
43 		break;
44 	case BNXT_ULP_DEVICE_ID_THOR:
45 	case BNXT_ULP_DEVICE_ID_STINGRAY:
46 	case BNXT_ULP_DEVICE_ID_WH_PLUS:
47 	default:
48 		func_ops = NULL;
49 		break;
50 	}
51 	return func_ops;
52 }
53 
54 int32_t ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt)
55 {
56 	const struct bnxt_ulp_sc_core_ops *sc_ops;
57 	struct bnxt_ulp_device_params *dparms;
58 	struct bnxt_ulp_sc_info *ulp_sc_info;
59 	uint32_t stats_cache_tbl_sz;
60 	uint32_t dev_id;
61 	uint8_t *data;
62 	int rc;
63 	int i;
64 
65 	if (!ctxt) {
66 		BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
67 		return -EINVAL;
68 	}
69 
70 	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
71 		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
72 		return -EINVAL;
73 	}
74 
75 	dparms = bnxt_ulp_device_params_get(dev_id);
76 	if (!dparms) {
77 		BNXT_DRV_DBG(DEBUG, "Failed to device parms\n");
78 		return -EINVAL;
79 	}
80 
81 	sc_ops = bnxt_ulp_sc_ops_get(ctxt);
82 	if (sc_ops == NULL) {
83 		BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
84 		return -EINVAL;
85 	}
86 
87 	ulp_sc_info = rte_zmalloc("ulp_sc_info", sizeof(*ulp_sc_info), 0);
88 	if (!ulp_sc_info) {
89 		rc = -ENOMEM;
90 		goto error;
91 	}
92 
93 	ulp_sc_info->sc_ops = sc_ops;
94 	ulp_sc_info->flags = 0;
95 
96 	/* Add the SC info tbl to the ulp context. */
97 	bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, ulp_sc_info);
98 
99 	ulp_sc_info->num_counters = dparms->ext_flow_db_num_entries;
100 	if (!ulp_sc_info->num_counters) {
101 		/* No need for software counters, call fw directly */
102 		BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
103 		return 0;
104 	}
105 
106 	/*
107 	 * Size is determined by the number of flows + 10% to cover IDs
108 	 * used for resources.
109 	 */
110 	ulp_sc_info->cache_tbl_size = ulp_sc_info->num_counters +
111 		(ulp_sc_info->num_counters / 10);
112 	stats_cache_tbl_sz = sizeof(struct ulp_sc_tfc_stats_cache_entry) *
113 		ulp_sc_info->cache_tbl_size;
114 
115 	ulp_sc_info->stats_cache_tbl = rte_zmalloc("ulp_stats_cache_tbl",
116 						   stats_cache_tbl_sz, 0);
117 	if (!ulp_sc_info->stats_cache_tbl) {
118 		rc = -ENOMEM;
119 		goto error;
120 	}
121 
122 	ulp_sc_info->read_data = rte_zmalloc("ulp_stats_cache_read_data",
123 					     ULP_SC_BATCH_SIZE * ULP_SC_PAGE_SIZE,
124 					     ULP_SC_PAGE_SIZE);
125 	if (!ulp_sc_info->read_data) {
126 		rte_free(ulp_sc_info->stats_cache_tbl);
127 		rc = -ENOMEM;
128 		goto error;
129 	}
130 
131 	data = ulp_sc_info->read_data;
132 	for (i = 0; i < ULP_SC_BATCH_SIZE; i++) {
133 		ulp_sc_info->read_data_iova[i] = (uint64_t)rte_mem_virt2iova(data);
134 		data += ULP_SC_PAGE_SIZE;
135 	}
136 
137 	rc = ulp_sc_mgr_thread_start(ctxt);
138 	if (rc)
139 		BNXT_DRV_DBG(DEBUG, "Stats counter thread start failed\n");
140 
141  error:
142 	return rc;
143 }
144 
145 /*
146  * Release all resources in the Flow Counter Manager for this ulp context
147  *
148  * ctxt [in] The ulp context for the Flow Counter manager
149  *
150  */
151 int32_t
152 ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt)
153 {
154 	struct bnxt_ulp_sc_info *ulp_sc_info;
155 
156 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
157 
158 	if (!ulp_sc_info)
159 		return -EINVAL;
160 
161 	if (ulp_sc_info->stats_cache_tbl)
162 		rte_free(ulp_sc_info->stats_cache_tbl);
163 
164 	if (ulp_sc_info->read_data)
165 		rte_free(ulp_sc_info->read_data);
166 
167 	rte_free(ulp_sc_info);
168 
169 	/* Safe to ignore on deinit */
170 	(void)bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, NULL);
171 
172 	return 0;
173 }
174 
175 #define ULP_SC_PERIOD_US 256
176 #define ULP_SC_CTX_DELAY 10000
177 
178 static uint32_t ulp_stats_cache_main_loop(void *arg)
179 {
180 	struct ulp_sc_tfc_stats_cache_entry *count;
181 	const struct bnxt_ulp_sc_core_ops *sc_ops = NULL;
182 	struct ulp_sc_tfc_stats_cache_entry *sce;
183 	struct ulp_sc_tfc_stats_cache_entry *sce_end;
184 	struct tfc_mpc_batch_info_t batch_info;
185 	struct bnxt_ulp_sc_info *ulp_sc_info;
186 	struct bnxt_ulp_context *ctxt = NULL;
187 	uint16_t words = (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ;
188 	uint32_t batch_size;
189 	struct tfc *tfcp = NULL;
190 	uint32_t batch, stat_cnt;
191 	uint8_t *data;
192 	int rc;
193 
194 	while (true) {
195 		ctxt = NULL;
196 		while (!ctxt) {
197 			ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
198 
199 			if (ctxt)
200 				break;
201 			/* If there are no more contexts just exit */
202 			if (bnxt_ulp_cntxt_list_count() == 0)
203 				goto terminate;
204 			rte_delay_us_block(ULP_SC_CTX_DELAY);
205 		}
206 
207 		/* get the stats counter info block from ulp context */
208 		ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
209 		if (unlikely(!ulp_sc_info)) {
210 			bnxt_ulp_cntxt_entry_release();
211 			goto terminate;
212 		}
213 
214 		sce = ulp_sc_info->stats_cache_tbl;
215 		sce_end = sce + ulp_sc_info->cache_tbl_size;
216 
217 		if (unlikely(!sc_ops))
218 			sc_ops = ulp_sc_info->sc_ops;
219 
220 		stat_cnt = 0;
221 		while (stat_cnt < ulp_sc_info->num_entries && (sce < sce_end)) {
222 			data = ulp_sc_info->read_data;
223 
224 			if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
225 				break;
226 
227 			rc = tfc_mpc_batch_start(&batch_info);
228 			if (unlikely(rc)) {
229 				PMD_DRV_LOG_LINE(ERR,
230 						 "MPC batch start failed rc:%d", rc);
231 				bnxt_ulp_cntxt_release_fdb_lock(ctxt);
232 				break;
233 			}
234 
235 			for (batch = 0; (batch < ULP_SC_BATCH_SIZE) &&
236 			     (sce < sce_end);) {
237 				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
238 					sce++;
239 					continue;
240 				}
241 				stat_cnt++;
242 				tfcp = bnxt_ulp_cntxt_tfcp_get(sce->ctxt);
243 				if (unlikely(!tfcp)) {
244 					bnxt_ulp_cntxt_release_fdb_lock(ctxt);
245 					bnxt_ulp_cntxt_entry_release();
246 					goto terminate;
247 				}
248 
249 				/* Store the entry pointer to use for counter update */
250 				batch_info.em_hdl[batch_info.count] =
251 					(uint64_t)sce;
252 
253 				rc = sc_ops->ulp_stats_cache_update(tfcp,
254 							    sce->dir,
255 							    &ulp_sc_info->read_data_iova[batch],
256 							    sce->handle,
257 							    &words,
258 							    &batch_info,
259 							    sce->reset);
260 				if (unlikely(rc)) {
261 					/* Abort this batch */
262 					PMD_DRV_LOG_LINE(ERR,
263 							 "read_counter() failed:%d", rc);
264 					break;
265 				}
266 
267 				if (sce->reset)
268 					sce->reset = false;
269 
270 				/* Next */
271 				batch++;
272 				sce++;
273 				data += ULP_SC_PAGE_SIZE;
274 			}
275 
276 			batch_size = batch_info.count;
277 			rc = tfc_mpc_batch_end(tfcp, &batch_info);
278 
279 			bnxt_ulp_cntxt_release_fdb_lock(ctxt);
280 
281 			if (unlikely(rc)) {
282 				PMD_DRV_LOG_LINE(ERR, "MPC batch end failed rc:%d", rc);
283 				batch_info.enabled = false;
284 				break;
285 			}
286 
287 			/* Process counts */
288 			data = ulp_sc_info->read_data;
289 
290 			for (batch = 0; batch < batch_size; batch++) {
291 				/* Check for error in completion */
292 				if (batch_info.result[batch]) {
293 					PMD_DRV_LOG_LINE(ERR, "batch:%d result:%d",
294 							 batch, batch_info.result[batch]);
295 				} else {
296 					count = (struct ulp_sc_tfc_stats_cache_entry *)
297 						((uintptr_t)batch_info.em_hdl[batch]);
298 					memcpy(&count->packet_count, data, ULP_TFC_ACT_WORD_SZ);
299 				}
300 
301 				data += ULP_SC_PAGE_SIZE;
302 			}
303 		}
304 		bnxt_ulp_cntxt_entry_release();
305 		/* Sleep to give any other threads opportunity to access ULP */
306 		rte_delay_us_sleep(ULP_SC_PERIOD_US);
307 	}
308 
309  terminate:
310 	PMD_DRV_LOG_LINE(DEBUG, "Terminating the stats cachce thread");
311 	return 0;
312 }
313 
314 /*
315  * Check if the alarm thread that walks through the flows is started
316  *
317  * ctxt [in] The ulp context for the flow counter manager
318  *
319  */
320 bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
321 {
322 	struct bnxt_ulp_sc_info *ulp_sc_info;
323 
324 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
325 
326 	if (ulp_sc_info)
327 		return !!(ulp_sc_info->flags & ULP_FLAG_SC_THREAD);
328 
329 	return false;
330 }
331 
332 /*
333  * Setup the Flow counter timer thread that will fetch/accumulate raw counter
334  * data from the chip's internal flow counters
335  *
336  * ctxt [in] The ulp context for the flow counter manager
337  *
338  */
339 int32_t
340 ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
341 {
342 	struct bnxt_ulp_sc_info *ulp_sc_info;
343 	rte_thread_attr_t attr;
344 	rte_cpuset_t mask;
345 	size_t i;
346 	int rc;
347 
348 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
349 
350 	if (ulp_sc_info && !(ulp_sc_info->flags & ULP_FLAG_SC_THREAD)) {
351 		rte_thread_attr_init(&attr);
352 
353 		rte_thread_get_affinity(&mask);
354 
355 		for (i = 1; i < CPU_SETSIZE; i++) {
356 			if (CPU_ISSET(i, &mask)) {
357 				CPU_ZERO(&mask);
358 				CPU_SET(i + 2, &mask);
359 				break;
360 			}
361 		}
362 
363 		rc = rte_thread_attr_set_affinity(&attr, &mask);
364 		if (rc)
365 			return rc;
366 
367 		rc = rte_thread_create(&ulp_sc_info->tid,
368 				       &attr,
369 				       &ulp_stats_cache_main_loop,
370 				       (void *)ctxt->cfg_data);
371 		if (rc)
372 			return rc;
373 
374 		rte_thread_set_prefixed_name(ulp_sc_info->tid, "ulp_sc_mgr");
375 
376 		ulp_sc_info->flags |= ULP_FLAG_SC_THREAD;
377 	}
378 
379 	return 0;
380 }
381 
382 /*
383  * Cancel the alarm handler
384  *
385  * ctxt [in] The ulp context for the flow counter manager
386  *
387  */
388 void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
389 {
390 	struct bnxt_ulp_sc_info *ulp_sc_info;
391 
392 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
393 	if (!ulp_sc_info)
394 		return;
395 
396 	ulp_sc_info->flags &= ~ULP_FLAG_SC_THREAD;
397 }
398 
/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * For a parent flow the hit/byte counts of all child flows are summed
 * by walking the parent-child bitset; for a regular or child flow the
 * entry's own cached counters are returned directly.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 * Returns 0 on success; -ENODEV if no stats cache info is attached,
 * -EINVAL on parent-child lookup failure, -EBUSY if the entry is not
 * yet valid.
 */
int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct bnxt_ulp_sc_info *ulp_sc_info;
	struct ulp_fdb_parent_info *pc_entry;
	struct bnxt_ulp_flow_db *flow_db;
	uint32_t max_array;
	uint32_t child_fid;
	uint32_t a_idx;	    /* index into the child fid bitset array */
	uint32_t f2_cnt;    /* remaining child flows to visit */
	uint64_t *t;	    /* child fid bitset array */
	uint64_t bs;	    /* working copy of one bitset word */
	int rc = 0;

	/* Get stats cache info */
	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
	if (!ulp_sc_info)
		return -ENODEV;

	/* The cache table is indexed directly by flow id. */
	sce = ulp_sc_info->stats_cache_tbl;
	sce += flow_id;

	/* To handle the parent flow */
	if (sce->flags & ULP_SC_ENTRY_FLAG_PARENT) {
		flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ctxt);
		if (!flow_db) {
			BNXT_DRV_DBG(ERR, "parent child db validation failed\n");
			return -EINVAL;
		}

		/* Validate the arguments and parent child entry */
		pc_entry = ulp_flow_db_pc_db_entry_get(ctxt, sce->pc_idx);
		if (!pc_entry) {
			BNXT_DRV_DBG(ERR, "failed to get the parent child entry\n");
			return -EINVAL;
		}

		t = pc_entry->child_fid_bitset;
		f2_cnt = pc_entry->f2_cnt;
		/* Number of 64-bit words in the child bitset array. */
		max_array = flow_db->parent_child_db.child_bitset_size * 8 / ULP_INDEX_BITMAP_SIZE;

		/* Iterate all possible child flows */
		for (a_idx = 0; (a_idx < max_array) && f2_cnt; a_idx++) {
			/* If it is zero, then check the next bitset */
			bs = t[a_idx];
			if (!bs)
				continue;

			/* check one bitset */
			do {
				/*
				 * Get the next child fid.  rte_clz64 counts
				 * leading zeros, so the bitset is assumed
				 * MSB-first (bit 0 == highest bit) — this must
				 * match the ULP_INDEX_BITMAP_* convention.
				 */
				child_fid = (a_idx * ULP_INDEX_BITMAP_SIZE) + rte_clz64(bs);
				sce = ulp_sc_info->stats_cache_tbl;
				sce += child_fid;

				/* clear the bit for this child flow */
				ULP_INDEX_BITMAP_RESET(bs, child_fid);
				f2_cnt--;

				/* no counter action, then ignore flows */
				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
					continue;
				count->hits += sce->packet_count;
				count->hits_set = 1;
				count->bytes += sce->byte_count;
				count->bytes_set = 1;
			} while (bs && f2_cnt);
		}
		/*
		 * NOTE(review): count->reset is not propagated to child
		 * entries on the parent path — confirm whether parent-flow
		 * queries are expected to support reset.
		 */
	} else {
		/* To handle regular or child flows */
		/* If entry is not valid return an error */
		if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
			return -EBUSY;

		count->hits = sce->packet_count;
		count->hits_set = 1;
		count->bytes = sce->byte_count;
		count->bytes_set = 1;

		/* Defer the actual reset to the polling thread. */
		if (count->reset)
			sce->reset = true;
	}
	return rc;
}
497 
498 
499 int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
500 			   uint64_t counter_handle,
501 			   struct bnxt_ulp_mapper_tbl_info *tbl)
502 {
503 	struct ulp_sc_tfc_stats_cache_entry *sce;
504 	struct bnxt_ulp_sc_info *ulp_sc_info;
505 
506 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(parms->ulp_ctx);
507 	if (!ulp_sc_info)
508 		return -ENODEV;
509 
510 	sce = ulp_sc_info->stats_cache_tbl;
511 	sce += parms->flow_id;
512 
513 	/* If entry is not free return an error */
514 	if (sce->flags & ULP_SC_ENTRY_FLAG_VALID) {
515 		BNXT_DRV_DBG(ERR, "Entry is not free, invalid flow id %u\n",
516 			     parms->flow_id);
517 		return -EBUSY;
518 	}
519 
520 	memset(sce, 0, sizeof(*sce));
521 	sce->ctxt = parms->ulp_ctx;
522 	sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
523 	if (parms->parent_flow)
524 		sce->flags |= ULP_SC_ENTRY_FLAG_PARENT;
525 	sce->handle = counter_handle;
526 	sce->dir = tbl->direction;
527 	ulp_sc_info->num_entries++;
528 	return 0;
529 }
530 
531 void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
532 			   uint32_t fid)
533 {
534 	struct ulp_sc_tfc_stats_cache_entry *sce;
535 	struct bnxt_ulp_sc_info *ulp_sc_info;
536 
537 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ulp);
538 	if (!ulp_sc_info)
539 		return;
540 
541 	sce = ulp_sc_info->stats_cache_tbl;
542 	sce += fid;
543 
544 	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
545 		BNXT_DRV_DBG(ERR, "Entry already free, invalid flow id %u\n",
546 			     fid);
547 		return;
548 	}
549 
550 	sce->flags = 0;
551 	ulp_sc_info->num_entries--;
552 }
553 
554 /*
555  * Set pc_idx for the flow if stat cache info is valid
556  *
557  * ctxt [in] The ulp context for the flow counter manager
558  *
559  * flow_id [in] The HW flow ID
560  *
561  * pc_idx [in] The parent flow entry idx
562  *
563  */
564 void ulp_sc_mgr_set_pc_idx(struct bnxt_ulp_context *ctxt,
565 			   uint32_t flow_id,
566 			   uint32_t pc_idx)
567 {
568 	struct ulp_sc_tfc_stats_cache_entry *sce;
569 	struct bnxt_ulp_sc_info *ulp_sc_info;
570 
571 	/* Get stats cache info */
572 	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
573 	if (!ulp_sc_info)
574 		return;
575 
576 	sce = ulp_sc_info->stats_cache_tbl;
577 	sce += flow_id;
578 	sce->pc_idx = pc_idx & ULP_SC_PC_IDX_MASK;
579 }
580