/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "bnxt_ulp_tfc.h"
#include "bnxt_tf_common.h"
#include "ulp_sc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tfc.h"
#include "tfc_debug.h"
#include "tfc_action_handle.h"

#define ULP_TFC_CNTR_READ_BYTES 32
#define ULP_TFC_CNTR_ALIGN 32
#define ULP_TFC_ACT_WORD_SZ 32
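
/*
 * Note: a TFC action word is ULP_TFC_ACT_WORD_SZ (32) bytes, so a
 * 32-byte counter read resolves to exactly one word:
 *   (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ
 *   = (32 + 32 - 1) / 32 = 1
 * This is the "words" count handed to the stats cache read in the
 * main loop below.
 */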

static const struct bnxt_ulp_sc_core_ops *
bnxt_ulp_sc_ops_get(struct bnxt_ulp_context *ctxt)
{
	int32_t rc;
	enum bnxt_ulp_device_id dev_id;
	const struct bnxt_ulp_sc_core_ops *func_ops;

	rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id);
	if (rc)
		return NULL;

	switch (dev_id) {
	case BNXT_ULP_DEVICE_ID_THOR2:
		func_ops = &ulp_sc_tfc_core_ops;
		break;
	case BNXT_ULP_DEVICE_ID_THOR:
	case BNXT_ULP_DEVICE_ID_STINGRAY:
	case BNXT_ULP_DEVICE_ID_WH_PLUS:
	default:
		func_ops = NULL;
		break;
	}
	return func_ops;
}

int32_t ulp_sc_mgr_init(struct bnxt_ulp_context *ctxt)
{
	const struct bnxt_ulp_sc_core_ops *sc_ops;
	struct bnxt_ulp_device_params *dparms;
	struct bnxt_ulp_sc_info *ulp_sc_info;
	uint32_t stats_cache_tbl_sz;
	uint32_t dev_id;
	int rc;

	if (!ctxt) {
		BNXT_DRV_DBG(DEBUG, "Invalid ULP CTXT\n");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_DRV_DBG(DEBUG, "Failed to get device parms\n");
		return -EINVAL;
	}

	sc_ops = bnxt_ulp_sc_ops_get(ctxt);
	if (sc_ops == NULL) {
		BNXT_DRV_DBG(DEBUG, "Failed to get the counter ops\n");
		return -EINVAL;
	}

	ulp_sc_info = rte_zmalloc("ulp_sc_info", sizeof(*ulp_sc_info), 0);
	if (!ulp_sc_info) {
		rc = -ENOMEM;
		goto error;
	}

	ulp_sc_info->sc_ops = sc_ops;
	ulp_sc_info->flags = 0;

	rc = pthread_mutex_init(&ulp_sc_info->sc_lock, NULL);
	if (rc) {
		BNXT_DRV_DBG(ERR, "Failed to initialize sc mutex\n");
		/* Don't leak the info struct on failure */
		rte_free(ulp_sc_info);
		goto error;
	}

	/* Add the SC info tbl to the ulp context. */
	bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, ulp_sc_info);

	ulp_sc_info->num_counters = dparms->ext_flow_db_num_entries;
	if (!ulp_sc_info->num_counters) {
		/* No need for software counters, call fw directly */
		BNXT_DRV_DBG(DEBUG, "Sw flow counter support not enabled\n");
		return 0;
	}

	/*
	 * Size is determined by the number of flows + 10% to cover IDs
	 * used for resources.
	 */
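	/*
	 * For example (hypothetical sizing): with 64K flows the table
	 * holds 64K + 6.4K = ~70K entries of
	 * sizeof(struct ulp_sc_tfc_stats_cache_entry) bytes each.
	 */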
	stats_cache_tbl_sz = sizeof(struct ulp_sc_tfc_stats_cache_entry) *
		(ulp_sc_info->num_counters +
		 (ulp_sc_info->num_counters / 10));

	ulp_sc_info->stats_cache_tbl = rte_zmalloc("ulp_stats_cache_tbl",
						   stats_cache_tbl_sz, 0);
	if (!ulp_sc_info->stats_cache_tbl) {
		rc = -ENOMEM;
		goto error;
	}

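	/*
	 * One ULP_SC_PAGE_SIZE region per batched counter read,
	 * ULP_SC_BATCH_SIZE reads per batch; the main loop below advances
	 * its read pointer by ULP_SC_PAGE_SIZE per queued entry.
	 */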
	ulp_sc_info->read_data = rte_zmalloc("ulp_stats_cache_read_data",
					     ULP_SC_BATCH_SIZE * ULP_SC_PAGE_SIZE,
					     ULP_SC_PAGE_SIZE);
	if (!ulp_sc_info->read_data) {
		rte_free(ulp_sc_info->stats_cache_tbl);
		rc = -ENOMEM;
		goto error;
	}

	rc = ulp_sc_mgr_thread_start(ctxt);
	if (rc)
		BNXT_DRV_DBG(DEBUG, "Stats counter thread start failed\n");

 error:
	return rc;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_sc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_sc_info *ulp_sc_info;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);

	if (!ulp_sc_info)
		return -EINVAL;

	pthread_mutex_lock(&ulp_sc_info->sc_lock);

	ulp_sc_mgr_thread_cancel(ctxt);

	/* Unlock before destroy: destroying a locked mutex is undefined */
	pthread_mutex_unlock(&ulp_sc_info->sc_lock);
	pthread_mutex_destroy(&ulp_sc_info->sc_lock);

	/* rte_free() is a no-op on NULL pointers */
	rte_free(ulp_sc_info->stats_cache_tbl);
	rte_free(ulp_sc_info->read_data);

	rte_free(ulp_sc_info);

	/* Safe to ignore on deinit */
	(void)bnxt_ulp_cntxt_ptr2_sc_info_set(ctxt, NULL);

	return 0;
}

#define ULP_SC_PERIOD_S 1
#define ULP_SC_PERIOD_MS (ULP_SC_PERIOD_S * 1000)
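
/*
 * The stats cache thread collects counters once per ULP_SC_PERIOD_S
 * second; the sleep between passes is trimmed by the time the pass
 * itself took (see the delay handling at the bottom of the loop).
 */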

static uint32_t ulp_stats_cache_main_loop(void *arg)
{
	struct ulp_sc_tfc_stats_cache_entry *count;
	const struct bnxt_ulp_sc_core_ops *sc_ops;
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct ulp_sc_tfc_stats_cache_entry *sce_end;
	struct tfc_mpc_batch_info_t batch_info;
	struct bnxt_ulp_sc_info *ulp_sc_info;
	struct bnxt_ulp_context *ctxt = NULL;
	uint16_t words = (ULP_TFC_CNTR_READ_BYTES + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ;
	uint32_t batch_size;
	struct tfc *tfcp = NULL;
	uint32_t batch;
	uint32_t delay = ULP_SC_PERIOD_MS;
	uint64_t start;
	uint64_t stop;
	uint64_t hz;
	uint8_t *data;
	int rc;
	static uint32_t loop;
	uint64_t cycles = 0;
	uint64_t cpms = 0;

	while (!ctxt) {
		ctxt = bnxt_ulp_cntxt_entry_acquire(arg);

		if (ctxt)
			break;

		BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
		rte_delay_us_block(1000);
	}

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
	if (!ulp_sc_info) {
		bnxt_ulp_cntxt_entry_release();
		goto terminate;
	}

	sc_ops = ulp_sc_info->sc_ops;

	hz = rte_get_timer_hz();
	cpms = hz / 1000;

	while (true) {
		bnxt_ulp_cntxt_entry_release();
		ctxt = NULL;
		rte_delay_ms(delay);

		while (!ctxt) {
			ctxt = bnxt_ulp_cntxt_entry_acquire(arg);

			if (ctxt)
				break;

			BNXT_DRV_DBG(INFO, "could not get the ulp context lock\n");
			rte_delay_us_block(1);
		}

		start = rte_get_timer_cycles();
		sce = ulp_sc_info->stats_cache_tbl;
		sce_end = sce + (ulp_sc_info->num_counters + (ulp_sc_info->num_counters / 10));

		while (ulp_sc_info->num_entries && (sce < sce_end)) {
			data = ulp_sc_info->read_data;

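			/*
			 * Batch protocol: open an MPC batch, queue up to
			 * ULP_SC_BATCH_SIZE counter reads against it, close
			 * the batch, then walk the completions and copy each
			 * result back into its cache entry.
			 */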
			rc = tfc_mpc_batch_start(&batch_info);
			if (rc) {
				PMD_DRV_LOG_LINE(ERR,
						 "MPC batch start failed rc:%d loop:%d",
						 rc, loop);
				break;
			}

			if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
				break;

			rc = pthread_mutex_lock(&ulp_sc_info->sc_lock);
			if (rc) {
				PMD_DRV_LOG_LINE(ERR,
						 "Failed to get SC lock, terminating main loop rc:%d loop:%d",
						 rc, loop);
				/* Drop the fdb lock and context before exiting */
				bnxt_ulp_cntxt_release_fdb_lock(ctxt);
				bnxt_ulp_cntxt_entry_release();
				goto terminate;
			}

			for (batch = 0; (batch < ULP_SC_BATCH_SIZE) && (sce < sce_end);) {
				if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
					sce++;
					continue;
				}

				tfcp = bnxt_ulp_cntxt_tfcp_get(sce->ctxt);
				if (tfcp == NULL) {
					/* Drop the locks taken above before exiting */
					pthread_mutex_unlock(&ulp_sc_info->sc_lock);
					bnxt_ulp_cntxt_release_fdb_lock(ctxt);
					bnxt_ulp_cntxt_entry_release();
					goto terminate;
				}

				/* Store the entry pointer to use for counter update */
				batch_info.em_hdl[batch_info.count] = (uint64_t)sce;

				rc = sc_ops->ulp_stats_cache_update(tfcp,
								    sce->dir,
								    data,
								    sce->handle,
								    &words,
								    &batch_info,
								    sce->reset);
				if (rc) {
					/* Abort this batch */
					PMD_DRV_LOG_LINE(ERR,
							 "loop:%d read_counter() failed:%d",
							 loop, rc);
					break;
				}

				if (sce->reset)
					sce->reset = false;

				/* Next */
				batch++;
				sce++;
				data += ULP_SC_PAGE_SIZE;
			}

			batch_size = batch_info.count;
			rc = tfc_mpc_batch_end(tfcp, &batch_info);

			pthread_mutex_unlock(&ulp_sc_info->sc_lock);
			bnxt_ulp_cntxt_release_fdb_lock(ctxt);

			if (rc) {
				PMD_DRV_LOG_LINE(ERR,
						 "MPC batch end failed rc:%d loop:%d",
						 rc, loop);
				batch_info.enabled = false;
				break;
			}

			/* Process counts */
			data = ulp_sc_info->read_data;

			for (batch = 0; batch < batch_size; batch++) {
				/* Check for error in completion */
				if (batch_info.result[batch]) {
					PMD_DRV_LOG_LINE(ERR,
							 "batch:%d result:%d",
							 batch, batch_info.result[batch]);
				} else {
					count = (struct ulp_sc_tfc_stats_cache_entry *)
						((uintptr_t)batch_info.em_hdl[batch]);
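					/*
					 * Copy one 32-byte action word into
					 * the entry; assumes the packet and
					 * byte counts are contiguous fields
					 * starting at packet_count.
					 */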
					memcpy(&count->packet_count, data, ULP_TFC_ACT_WORD_SZ);
				}

				data += ULP_SC_PAGE_SIZE;
			}
		}

		loop++;
		stop = rte_get_timer_cycles();
		cycles = stop - start;
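		/*
		 * Trim the next sleep by the collection time, e.g. a pass
		 * that took 200 ms of a 1000 ms period sleeps 800 ms; if the
		 * pass overran the period, fall back to the full period.
		 */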
		if (cycles > (hz * ULP_SC_PERIOD_S)) {
			PMD_DRV_LOG_LINE(ERR,
					 "Stats collection time exceeded %dms Cycles:%" PRIu64,
					 ULP_SC_PERIOD_MS, cycles);
			delay = ULP_SC_PERIOD_MS;
		} else {
			delay = ULP_SC_PERIOD_MS - (cycles / cpms);

			if (delay > ULP_SC_PERIOD_MS) {
				PMD_DRV_LOG_LINE(ERR,
						 "Stats collection delay:%ums exceeds %dms",
						 delay, ULP_SC_PERIOD_MS);
				delay = ULP_SC_PERIOD_MS;
			}
		}
	}

 terminate:
	return 0;
}

/*
 * Check if the stats cache thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_sc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_sc_info *ulp_sc_info;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);

	if (ulp_sc_info)
		return !!(ulp_sc_info->flags & ULP_FLAG_SC_THREAD);

	return false;
}

/*
 * Set up the flow counter timer thread that will fetch/accumulate raw
 * counter data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_sc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_sc_info *ulp_sc_info;
	int rc;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);

	if (ulp_sc_info && !(ulp_sc_info->flags & ULP_FLAG_SC_THREAD)) {
		rc = rte_thread_create(&ulp_sc_info->tid,
				       NULL,
				       &ulp_stats_cache_main_loop,
				       (void *)ctxt->cfg_data);
		if (rc)
			return rc;

		ulp_sc_info->flags |= ULP_FLAG_SC_THREAD;
	}

	return 0;
}

/*
 * Cancel the stats cache thread
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_sc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
	struct bnxt_ulp_sc_info *ulp_sc_info;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
	if (!ulp_sc_info)
		return;

	ulp_sc_info->flags &= ~ULP_FLAG_SC_THREAD;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
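/*
 * A minimal caller sketch (hypothetical application code, names assumed):
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *
 *	if (!ulp_sc_mgr_query_count_get(ulp_ctx, flow_id, &count))
 *		printf("hits:%" PRIu64 " bytes:%" PRIu64 "\n",
 *		       count.hits, count.bytes);
 *
 * Setting count.reset marks the entry so the next collection pass
 * issues the read with the reset flag set.
 */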
int ulp_sc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
			       uint32_t flow_id,
			       struct rte_flow_query_count *count)
{
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct bnxt_ulp_sc_info *ulp_sc_info;
	int rc = 0;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ctxt);
	if (!ulp_sc_info)
		return -ENODEV;

	sce = ulp_sc_info->stats_cache_tbl;
	sce += flow_id;

	/* If entry is not valid return an error */
	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID))
		return -EBUSY;

	count->hits = sce->packet_count;
	count->hits_set = 1;
	count->bytes = sce->byte_count;
	count->bytes_set = 1;

	if (count->reset)
		sce->reset = true;

	return rc;
}

int ulp_sc_mgr_entry_alloc(struct bnxt_ulp_mapper_parms *parms,
			   uint64_t counter_handle,
			   struct bnxt_ulp_mapper_tbl_info *tbl)
{
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct bnxt_ulp_sc_info *ulp_sc_info;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(parms->ulp_ctx);
	if (!ulp_sc_info)
		return -ENODEV;

	pthread_mutex_lock(&ulp_sc_info->sc_lock);

	sce = ulp_sc_info->stats_cache_tbl;
	sce += parms->flow_id;

	/* If entry is not free return an error */
	if (sce->flags & ULP_SC_ENTRY_FLAG_VALID) {
		pthread_mutex_unlock(&ulp_sc_info->sc_lock);
		return -EBUSY;
	}

	memset(sce, 0, sizeof(*sce));
	sce->ctxt = parms->ulp_ctx;
	sce->flags |= ULP_SC_ENTRY_FLAG_VALID;
	sce->handle = counter_handle;
	sce->dir = tbl->direction;
	ulp_sc_info->num_entries++;
	pthread_mutex_unlock(&ulp_sc_info->sc_lock);

	return 0;
}
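
/*
 * Cache entries are indexed directly by flow id, so alloc/free are
 * O(1) slot claims/releases under sc_lock rather than searches.
 */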

void ulp_sc_mgr_entry_free(struct bnxt_ulp_context *ulp,
			   uint32_t fid)
{
	struct ulp_sc_tfc_stats_cache_entry *sce;
	struct bnxt_ulp_sc_info *ulp_sc_info;

	ulp_sc_info = bnxt_ulp_cntxt_ptr2_sc_info_get(ulp);
	if (!ulp_sc_info)
		return;

	pthread_mutex_lock(&ulp_sc_info->sc_lock);

	sce = ulp_sc_info->stats_cache_tbl;
	sce += fid;

	if (!(sce->flags & ULP_SC_ENTRY_FLAG_VALID)) {
		pthread_mutex_unlock(&ulp_sc_info->sc_lock);
		return;
	}

	sce->flags = 0;
	ulp_sc_info->num_entries--;

	pthread_mutex_unlock(&ulp_sc_info->sc_lock);
}
527