xref: /dpdk/drivers/net/sfc/sfc_sw_stats.c (revision d38febb08d57fec29fed27a2d12a507fc6fcdfa1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2021 Xilinx, Inc.
4  */
5 #include <rte_dev.h>
6 #include <rte_bitmap.h>
7 
8 #include "sfc.h"
9 #include "sfc_rx.h"
10 #include "sfc_tx.h"
11 #include "sfc_sw_stats.h"
12 
/* Kind of software statistic: tells which set of queues it is taken from. */
enum sfc_sw_stats_type {
	SFC_SW_STATS_RX,
	SFC_SW_STATS_TX,
};

/* Get the current counter value for the given ethdev queue ID. */
typedef uint64_t sfc_get_sw_xstat_val_t(struct sfc_adapter *sa, uint16_t qid);

/* Descriptor of one software xstat (e.g. doorbell counters). */
struct sfc_sw_xstat_descr {
	const char *name;		/* Name suffix ("rx"/"tx" prefix added) */
	enum sfc_sw_stats_type type;	/* Rx or Tx queue statistic */
	sfc_get_sw_xstat_val_t *get_val;	/* Per-queue value getter */
};
25 
26 static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_rx_dbells;
27 static uint64_t
28 sfc_get_sw_xstat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid)
29 {
30 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
31 	struct sfc_rxq_info *rxq_info;
32 
33 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, qid);
34 	if (rxq_info->state & SFC_RXQ_INITIALIZED)
35 		return rxq_info->dp->dpq.rx_dbells;
36 	return 0;
37 }
38 
39 static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_tx_dbells;
40 static uint64_t
41 sfc_get_sw_xstat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid)
42 {
43 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
44 	struct sfc_txq_info *txq_info;
45 
46 	txq_info = sfc_txq_info_by_ethdev_qid(sas, qid);
47 	if (txq_info->state & SFC_TXQ_INITIALIZED)
48 		return txq_info->dp->dpq.tx_dbells;
49 	return 0;
50 }
51 
/*
 * Table of all software xstats provided by the driver.  Each entry
 * expands into one accumulative xstat plus one xstat per queue.
 */
struct sfc_sw_xstat_descr sfc_sw_xstats[] = {
	{
		.name = "dbells",
		.type = SFC_SW_STATS_RX,
		.get_val  = sfc_get_sw_xstat_val_rx_dbells,
	},
	{
		.name = "dbells",
		.type = SFC_SW_STATS_TX,
		.get_val  = sfc_get_sw_xstat_val_tx_dbells,
	}
};
64 
65 static int
66 sfc_sw_stat_get_name(struct sfc_adapter *sa,
67 		     const struct sfc_sw_xstat_descr *sw_xstat, char *name,
68 		     size_t name_size, unsigned int id_off)
69 {
70 	const char *prefix;
71 	int ret;
72 
73 	switch (sw_xstat->type) {
74 	case SFC_SW_STATS_RX:
75 		prefix = "rx";
76 		break;
77 	case SFC_SW_STATS_TX:
78 		prefix = "tx";
79 		break;
80 	default:
81 		sfc_err(sa, "%s: unknown software statistics type %d",
82 			__func__, sw_xstat->type);
83 		return -EINVAL;
84 	}
85 
86 	if (id_off == 0) {
87 		ret = snprintf(name, name_size, "%s_%s", prefix,
88 							 sw_xstat->name);
89 		if (ret < 0 || ret >= (int)name_size) {
90 			sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
91 				__func__, prefix, sw_xstat->name, ret);
92 			return ret > 0 ? -EINVAL : ret;
93 		}
94 	} else {
95 		uint16_t qid = id_off - 1;
96 		ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
97 							sw_xstat->name);
98 		if (ret < 0 || ret >= (int)name_size) {
99 			sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
100 				__func__, prefix, qid, sw_xstat->name, ret);
101 			return ret > 0 ? -EINVAL : ret;
102 		}
103 	}
104 
105 	return 0;
106 }
107 
108 static unsigned int
109 sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
110 			    const struct sfc_sw_xstat_descr *sw_xstat)
111 {
112 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
113 
114 	switch (sw_xstat->type) {
115 	case SFC_SW_STATS_RX:
116 		return sas->ethdev_rxq_count;
117 	case SFC_SW_STATS_TX:
118 		return sas->ethdev_txq_count;
119 	default:
120 		sfc_err(sa, "%s: unknown software statistics type %d",
121 			__func__, sw_xstat->type);
122 		return 0;
123 	}
124 }
125 
/*
 * Number of xstats one software statistic expands into:
 * one accumulative xstat plus one per queue, or none without queues.
 */
static unsigned int
sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues)
{
	if (nb_queues == 0)
		return 0;

	return nb_queues + 1;
}
132 
/* Total number of xstats the given software statistic contributes. */
static unsigned int
sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
			      const struct sfc_sw_xstat_descr *sw_xstat)
{
	unsigned int nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);

	return sfc_sw_xstat_per_queue_get_count(nb_queues);
}
142 
143 static int
144 sfc_sw_stat_get_names(struct sfc_adapter *sa,
145 		      const struct sfc_sw_xstat_descr *sw_xstat,
146 		      struct rte_eth_xstat_name *xstats_names,
147 		      unsigned int xstats_names_sz,
148 		      unsigned int *nb_written,
149 		      unsigned int *nb_supported)
150 {
151 	const size_t name_size = sizeof(xstats_names[0].name);
152 	unsigned int id_base = *nb_supported;
153 	unsigned int nb_queues;
154 	unsigned int qid;
155 	int rc;
156 
157 	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
158 	if (nb_queues == 0)
159 		return 0;
160 	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
161 
162 	/*
163 	 * The order of each software xstat type is the accumulative xstat
164 	 * followed by per-queue xstats.
165 	 */
166 	if (*nb_written < xstats_names_sz) {
167 		rc = sfc_sw_stat_get_name(sa, sw_xstat,
168 					  xstats_names[*nb_written].name,
169 					  name_size, *nb_written - id_base);
170 		if (rc != 0)
171 			return rc;
172 		(*nb_written)++;
173 	}
174 
175 	for (qid = 0; qid < nb_queues; ++qid) {
176 		if (*nb_written < xstats_names_sz) {
177 			rc = sfc_sw_stat_get_name(sa, sw_xstat,
178 					      xstats_names[*nb_written].name,
179 					      name_size, *nb_written - id_base);
180 			if (rc != 0)
181 				return rc;
182 			(*nb_written)++;
183 		}
184 	}
185 
186 	return 0;
187 }
188 
189 static int
190 sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
191 			     const struct sfc_sw_xstat_descr *sw_xstat,
192 			     const uint64_t *ids,
193 			     struct rte_eth_xstat_name *xstats_names,
194 			     unsigned int size,
195 			     unsigned int *nb_supported)
196 {
197 	const size_t name_size = sizeof(xstats_names[0].name);
198 	unsigned int id_base = *nb_supported;
199 	unsigned int nb_queues;
200 	unsigned int i;
201 	int rc;
202 
203 	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
204 	if (nb_queues == 0)
205 		return 0;
206 	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
207 
208 	/*
209 	 * The order of each software xstat type is the accumulative xstat
210 	 * followed by per-queue xstats.
211 	 */
212 	for (i = 0; i < size; i++) {
213 		if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) {
214 			rc = sfc_sw_stat_get_name(sa, sw_xstat,
215 						  xstats_names[i].name,
216 						  name_size, ids[i] - id_base);
217 			if (rc != 0)
218 				return rc;
219 		}
220 	}
221 
222 	return 0;
223 }
224 
225 static void
226 sfc_sw_xstat_get_values(struct sfc_adapter *sa,
227 			const struct sfc_sw_xstat_descr *sw_xstat,
228 			struct rte_eth_xstat *xstats,
229 			unsigned int xstats_size,
230 			unsigned int *nb_written,
231 			unsigned int *nb_supported)
232 {
233 	unsigned int qid;
234 	uint64_t value;
235 	struct rte_eth_xstat *accum_xstat;
236 	bool count_accum_value = false;
237 	unsigned int nb_queues;
238 
239 	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
240 	if (nb_queues == 0)
241 		return;
242 	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
243 
244 	/*
245 	 * The order of each software xstat type is the accumulative xstat
246 	 * followed by per-queue xstats.
247 	 */
248 	if (*nb_written < xstats_size) {
249 		count_accum_value = true;
250 		accum_xstat = &xstats[*nb_written];
251 		xstats[*nb_written].id = *nb_written;
252 		xstats[*nb_written].value = 0;
253 		(*nb_written)++;
254 	}
255 
256 	for (qid = 0; qid < nb_queues; ++qid) {
257 		value = sw_xstat->get_val(sa, qid);
258 
259 		if (*nb_written < xstats_size) {
260 			xstats[*nb_written].id = *nb_written;
261 			xstats[*nb_written].value = value;
262 			(*nb_written)++;
263 		}
264 
265 		if (count_accum_value)
266 			accum_xstat->value += value;
267 	}
268 }
269 
270 static void
271 sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
272 			      const struct sfc_sw_xstat_descr *sw_xstat,
273 			      const uint64_t *ids,
274 			      uint64_t *values,
275 			      unsigned int ids_size,
276 			      unsigned int *nb_supported)
277 {
278 	rte_spinlock_t *bmp_lock = &sa->sw_xstats.queues_bitmap_lock;
279 	struct rte_bitmap *bmp = sa->sw_xstats.queues_bitmap;
280 	unsigned int id_base = *nb_supported;
281 	bool count_accum_value = false;
282 	unsigned int accum_value_idx;
283 	uint64_t accum_value = 0;
284 	unsigned int i, qid;
285 	unsigned int nb_queues;
286 
287 
288 	rte_spinlock_lock(bmp_lock);
289 	rte_bitmap_reset(bmp);
290 
291 	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
292 	if (nb_queues == 0)
293 		goto unlock;
294 	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);
295 
296 	/*
297 	 * The order of each software xstat type is the accumulative xstat
298 	 * followed by per-queue xstats.
299 	 */
300 	for (i = 0; i < ids_size; i++) {
301 		if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) {
302 			if (ids[i] == id_base) { /* Accumulative value */
303 				count_accum_value = true;
304 				accum_value_idx = i;
305 				continue;
306 			}
307 			qid = ids[i] - id_base - 1;
308 			values[i] = sw_xstat->get_val(sa, qid);
309 			accum_value += values[i];
310 
311 			rte_bitmap_set(bmp, qid);
312 		}
313 	}
314 
315 	if (count_accum_value) {
316 		for (qid = 0; qid < nb_queues; ++qid) {
317 			if (rte_bitmap_get(bmp, qid) != 0)
318 				continue;
319 			values[accum_value_idx] += sw_xstat->get_val(sa, qid);
320 		}
321 		values[accum_value_idx] += accum_value;
322 	}
323 
324 unlock:
325 	rte_spinlock_unlock(bmp_lock);
326 }
327 
328 unsigned int
329 sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
330 {
331 	unsigned int nb_supported = 0;
332 	unsigned int i;
333 
334 	SFC_ASSERT(sfc_adapter_is_locked(sa));
335 
336 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
337 		nb_supported += sfc_sw_xstat_get_nb_supported(sa,
338 							     &sfc_sw_xstats[i]);
339 	}
340 
341 	return nb_supported;
342 }
343 
344 void
345 sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
346 		       struct rte_eth_xstat *xstats,
347 		       unsigned int xstats_count,
348 		       unsigned int *nb_written,
349 		       unsigned int *nb_supported)
350 {
351 	uint64_t *reset_vals = sa->sw_xstats.reset_vals;
352 	unsigned int sw_xstats_offset;
353 	unsigned int i;
354 
355 	sfc_adapter_lock(sa);
356 
357 	sw_xstats_offset = *nb_supported;
358 
359 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
360 		sfc_sw_xstat_get_values(sa, &sfc_sw_xstats[i], xstats,
361 					xstats_count, nb_written, nb_supported);
362 	}
363 
364 	for (i = sw_xstats_offset; i < *nb_written; i++)
365 		xstats[i].value -= reset_vals[i - sw_xstats_offset];
366 
367 	sfc_adapter_unlock(sa);
368 }
369 
370 int
371 sfc_sw_xstats_get_names(struct sfc_adapter *sa,
372 			struct rte_eth_xstat_name *xstats_names,
373 			unsigned int xstats_count,
374 			unsigned int *nb_written,
375 			unsigned int *nb_supported)
376 {
377 	unsigned int i;
378 	int ret;
379 
380 	sfc_adapter_lock(sa);
381 
382 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
383 		ret = sfc_sw_stat_get_names(sa, &sfc_sw_xstats[i],
384 					    xstats_names, xstats_count,
385 					    nb_written, nb_supported);
386 		if (ret != 0) {
387 			sfc_adapter_unlock(sa);
388 			return ret;
389 		}
390 	}
391 
392 	sfc_adapter_unlock(sa);
393 
394 	return 0;
395 }
396 
397 void
398 sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
399 			     const uint64_t *ids,
400 			     uint64_t *values,
401 			     unsigned int n,
402 			     unsigned int *nb_supported)
403 {
404 	uint64_t *reset_vals = sa->sw_xstats.reset_vals;
405 	unsigned int sw_xstats_offset;
406 	unsigned int i;
407 
408 	sfc_adapter_lock(sa);
409 
410 	sw_xstats_offset = *nb_supported;
411 
412 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
413 		sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_xstats[i], ids,
414 					      values, n, nb_supported);
415 	}
416 
417 	for (i = 0; i < n; i++) {
418 		if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
419 			values[i] -= reset_vals[ids[i] - sw_xstats_offset];
420 	}
421 
422 	sfc_adapter_unlock(sa);
423 }
424 
425 int
426 sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
427 			      const uint64_t *ids,
428 			      struct rte_eth_xstat_name *xstats_names,
429 			      unsigned int size,
430 			      unsigned int *nb_supported)
431 {
432 	unsigned int i;
433 	int ret;
434 
435 	sfc_adapter_lock(sa);
436 
437 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
438 		ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_xstats[i], ids,
439 						   xstats_names, size,
440 						   nb_supported);
441 		if (ret != 0) {
442 			sfc_adapter_unlock(sa);
443 			SFC_ASSERT(ret < 0);
444 			return ret;
445 		}
446 	}
447 
448 	sfc_adapter_unlock(sa);
449 
450 	return 0;
451 }
452 
453 static void
454 sfc_sw_xstat_reset(struct sfc_adapter *sa, struct sfc_sw_xstat_descr *sw_xstat,
455 		   uint64_t *reset_vals)
456 {
457 	unsigned int nb_queues;
458 	unsigned int qid;
459 	uint64_t *accum_xstat_reset;
460 
461 	SFC_ASSERT(sfc_adapter_is_locked(sa));
462 
463 	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
464 	if (nb_queues == 0)
465 		return;
466 
467 	/*
468 	 * The order of each software xstat type is the accumulative xstat
469 	 * followed by per-queue xstats.
470 	 */
471 	accum_xstat_reset = reset_vals;
472 	*accum_xstat_reset = 0;
473 	reset_vals++;
474 
475 	for (qid = 0; qid < nb_queues; ++qid) {
476 		reset_vals[qid] = sw_xstat->get_val(sa, qid);
477 		*accum_xstat_reset += reset_vals[qid];
478 	}
479 }
480 
481 void
482 sfc_sw_xstats_reset(struct sfc_adapter *sa)
483 {
484 	uint64_t *reset_vals = sa->sw_xstats.reset_vals;
485 	struct sfc_sw_xstat_descr *sw_xstat;
486 	unsigned int i;
487 
488 	SFC_ASSERT(sfc_adapter_is_locked(sa));
489 
490 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
491 		sw_xstat = &sfc_sw_xstats[i];
492 		sfc_sw_xstat_reset(sa, sw_xstat, reset_vals);
493 		reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_xstat);
494 	}
495 }
496 
497 int
498 sfc_sw_xstats_configure(struct sfc_adapter *sa)
499 {
500 	uint64_t **reset_vals = &sa->sw_xstats.reset_vals;
501 	size_t nb_supported = 0;
502 	unsigned int i;
503 
504 	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++)
505 		nb_supported += sfc_sw_xstat_get_nb_supported(sa,
506 							&sfc_sw_xstats[i]);
507 
508 	*reset_vals = rte_realloc(*reset_vals,
509 				  nb_supported * sizeof(**reset_vals), 0);
510 	if (*reset_vals == NULL)
511 		return ENOMEM;
512 
513 	memset(*reset_vals, 0, nb_supported * sizeof(**reset_vals));
514 
515 	return 0;
516 }
517 
518 static void
519 sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
520 {
521 	rte_bitmap_free(sa->sw_xstats.queues_bitmap);
522 	rte_free(sa->sw_xstats.queues_bitmap_mem);
523 }
524 
525 static int
526 sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
527 {
528 	struct rte_bitmap **queues_bitmap = &sa->sw_xstats.queues_bitmap;
529 	void **queues_bitmap_mem = &sa->sw_xstats.queues_bitmap_mem;
530 	uint32_t bmp_size;
531 	int rc;
532 
533 	bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
534 	*queues_bitmap_mem = NULL;
535 	*queues_bitmap = NULL;
536 
537 	*queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
538 					       sa->socket_id);
539 	if (*queues_bitmap_mem == NULL)
540 		return ENOMEM;
541 
542 	*queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
543 					 *queues_bitmap_mem, bmp_size);
544 	if (*queues_bitmap == NULL) {
545 		rc = EINVAL;
546 		goto fail;
547 	}
548 
549 	rte_spinlock_init(&sa->sw_xstats.queues_bitmap_lock);
550 	return 0;
551 
552 fail:
553 	sfc_sw_xstats_free_queues_bitmap(sa);
554 	return rc;
555 }
556 
557 int
558 sfc_sw_xstats_init(struct sfc_adapter *sa)
559 {
560 	sa->sw_xstats.reset_vals = NULL;
561 
562 	return sfc_sw_xstats_alloc_queues_bitmap(sa);
563 }
564 
565 void
566 sfc_sw_xstats_close(struct sfc_adapter *sa)
567 {
568 	rte_free(sa->sw_xstats.reset_vals);
569 	sa->sw_xstats.reset_vals = NULL;
570 
571 	sfc_sw_xstats_free_queues_bitmap(sa);
572 }
573