xref: /dpdk/lib/dmadev/rte_dmadev.c (revision da7e701151ea8b742d4c38ace3e4fefd1b4507fc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 HiSilicon Limited
3  * Copyright(c) 2021 Intel Corporation
4  */
5 
6 #include <ctype.h>
7 #include <inttypes.h>
8 #include <stdlib.h>
9 
10 #include <rte_eal.h>
11 #include <rte_lcore.h>
12 #include <rte_log.h>
13 #include <rte_malloc.h>
14 #include <rte_memzone.h>
15 #include <rte_string_fns.h>
16 #include <rte_telemetry.h>
17 
18 #include "rte_dmadev.h"
19 #include "rte_dmadev_pmd.h"
20 #include "rte_dmadev_trace.h"
21 
22 static int16_t dma_devices_max;
23 
24 struct rte_dma_fp_object *rte_dma_fp_objs;
25 static struct rte_dma_dev *rte_dma_devices;
26 static struct {
27 	/* Hold the dev_max information of the primary process. This field is
28 	 * set by the primary process and is read by the secondary process.
29 	 */
30 	int16_t dev_max;
31 	struct rte_dma_dev_data data[0];
32 } *dma_devices_shared_data;
33 
34 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
35 #define RTE_DMA_LOG(level, ...) \
36 	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
37 		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
38 
39 int
40 rte_dma_dev_max(size_t dev_max)
41 {
42 	/* This function may be called before rte_eal_init(), so no RTE library
43 	 * functions may be called from it.
44 	 */
45 	if (dev_max == 0 || dev_max > INT16_MAX)
46 		return -EINVAL;
47 
48 	if (dma_devices_max > 0)
49 		return -EINVAL;
50 
51 	dma_devices_max = dev_max;
52 
53 	return 0;
54 }
55 
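/*
 * Illustrative use of rte_dma_dev_max() (the value 128 is a placeholder):
 * an application expecting more DMA devices than the library default raises
 * the limit once, before rte_eal_init():
 *
 *     ret = rte_dma_dev_max(128);
 *     if (ret != 0)
 *             ...                      // -EINVAL: bad value or already set
 *     ret = rte_eal_init(argc, argv);
 *
 * A second call fails because dma_devices_max is already non-zero.
 */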
56 int16_t
57 rte_dma_next_dev(int16_t start_dev_id)
58 {
59 	int16_t dev_id = start_dev_id;
60 	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
61 		dev_id++;
62 
63 	if (dev_id < dma_devices_max)
64 		return dev_id;
65 
66 	return -1;
67 }
68 
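/*
 * rte_dma_next_dev() above is normally consumed through the
 * RTE_DMA_FOREACH_DEV() helper declared in rte_dmadev.h; a minimal sketch:
 *
 *     int16_t dev_id;
 *
 *     RTE_DMA_FOREACH_DEV(dev_id) {
 *             // dev_id names a device whose state is not RTE_DMA_DEV_UNUSED
 *     }
 */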
69 static int
70 dma_check_name(const char *name)
71 {
72 	size_t name_len;
73 
74 	if (name == NULL) {
75 		RTE_DMA_LOG(ERR, "Name can't be NULL");
76 		return -EINVAL;
77 	}
78 
79 	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
80 	if (name_len == 0) {
81 		RTE_DMA_LOG(ERR, "Zero length DMA device name");
82 		return -EINVAL;
83 	}
84 	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
85 		RTE_DMA_LOG(ERR, "DMA device name is too long");
86 		return -EINVAL;
87 	}
88 
89 	return 0;
90 }
91 
92 static int16_t
93 dma_find_free_id(void)
94 {
95 	int16_t i;
96 
97 	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
98 		return -1;
99 
100 	for (i = 0; i < dma_devices_max; i++) {
101 		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
102 			return i;
103 	}
104 
105 	return -1;
106 }
107 
108 static struct rte_dma_dev*
109 dma_find_by_name(const char *name)
110 {
111 	int16_t i;
112 
113 	if (rte_dma_devices == NULL)
114 		return NULL;
115 
116 	for (i = 0; i < dma_devices_max; i++) {
117 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
118 		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
119 			return &rte_dma_devices[i];
120 	}
121 
122 	return NULL;
123 }
124 
125 static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);
126 
127 static int
128 dma_fp_data_prepare(void)
129 {
130 	size_t size;
131 	void *ptr;
132 	int i;
133 
134 	if (rte_dma_fp_objs != NULL)
135 		return 0;
136 
137 	/* Fast-path objects must be cache-line aligned, but malloc() does not
138 	 * guarantee cache-line alignment. Therefore, extra memory is allocated
139 	 * so the pointer can be realigned to a cache-line boundary.
140 	 * Note: posix_memalign()/aligned_alloc() are not used because their
141 	 * availability depends on the libc version.
142 	 */
143 	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
144 		RTE_CACHE_LINE_SIZE;
145 	ptr = malloc(size);
146 	if (ptr == NULL)
147 		return -ENOMEM;
148 	memset(ptr, 0, size);
149 
150 	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
151 	for (i = 0; i < dma_devices_max; i++)
152 		dma_fp_object_dummy(&rte_dma_fp_objs[i]);
153 
154 	return 0;
155 }
156 
157 static int
158 dma_dev_data_prepare(void)
159 {
160 	size_t size;
161 
162 	if (rte_dma_devices != NULL)
163 		return 0;
164 
165 	size = dma_devices_max * sizeof(struct rte_dma_dev);
166 	rte_dma_devices = malloc(size);
167 	if (rte_dma_devices == NULL)
168 		return -ENOMEM;
169 	memset(rte_dma_devices, 0, size);
170 
171 	return 0;
172 }
173 
174 static int
175 dma_shared_data_prepare(void)
176 {
177 	const char *mz_name = "rte_dma_dev_data";
178 	const struct rte_memzone *mz;
179 	size_t size;
180 
181 	if (dma_devices_shared_data != NULL)
182 		return 0;
183 
184 	size = sizeof(*dma_devices_shared_data) +
185 		sizeof(struct rte_dma_dev_data) * dma_devices_max;
186 
187 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
188 		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
189 	else
190 		mz = rte_memzone_lookup(mz_name);
191 	if (mz == NULL)
192 		return -ENOMEM;
193 
194 	dma_devices_shared_data = mz->addr;
195 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
196 		memset(dma_devices_shared_data, 0, size);
197 		dma_devices_shared_data->dev_max = dma_devices_max;
198 	} else {
199 		dma_devices_max = dma_devices_shared_data->dev_max;
200 	}
201 
202 	return 0;
203 }
204 
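/*
 * Prepare the per-process control structures. Note the ordering difference
 * below: the primary process sizes everything from dma_devices_max and then
 * publishes that value through the shared memzone, whereas a secondary
 * process must attach to the shared data first so that dma_devices_max is
 * taken from the primary before the fast-path and device arrays are sized.
 */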
205 static int
206 dma_data_prepare(void)
207 {
208 	int ret;
209 
210 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
211 		if (dma_devices_max == 0)
212 			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
213 		ret = dma_fp_data_prepare();
214 		if (ret)
215 			return ret;
216 		ret = dma_dev_data_prepare();
217 		if (ret)
218 			return ret;
219 		ret = dma_shared_data_prepare();
220 		if (ret)
221 			return ret;
222 	} else {
223 		ret = dma_shared_data_prepare();
224 		if (ret)
225 			return ret;
226 		ret = dma_fp_data_prepare();
227 		if (ret)
228 			return ret;
229 		ret = dma_dev_data_prepare();
230 		if (ret)
231 			return ret;
232 	}
233 
234 	return 0;
235 }
236 
237 static struct rte_dma_dev *
238 dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
239 {
240 	struct rte_dma_dev *dev;
241 	void *dev_private;
242 	int16_t dev_id;
243 	int ret;
244 
245 	ret = dma_data_prepare();
246 	if (ret < 0) {
247 		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
248 		return NULL;
249 	}
250 
251 	dev = dma_find_by_name(name);
252 	if (dev != NULL) {
253 		RTE_DMA_LOG(ERR, "DMA device already allocated");
254 		return NULL;
255 	}
256 
257 	dev_private = rte_zmalloc_socket(name, private_data_size,
258 					 RTE_CACHE_LINE_SIZE, numa_node);
259 	if (dev_private == NULL) {
260 		RTE_DMA_LOG(ERR, "Cannot allocate private data");
261 		return NULL;
262 	}
263 
264 	dev_id = dma_find_free_id();
265 	if (dev_id < 0) {
266 		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
267 		rte_free(dev_private);
268 		return NULL;
269 	}
270 
271 	dev = &rte_dma_devices[dev_id];
272 	dev->data = &dma_devices_shared_data->data[dev_id];
273 	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
274 	dev->data->dev_id = dev_id;
275 	dev->data->numa_node = numa_node;
276 	dev->data->dev_private = dev_private;
277 
278 	return dev;
279 }
280 
281 static struct rte_dma_dev *
282 dma_attach_secondary(const char *name)
283 {
284 	struct rte_dma_dev *dev;
285 	int16_t i;
286 	int ret;
287 
288 	ret = dma_data_prepare();
289 	if (ret < 0) {
290 		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
291 		return NULL;
292 	}
293 
294 	for (i = 0; i < dma_devices_max; i++) {
295 		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
296 			break;
297 	}
298 	if (i == dma_devices_max) {
299 		RTE_DMA_LOG(ERR,
300 			"Device %s is not driven by the primary process",
301 			name);
302 		return NULL;
303 	}
304 
305 	dev = &rte_dma_devices[i];
306 	dev->data = &dma_devices_shared_data->data[i];
307 
308 	return dev;
309 }
310 
311 static struct rte_dma_dev *
312 dma_allocate(const char *name, int numa_node, size_t private_data_size)
313 {
314 	struct rte_dma_dev *dev;
315 
316 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
317 		dev = dma_allocate_primary(name, numa_node, private_data_size);
318 	else
319 		dev = dma_attach_secondary(name);
320 
321 	if (dev) {
322 		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
323 		dma_fp_object_dummy(dev->fp_obj);
324 	}
325 
326 	return dev;
327 }
328 
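/*
 * Undo dma_allocate(): only the primary process owns the device private data
 * and the shared data slot, so only it frees and clears them; both process
 * types reset the local rte_dma_dev entry and its fast-path object back to
 * the dummy callbacks.
 */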
329 static void
330 dma_release(struct rte_dma_dev *dev)
331 {
332 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
333 		rte_free(dev->data->dev_private);
334 		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
335 	}
336 
337 	dma_fp_object_dummy(dev->fp_obj);
338 	memset(dev, 0, sizeof(struct rte_dma_dev));
339 }
340 
341 struct rte_dma_dev *
342 rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
343 {
344 	struct rte_dma_dev *dev;
345 
346 	if (dma_check_name(name) != 0 || private_data_size == 0)
347 		return NULL;
348 
349 	dev = dma_allocate(name, numa_node, private_data_size);
350 	if (dev == NULL)
351 		return NULL;
352 
353 	dev->state = RTE_DMA_DEV_REGISTERED;
354 
355 	return dev;
356 }
357 
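/*
 * Rough sketch of how a PMD probe routine is expected to use the allocator
 * above (struct my_priv, my_bus_device, my_dev_ops and my_copy are
 * illustrative names; see the skeleton dmadev driver for a complete example):
 *
 *     dev = rte_dma_pmd_allocate(name, numa_node, sizeof(struct my_priv));
 *     if (dev == NULL)
 *             return -ENOMEM;
 *     dev->device = &my_bus_device;          // backing rte_device
 *     dev->dev_ops = &my_dev_ops;            // control-path callbacks
 *     dev->fp_obj->dev_private = dev->data->dev_private;
 *     dev->fp_obj->copy = my_copy;           // fast-path callbacks
 *     dev->state = RTE_DMA_DEV_READY;        // device is now usable
 */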
358 int
359 rte_dma_pmd_release(const char *name)
360 {
361 	struct rte_dma_dev *dev;
362 
363 	if (dma_check_name(name) != 0)
364 		return -EINVAL;
365 
366 	dev = dma_find_by_name(name);
367 	if (dev == NULL)
368 		return -EINVAL;
369 
370 	if (dev->state == RTE_DMA_DEV_READY)
371 		return rte_dma_close(dev->data->dev_id);
372 
373 	dma_release(dev);
374 	return 0;
375 }
376 
377 int
378 rte_dma_get_dev_id_by_name(const char *name)
379 {
380 	struct rte_dma_dev *dev;
381 
382 	if (dma_check_name(name) != 0)
383 		return -EINVAL;
384 
385 	dev = dma_find_by_name(name);
386 	if (dev == NULL)
387 		return -EINVAL;
388 
389 	return dev->data->dev_id;
390 }
391 
392 bool
393 rte_dma_is_valid(int16_t dev_id)
394 {
395 	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
396 		rte_dma_devices != NULL &&
397 		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
398 }
399 
400 uint16_t
401 rte_dma_count_avail(void)
402 {
403 	uint16_t count = 0;
404 	uint16_t i;
405 
406 	if (rte_dma_devices == NULL)
407 		return count;
408 
409 	for (i = 0; i < dma_devices_max; i++) {
410 		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
411 			count++;
412 	}
413 
414 	return count;
415 }
416 
417 int
418 rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
419 {
420 	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
421 	int ret;
422 
423 	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
424 		return -EINVAL;
425 
426 	if (*dev->dev_ops->dev_info_get == NULL)
427 		return -ENOTSUP;
428 	memset(dev_info, 0, sizeof(struct rte_dma_info));
429 	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
430 					    sizeof(struct rte_dma_info));
431 	if (ret != 0)
432 		return ret;
433 
434 	dev_info->dev_name = dev->data->dev_name;
435 	dev_info->numa_node = dev->device->numa_node;
436 	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
437 
438 	rte_dma_trace_info_get(dev_id, dev_info);
439 
440 	return 0;
441 }
442 
443 int
444 rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
445 {
446 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
447 	struct rte_dma_info dev_info;
448 	int ret;
449 
450 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
451 		return -EINVAL;
452 
453 	if (dev->data->dev_started != 0) {
454 		RTE_DMA_LOG(ERR,
455 			"Device %d must be stopped to allow configuration",
456 			dev_id);
457 		return -EBUSY;
458 	}
459 
460 	ret = rte_dma_info_get(dev_id, &dev_info);
461 	if (ret != 0) {
462 		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
463 		return -EINVAL;
464 	}
465 	if (dev_conf->nb_vchans == 0) {
466 		RTE_DMA_LOG(ERR,
467 			"Device %d configured with zero vchans", dev_id);
468 		return -EINVAL;
469 	}
470 	if (dev_conf->nb_vchans > dev_info.max_vchans) {
471 		RTE_DMA_LOG(ERR,
472 			"Device %d configured with too many vchans", dev_id);
473 		return -EINVAL;
474 	}
475 	if (dev_conf->enable_silent &&
476 	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
477 		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
478 		return -EINVAL;
479 	}
480 
481 	if (*dev->dev_ops->dev_configure == NULL)
482 		return -ENOTSUP;
483 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
484 					     sizeof(struct rte_dma_conf));
485 	if (ret == 0)
486 		memcpy(&dev->data->dev_conf, dev_conf,
487 		       sizeof(struct rte_dma_conf));
488 
489 	rte_dma_trace_configure(dev_id, dev_conf, ret);
490 
491 	return ret;
492 }
493 
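/*
 * Illustrative application-level bring-up sequence (error handling omitted;
 * nb_vchans and nb_desc are placeholders and must respect the limits
 * reported by rte_dma_info_get()):
 *
 *     struct rte_dma_conf conf = { .nb_vchans = 1 };
 *     struct rte_dma_vchan_conf vconf = {
 *             .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *             .nb_desc = 128,
 *     };
 *
 *     rte_dma_configure(dev_id, &conf);
 *     rte_dma_vchan_setup(dev_id, 0, &vconf);
 *     rte_dma_start(dev_id);
 */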
494 int
495 rte_dma_start(int16_t dev_id)
496 {
497 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
498 	int ret;
499 
500 	if (!rte_dma_is_valid(dev_id))
501 		return -EINVAL;
502 
503 	if (dev->data->dev_conf.nb_vchans == 0) {
504 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
505 		return -EINVAL;
506 	}
507 
508 	if (dev->data->dev_started != 0) {
509 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
510 		return 0;
511 	}
512 
513 	if (dev->dev_ops->dev_start == NULL)
514 		goto mark_started;
515 
516 	ret = (*dev->dev_ops->dev_start)(dev);
517 	rte_dma_trace_start(dev_id, ret);
518 	if (ret != 0)
519 		return ret;
520 
521 mark_started:
522 	dev->data->dev_started = 1;
523 	return 0;
524 }
525 
526 int
527 rte_dma_stop(int16_t dev_id)
528 {
529 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
530 	int ret;
531 
532 	if (!rte_dma_is_valid(dev_id))
533 		return -EINVAL;
534 
535 	if (dev->data->dev_started == 0) {
536 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
537 		return 0;
538 	}
539 
540 	if (dev->dev_ops->dev_stop == NULL)
541 		goto mark_stopped;
542 
543 	ret = (*dev->dev_ops->dev_stop)(dev);
544 	rte_dma_trace_stop(dev_id, ret);
545 	if (ret != 0)
546 		return ret;
547 
548 mark_stopped:
549 	dev->data->dev_started = 0;
550 	return 0;
551 }
552 
553 int
554 rte_dma_close(int16_t dev_id)
555 {
556 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
557 	int ret;
558 
559 	if (!rte_dma_is_valid(dev_id))
560 		return -EINVAL;
561 
562 	/* Device must be stopped before it can be closed */
563 	if (dev->data->dev_started == 1) {
564 		RTE_DMA_LOG(ERR,
565 			"Device %d must be stopped before closing", dev_id);
566 		return -EBUSY;
567 	}
568 
569 	if (*dev->dev_ops->dev_close == NULL)
570 		return -ENOTSUP;
571 	ret = (*dev->dev_ops->dev_close)(dev);
572 	if (ret == 0)
573 		dma_release(dev);
574 
575 	rte_dma_trace_close(dev_id, ret);
576 
577 	return ret;
578 }
579 
580 int
581 rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
582 		    const struct rte_dma_vchan_conf *conf)
583 {
584 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
585 	struct rte_dma_info dev_info;
586 	bool src_is_dev, dst_is_dev;
587 	int ret;
588 
589 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
590 		return -EINVAL;
591 
592 	if (dev->data->dev_started != 0) {
593 		RTE_DMA_LOG(ERR,
594 			"Device %d must be stopped to allow configuration",
595 			dev_id);
596 		return -EBUSY;
597 	}
598 
599 	ret = rte_dma_info_get(dev_id, &dev_info);
600 	if (ret != 0) {
601 		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
602 		return -EINVAL;
603 	}
604 	if (dev->data->dev_conf.nb_vchans == 0) {
605 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
606 		return -EINVAL;
607 	}
608 	if (vchan >= dev_info.nb_vchans) {
609 		RTE_DMA_LOG(ERR, "Device %d vchan out of range!", dev_id);
610 		return -EINVAL;
611 	}
612 	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
613 	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
614 	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
615 	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
616 		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
617 		return -EINVAL;
618 	}
619 	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
620 	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
621 		RTE_DMA_LOG(ERR,
622 			"Device %d doesn't support mem2mem transfer", dev_id);
623 		return -EINVAL;
624 	}
625 	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
626 	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
627 		RTE_DMA_LOG(ERR,
628 			"Device %d doesn't support mem2dev transfer", dev_id);
629 		return -EINVAL;
630 	}
631 	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
632 	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
633 		RTE_DMA_LOG(ERR,
634 			"Device %d doesn't support dev2mem transfer", dev_id);
635 		return -EINVAL;
636 	}
637 	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
638 	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
639 		RTE_DMA_LOG(ERR,
640 			"Device %d doesn't support dev2dev transfer", dev_id);
641 		return -EINVAL;
642 	}
643 	if (conf->nb_desc < dev_info.min_desc ||
644 	    conf->nb_desc > dev_info.max_desc) {
645 		RTE_DMA_LOG(ERR,
646 			"Device %d number of descriptors invalid", dev_id);
647 		return -EINVAL;
648 	}
649 	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
650 		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
651 	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
652 	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
653 		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
654 		return -EINVAL;
655 	}
656 	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
657 		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
658 	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
659 	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
660 		RTE_DMA_LOG(ERR,
661 			"Device %d destination port type invalid", dev_id);
662 		return -EINVAL;
663 	}
664 
665 	if (*dev->dev_ops->vchan_setup == NULL)
666 		return -ENOTSUP;
667 	ret = (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
668 					sizeof(struct rte_dma_vchan_conf));
669 	rte_dma_trace_vchan_setup(dev_id, vchan, conf, ret);
670 
671 	return ret;
672 }
673 
674 int
675 rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
676 {
677 	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
678 
679 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
680 		return -EINVAL;
681 
682 	if (vchan >= dev->data->dev_conf.nb_vchans &&
683 	    vchan != RTE_DMA_ALL_VCHAN) {
684 		RTE_DMA_LOG(ERR,
685 			"Device %d vchan %u out of range", dev_id, vchan);
686 		return -EINVAL;
687 	}
688 
689 	if (*dev->dev_ops->stats_get == NULL)
690 		return -ENOTSUP;
691 	memset(stats, 0, sizeof(struct rte_dma_stats));
692 	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
693 					  sizeof(struct rte_dma_stats));
694 }
695 
696 int
697 rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
698 {
699 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
700 	int ret;
701 
702 	if (!rte_dma_is_valid(dev_id))
703 		return -EINVAL;
704 
705 	if (vchan >= dev->data->dev_conf.nb_vchans &&
706 	    vchan != RTE_DMA_ALL_VCHAN) {
707 		RTE_DMA_LOG(ERR,
708 			"Device %d vchan %u out of range", dev_id, vchan);
709 		return -EINVAL;
710 	}
711 
712 	if (*dev->dev_ops->stats_reset == NULL)
713 		return -ENOTSUP;
714 	ret = (*dev->dev_ops->stats_reset)(dev, vchan);
715 	rte_dma_trace_stats_reset(dev_id, vchan, ret);
716 
717 	return ret;
718 }
719 
720 int
721 rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
722 {
723 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
724 
725 	if (!rte_dma_is_valid(dev_id))
726 		return -EINVAL;
727 
728 	if (vchan >= dev->data->dev_conf.nb_vchans) {
729 		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
730 		return -EINVAL;
731 	}
732 
733 	if (*dev->dev_ops->vchan_status == NULL)
734 		return -ENOTSUP;
735 	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
736 }
737 
738 static const char *
739 dma_capability_name(uint64_t capability)
740 {
741 	static const struct {
742 		uint64_t capability;
743 		const char *name;
744 	} capa_names[] = {
745 		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
746 		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
747 		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
748 		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
749 		{ RTE_DMA_CAPA_SVA,         "sva"     },
750 		{ RTE_DMA_CAPA_SILENT,      "silent"  },
751 		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
752 		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
753 		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
754 		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
755 	};
756 
757 	const char *name = "unknown";
758 	uint32_t i;
759 
760 	for (i = 0; i < RTE_DIM(capa_names); i++) {
761 		if (capability == capa_names[i].capability) {
762 			name = capa_names[i].name;
763 			break;
764 		}
765 	}
766 
767 	return name;
768 }
769 
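/*
 * Print the raw dev_capa bit mask followed by the name of each set bit:
 * rte_ctz64() isolates the lowest set bit, which is printed and then
 * cleared until the mask is exhausted.
 */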
770 static void
771 dma_dump_capability(FILE *f, uint64_t dev_capa)
772 {
773 	uint64_t capa;
774 
775 	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
776 	while (dev_capa > 0) {
777 		capa = 1ull << rte_ctz64(dev_capa);
778 		(void)fprintf(f, " %s", dma_capability_name(capa));
779 		dev_capa &= ~capa;
780 	}
781 	(void)fprintf(f, "\n");
782 }
783 
784 int
785 rte_dma_dump(int16_t dev_id, FILE *f)
786 {
787 	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
788 	struct rte_dma_info dev_info;
789 	int ret;
790 
791 	if (!rte_dma_is_valid(dev_id) || f == NULL)
792 		return -EINVAL;
793 
794 	ret = rte_dma_info_get(dev_id, &dev_info);
795 	if (ret != 0) {
796 		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
797 		return -EINVAL;
798 	}
799 
800 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
801 		dev->data->dev_id,
802 		dev->data->dev_name,
803 		dev->data->dev_started ? "started" : "stopped");
804 	dma_dump_capability(f, dev_info.dev_capa);
805 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
806 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
807 	(void)fprintf(f, "  silent_mode: %s\n",
808 		dev->data->dev_conf.enable_silent ? "on" : "off");
809 
810 	if (dev->dev_ops->dev_dump != NULL)
811 		ret = (*dev->dev_ops->dev_dump)(dev, f);
812 	rte_dma_trace_dump(dev_id, f, ret);
813 
814 	return ret;
815 }
816 
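/*
 * Dummy fast-path callbacks. Every fast-path object is initialised to point
 * at these stubs (and reset to them on release), so a data-path call made
 * before a driver installs its real callbacks, or after the device has been
 * released, logs an error and fails instead of dereferencing a NULL pointer.
 */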
817 static int
818 dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
819 	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
820 	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
821 {
822 	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
823 	return -EINVAL;
824 }
825 
826 static int
827 dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
828 	      __rte_unused const struct rte_dma_sge *src,
829 	      __rte_unused const struct rte_dma_sge *dst,
830 	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
831 	      __rte_unused uint64_t flags)
832 {
833 	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
834 	return -EINVAL;
835 }
836 
837 static int
838 dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
839 	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
840 	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
841 {
842 	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
843 	return -EINVAL;
844 }
845 
846 static int
847 dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
848 {
849 	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
850 	return -EINVAL;
851 }
852 
853 static uint16_t
854 dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
855 		__rte_unused const uint16_t nb_cpls,
856 		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
857 {
858 	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
859 	return 0;
860 }
861 
862 static uint16_t
863 dummy_completed_status(__rte_unused void *dev_private,
864 		       __rte_unused uint16_t vchan,
865 		       __rte_unused const uint16_t nb_cpls,
866 		       __rte_unused uint16_t *last_idx,
867 		       __rte_unused enum rte_dma_status_code *status)
868 {
869 	RTE_DMA_LOG(ERR,
870 		    "completed_status is not configured or not supported.");
871 	return 0;
872 }
873 
874 static uint16_t
875 dummy_burst_capacity(__rte_unused const void *dev_private,
876 		     __rte_unused uint16_t vchan)
877 {
878 	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
879 	return 0;
880 }
881 
882 static void
883 dma_fp_object_dummy(struct rte_dma_fp_object *obj)
884 {
885 	obj->dev_private      = NULL;
886 	obj->copy             = dummy_copy;
887 	obj->copy_sg          = dummy_copy_sg;
888 	obj->fill             = dummy_fill;
889 	obj->submit           = dummy_submit;
890 	obj->completed        = dummy_completed;
891 	obj->completed_status = dummy_completed_status;
892 	obj->burst_capacity   = dummy_burst_capacity;
893 }
894 
895 static int
896 dmadev_handle_dev_list(const char *cmd __rte_unused,
897 		const char *params __rte_unused,
898 		struct rte_tel_data *d)
899 {
900 	int dev_id;
901 
902 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
903 	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
904 		if (rte_dma_is_valid(dev_id))
905 			rte_tel_data_add_array_int(d, dev_id);
906 
907 	return 0;
908 }
909 
910 #define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c))
911 
912 static int
913 dmadev_handle_dev_info(const char *cmd __rte_unused,
914 		const char *params, struct rte_tel_data *d)
915 {
916 	struct rte_dma_info dma_info;
917 	struct rte_tel_data *dma_caps;
918 	int dev_id, ret;
919 	uint64_t dev_capa;
920 	char *end_param;
921 
922 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
923 		return -EINVAL;
924 
925 	dev_id = strtoul(params, &end_param, 0);
926 	if (*end_param != '\0')
927 		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");
928 
929 	/* Function info_get validates dev_id so we don't need to. */
930 	ret = rte_dma_info_get(dev_id, &dma_info);
931 	if (ret < 0)
932 		return -EINVAL;
933 	dev_capa = dma_info.dev_capa;
934 
935 	rte_tel_data_start_dict(d);
936 	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
937 	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
938 	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
939 	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
940 	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
941 	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
942 	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);
943 
944 	dma_caps = rte_tel_data_alloc();
945 	if (!dma_caps)
946 		return -ENOMEM;
947 
948 	rte_tel_data_start_dict(dma_caps);
949 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
950 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
951 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
952 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
953 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
954 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
955 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
956 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
957 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
958 	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
959 	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);
960 
961 	return 0;
962 }
963 
964 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, dma_stats.s)
965 
966 static int
967 dmadev_handle_dev_stats(const char *cmd __rte_unused,
968 		const char *params,
969 		struct rte_tel_data *d)
970 {
971 	struct rte_dma_info dma_info;
972 	struct rte_dma_stats dma_stats;
973 	int dev_id, ret, vchan_id;
974 	char *end_param;
975 	const char *vchan_param;
976 
977 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
978 		return -EINVAL;
979 
980 	dev_id = strtoul(params, &end_param, 0);
981 
982 	/* Function info_get validates dev_id so we don't need to. */
983 	ret = rte_dma_info_get(dev_id, &dma_info);
984 	if (ret < 0)
985 		return -EINVAL;
986 
987 	/* If the device has only one vchan, the user need not supply the
988 	 * vchan id; the device id alone is sufficient, with no extra parameters.
989 	 */
990 	if (dma_info.nb_vchans == 1 && *end_param == '\0')
991 		vchan_id = 0;
992 	else {
993 		vchan_param = strtok(end_param, ",");
994 		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
995 			return -EINVAL;
996 
997 		vchan_id = strtoul(vchan_param, &end_param, 0);
998 	}
999 	if (*end_param != '\0')
1000 		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");
1001 
1002 	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
1003 	if (ret < 0)
1004 		return -EINVAL;
1005 
1006 	rte_tel_data_start_dict(d);
1007 	ADD_DICT_STAT(submitted);
1008 	ADD_DICT_STAT(completed);
1009 	ADD_DICT_STAT(errors);
1010 
1011 	return 0;
1012 }
1013 
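/*
 * Example query through the telemetry client (usertools/dpdk-telemetry.py),
 * purely illustrative:
 *
 *     --> /dmadev/stats,0,1
 *
 * requests the statistics of vchan 1 on dmadev 0; the vchan id may be
 * omitted when the device has a single vchan configured.
 */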
1014 #ifndef RTE_EXEC_ENV_WINDOWS
1015 static int
1016 dmadev_handle_dev_dump(const char *cmd __rte_unused,
1017 		const char *params,
1018 		struct rte_tel_data *d)
1019 {
1020 	char *buf, *end_param;
1021 	int dev_id, ret;
1022 	FILE *f;
1023 
1024 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1025 		return -EINVAL;
1026 
1027 	dev_id = strtoul(params, &end_param, 0);
1028 	if (*end_param != '\0')
1029 		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");
1030 
1031 	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
1032 	if (buf == NULL)
1033 		return -ENOMEM;
1034 
1035 	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
1036 	if (f == NULL) {
1037 		free(buf);
1038 		return -EINVAL;
1039 	}
1040 
1041 	ret = rte_dma_dump(dev_id, f);
1042 	fclose(f);
1043 	if (ret == 0) {
1044 		rte_tel_data_start_dict(d);
1045 		rte_tel_data_string(d, buf);
1046 	}
1047 
1048 	free(buf);
1049 	return ret;
1050 }
1051 #endif /* !RTE_EXEC_ENV_WINDOWS */
1052 
1053 RTE_INIT(dmadev_init_telemetry)
1054 {
1055 	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
1056 			"Returns list of available dmadev devices by IDs. No parameters.");
1057 	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
1058 			"Returns information for a dmadev. Parameters: int dev_id");
1059 	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
1060 			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (Optional if only one vchannel)");
1061 #ifndef RTE_EXEC_ENV_WINDOWS
1062 	rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump,
1063 			"Returns dump information for a dmadev. Parameters: int dev_id");
1064 #endif
1065 }
1066