/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Hold the dev_max value of the primary process. It is set by the
	 * primary process and read by secondary processes.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so no RTE
	 * library functions may be called here.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
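
/* Illustrative usage sketch (not part of this file): an application that
 * expects more devices than the build-time default can raise the cap, but
 * only before rte_eal_init() and only once. The value 64 is an arbitrary
 * example.
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (rte_dma_dev_max(64) != 0)
 *			return -1;
 *		if (rte_eal_init(argc, argv) < 0)
 *			return -1;
 *		...
 *	}
 */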

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;

	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
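
/* Typical iteration idiom built on this helper (illustrative sketch):
 *
 *	int16_t dev_id;
 *
 *	for (dev_id = rte_dma_next_dev(0); dev_id != -1;
 *	     dev_id = rte_dma_next_dev(dev_id + 1)) {
 *		// act on each in-use device
 *	}
 *
 * This is the pattern behind the RTE_DMA_FOREACH_DEV() convenience macro
 * declared in rte_dmadev.h.
 */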

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but malloc() does not
	 * guarantee cache-line alignment, so allocate extra memory and realign
	 * the returned pointer.
	 * Note: posix_memalign()/aligned_alloc() are not used here because
	 * their availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}

static int
dma_dev_data_prepare(void)
{
	size_t size;

	if (rte_dma_devices != NULL)
		return 0;

	size = dma_devices_max * sizeof(struct rte_dma_dev);
	rte_dma_devices = malloc(size);
	if (rte_dma_devices == NULL)
		return -ENOMEM;
	memset(rte_dma_devices, 0, size);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
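		/* A secondary process must map the shared data first: it
		 * carries dev_max from the primary process, which sizes the
		 * per-process fp-object and device arrays allocated below.
		 */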
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
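
/* Illustrative PMD probe sketch (assumed driver-side usage, not part of this
 * file); the my_* names are hypothetical. A driver allocates the device,
 * fills in its ops and fast-path callbacks, then marks it ready:
 *
 *	static int
 *	my_dma_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_dma_dev *dev;
 *
 *		dev = rte_dma_pmd_allocate(rte_dev->name, rte_dev->numa_node,
 *					   sizeof(struct my_dma_private));
 *		if (dev == NULL)
 *			return -ENOMEM;
 *		dev->device = rte_dev;
 *		dev->dev_ops = &my_dma_ops;
 *		dev->fp_obj->dev_private = dev->data->dev_private;
 *		dev->fp_obj->copy = my_dma_copy;
 *		dev->state = RTE_DMA_DEV_READY;
 *		return 0;
 *	}
 */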

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	int16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	if (*dev->dev_ops->dev_info_get == NULL)
		return -ENOTSUP;
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d cannot be configured with zero vchans",
			dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d requested more vchans than supported",
			dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d does not support silent mode", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support mem2mem transfers", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support mem2dev transfers", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support dev2mem transfers", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support dev2dev transfers", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_setup == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					sizeof(struct rte_dma_vchan_conf));
}
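
/* Illustrative bring-up sketch (assumed application-side usage, not part of
 * this file): the control-path sequence is info_get -> configure ->
 * vchan_setup -> start. Device id 0, a single mem2mem vchan and nb_desc 1024
 * are example values; nb_desc must lie within [min_desc, max_desc] reported
 * by rte_dma_info_get().
 *
 *	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vchan_conf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *	struct rte_dma_info info;
 *
 *	if (rte_dma_info_get(0, &info) != 0 ||
 *	    rte_dma_configure(0, &dev_conf) != 0 ||
 *	    rte_dma_vchan_setup(0, 0, &vchan_conf) != 0 ||
 *	    rte_dma_start(0) != 0)
 *		rte_exit(EXIT_FAILURE, "dmadev 0 setup failed\n");
 */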

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_reset == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->stats_reset)(dev, vchan);
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_status == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		/* Isolate, print and clear the lowest set capability bit. */
		capa = 1ull << __builtin_ctzll(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		return (*dev->dev_ops->dev_dump)(dev, f);

	return 0;
}

static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		    "completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private      = NULL;
	obj->copy             = dummy_copy;
	obj->copy_sg          = dummy_copy_sg;
	obj->fill             = dummy_fill;
	obj->submit           = dummy_submit;
	obj->completed        = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity   = dummy_burst_capacity;
}

static int
dmadev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
		if (rte_dma_is_valid(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

#define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!((dc) & (c)))

static int
dmadev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_tel_data *dma_caps;
	int dev_id, ret;
	uint64_t dev_capa;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	/* rte_dma_info_get() validates dev_id, so it need not be checked here. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;
	dev_capa = dma_info.dev_capa;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);

	dma_caps = rte_tel_data_alloc();
	if (dma_caps == NULL)
		return -ENOMEM;

	rte_tel_data_start_dict(dma_caps);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, dma_stats.s)

static int
dmadev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_dma_stats dma_stats;
	int dev_id, ret, vchan_id;
	char *end_param;
	const char *vchan_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);

	/* rte_dma_info_get() validates dev_id, so it need not be checked here. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;

	/* If the device has only one vchan, the user need supply only the
	 * device id; no vchan id or other extra parameters are needed.
	 */
	if (dma_info.nb_vchans == 1 && *end_param == '\0')
		vchan_id = 0;
	else {
		vchan_param = strtok(end_param, ",");
		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
			return -EINVAL;

		vchan_id = strtoul(vchan_param, &end_param, 0);
	}
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
	if (ret < 0)
		return -EINVAL;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(submitted);
	ADD_DICT_STAT(completed);
	ADD_DICT_STAT(errors);

	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
dmadev_handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_dma_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

RTE_INIT(dmadev_init_telemetry)
{
	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
			"Returns list of available dmadev IDs. Takes no parameters.");
	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
			"Returns information for a dmadev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (optional if the device has only one vchannel)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump,
			"Returns dump information for a dmadev. Parameters: int dev_id");
#endif
}
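
/* Illustrative telemetry session (assumed client-side usage via the
 * dpdk-telemetry.py script shipped with DPDK; device 0 and the values shown
 * are examples):
 *
 *	--> /dmadev/list
 *	{"/dmadev/list": [0]}
 *	--> /dmadev/info,0
 *	{"/dmadev/info": {"name": "...", "nb_vchans": 1, ...}}
 *	--> /dmadev/stats,0,0
 *	{"/dmadev/stats": {"submitted": 0, "completed": 0, "errors": 0}}
 */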