/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"
#include "rte_dmadev_trace.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Holds the dev_max value of the primary process. It is set by the
	 * primary process and read by secondary processes.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[0];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_LOGTYPE_DMADEV rte_dma_logtype

#define RTE_DMA_LOG(level, ...) \
	RTE_LOG_LINE(level, DMADEV, "" __VA_ARGS__)

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so no RTE
	 * library functions may be called here.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
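
/*
 * Illustrative usage sketch (not part of this file): an application that
 * needs more device slots than the default must call rte_dma_dev_max()
 * before rte_eal_init(). The count of 128 below is an arbitrary example
 * value.
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (rte_dma_dev_max(128) != 0)
 *			return -1;
 *		if (rte_eal_init(argc, argv) < 0)
 *			return -1;
 *		...
 *	}
 */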

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;
	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
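
/*
 * A minimal iteration sketch (illustrative): rte_dma_next_dev() is the
 * building block behind the RTE_DMA_FOREACH_DEV() macro declared in
 * rte_dmadev.h, e.g.:
 *
 *	int16_t dev_id;
 *
 *	RTE_DMA_FOREACH_DEV(dev_id)
 *		printf("dmadev %d is in use\n", dev_id);
 */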

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but the pointer
	 * returned by malloc() may not be. Allocate extra memory so the
	 * pointer can be realigned.
	 * Note: posix_memalign()/aligned_alloc() are not used because their
	 * availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}

static int
dma_dev_data_prepare(void)
{
	size_t size;
	void *ptr;

	if (rte_dma_devices != NULL)
		return 0;

	/* The DMA device objects must be cache-line aligned, but the pointer
	 * returned by malloc() may not be. Allocate extra memory so the
	 * pointer can be realigned.
	 * Note: posix_memalign()/aligned_alloc() are not used because their
	 * availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_dev) + RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
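		/* A secondary process must map the shared data first: that
		 * is what takes dma_devices_max over from the primary, and
		 * the local fast-path and device arrays below are sized
		 * from it.
		 */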
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
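
/*
 * Illustrative driver-side sketch (hypothetical PMD, not part of this
 * file): a probe routine typically allocates the device, fills in its
 * ops and fast-path handlers, and marks it ready. The names my_dma_probe,
 * my_dma_ops, my_dma_private, my_dma_copy and my_dma_submit are assumed.
 *
 *	static int
 *	my_dma_probe(int numa_node)
 *	{
 *		struct rte_dma_dev *dev;
 *
 *		dev = rte_dma_pmd_allocate("my_dma0", numa_node,
 *					   sizeof(struct my_dma_private));
 *		if (dev == NULL)
 *			return -ENOMEM;
 *		dev->dev_ops = &my_dma_ops;
 *		dev->fp_obj->copy = my_dma_copy;
 *		dev->fp_obj->submit = my_dma_submit;
 *		dev->state = RTE_DMA_DEV_READY;
 *		return 0;
 *	}
 */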

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

struct rte_dma_dev *
rte_dma_pmd_get_dev_by_id(int16_t dev_id)
{
	if (!rte_dma_is_valid(dev_id))
		return NULL;

	return &rte_dma_devices[dev_id];
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	uint16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (*dev->dev_ops->dev_info_get == NULL)
		return -ENOTSUP;
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	if ((dev_info->dev_capa & RTE_DMA_CAPA_PRI_POLICY_SP) && (dev_info->nb_priorities <= 1)) {
		RTE_DMA_LOG(ERR, "Number of priorities must be > 1 for device %d", dev_id);
		return -EINVAL;
	}

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	rte_dma_trace_info_get(dev_id, dev_info);

	return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_info dev_info;
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with too many vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
		return -EINVAL;
	}

	if ((dev_info.dev_capa & RTE_DMA_CAPA_PRI_POLICY_SP) &&
	    (dev_conf->priority >= dev_info.nb_priorities)) {
		RTE_DMA_LOG(ERR, "Device %d configured with invalid priority", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	rte_dma_trace_configure(dev_id, dev_conf, ret);

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	rte_dma_trace_start(dev_id, ret);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_dma_trace_stop(dev_id, ret);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	rte_dma_trace_close(dev_id, ret);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_setup == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					sizeof(struct rte_dma_vchan_conf));
	rte_dma_trace_vchan_setup(dev_id, vchan, conf, ret);

	return ret;
}
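
/*
 * Illustrative application-side setup flow (assumed values, not part of
 * this file): configure the device, set up each vchan, then start.
 *
 *	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vchan_conf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *
 *	if (rte_dma_configure(dev_id, &dev_conf) != 0 ||
 *	    rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0 ||
 *	    rte_dma_start(dev_id) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up dmadev %d\n", dev_id);
 */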

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev;

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev;
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_reset == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->stats_reset)(dev, vchan);
	rte_dma_trace_stats_reset(dev_id, vchan, ret);

	return ret;
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev;

	if (!rte_dma_is_valid(dev_id) || status == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_status == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_M2D_AUTO_FREE,  "m2d_auto_free"  },
		{ RTE_DMA_CAPA_PRI_POLICY_SP,  "pri_policy_sp" },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		capa = 1ull << rte_ctz64(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev;
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;
	dev = &rte_dma_devices[dev_id];

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		ret = (*dev->dev_ops->dev_dump)(dev, f);
	rte_dma_trace_dump(dev_id, f, ret);

	return ret;
}

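/* Fast-path stubs: each rte_dma_fp_object is pre-filled with the dummy
 * handlers below so the inline fast-path wrappers never need a NULL
 * check; calling an unconfigured or unsupported operation fails with a
 * log message instead of a crash.
 */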
static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		    "completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private      = NULL;
	obj->copy             = dummy_copy;
	obj->copy_sg          = dummy_copy_sg;
	obj->fill             = dummy_fill;
	obj->submit           = dummy_submit;
	obj->completed        = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity   = dummy_burst_capacity;
}

static int
dmadev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
		if (rte_dma_is_valid(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

#define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c))

static int
dmadev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_tel_data *dma_caps;
	int dev_id, ret;
	uint64_t dev_capa;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;
	dev_capa = dma_info.dev_capa;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
	rte_tel_data_add_dict_int(d, "nb_priorities", dma_info.nb_priorities);
	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);

	dma_caps = rte_tel_data_alloc();
	if (!dma_caps)
		return -ENOMEM;

	rte_tel_data_start_dict(dma_caps);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_M2D_AUTO_FREE);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_PRI_POLICY_SP);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, dma_stats.s)

static int
dmadev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_dma_stats dma_stats;
	int dev_id, ret, vchan_id;
	char *end_param;
	const char *vchan_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;

	/* If the device has only one vchan, the user need not supply a
	 * vchan id; the device id alone is sufficient.
	 */
	if (dma_info.nb_vchans == 1 && *end_param == '\0')
		vchan_id = 0;
	else {
		vchan_param = strtok(end_param, ",");
		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
			return -EINVAL;

		vchan_id = strtoul(vchan_param, &end_param, 0);
	}
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
	if (ret < 0)
		return -EINVAL;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(submitted);
	ADD_DICT_STAT(completed);
	ADD_DICT_STAT(errors);

	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
dmadev_handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_dma_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

RTE_INIT(dmadev_init_telemetry)
{
	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
			"Returns list of available dmadev devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
			"Returns information for a dmadev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (Optional if only one vchannel)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump,
			"Returns dump information for a dmadev. Parameters: int dev_id");
#endif
}
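
/*
 * Illustrative telemetry session (via usertools/dpdk-telemetry.py, output
 * omitted; the device and vchan ids are example values):
 *
 *	--> /dmadev/list
 *	--> /dmadev/info,0
 *	--> /dmadev/stats,0,0
 */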