/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Hold the dev_max information of the primary process. This field is
	 * set by the primary process and is read by the secondary process.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[0];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so it must not
	 * call any other rte library functions.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
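
/* Illustrative usage (an editorial sketch, not part of the original file):
 * an application that needs a larger device table than the library default
 * could call this before EAL initialization, e.g.:
 *
 *	if (rte_dma_dev_max(256) != 0)
 *		return -1; \/\* must happen before rte_eal_init() \*\/
 *	ret = rte_eal_init(argc, argv);
 *
 * The value 256 is an arbitrary example.
 */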

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;
	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
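
/* Illustrative iteration over all in-use devices (an editorial sketch; this
 * is the pattern behind the RTE_DMA_FOREACH_DEV() helper in rte_dmadev.h):
 *
 *	int16_t id;
 *	for (id = rte_dma_next_dev(0); id >= 0; id = rte_dma_next_dev(id + 1))
 *		printf("dmadev %d is in use\n", id);
 */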

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but the pointer
	 * returned by malloc() may not be. Allocate extra memory so the
	 * pointer can be realigned to a cache-line boundary.
	 * Note: posix_memalign()/aligned_alloc() are not used because their
	 * availability depends on the libc version.
	 */
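	/* Illustrative arithmetic (editorial note, not in the original): with
	 * a 64-byte cache line, if malloc() returns e.g. 0x1008, RTE_PTR_ALIGN()
	 * advances it to 0x1040; the extra RTE_CACHE_LINE_SIZE bytes requested
	 * below guarantee the aligned array still fits in the allocation.
	 */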
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}

static int
dma_dev_data_prepare(void)
{
	size_t size;

	if (rte_dma_devices != NULL)
		return 0;

	size = dma_devices_max * sizeof(struct rte_dma_dev);
	rte_dma_devices = malloc(size);
	if (rte_dma_devices == NULL)
		return -ENOMEM;
	memset(rte_dma_devices, 0, size);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

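/* Note on ordering (editorial): in the primary process the local tables are
 * sized from dma_devices_max before the shared memzone is created; in a
 * secondary process the shared data must be mapped first, since it supplies
 * the primary's dev_max value used to size the local tables.
 */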
static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
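
/* Illustrative driver-side flow (an editorial sketch, not taken from any
 * real PMD; the my_* names are hypothetical). A driver's probe routine
 * would typically do something like:
 *
 *	dev = rte_dma_pmd_allocate(name, numa_node, sizeof(struct my_priv));
 *	if (dev == NULL)
 *		return -ENOMEM;
 *	dev->dev_ops = &my_dma_ops;
 *	dev->fp_obj->dev_private = dev->data->dev_private;
 *	dev->fp_obj->copy = my_copy;
 *	dev->fp_obj->submit = my_submit;
 *	dev->fp_obj->completed = my_completed;
 *	dev->state = RTE_DMA_DEV_READY;
 *
 * rte_dma_pmd_release() undoes this on device remove.
 */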

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	uint16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	return 0;
}
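
/* Illustrative query (an editorial sketch; assumes dev_id names a probed
 * device):
 *
 *	struct rte_dma_info info;
 *	if (rte_dma_info_get(dev_id, &info) == 0)
 *		printf("%s: max_vchans=%u max_desc=%u\n",
 *			info.dev_name, info.max_vchans, info.max_desc);
 */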

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with too many vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					sizeof(struct rte_dma_vchan_conf));
}
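
/* Illustrative application setup flow (an editorial sketch assuming a single
 * mem2mem vchan; error handling abbreviated):
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 128,
 *	};
 *
 *	ret = rte_dma_configure(dev_id, &conf);
 *	if (ret == 0)
 *		ret = rte_dma_vchan_setup(dev_id, 0, &vconf);
 *	if (ret == 0)
 *		ret = rte_dma_start(dev_id);
 *
 * nb_desc = 128 is an arbitrary value; real code should respect the
 * min_desc/max_desc range reported by rte_dma_info_get().
 */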

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	return (*dev->dev_ops->stats_reset)(dev, vchan);
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
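	/* Decode dev_capa one bit at a time: isolate the lowest set bit,
	 * print its name, then clear it from the mask.
	 */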
	while (dev_capa > 0) {
		capa = 1ull << __builtin_ctzll(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		return (*dev->dev_ops->dev_dump)(dev, f);

	return 0;
}
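
/* Illustrative output of rte_dma_dump(0, stdout), assembled from the fprintf
 * calls above (device name and capability mask are examples only):
 *
 *	DMA Dev 0, 'dma_skeleton' [stopped]
 *	  dev_capa: 0x100000011 - mem2mem sva copy
 *	  max_vchans_supported: 1
 *	  nb_vchans_configured: 0
 *	  silent_mode: off
 */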

static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		    "completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private      = NULL;
	obj->copy             = dummy_copy;
	obj->copy_sg          = dummy_copy_sg;
	obj->fill             = dummy_fill;
	obj->submit           = dummy_submit;
	obj->completed        = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity   = dummy_burst_capacity;
}

static int
dmadev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
		if (rte_dma_is_valid(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

#define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c))

static int
dmadev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_tel_data *dma_caps;
	int dev_id, ret;
	uint64_t dev_capa;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;
	dev_capa = dma_info.dev_capa;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);

	dma_caps = rte_tel_data_alloc();
	if (!dma_caps)
		return -ENOMEM;

	rte_tel_data_start_dict(dma_caps);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, dma_stats.s)

static int
dmadev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_dma_stats dma_stats;
	int dev_id, ret, vchan_id;
	char *end_param;
	const char *vchan_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;

	/* If the device has only one vchan, the user need not supply a vchan
	 * id; the device id alone is sufficient.
	 */
	if (dma_info.nb_vchans == 1 && *end_param == '\0')
		vchan_id = 0;
	else {
		vchan_param = strtok(end_param, ",");
		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
			return -EINVAL;

		vchan_id = strtoul(vchan_param, &end_param, 0);
	}
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
	if (ret < 0)
		return -EINVAL;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(submitted);
	ADD_DICT_STAT(completed);
	ADD_DICT_STAT(errors);

	return 0;
}

RTE_INIT(dmadev_init_telemetry)
{
	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
			"Returns list of available dmadev devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
			"Returns information for a dmadev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (Optional if only one vchannel)");
}
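
/* Illustrative telemetry session (an editorial sketch; output abridged and
 * values depend on the devices present):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /dmadev/list
 *	{"/dmadev/list": [0]}
 *	--> /dmadev/stats,0,0
 *	{"/dmadev/stats": {"submitted": 0, "completed": 0, "errors": 0}}
 */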