xref: /dpdk/lib/dmadev/rte_dmadev.c (revision b6f30094e511c22f038907a507d16f7cc022c682)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"
#include "rte_dmadev_trace.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Holds the dev_max value of the primary process. This field is
	 * set by the primary process and read by secondary processes.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[0];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_LOGTYPE_DMADEV rte_dma_logtype

#define RTE_DMA_LOG(level, ...) \
	RTE_LOG_LINE(level, DMADEV, "" __VA_ARGS__)

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so it must not
	 * call any other RTE library functions.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
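
/*
 * Usage sketch (illustrative, not part of the library): because this
 * function must run before rte_eal_init(), an application would call it
 * at the very top of main(), e.g. with a hypothetical limit of 128:
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (rte_dma_dev_max(128) != 0)
 *			return -1;
 *		if (rte_eal_init(argc, argv) < 0)
 *			return -1;
 *		...
 *	}
 */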

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;
	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
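
/*
 * Iteration sketch (illustrative): walk all in-use devices by chaining
 * rte_dma_next_dev() calls; the RTE_DMA_FOREACH_DEV() macro in
 * rte_dmadev.h expands to the same loop.
 *
 *	int16_t dev_id;
 *
 *	for (dev_id = rte_dma_next_dev(0); dev_id != -1;
 *	     dev_id = rte_dma_next_dev(dev_id + 1))
 *		printf("dmadev %d is in use\n", dev_id);
 */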

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but the pointer
	 * returned by malloc may not be, so allocate extra memory and
	 * realign within it.
	 * Note: we do not call posix_memalign/aligned_alloc because their
	 * availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}

static int
dma_dev_data_prepare(void)
{
	size_t size;

	if (rte_dma_devices != NULL)
		return 0;

	size = dma_devices_max * sizeof(struct rte_dma_dev);
	rte_dma_devices = malloc(size);
	if (rte_dma_devices == NULL)
		return -ENOMEM;
	memset(rte_dma_devices, 0, size);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
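
/*
 * Driver-side sketch (hypothetical "mydma" PMD; all mydma_* names are
 * illustrative, but the shape follows how DMA drivers use this API):
 * allocate the device in probe, hook up the ops and fast-path functions,
 * then mark it ready.
 *
 *	dev = rte_dma_pmd_allocate(name, numa_node, sizeof(struct mydma_hw));
 *	if (dev == NULL)
 *		return -ENOMEM;
 *	dev->dev_ops = &mydma_ops;
 *	dev->fp_obj->dev_private = dev->data->dev_private;
 *	dev->fp_obj->copy = mydma_copy;
 *	dev->fp_obj->submit = mydma_submit;
 *	dev->fp_obj->completed = mydma_completed;
 *	dev->state = RTE_DMA_DEV_READY;
 */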

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

struct rte_dma_dev *
rte_dma_pmd_get_dev_by_id(int16_t dev_id)
{
	if (!rte_dma_is_valid(dev_id))
		return NULL;

	return &rte_dma_devices[dev_id];
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	uint16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	if (*dev->dev_ops->dev_info_get == NULL)
		return -ENOTSUP;
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	rte_dma_trace_info_get(dev_id, dev_info);

	return 0;
}
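
/*
 * Application-side sketch (illustrative): query a device before
 * configuring it.
 *
 *	struct rte_dma_info info;
 *
 *	if (rte_dma_info_get(dev_id, &info) == 0)
 *		printf("%s: max_vchans=%u, desc range [%u, %u]\n",
 *		       info.dev_name, info.max_vchans,
 *		       info.min_desc, info.max_desc);
 */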

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d cannot be configured with zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d cannot be configured with more vchans than supported", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d does not support silent mode", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	rte_dma_trace_configure(dev_id, dev_conf, ret);

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	rte_dma_trace_start(dev_id, ret);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_dma_trace_stop(dev_id, ret);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	rte_dma_trace_close(dev_id, ret);

	return ret;
}
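
/*
 * Teardown sketch (illustrative): mirroring the -EBUSY check above, a
 * started device must be stopped before it can be closed.
 *
 *	rte_dma_stop(dev_id);
 *	rte_dma_close(dev_id);
 */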

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d does not support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_setup == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					sizeof(struct rte_dma_vchan_conf));
	rte_dma_trace_vchan_setup(dev_id, vchan, conf, ret);

	return ret;
}
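
/*
 * Setup sketch (illustrative; src, dst and len are assumed to be valid
 * IOVAs and a copy length, and nb_desc is assumed to be within the
 * device's [min_desc, max_desc] range): configure a single mem-to-mem
 * vchan, start the device, then submit a copy and poll its completion.
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *	uint16_t last_idx;
 *	bool has_error;
 *
 *	rte_dma_configure(dev_id, &conf);
 *	rte_dma_vchan_setup(dev_id, 0, &vconf);
 *	rte_dma_start(dev_id);
 *	rte_dma_copy(dev_id, 0, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
 *	while (rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
 *		;
 */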

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}
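
/*
 * Stats sketch (illustrative): read the counters for vchan 0; passing
 * RTE_DMA_ALL_VCHAN instead aggregates across every vchan.
 *
 *	struct rte_dma_stats stats;
 *
 *	if (rte_dma_stats_get(dev_id, 0, &stats) == 0)
 *		printf("submitted=%" PRIu64 " completed=%" PRIu64
 *		       " errors=%" PRIu64 "\n",
 *		       stats.submitted, stats.completed, stats.errors);
 */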

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_reset == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->stats_reset)(dev, vchan);
	rte_dma_trace_stats_reset(dev_id, vchan, ret);

	return ret;
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_status == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_M2D_AUTO_FREE,  "m2d_auto_free"  },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		capa = 1ull << rte_ctz64(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Failed to get device info for device %d", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		ret = (*dev->dev_ops->dev_dump)(dev, f);
	rte_dma_trace_dump(dev_id, f, ret);

	return ret;
}

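/* Fast-path stubs: each rte_dma_fp_object is initialized (and reset on
 * release) to point at the dummy handlers below, so calling a fast-path
 * API on an unconfigured device logs an error instead of jumping through
 * an invalid function pointer.
 */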
static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		    "completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private      = NULL;
	obj->copy             = dummy_copy;
	obj->copy_sg          = dummy_copy_sg;
	obj->fill             = dummy_fill;
	obj->submit           = dummy_submit;
	obj->completed        = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity   = dummy_burst_capacity;
}

static int
dmadev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
		if (rte_dma_is_valid(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

#define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c))

static int
dmadev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_tel_data *dma_caps;
	int dev_id, ret;
	uint64_t dev_capa;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;
	dev_capa = dma_info.dev_capa;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);

	dma_caps = rte_tel_data_alloc();
	if (!dma_caps)
		return -ENOMEM;

	rte_tel_data_start_dict(dma_caps);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_M2D_AUTO_FREE);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, dma_stats.s)

static int
dmadev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_dma_stats dma_stats;
	int dev_id, ret, vchan_id;
	char *end_param;
	const char *vchan_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;

	/* If the device has only one vchan, the user need not supply the
	 * vchan id; the device id alone is sufficient, with no extra
	 * parameters.
	 */
	if (dma_info.nb_vchans == 1 && *end_param == '\0')
		vchan_id = 0;
	else {
		vchan_param = strtok(end_param, ",");
		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
			return -EINVAL;

		vchan_id = strtoul(vchan_param, &end_param, 0);
	}
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
	if (ret < 0)
		return -EINVAL;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(submitted);
	ADD_DICT_STAT(completed);
	ADD_DICT_STAT(errors);

	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
dmadev_handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_dma_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

RTE_INIT(dmadev_init_telemetry)
{
	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
			"Returns the list of available dmadev device IDs. No parameters.");
	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
			"Returns information for a dmadev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (optional if the device has only one vchannel)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump,
			"Returns dump information for a dmadev. Parameters: int dev_id");
#endif
}

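/*
 * Telemetry usage sketch (illustrative): with an application running, the
 * commands registered above can be issued from the telemetry client
 * shipped in usertools/dpdk-telemetry.py, e.g.:
 *
 *	--> /dmadev/list
 *	--> /dmadev/info,0
 *	--> /dmadev/stats,0,0
 *	--> /dmadev/dump,0
 */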