/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <ctype.h>
#include <inttypes.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"
#include "rte_dmadev_trace.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
static struct rte_dma_dev *rte_dma_devices;
static struct {
	/* Hold the dev_max information of the primary process. This field is
	 * set by the primary process and is read by the secondary process.
	 */
	int16_t dev_max;
	struct rte_dma_dev_data data[0];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_LOGTYPE_DMADEV rte_dma_logtype

#define RTE_DMA_LOG(level, ...) \
	RTE_LOG_LINE(level, DMADEV, "" __VA_ARGS__)

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so no RTE
	 * library functions may be called here.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
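
/* Illustrative usage sketch (not part of this file): an application that
 * needs more than the default number of DMA devices must raise the limit
 * before rte_eal_init(), e.g.:
 *
 *	if (rte_dma_dev_max(128) != 0)
 *		return -1;
 *	if (rte_eal_init(argc, argv) < 0)
 *		return -1;
 *
 * A repeated call fails with -EINVAL once the limit has been fixed.
 */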

int16_t
rte_dma_next_dev(int16_t start_dev_id)
{
	int16_t dev_id = start_dev_id;
	while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
		dev_id++;

	if (dev_id < dma_devices_max)
		return dev_id;

	return -1;
}
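
/* Illustrative sketch: applications typically do not call this directly but
 * use the RTE_DMA_FOREACH_DEV() iterator from rte_dmadev.h, which is built
 * on top of it, e.g.:
 *
 *	int16_t dev_id;
 *
 *	RTE_DMA_FOREACH_DEV(dev_id)
 *		printf("dmadev %d is in use\n", dev_id);
 */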

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
			return i;
	}

	return -1;
}

static struct rte_dma_dev*
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but the pointer
	 * returned by malloc may not be. Therefore, extra memory is
	 * allocated so the array can be realigned.
	 * Note: posix_memalign/aligned_alloc are not called because their
	 * availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}
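
/* Worked illustration of the realignment trick above (addresses are an
 * example, assuming a 64-byte cache line): if malloc() returns 0x1010,
 * RTE_PTR_ALIGN(0x1010, 64) rounds the pointer up to 0x1040. Up to
 * RTE_CACHE_LINE_SIZE - 1 bytes may be skipped at the front, which is
 * exactly why RTE_CACHE_LINE_SIZE extra bytes are requested. Note that
 * the raw malloc() pointer is not stored, so this memory is never freed;
 * it lives for the lifetime of the process.
 */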

static int
dma_dev_data_prepare(void)
{
	size_t size;
	void *ptr;

	if (rte_dma_devices != NULL)
		return 0;

	/* The DMA device objects must be cache-line aligned, but the pointer
	 * returned by malloc may not be. Therefore, extra memory is
	 * allocated so the array can be realigned.
	 * Note: posix_memalign/aligned_alloc are not used because they are
	 * not always available, depending on the libc.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_dev) + RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);

	return 0;
}

static int
dma_shared_data_prepare(void)
{
	const char *mz_name = "rte_dma_dev_data";
	const struct rte_memzone *mz;
	size_t size;

	if (dma_devices_shared_data != NULL)
		return 0;

	size = sizeof(*dma_devices_shared_data) +
		sizeof(struct rte_dma_dev_data) * dma_devices_max;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	dma_devices_shared_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(dma_devices_shared_data, 0, size);
		dma_devices_shared_data->dev_max = dma_devices_max;
	} else {
		dma_devices_max = dma_devices_shared_data->dev_max;
	}

	return 0;
}

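/* The preparation order below matters: a secondary process must map the
 * shared memzone first, because that is where it learns the dev_max value
 * chosen by the primary; only then can it size its local fast-path and
 * device arrays. The primary, by contrast, fixes dma_devices_max first
 * (falling back to RTE_DMADEV_DEFAULT_MAX) and publishes it through the
 * shared data last.
 */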
static int
dma_data_prepare(void)
{
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (dma_devices_max == 0)
			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
	} else {
		ret = dma_shared_data_prepare();
		if (ret)
			return ret;
		ret = dma_fp_data_prepare();
		if (ret)
			return ret;
		ret = dma_dev_data_prepare();
		if (ret)
			return ret;
	}

	return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	dev->data = &dma_devices_shared_data->data[dev_id];
	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	dev->data->dev_id = dev_id;
	dev->data->numa_node = numa_node;
	dev->data->dev_private = dev_private;

	return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
	struct rte_dma_dev *dev;
	int16_t i;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	for (i = 0; i < dma_devices_max; i++) {
		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
			break;
	}
	if (i == dma_devices_max) {
		RTE_DMA_LOG(ERR,
			"Device %s is not driven by the primary process",
			name);
		return NULL;
	}

	dev = &rte_dma_devices[i];
	dev->data = &dma_devices_shared_data->data[i];

	return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		dev = dma_allocate_primary(name, numa_node, private_data_size);
	else
		dev = dma_attach_secondary(name);

	if (dev) {
		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
		dma_fp_object_dummy(dev->fp_obj);
	}

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(dev->data->dev_private);
		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
	}

	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
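
/* Hedged sketch of how a driver's probe path typically uses this API; the
 * my_dma_probe/my_dma_ops/my_dma_private names are hypothetical, not part
 * of dmadev:
 *
 *	static int
 *	my_dma_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_dma_dev *dev;
 *
 *		dev = rte_dma_pmd_allocate(rte_dev->name, rte_socket_id(),
 *					   sizeof(struct my_dma_private));
 *		if (dev == NULL)
 *			return -ENOMEM;
 *		dev->device = rte_dev;
 *		dev->dev_ops = &my_dma_ops;
 *		// fill in dev->fp_obj datapath callbacks here, then:
 *		dev->state = RTE_DMA_DEV_READY;
 *		return 0;
 *	}
 */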

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->data->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

struct rte_dma_dev *
rte_dma_pmd_get_dev_by_id(int16_t dev_id)
{
	if (!rte_dma_is_valid(dev_id))
		return NULL;

	return &rte_dma_devices[dev_id];
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	uint16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	if (*dev->dev_ops->dev_info_get == NULL)
		return -ENOTSUP;
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->data->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

	rte_dma_trace_info_get(dev_id, dev_info);

	return 0;
}
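
/* Illustrative usage sketch of the query API:
 *
 *	struct rte_dma_info info;
 *
 *	if (rte_dma_info_get(dev_id, &info) == 0)
 *		printf("%s: up to %u vchans, %u..%u descriptors\n",
 *		       info.dev_name, info.max_vchans,
 *		       info.min_desc, info.max_desc);
 */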

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d configure zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d configure too many vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(struct rte_dma_conf));

	rte_dma_trace_configure(dev_id, dev_conf, ret);

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	rte_dma_trace_start(dev_id, ret);
	if (ret != 0)
		return ret;

mark_started:
	dev->data->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->data->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_dma_trace_stop(dev_id, ret);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->data->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	rte_dma_trace_close(dev_id, ret);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->data->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}
	if (dev->data->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range!", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_setup == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					sizeof(struct rte_dma_vchan_conf));
	rte_dma_trace_vchan_setup(dev_id, vchan, conf, ret);

	return ret;
}
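
/* Illustrative sketch of the full setup sequence an application follows,
 * with error handling elided for brevity:
 *
 *	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vchan_conf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,  (must lie within [min_desc, max_desc])
 *	};
 *
 *	rte_dma_configure(dev_id, &dev_conf);
 *	rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
 *	rte_dma_start(dev_id);
 *
 * For mem2mem the src/dst port types stay RTE_DMA_PORT_NONE (the zero
 * default), which is exactly what the port-type checks above enforce.
 */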

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}
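
/* Illustrative sketch: pass RTE_DMA_ALL_VCHAN to aggregate counters over
 * every virtual channel of a device, e.g.:
 *
 *	struct rte_dma_stats stats;
 *
 *	if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
 *		printf("submitted %" PRIu64 " completed %" PRIu64
 *		       " errors %" PRIu64 "\n",
 *		       stats.submitted, stats.completed, stats.errors);
 */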

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->stats_reset == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->stats_reset)(dev, vchan);
	rte_dma_trace_stats_reset(dev_id, vchan, ret);

	return ret;
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->data->dev_conf.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %u vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	if (*dev->dev_ops->vchan_status == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
		{ RTE_DMA_CAPA_M2D_AUTO_FREE,  "m2d_auto_free"  },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		capa = 1ull << rte_ctz64(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}
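
/* The loop above peels off one capability bit per iteration: rte_ctz64()
 * returns the index of the lowest set bit, 1ull << index isolates it, and
 * the &= ~capa clears it. For example (bit positions assumed here purely
 * for illustration), dev_capa = 0x3 with mem2mem at bit 0 and mem2dev at
 * bit 1 would print " mem2mem mem2dev" and then terminate.
 */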

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->data->dev_id,
		dev->data->dev_name,
		dev->data->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->data->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		ret = (*dev->dev_ops->dev_dump)(dev, f);
	rte_dma_trace_dump(dev_id, f, ret);

	return ret;
}

static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		    "completed_status is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_burst_capacity(__rte_unused const void *dev_private,
		     __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private      = NULL;
	obj->copy             = dummy_copy;
	obj->copy_sg          = dummy_copy_sg;
	obj->fill             = dummy_fill;
	obj->submit           = dummy_submit;
	obj->completed        = dummy_completed;
	obj->completed_status = dummy_completed_status;
	obj->burst_capacity   = dummy_burst_capacity;
}
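
/* Design note: installing these dummies instead of leaving the callbacks
 * NULL lets the inline fast-path wrappers in rte_dmadev.h (rte_dma_copy(),
 * rte_dma_submit(), etc.) skip per-call NULL checks; calling into an
 * unconfigured device degrades to a logged error rather than a crash.
 */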

static int
dmadev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < dma_devices_max; dev_id++)
		if (rte_dma_is_valid(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}
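
/* Illustrative sketch: these handlers are reached through the telemetry
 * socket, e.g. with the dpdk-telemetry.py client shipped with DPDK:
 *
 *	--> /dmadev/list
 *	{"/dmadev/list": [0, 1]}
 *
 * (The device IDs shown are an example; output depends on the devices
 * probed in the running application.)
 */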

#define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c))

static int
dmadev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_tel_data *dma_caps;
	int dev_id, ret;
	uint64_t dev_capa;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;
	dev_capa = dma_info.dev_capa;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", dma_info.dev_name);
	rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans);
	rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node);
	rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans);
	rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc);
	rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc);
	rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges);

	dma_caps = rte_tel_data_alloc();
	if (!dma_caps)
		return -ENOMEM;

	rte_tel_data_start_dict(dma_caps);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_M2D_AUTO_FREE);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG);
	ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL);
	rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, dma_stats.s)

static int
dmadev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_dma_info dma_info;
	struct rte_dma_stats dma_stats;
	int dev_id, ret, vchan_id;
	char *end_param;
	const char *vchan_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);

	/* Function info_get validates dev_id so we don't need to. */
	ret = rte_dma_info_get(dev_id, &dma_info);
	if (ret < 0)
		return -EINVAL;

	/* If the device has one vchan the user does not need to supply the
	 * vchan id and only the device id is needed, no extra parameters.
	 */
	if (dma_info.nb_vchans == 1 && *end_param == '\0')
		vchan_id = 0;
	else {
		vchan_param = strtok(end_param, ",");
		if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param))
			return -EINVAL;

		vchan_id = strtoul(vchan_param, &end_param, 0);
	}
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats);
	if (ret < 0)
		return -EINVAL;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(submitted);
	ADD_DICT_STAT(completed);
	ADD_DICT_STAT(errors);

	return 0;
}
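
/* Illustrative sketch: the stats command takes the device id and, for
 * multi-vchan devices, a comma-separated vchan id, e.g.:
 *
 *	--> /dmadev/stats,0,1
 *	{"/dmadev/stats": {"submitted": 100, "completed": 100, "errors": 0}}
 *
 * (Counter values are an example.) For a single-vchan device the vchan id
 * may be omitted, as the parsing above shows.
 */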

#ifndef RTE_EXEC_ENV_WINDOWS
static int
dmadev_handle_dev_dump(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int dev_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring");

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_dma_dump(dev_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return ret;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

RTE_INIT(dmadev_init_telemetry)
{
	rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list,
			"Returns list of available dmadev devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info,
			"Returns information for a dmadev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats,
			"Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (Optional if only one vchannel)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump,
			"Returns dump information for a dmadev. Parameters: int dev_id");
#endif
}