1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2021 HiSilicon Limited 3 * Copyright(c) 2021 Intel Corporation 4 */ 5 6 #include <ctype.h> 7 #include <inttypes.h> 8 #include <stdlib.h> 9 10 #include <rte_eal.h> 11 #include <rte_lcore.h> 12 #include <rte_log.h> 13 #include <rte_malloc.h> 14 #include <rte_memzone.h> 15 #include <rte_string_fns.h> 16 #include <rte_telemetry.h> 17 18 #include "rte_dmadev.h" 19 #include "rte_dmadev_pmd.h" 20 #include "rte_dmadev_trace.h" 21 22 static int16_t dma_devices_max; 23 24 struct rte_dma_fp_object *rte_dma_fp_objs; 25 static struct rte_dma_dev *rte_dma_devices; 26 static struct { 27 /* Hold the dev_max information of the primary process. This field is 28 * set by the primary process and is read by the secondary process. 29 */ 30 int16_t dev_max; 31 struct rte_dma_dev_data data[0]; 32 } *dma_devices_shared_data; 33 34 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO); 35 #define RTE_LOGTYPE_DMADEV rte_dma_logtype 36 37 #define RTE_DMA_LOG(level, ...) \ 38 RTE_LOG_LINE(level, DMADEV, "" __VA_ARGS__) 39 40 int 41 rte_dma_dev_max(size_t dev_max) 42 { 43 /* This function may be called before rte_eal_init(), so no rte library 44 * function can be called in this function. 45 */ 46 if (dev_max == 0 || dev_max > INT16_MAX) 47 return -EINVAL; 48 49 if (dma_devices_max > 0) 50 return -EINVAL; 51 52 dma_devices_max = dev_max; 53 54 return 0; 55 } 56 57 int16_t 58 rte_dma_next_dev(int16_t start_dev_id) 59 { 60 int16_t dev_id = start_dev_id; 61 while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED) 62 dev_id++; 63 64 if (dev_id < dma_devices_max) 65 return dev_id; 66 67 return -1; 68 } 69 70 static int 71 dma_check_name(const char *name) 72 { 73 size_t name_len; 74 75 if (name == NULL) { 76 RTE_DMA_LOG(ERR, "Name can't be NULL"); 77 return -EINVAL; 78 } 79 80 name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN); 81 if (name_len == 0) { 82 RTE_DMA_LOG(ERR, "Zero length DMA device name"); 83 return -EINVAL; 84 } 85 if (name_len >= RTE_DEV_NAME_MAX_LEN) { 86 RTE_DMA_LOG(ERR, "DMA device name is too long"); 87 return -EINVAL; 88 } 89 90 return 0; 91 } 92 93 static int16_t 94 dma_find_free_id(void) 95 { 96 int16_t i; 97 98 if (rte_dma_devices == NULL || dma_devices_shared_data == NULL) 99 return -1; 100 101 for (i = 0; i < dma_devices_max; i++) { 102 if (dma_devices_shared_data->data[i].dev_name[0] == '\0') 103 return i; 104 } 105 106 return -1; 107 } 108 109 static struct rte_dma_dev* 110 dma_find_by_name(const char *name) 111 { 112 int16_t i; 113 114 if (rte_dma_devices == NULL) 115 return NULL; 116 117 for (i = 0; i < dma_devices_max; i++) { 118 if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) && 119 (!strcmp(name, rte_dma_devices[i].data->dev_name))) 120 return &rte_dma_devices[i]; 121 } 122 123 return NULL; 124 } 125 126 static void dma_fp_object_dummy(struct rte_dma_fp_object *obj); 127 128 static int 129 dma_fp_data_prepare(void) 130 { 131 size_t size; 132 void *ptr; 133 int i; 134 135 if (rte_dma_fp_objs != NULL) 136 return 0; 137 138 /* Fast-path object must align cacheline, but the return value of malloc 139 * may not be aligned to the cache line. Therefore, extra memory is 140 * applied for realignment. 141 * note: We do not call posix_memalign/aligned_alloc because it is 142 * version dependent on libc. 143 */ 144 size = dma_devices_max * sizeof(struct rte_dma_fp_object) + 145 RTE_CACHE_LINE_SIZE; 146 ptr = malloc(size); 147 if (ptr == NULL) 148 return -ENOMEM; 149 memset(ptr, 0, size); 150 151 rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE); 152 for (i = 0; i < dma_devices_max; i++) 153 dma_fp_object_dummy(&rte_dma_fp_objs[i]); 154 155 return 0; 156 } 157 158 static int 159 dma_dev_data_prepare(void) 160 { 161 size_t size; 162 163 if (rte_dma_devices != NULL) 164 return 0; 165 166 size = dma_devices_max * sizeof(struct rte_dma_dev); 167 rte_dma_devices = malloc(size); 168 if (rte_dma_devices == NULL) 169 return -ENOMEM; 170 memset(rte_dma_devices, 0, size); 171 172 return 0; 173 } 174 175 static int 176 dma_shared_data_prepare(void) 177 { 178 const char *mz_name = "rte_dma_dev_data"; 179 const struct rte_memzone *mz; 180 size_t size; 181 182 if (dma_devices_shared_data != NULL) 183 return 0; 184 185 size = sizeof(*dma_devices_shared_data) + 186 sizeof(struct rte_dma_dev_data) * dma_devices_max; 187 188 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 189 mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0); 190 else 191 mz = rte_memzone_lookup(mz_name); 192 if (mz == NULL) 193 return -ENOMEM; 194 195 dma_devices_shared_data = mz->addr; 196 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 197 memset(dma_devices_shared_data, 0, size); 198 dma_devices_shared_data->dev_max = dma_devices_max; 199 } else { 200 dma_devices_max = dma_devices_shared_data->dev_max; 201 } 202 203 return 0; 204 } 205 206 static int 207 dma_data_prepare(void) 208 { 209 int ret; 210 211 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 212 if (dma_devices_max == 0) 213 dma_devices_max = RTE_DMADEV_DEFAULT_MAX; 214 ret = dma_fp_data_prepare(); 215 if (ret) 216 return ret; 217 ret = dma_dev_data_prepare(); 218 if (ret) 219 return ret; 220 ret = dma_shared_data_prepare(); 221 if (ret) 222 return ret; 223 } else { 224 ret = dma_shared_data_prepare(); 225 if (ret) 226 return ret; 227 ret = dma_fp_data_prepare(); 228 if (ret) 229 return ret; 230 ret = dma_dev_data_prepare(); 231 if (ret) 232 return ret; 233 } 234 235 return 0; 236 } 237 238 static struct rte_dma_dev * 239 dma_allocate_primary(const char *name, int numa_node, size_t private_data_size) 240 { 241 struct rte_dma_dev *dev; 242 void *dev_private; 243 int16_t dev_id; 244 int ret; 245 246 ret = dma_data_prepare(); 247 if (ret < 0) { 248 RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data"); 249 return NULL; 250 } 251 252 dev = dma_find_by_name(name); 253 if (dev != NULL) { 254 RTE_DMA_LOG(ERR, "DMA device already allocated"); 255 return NULL; 256 } 257 258 dev_private = rte_zmalloc_socket(name, private_data_size, 259 RTE_CACHE_LINE_SIZE, numa_node); 260 if (dev_private == NULL) { 261 RTE_DMA_LOG(ERR, "Cannot allocate private data"); 262 return NULL; 263 } 264 265 dev_id = dma_find_free_id(); 266 if (dev_id < 0) { 267 RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices"); 268 rte_free(dev_private); 269 return NULL; 270 } 271 272 dev = &rte_dma_devices[dev_id]; 273 dev->data = &dma_devices_shared_data->data[dev_id]; 274 rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name)); 275 dev->data->dev_id = dev_id; 276 dev->data->numa_node = numa_node; 277 dev->data->dev_private = dev_private; 278 279 return dev; 280 } 281 282 static struct rte_dma_dev * 283 dma_attach_secondary(const char *name) 284 { 285 struct rte_dma_dev *dev; 286 int16_t i; 287 int ret; 288 289 ret = dma_data_prepare(); 290 if (ret < 0) { 291 RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data"); 292 return NULL; 293 } 294 295 for (i = 0; i < dma_devices_max; i++) { 296 if (!strcmp(dma_devices_shared_data->data[i].dev_name, name)) 297 break; 298 } 299 if (i == dma_devices_max) { 300 RTE_DMA_LOG(ERR, 301 "Device %s is not driven by the primary process", 302 name); 303 return NULL; 304 } 305 306 dev = &rte_dma_devices[i]; 307 dev->data = &dma_devices_shared_data->data[i]; 308 309 return dev; 310 } 311 312 static struct rte_dma_dev * 313 dma_allocate(const char *name, int numa_node, size_t private_data_size) 314 { 315 struct rte_dma_dev *dev; 316 317 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 318 dev = dma_allocate_primary(name, numa_node, private_data_size); 319 else 320 dev = dma_attach_secondary(name); 321 322 if (dev) { 323 dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id]; 324 dma_fp_object_dummy(dev->fp_obj); 325 } 326 327 return dev; 328 } 329 330 static void 331 dma_release(struct rte_dma_dev *dev) 332 { 333 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 334 rte_free(dev->data->dev_private); 335 memset(dev->data, 0, sizeof(struct rte_dma_dev_data)); 336 } 337 338 dma_fp_object_dummy(dev->fp_obj); 339 memset(dev, 0, sizeof(struct rte_dma_dev)); 340 } 341 342 struct rte_dma_dev * 343 rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size) 344 { 345 struct rte_dma_dev *dev; 346 347 if (dma_check_name(name) != 0 || private_data_size == 0) 348 return NULL; 349 350 dev = dma_allocate(name, numa_node, private_data_size); 351 if (dev == NULL) 352 return NULL; 353 354 dev->state = RTE_DMA_DEV_REGISTERED; 355 356 return dev; 357 } 358 359 int 360 rte_dma_pmd_release(const char *name) 361 { 362 struct rte_dma_dev *dev; 363 364 if (dma_check_name(name) != 0) 365 return -EINVAL; 366 367 dev = dma_find_by_name(name); 368 if (dev == NULL) 369 return -EINVAL; 370 371 if (dev->state == RTE_DMA_DEV_READY) 372 return rte_dma_close(dev->data->dev_id); 373 374 dma_release(dev); 375 return 0; 376 } 377 378 int 379 rte_dma_get_dev_id_by_name(const char *name) 380 { 381 struct rte_dma_dev *dev; 382 383 if (dma_check_name(name) != 0) 384 return -EINVAL; 385 386 dev = dma_find_by_name(name); 387 if (dev == NULL) 388 return -EINVAL; 389 390 return dev->data->dev_id; 391 } 392 393 bool 394 rte_dma_is_valid(int16_t dev_id) 395 { 396 return (dev_id >= 0) && (dev_id < dma_devices_max) && 397 rte_dma_devices != NULL && 398 rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED; 399 } 400 401 uint16_t 402 rte_dma_count_avail(void) 403 { 404 uint16_t count = 0; 405 uint16_t i; 406 407 if (rte_dma_devices == NULL) 408 return count; 409 410 for (i = 0; i < dma_devices_max; i++) { 411 if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) 412 count++; 413 } 414 415 return count; 416 } 417 418 int 419 rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info) 420 { 421 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 422 int ret; 423 424 if (!rte_dma_is_valid(dev_id) || dev_info == NULL) 425 return -EINVAL; 426 427 if (*dev->dev_ops->dev_info_get == NULL) 428 return -ENOTSUP; 429 memset(dev_info, 0, sizeof(struct rte_dma_info)); 430 ret = (*dev->dev_ops->dev_info_get)(dev, dev_info, 431 sizeof(struct rte_dma_info)); 432 if (ret != 0) 433 return ret; 434 435 dev_info->dev_name = dev->data->dev_name; 436 dev_info->numa_node = dev->device->numa_node; 437 dev_info->nb_vchans = dev->data->dev_conf.nb_vchans; 438 439 rte_dma_trace_info_get(dev_id, dev_info); 440 441 return 0; 442 } 443 444 int 445 rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf) 446 { 447 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 448 struct rte_dma_info dev_info; 449 int ret; 450 451 if (!rte_dma_is_valid(dev_id) || dev_conf == NULL) 452 return -EINVAL; 453 454 if (dev->data->dev_started != 0) { 455 RTE_DMA_LOG(ERR, 456 "Device %d must be stopped to allow configuration", 457 dev_id); 458 return -EBUSY; 459 } 460 461 ret = rte_dma_info_get(dev_id, &dev_info); 462 if (ret != 0) { 463 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); 464 return -EINVAL; 465 } 466 if (dev_conf->nb_vchans == 0) { 467 RTE_DMA_LOG(ERR, 468 "Device %d configure zero vchans", dev_id); 469 return -EINVAL; 470 } 471 if (dev_conf->nb_vchans > dev_info.max_vchans) { 472 RTE_DMA_LOG(ERR, 473 "Device %d configure too many vchans", dev_id); 474 return -EINVAL; 475 } 476 if (dev_conf->enable_silent && 477 !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) { 478 RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id); 479 return -EINVAL; 480 } 481 482 if (*dev->dev_ops->dev_configure == NULL) 483 return -ENOTSUP; 484 ret = (*dev->dev_ops->dev_configure)(dev, dev_conf, 485 sizeof(struct rte_dma_conf)); 486 if (ret == 0) 487 memcpy(&dev->data->dev_conf, dev_conf, 488 sizeof(struct rte_dma_conf)); 489 490 rte_dma_trace_configure(dev_id, dev_conf, ret); 491 492 return ret; 493 } 494 495 int 496 rte_dma_start(int16_t dev_id) 497 { 498 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 499 int ret; 500 501 if (!rte_dma_is_valid(dev_id)) 502 return -EINVAL; 503 504 if (dev->data->dev_conf.nb_vchans == 0) { 505 RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id); 506 return -EINVAL; 507 } 508 509 if (dev->data->dev_started != 0) { 510 RTE_DMA_LOG(WARNING, "Device %d already started", dev_id); 511 return 0; 512 } 513 514 if (dev->dev_ops->dev_start == NULL) 515 goto mark_started; 516 517 ret = (*dev->dev_ops->dev_start)(dev); 518 rte_dma_trace_start(dev_id, ret); 519 if (ret != 0) 520 return ret; 521 522 mark_started: 523 dev->data->dev_started = 1; 524 return 0; 525 } 526 527 int 528 rte_dma_stop(int16_t dev_id) 529 { 530 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 531 int ret; 532 533 if (!rte_dma_is_valid(dev_id)) 534 return -EINVAL; 535 536 if (dev->data->dev_started == 0) { 537 RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id); 538 return 0; 539 } 540 541 if (dev->dev_ops->dev_stop == NULL) 542 goto mark_stopped; 543 544 ret = (*dev->dev_ops->dev_stop)(dev); 545 rte_dma_trace_stop(dev_id, ret); 546 if (ret != 0) 547 return ret; 548 549 mark_stopped: 550 dev->data->dev_started = 0; 551 return 0; 552 } 553 554 int 555 rte_dma_close(int16_t dev_id) 556 { 557 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 558 int ret; 559 560 if (!rte_dma_is_valid(dev_id)) 561 return -EINVAL; 562 563 /* Device must be stopped before it can be closed */ 564 if (dev->data->dev_started == 1) { 565 RTE_DMA_LOG(ERR, 566 "Device %d must be stopped before closing", dev_id); 567 return -EBUSY; 568 } 569 570 if (*dev->dev_ops->dev_close == NULL) 571 return -ENOTSUP; 572 ret = (*dev->dev_ops->dev_close)(dev); 573 if (ret == 0) 574 dma_release(dev); 575 576 rte_dma_trace_close(dev_id, ret); 577 578 return ret; 579 } 580 581 int 582 rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan, 583 const struct rte_dma_vchan_conf *conf) 584 { 585 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 586 struct rte_dma_info dev_info; 587 bool src_is_dev, dst_is_dev; 588 int ret; 589 590 if (!rte_dma_is_valid(dev_id) || conf == NULL) 591 return -EINVAL; 592 593 if (dev->data->dev_started != 0) { 594 RTE_DMA_LOG(ERR, 595 "Device %d must be stopped to allow configuration", 596 dev_id); 597 return -EBUSY; 598 } 599 600 ret = rte_dma_info_get(dev_id, &dev_info); 601 if (ret != 0) { 602 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); 603 return -EINVAL; 604 } 605 if (dev->data->dev_conf.nb_vchans == 0) { 606 RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id); 607 return -EINVAL; 608 } 609 if (vchan >= dev_info.nb_vchans) { 610 RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id); 611 return -EINVAL; 612 } 613 if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM && 614 conf->direction != RTE_DMA_DIR_MEM_TO_DEV && 615 conf->direction != RTE_DMA_DIR_DEV_TO_MEM && 616 conf->direction != RTE_DMA_DIR_DEV_TO_DEV) { 617 RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id); 618 return -EINVAL; 619 } 620 if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM && 621 !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) { 622 RTE_DMA_LOG(ERR, 623 "Device %d don't support mem2mem transfer", dev_id); 624 return -EINVAL; 625 } 626 if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV && 627 !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) { 628 RTE_DMA_LOG(ERR, 629 "Device %d don't support mem2dev transfer", dev_id); 630 return -EINVAL; 631 } 632 if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM && 633 !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) { 634 RTE_DMA_LOG(ERR, 635 "Device %d don't support dev2mem transfer", dev_id); 636 return -EINVAL; 637 } 638 if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV && 639 !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) { 640 RTE_DMA_LOG(ERR, 641 "Device %d don't support dev2dev transfer", dev_id); 642 return -EINVAL; 643 } 644 if (conf->nb_desc < dev_info.min_desc || 645 conf->nb_desc > dev_info.max_desc) { 646 RTE_DMA_LOG(ERR, 647 "Device %d number of descriptors invalid", dev_id); 648 return -EINVAL; 649 } 650 src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM || 651 conf->direction == RTE_DMA_DIR_DEV_TO_DEV; 652 if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) || 653 (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) { 654 RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id); 655 return -EINVAL; 656 } 657 dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV || 658 conf->direction == RTE_DMA_DIR_DEV_TO_DEV; 659 if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) || 660 (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) { 661 RTE_DMA_LOG(ERR, 662 "Device %d destination port type invalid", dev_id); 663 return -EINVAL; 664 } 665 666 if (*dev->dev_ops->vchan_setup == NULL) 667 return -ENOTSUP; 668 ret = (*dev->dev_ops->vchan_setup)(dev, vchan, conf, 669 sizeof(struct rte_dma_vchan_conf)); 670 rte_dma_trace_vchan_setup(dev_id, vchan, conf, ret); 671 672 return ret; 673 } 674 675 int 676 rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats) 677 { 678 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 679 680 if (!rte_dma_is_valid(dev_id) || stats == NULL) 681 return -EINVAL; 682 683 if (vchan >= dev->data->dev_conf.nb_vchans && 684 vchan != RTE_DMA_ALL_VCHAN) { 685 RTE_DMA_LOG(ERR, 686 "Device %d vchan %u out of range", dev_id, vchan); 687 return -EINVAL; 688 } 689 690 if (*dev->dev_ops->stats_get == NULL) 691 return -ENOTSUP; 692 memset(stats, 0, sizeof(struct rte_dma_stats)); 693 return (*dev->dev_ops->stats_get)(dev, vchan, stats, 694 sizeof(struct rte_dma_stats)); 695 } 696 697 int 698 rte_dma_stats_reset(int16_t dev_id, uint16_t vchan) 699 { 700 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 701 int ret; 702 703 if (!rte_dma_is_valid(dev_id)) 704 return -EINVAL; 705 706 if (vchan >= dev->data->dev_conf.nb_vchans && 707 vchan != RTE_DMA_ALL_VCHAN) { 708 RTE_DMA_LOG(ERR, 709 "Device %d vchan %u out of range", dev_id, vchan); 710 return -EINVAL; 711 } 712 713 if (*dev->dev_ops->stats_reset == NULL) 714 return -ENOTSUP; 715 ret = (*dev->dev_ops->stats_reset)(dev, vchan); 716 rte_dma_trace_stats_reset(dev_id, vchan, ret); 717 718 return ret; 719 } 720 721 int 722 rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status) 723 { 724 struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 725 726 if (!rte_dma_is_valid(dev_id)) 727 return -EINVAL; 728 729 if (vchan >= dev->data->dev_conf.nb_vchans) { 730 RTE_DMA_LOG(ERR, "Device %u vchan %u out of range", dev_id, vchan); 731 return -EINVAL; 732 } 733 734 if (*dev->dev_ops->vchan_status == NULL) 735 return -ENOTSUP; 736 return (*dev->dev_ops->vchan_status)(dev, vchan, status); 737 } 738 739 static const char * 740 dma_capability_name(uint64_t capability) 741 { 742 static const struct { 743 uint64_t capability; 744 const char *name; 745 } capa_names[] = { 746 { RTE_DMA_CAPA_MEM_TO_MEM, "mem2mem" }, 747 { RTE_DMA_CAPA_MEM_TO_DEV, "mem2dev" }, 748 { RTE_DMA_CAPA_DEV_TO_MEM, "dev2mem" }, 749 { RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" }, 750 { RTE_DMA_CAPA_SVA, "sva" }, 751 { RTE_DMA_CAPA_SILENT, "silent" }, 752 { RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" }, 753 { RTE_DMA_CAPA_OPS_COPY, "copy" }, 754 { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" }, 755 { RTE_DMA_CAPA_OPS_FILL, "fill" }, 756 }; 757 758 const char *name = "unknown"; 759 uint32_t i; 760 761 for (i = 0; i < RTE_DIM(capa_names); i++) { 762 if (capability == capa_names[i].capability) { 763 name = capa_names[i].name; 764 break; 765 } 766 } 767 768 return name; 769 } 770 771 static void 772 dma_dump_capability(FILE *f, uint64_t dev_capa) 773 { 774 uint64_t capa; 775 776 (void)fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa); 777 while (dev_capa > 0) { 778 capa = 1ull << rte_ctz64(dev_capa); 779 (void)fprintf(f, " %s", dma_capability_name(capa)); 780 dev_capa &= ~capa; 781 } 782 (void)fprintf(f, "\n"); 783 } 784 785 int 786 rte_dma_dump(int16_t dev_id, FILE *f) 787 { 788 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; 789 struct rte_dma_info dev_info; 790 int ret; 791 792 if (!rte_dma_is_valid(dev_id) || f == NULL) 793 return -EINVAL; 794 795 ret = rte_dma_info_get(dev_id, &dev_info); 796 if (ret != 0) { 797 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id); 798 return -EINVAL; 799 } 800 801 (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n", 802 dev->data->dev_id, 803 dev->data->dev_name, 804 dev->data->dev_started ? "started" : "stopped"); 805 dma_dump_capability(f, dev_info.dev_capa); 806 (void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans); 807 (void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans); 808 (void)fprintf(f, " silent_mode: %s\n", 809 dev->data->dev_conf.enable_silent ? "on" : "off"); 810 811 if (dev->dev_ops->dev_dump != NULL) 812 ret = (*dev->dev_ops->dev_dump)(dev, f); 813 rte_dma_trace_dump(dev_id, f, ret); 814 815 return ret; 816 } 817 818 static int 819 dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan, 820 __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst, 821 __rte_unused uint32_t length, __rte_unused uint64_t flags) 822 { 823 RTE_DMA_LOG(ERR, "copy is not configured or not supported."); 824 return -EINVAL; 825 } 826 827 static int 828 dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan, 829 __rte_unused const struct rte_dma_sge *src, 830 __rte_unused const struct rte_dma_sge *dst, 831 __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst, 832 __rte_unused uint64_t flags) 833 { 834 RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported."); 835 return -EINVAL; 836 } 837 838 static int 839 dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan, 840 __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst, 841 __rte_unused uint32_t length, __rte_unused uint64_t flags) 842 { 843 RTE_DMA_LOG(ERR, "fill is not configured or not supported."); 844 return -EINVAL; 845 } 846 847 static int 848 dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan) 849 { 850 RTE_DMA_LOG(ERR, "submit is not configured or not supported."); 851 return -EINVAL; 852 } 853 854 static uint16_t 855 dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan, 856 __rte_unused const uint16_t nb_cpls, 857 __rte_unused uint16_t *last_idx, __rte_unused bool *has_error) 858 { 859 RTE_DMA_LOG(ERR, "completed is not configured or not supported."); 860 return 0; 861 } 862 863 static uint16_t 864 dummy_completed_status(__rte_unused void *dev_private, 865 __rte_unused uint16_t vchan, 866 __rte_unused const uint16_t nb_cpls, 867 __rte_unused uint16_t *last_idx, 868 __rte_unused enum rte_dma_status_code *status) 869 { 870 RTE_DMA_LOG(ERR, 871 "completed_status is not configured or not supported."); 872 return 0; 873 } 874 875 static uint16_t 876 dummy_burst_capacity(__rte_unused const void *dev_private, 877 __rte_unused uint16_t vchan) 878 { 879 RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported."); 880 return 0; 881 } 882 883 static void 884 dma_fp_object_dummy(struct rte_dma_fp_object *obj) 885 { 886 obj->dev_private = NULL; 887 obj->copy = dummy_copy; 888 obj->copy_sg = dummy_copy_sg; 889 obj->fill = dummy_fill; 890 obj->submit = dummy_submit; 891 obj->completed = dummy_completed; 892 obj->completed_status = dummy_completed_status; 893 obj->burst_capacity = dummy_burst_capacity; 894 } 895 896 static int 897 dmadev_handle_dev_list(const char *cmd __rte_unused, 898 const char *params __rte_unused, 899 struct rte_tel_data *d) 900 { 901 int dev_id; 902 903 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 904 for (dev_id = 0; dev_id < dma_devices_max; dev_id++) 905 if (rte_dma_is_valid(dev_id)) 906 rte_tel_data_add_array_int(d, dev_id); 907 908 return 0; 909 } 910 911 #define ADD_CAPA(td, dc, c) rte_tel_data_add_dict_int(td, dma_capability_name(c), !!(dc & c)) 912 913 static int 914 dmadev_handle_dev_info(const char *cmd __rte_unused, 915 const char *params, struct rte_tel_data *d) 916 { 917 struct rte_dma_info dma_info; 918 struct rte_tel_data *dma_caps; 919 int dev_id, ret; 920 uint64_t dev_capa; 921 char *end_param; 922 923 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 924 return -EINVAL; 925 926 dev_id = strtoul(params, &end_param, 0); 927 if (*end_param != '\0') 928 RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring"); 929 930 /* Function info_get validates dev_id so we don't need to. */ 931 ret = rte_dma_info_get(dev_id, &dma_info); 932 if (ret < 0) 933 return -EINVAL; 934 dev_capa = dma_info.dev_capa; 935 936 rte_tel_data_start_dict(d); 937 rte_tel_data_add_dict_string(d, "name", dma_info.dev_name); 938 rte_tel_data_add_dict_int(d, "nb_vchans", dma_info.nb_vchans); 939 rte_tel_data_add_dict_int(d, "numa_node", dma_info.numa_node); 940 rte_tel_data_add_dict_int(d, "max_vchans", dma_info.max_vchans); 941 rte_tel_data_add_dict_int(d, "max_desc", dma_info.max_desc); 942 rte_tel_data_add_dict_int(d, "min_desc", dma_info.min_desc); 943 rte_tel_data_add_dict_int(d, "max_sges", dma_info.max_sges); 944 945 dma_caps = rte_tel_data_alloc(); 946 if (!dma_caps) 947 return -ENOMEM; 948 949 rte_tel_data_start_dict(dma_caps); 950 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_MEM); 951 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_MEM_TO_DEV); 952 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_MEM); 953 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_DEV_TO_DEV); 954 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SVA); 955 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_SILENT); 956 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_HANDLES_ERRORS); 957 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY); 958 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_COPY_SG); 959 ADD_CAPA(dma_caps, dev_capa, RTE_DMA_CAPA_OPS_FILL); 960 rte_tel_data_add_dict_container(d, "capabilities", dma_caps, 0); 961 962 return 0; 963 } 964 965 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, dma_stats.s) 966 967 static int 968 dmadev_handle_dev_stats(const char *cmd __rte_unused, 969 const char *params, 970 struct rte_tel_data *d) 971 { 972 struct rte_dma_info dma_info; 973 struct rte_dma_stats dma_stats; 974 int dev_id, ret, vchan_id; 975 char *end_param; 976 const char *vchan_param; 977 978 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 979 return -EINVAL; 980 981 dev_id = strtoul(params, &end_param, 0); 982 983 /* Function info_get validates dev_id so we don't need to. */ 984 ret = rte_dma_info_get(dev_id, &dma_info); 985 if (ret < 0) 986 return -EINVAL; 987 988 /* If the device has one vchan the user does not need to supply the 989 * vchan id and only the device id is needed, no extra parameters. 990 */ 991 if (dma_info.nb_vchans == 1 && *end_param == '\0') 992 vchan_id = 0; 993 else { 994 vchan_param = strtok(end_param, ","); 995 if (!vchan_param || strlen(vchan_param) == 0 || !isdigit(*vchan_param)) 996 return -EINVAL; 997 998 vchan_id = strtoul(vchan_param, &end_param, 0); 999 } 1000 if (*end_param != '\0') 1001 RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring"); 1002 1003 ret = rte_dma_stats_get(dev_id, vchan_id, &dma_stats); 1004 if (ret < 0) 1005 return -EINVAL; 1006 1007 rte_tel_data_start_dict(d); 1008 ADD_DICT_STAT(submitted); 1009 ADD_DICT_STAT(completed); 1010 ADD_DICT_STAT(errors); 1011 1012 return 0; 1013 } 1014 1015 #ifndef RTE_EXEC_ENV_WINDOWS 1016 static int 1017 dmadev_handle_dev_dump(const char *cmd __rte_unused, 1018 const char *params, 1019 struct rte_tel_data *d) 1020 { 1021 char *buf, *end_param; 1022 int dev_id, ret; 1023 FILE *f; 1024 1025 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 1026 return -EINVAL; 1027 1028 dev_id = strtoul(params, &end_param, 0); 1029 if (*end_param != '\0') 1030 RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring"); 1031 1032 buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); 1033 if (buf == NULL) 1034 return -ENOMEM; 1035 1036 f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+"); 1037 if (f == NULL) { 1038 free(buf); 1039 return -EINVAL; 1040 } 1041 1042 ret = rte_dma_dump(dev_id, f); 1043 fclose(f); 1044 if (ret == 0) { 1045 rte_tel_data_start_dict(d); 1046 rte_tel_data_string(d, buf); 1047 } 1048 1049 free(buf); 1050 return ret; 1051 } 1052 #endif /* !RTE_EXEC_ENV_WINDOWS */ 1053 1054 RTE_INIT(dmadev_init_telemetry) 1055 { 1056 rte_telemetry_register_cmd("/dmadev/list", dmadev_handle_dev_list, 1057 "Returns list of available dmadev devices by IDs. No parameters."); 1058 rte_telemetry_register_cmd("/dmadev/info", dmadev_handle_dev_info, 1059 "Returns information for a dmadev. Parameters: int dev_id"); 1060 rte_telemetry_register_cmd("/dmadev/stats", dmadev_handle_dev_stats, 1061 "Returns the stats for a dmadev vchannel. Parameters: int dev_id, vchan_id (Optional if only one vchannel)"); 1062 #ifndef RTE_EXEC_ENV_WINDOWS 1063 rte_telemetry_register_cmd("/dmadev/dump", dmadev_handle_dev_dump, 1064 "Returns dump information for a dmadev. Parameters: int dev_id"); 1065 #endif 1066 } 1067