/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 * Copyright(c) 2021 Marvell International Ltd
 * Copyright(c) 2021 SmartShare Systems
 */

#ifndef RTE_DMADEV_H
#define RTE_DMADEV_H

/**
 * @file rte_dmadev.h
 *
 * DMA (Direct Memory Access) device API.
 *
 * The DMA framework is built on the following model:
 *
 *     ---------------   ---------------       ---------------
 *     | virtual DMA |   | virtual DMA |       | virtual DMA |
 *     | channel     |   | channel     |       | channel     |
 *     ---------------   ---------------       ---------------
 *            |                |                      |
 *            ------------------                      |
 *                     |                              |
 *               ------------                    ------------
 *               |  dmadev  |                    |  dmadev  |
 *               ------------                    ------------
 *                     |                              |
 *            ------------------             ------------------
 *            | HW DMA channel |             | HW DMA channel |
 *            ------------------             ------------------
 *                     |                              |
 *                     --------------------------------
 *                                     |
 *                          ---------------------
 *                          | HW DMA Controller |
 *                          ---------------------
 *
 * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
 * each HW-DMA-channel should be represented by a dmadev.
 *
 * A dmadev can create multiple virtual DMA channels; each virtual DMA channel
 * represents a different transfer context. DMA operation requests must be
 * submitted to a virtual DMA channel. E.g. an application could create virtual
 * DMA channel 0 for the memory-to-memory transfer scenario and virtual DMA
 * channel 1 for the memory-to-device transfer scenario.
 *
 * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
 * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
 *
 * The functions exported by the dmadev API to set up a device designated by
 * its device identifier must be invoked in the following order:
 * - rte_dma_configure()
 * - rte_dma_vchan_setup()
 * - rte_dma_start()
 *
 * Then, the application can invoke dataplane functions to process jobs.
 *
 * If the application wants to change the configuration (i.e. invoke
 * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
 * rte_dma_stop() first to stop the device and then do the reconfiguration
 * before invoking rte_dma_start() again. The dataplane functions should not
 * be invoked when the device is stopped.
 *
 * Finally, an application can close a dmadev by invoking the rte_dma_close()
 * function.
 *
 * The dataplane APIs include two parts:
 * The first part is the submission of operation requests:
 * - rte_dma_copy()
 * - rte_dma_copy_sg()
 * - rte_dma_fill()
 * - rte_dma_submit()
 *
 * These APIs can work with different virtual DMA channels, which have
 * different contexts.
 *
 * The first three APIs are used to submit an operation request to a virtual
 * DMA channel; if the submission is successful, a non-negative
 * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
 *
 * The last API is used to issue a doorbell to the hardware; the flags
 * parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT) can do the
 * same work.
 * @note When enqueuing a set of jobs to the device, having a separate submit
 * outside a loop makes for clearer code than having a check for the last
 * iteration inside the loop to set a special submit flag. However, for cases
 * where one item alone is to be submitted or there is a small set of jobs to
 * be submitted sequentially, having a submit flag provides a lower-overhead
 * way of doing the submission while still keeping the code clean. A minimal
 * sketch of both styles is shown below.
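 *
 * Illustrative sketch only (assumes a started dmadev 'dev_id' with virtual
 * DMA channel 0 configured; 'N', 'src', 'dst' and 'len' are hypothetical):
 *
 * \code{.c}
 *	// batch: enqueue N jobs, then ring the doorbell once
 *	for (i = 0; i < N; i++)
 *		rte_dma_copy(dev_id, 0, src[i], dst[i], len, 0);
 *	rte_dma_submit(dev_id, 0);
 *
 *	// single job: fold the doorbell into the enqueue via the flag
 *	rte_dma_copy(dev_id, 0, src[0], dst[0], len, RTE_DMA_OP_FLAG_SUBMIT);
 * \endcode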
 *
 * The second part is to obtain the result of requests:
 * - rte_dma_completed()
 *     - return the number of operation requests completed successfully.
 * - rte_dma_completed_status()
 *     - return the number of operation requests completed.
 *
 * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
 * the application does not invoke the above two completion APIs.
 *
 * About the ring_idx returned by the enqueue APIs (e.g. rte_dma_copy(),
 * rte_dma_fill()), the rules are as follows:
 * - The ring_idx values of each virtual DMA channel are independent.
 * - For a given virtual DMA channel, the ring_idx is monotonically
 *   incremented; when it reaches UINT16_MAX, it wraps back to zero.
 * - This ring_idx can be used by applications to track per-operation
 *   metadata in an application-defined circular ring.
 * - The initial ring_idx of a virtual DMA channel is zero; after the
 *   device is stopped, the ring_idx is reset to zero.
 *
 * One example:
 * - step-1: start one dmadev
 * - step-2: enqueue a copy operation, the returned ring_idx is 0
 * - step-3: enqueue a copy operation again, the returned ring_idx is 1
 * - ...
 * - step-101: stop the dmadev
 * - step-102: start the dmadev
 * - step-103: enqueue a copy operation, the returned ring_idx is 0
 * - ...
 * - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
 * - step-x+1: enqueue a copy operation, the returned ring_idx is 0
 * - ...
 *
 * The DMA operation address used in the enqueue APIs (i.e. rte_dma_copy(),
 * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
 *
 * The dmadev supports two types of address: memory address and device address.
 *
 * - memory address: the source and destination address of the memory-to-memory
 * transfer type, or the source address of the memory-to-device transfer type,
 * or the destination address of the device-to-memory transfer type.
 * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
 * can be any VA address, otherwise it must be an IOVA address.
 *
 * - device address: the source and destination address of the device-to-device
 * transfer type, or the source address of the device-to-memory transfer type,
 * or the destination address of the memory-to-device transfer type.
 *
 * Regarding MT-safety: all functions of the dmadev API implemented by a PMD
 * are lock-free and assume they are not invoked in parallel on different
 * logical cores to work on the same target dmadev object.
 * @note Different virtual DMA channels on the same dmadev *DO NOT* support
 * parallel invocation because these virtual DMA channels share the same
 * HW-DMA-channel.
 */

#include <stdint.h>

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_dev.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Maximum number of devices if rte_dma_dev_max() is not called. */
#define RTE_DMADEV_DEFAULT_MAX 64

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Configure the maximum number of dmadevs.
 * @note This function can be invoked before the primary process rte_eal_init()
 * to change the maximum number of dmadevs. If not invoked, the maximum number
 * of dmadevs is RTE_DMADEV_DEFAULT_MAX.
 *
 * @param dev_max
 *   Maximum number of dmadevs.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_dev_max(size_t dev_max);
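
/*
 * Illustrative sketch only: the limit must be raised in the primary process
 * before rte_eal_init(); the value 128 is hypothetical.
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		rte_dma_dev_max(128);
 *		if (rte_eal_init(argc, argv) < 0)
 *			return -1;
 *		// ...
 *	}
 */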

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Get the device identifier for the named DMA device.
 *
 * @param name
 *   DMA device name.
 *
 * @return
 *   Returns DMA device identifier on success.
 *   - <0: Failure to find named DMA device.
 */
__rte_experimental
int rte_dma_get_dev_id_by_name(const char *name);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Check whether the dev_id is valid.
 *
 * @param dev_id
 *   DMA device index.
 *
 * @return
 *   - If the device index is valid (true) or not (false).
 */
__rte_experimental
bool rte_dma_is_valid(int16_t dev_id);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Get the total number of DMA devices that have been successfully
 * initialised.
 *
 * @return
 *   The total number of usable DMA devices.
 */
__rte_experimental
uint16_t rte_dma_count_avail(void);

/**
 * Iterates over valid dmadev instances.
 *
 * @param start_dev_id
 *   The id of the next possible dmadev.
 * @return
 *   Next valid dmadev, -1 if there is none.
 */
__rte_experimental
int16_t rte_dma_next_dev(int16_t start_dev_id);

/** Utility macro to iterate over all available dmadevs */
#define RTE_DMA_FOREACH_DEV(p) \
	for (p = rte_dma_next_dev(0); \
	     p != -1; \
	     p = rte_dma_next_dev(p + 1))

/**@{@name DMA capability
 * @see struct rte_dma_info::dev_capa
 */
/** Support memory-to-memory transfer. */
#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
/** Support memory-to-device transfer. */
#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
/** Support device-to-memory transfer. */
#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
/** Support device-to-device transfer. */
#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
/** Support SVA which could use VA as DMA address.
 * If the device supports SVA, then the application can pass any VA address,
 * e.g. memory from rte_malloc(), rte_memzone(), malloc() or stack memory.
 * If the device does not support SVA, then the application must pass an IOVA
 * address, e.g. obtained from rte_malloc() or rte_memzone().
 */
#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
/** Support work in silent mode.
 * In this mode, the application is not required to invoke the
 * rte_dma_completed*() APIs.
 * @see struct rte_dma_conf::enable_silent
 */
#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
/** Supports error handling.
 *
 * With this bit set, invalid input addresses will be reported as operation
 * failures to the user but other operations can continue.
 * Without this bit set, invalid data is not handled by either HW or driver,
 * so the user must ensure that all memory addresses are valid and accessible
 * by HW.
 */
#define RTE_DMA_CAPA_HANDLES_ERRORS	RTE_BIT64(6)
/** Support copy operation.
 * This capability starts at bit index 32, so as to leave a gap between the
 * normal capabilities and the ops capabilities.
 */
#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
/** Support scatter-gather list copy operation. */
#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
/** Support fill operation. */
#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
/**@}*/
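
/*
 * Illustrative sketch only: selecting devices that support memory-to-memory
 * copies ('dev_id' and 'info' are locals of the sketch; rte_dma_info_get()
 * is declared below).
 *
 *	int16_t dev_id;
 *	struct rte_dma_info info;
 *
 *	RTE_DMA_FOREACH_DEV(dev_id) {
 *		rte_dma_info_get(dev_id, &info);
 *		if ((info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) == 0)
 *			continue;
 *		// dev_id supports memory-to-memory copies
 *	}
 */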

/**
 * A structure used to retrieve the information of a DMA device.
 *
 * @see rte_dma_info_get
 */
struct rte_dma_info {
	const char *dev_name; /**< Unique device name. */
	/** Device capabilities (RTE_DMA_CAPA_*). */
	uint64_t dev_capa;
	/** Maximum number of virtual DMA channels supported. */
	uint16_t max_vchans;
	/** Maximum allowed number of virtual DMA channel descriptors. */
	uint16_t max_desc;
	/** Minimum allowed number of virtual DMA channel descriptors. */
	uint16_t min_desc;
	/** Maximum number of source or destination scatter-gather entries
	 * supported.
	 * If the device does not support the COPY_SG capability, this value
	 * can be zero.
	 * If the device supports the COPY_SG capability, then the
	 * rte_dma_copy_sg() parameters nb_src/nb_dst should not exceed this
	 * value.
	 */
	uint16_t max_sges;
	/** NUMA node connection, -1 if unknown. */
	int16_t numa_node;
	/** Number of virtual DMA channels configured. */
	uint16_t nb_vchans;
};

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Retrieve information of a DMA device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_dma_info* to be filled with the
 *   information of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);

/**
 * A structure used to configure a DMA device.
 *
 * @see rte_dma_configure
 */
struct rte_dma_conf {
	/** The number of virtual DMA channels to set up for the DMA device.
	 * This value cannot be greater than the field 'max_vchans' of struct
	 * rte_dma_info obtained from rte_dma_info_get().
	 */
	uint16_t nb_vchans;
	/** Indicates whether to enable silent mode.
	 * false - default mode, true - silent mode.
	 * This value can be set to true only when the SILENT capability is
	 * supported.
	 *
	 * @see RTE_DMA_CAPA_SILENT
	 */
	bool enable_silent;
};

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Configure a DMA device.
 *
 * This function must be invoked before any other function in the API. It
 * can also be re-invoked when a device is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The DMA device configuration structure encapsulated into rte_dma_conf
 *   object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
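
/*
 * Illustrative sketch only: configuring a device with one virtual DMA
 * channel, respecting the advertised limit.
 *
 *	struct rte_dma_info info;
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *
 *	if (rte_dma_info_get(dev_id, &info) < 0 ||
 *	    info.max_vchans < conf.nb_vchans)
 *		return -1;
 *	if (rte_dma_configure(dev_id, &conf) < 0)
 *		return -1;
 */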

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start a DMA device.
 *
 * The device start step is the last one and consists of setting the DMA
 * to start accepting jobs.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_start(int16_t dev_id);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Stop a DMA device.
 *
 * The device can be restarted with a call to rte_dma_start().
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_stop(int16_t dev_id);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Close a DMA device.
 *
 * The device cannot be restarted after this call.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_close(int16_t dev_id);
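
/*
 * Illustrative sketch only: reconfiguration requires a stopped device
 * ('conf' is the configuration used at the previous rte_dma_configure()).
 *
 *	rte_dma_stop(dev_id);
 *	conf.nb_vchans = 2;	// hypothetical new setting
 *	rte_dma_configure(dev_id, &conf);
 *	// ... redo rte_dma_vchan_setup() for each vchan ...
 *	rte_dma_start(dev_id);
 */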

/**
 * DMA transfer direction defines.
 *
 * @see struct rte_dma_vchan_conf::direction
 */
enum rte_dma_direction {
	/** DMA transfer direction - from memory to memory.
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_MEM,
	/** DMA transfer direction - from memory to device.
	 * In a typical scenario, the SoCs are installed on host servers as
	 * iNICs through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from memory
	 * (i.e. SoC memory) to device (i.e. host memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_DEV,
	/** DMA transfer direction - from device to memory.
	 * In a typical scenario, the SoCs are installed on host servers as
	 * iNICs through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from device
	 * (i.e. host memory) to memory (i.e. SoC memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_MEM,
	/** DMA transfer direction - from device to device.
	 * In a typical scenario, the SoCs are installed on host servers as
	 * iNICs through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from device
	 * (i.e. one host's memory) to device (i.e. another host's memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_DEV,
};

/**
 * DMA access port type defines.
 *
 * @see struct rte_dma_port_param::port_type
 */
enum rte_dma_port_type {
	RTE_DMA_PORT_NONE,
	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
};

/**
 * A structure used to describe DMA access port parameters.
 *
 * @see struct rte_dma_vchan_conf::src_port
 * @see struct rte_dma_vchan_conf::dst_port
 */
struct rte_dma_port_param {
	/** The device access port type.
	 *
	 * @see enum rte_dma_port_type
	 */
	enum rte_dma_port_type port_type;
	RTE_STD_C11
	union {
		/** PCIe access port parameters.
		 *
		 * The following model shows an SoC PCIe module connected to
		 * multiple PCIe hosts and multiple endpoints. The PCIe module
		 * has an integrated DMA controller.
		 *
		 * If the DMA controller wants to access the memory of host A,
		 * the access can be initiated as PF1 of core0, or as VF0 of
		 * PF0 of core0.
		 *
		 * \code{.unparsed}
		 *      System Bus
		 *       |     ----------PCIe module----------
		 *       |     Bus
		 *       |     Interface
		 *       |     -----        ------------------
		 *       |     |   |        | PCIe Core0     |
		 *       |     |   |        |                |   -----------
		 *       |     |   |        | PF-0 -- VF-0   |   | Host A  |
		 *       |     |   |--------|      |- VF-1   |---| Root    |
		 *       |     |   |        | PF-1           |   | Complex |
		 *       |     |   |        | PF-2           |   -----------
		 *       |     |   |        ------------------
		 *       |     |   |
		 *       |     |   |        ------------------
		 *       |     |   |        | PCIe Core1     |
		 *       |     |   |        |                |   -----------
		 *       |     |   |        | PF-0 -- VF-0   |   | Host B  |
		 *       |-----|   |--------| PF-1 -- VF-0   |---| Root    |
		 *       |     |   |        |      |- VF-1   |   | Complex |
		 *       |     |   |        | PF-2           |   -----------
		 *       |     |   |        ------------------
		 *       |     |   |
		 *       |     |   |        ------------------
		 *       |     |DMA|        |                |   ------
		 *       |     |   |        |                |---| EP |
		 *       |     |   |--------| PCIe Core2     |   ------
		 *       |     |   |        |                |   ------
		 *       |     |   |        |                |---| EP |
		 *       |     |   |        |                |   ------
		 *       |     -----        ------------------
		 * \endcode
		 *
		 * @note If some fields cannot be supported by the
		 * hardware/driver, then the driver ignores those fields.
		 * Please check the driver-specific documentation for
		 * limitations and capabilities.
		 */
		__extension__
		struct {
			uint64_t coreid : 4; /**< PCIe core id used. */
			uint64_t pfid : 8; /**< PF id used. */
			uint64_t vfen : 1; /**< VF enable bit. */
			uint64_t vfid : 16; /**< VF id used. */
			/** The pasid field in the TLP packet. */
			uint64_t pasid : 20;
			/** The attributes field in the TLP packet. */
			uint64_t attr : 3;
			/** The processing hint field in the TLP packet. */
			uint64_t ph : 2;
			/** The steering tag field in the TLP packet. */
			uint64_t st : 16;
		} pcie;
	};
	uint64_t reserved[2]; /**< Reserved for future fields. */
};

/**
 * A structure used to configure a virtual DMA channel.
 *
 * @see rte_dma_vchan_setup
 */
struct rte_dma_vchan_conf {
	/** Transfer direction.
	 *
	 * @see enum rte_dma_direction
	 */
	enum rte_dma_direction direction;
	/** Number of descriptors for the virtual DMA channel. */
	uint16_t nb_desc;
	/** 1) Used to describe the device access port parameters in the
	 * device-to-memory transfer scenario.
	 * 2) Used to describe the source device access port parameters in
	 * the device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param src_port;
	/** 1) Used to describe the device access port parameters in the
	 * memory-to-device transfer scenario.
	 * 2) Used to describe the destination device access port parameters
	 * in the device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param dst_port;
};

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Allocate and set up a virtual DMA channel.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel. The value must be in the range
 *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
 * @param conf
 *   The virtual DMA channel configuration structure encapsulated into
 *   rte_dma_vchan_conf object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
			const struct rte_dma_vchan_conf *conf);
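
/*
 * Illustrative sketch only: setting up virtual DMA channel 0 for
 * memory-to-memory transfers and starting the device; 'info' is assumed to
 * have been filled by rte_dma_info_get() beforehand.
 *
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = info.min_desc,  // any value in [min_desc, max_desc]
 *	};
 *
 *	if (rte_dma_vchan_setup(dev_id, 0, &vconf) < 0)
 *		return -1;
 *	if (rte_dma_start(dev_id) < 0)
 *		return -1;
 */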

/**
 * A structure used to retrieve statistics.
 *
 * @see rte_dma_stats_get
 */
struct rte_dma_stats {
	/** Count of operations which were submitted to hardware. */
	uint64_t submitted;
	/** Count of operations which were completed, including successful
	 * and failed completions.
	 */
	uint64_t completed;
	/** Count of operations which failed to complete. */
	uint64_t errors;
};

/**
 * Special ID, which is used to represent all virtual DMA channels.
 *
 * @see rte_dma_stats_get
 * @see rte_dma_stats_reset
 */
#define RTE_DMA_ALL_VCHAN	0xFFFFu

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Retrieve basic statistics of one or all virtual DMA channels.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 *   If equal to RTE_DMA_ALL_VCHAN, statistics for all channels are returned.
 * @param[out] stats
 *   The basic statistics structure encapsulated into rte_dma_stats
 *   object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
		      struct rte_dma_stats *stats);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Reset basic statistics of one or all virtual DMA channels.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 *   If equal to RTE_DMA_ALL_VCHAN, statistics for all channels are reset.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);

/**
 * DMA device vchan status.
 *
 * Enum with the options for the channel status: idle, active, or halted due
 * to error.
 * @see rte_dma_vchan_status
 */
enum rte_dma_vchan_status {
	RTE_DMA_VCHAN_IDLE,         /**< not processing, awaiting ops */
	RTE_DMA_VCHAN_ACTIVE,       /**< currently processing jobs */
	RTE_DMA_VCHAN_HALTED_ERROR, /**< not processing due to error, cannot accept new ops */
};

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Determine if all jobs have completed on a device channel.
 * This function is primarily designed for testing use, as it allows a
 * process to check if all jobs are completed, without actually gathering
 * completions from those jobs.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param[out] status
 *   The vchan status.
 * @return
 *   0 - call completed successfully
 *   < 0 - error code indicating there was a problem calling the API
 */
__rte_experimental
int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Dump DMA device info.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param f
 *   The file to write the output to.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
int rte_dma_dump(int16_t dev_id, FILE *f);
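
/*
 * Illustrative sketch only: reading and clearing aggregate statistics for
 * all vchans of a device (printf format macros come from <inttypes.h>).
 *
 *	struct rte_dma_stats stats;
 *
 *	if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
 *		printf("submitted %" PRIu64 " completed %" PRIu64
 *		       " errors %" PRIu64 "\n",
 *		       stats.submitted, stats.completed, stats.errors);
 *	rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
 */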

/**
 * DMA transfer result status code defines.
 *
 * @see rte_dma_completed_status
 */
enum rte_dma_status_code {
	/** The operation completed successfully. */
	RTE_DMA_STATUS_SUCCESSFUL,
	/** The operation failed to complete because it was aborted by the
	 * user. This is mainly used when processing dev_stop: the user can
	 * modify the descriptors (e.g. change one bit to tell the hardware
	 * to abort this job), which allows outstanding requests to complete
	 * as much as possible and so reduces the time needed to stop the
	 * device.
	 */
	RTE_DMA_STATUS_USER_ABORT,
	/** The operation failed to complete due to the following scenario:
	 * the jobs in a particular batch are not attempted because they
	 * appeared after a fence where a previous job failed. In some HW
	 * implementations it is possible for jobs from later batches to be
	 * completed nonetheless, so the status of the not-attempted jobs is
	 * reported before that of those newer completed jobs.
	 */
	RTE_DMA_STATUS_NOT_ATTEMPTED,
	/** The operation failed to complete due to an invalid source
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_SRC_ADDR,
	/** The operation failed to complete due to an invalid destination
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_DST_ADDR,
	/** The operation failed to complete due to an invalid source or
	 * destination address, covering the case where an address error is
	 * known to have occurred, but it is not known which address was at
	 * fault.
	 */
	RTE_DMA_STATUS_INVALID_ADDR,
	/** The operation failed to complete due to an invalid length. */
	RTE_DMA_STATUS_INVALID_LENGTH,
	/** The operation failed to complete due to an invalid opcode.
	 * The DMA descriptor may have multiple formats, which are
	 * distinguished by the opcode field.
	 */
	RTE_DMA_STATUS_INVALID_OPCODE,
	/** The operation failed to complete due to a bus read error. */
	RTE_DMA_STATUS_BUS_READ_ERROR,
	/** The operation failed to complete due to a bus write error. */
	RTE_DMA_STATUS_BUS_WRITE_ERROR,
	/** The operation failed to complete due to a bus error, covering
	 * the case where a bus error is known to have occurred, but the
	 * direction is not known.
	 */
	RTE_DMA_STATUS_BUS_ERROR,
	/** The operation failed to complete due to data poisoning. */
	RTE_DMA_STATUS_DATA_POISION,
	/** The operation failed to complete due to a descriptor read error. */
	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
	/** The operation failed to complete due to a device link error.
	 * Indicates a link error in the memory-to-device/device-to-memory/
	 * device-to-device transfer scenario.
	 */
	RTE_DMA_STATUS_DEV_LINK_ERROR,
	/** The operation failed to complete due to a page fault during
	 * address lookup.
	 */
	RTE_DMA_STATUS_PAGE_FAULT,
	/** The operation failed to complete due to an unknown reason.
	 * The initial value is 256, which reserves space for future errors.
	 */
	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
};

/**
 * A structure used to hold a scatter-gather DMA operation request entry.
 *
 * @see rte_dma_copy_sg
 */
struct rte_dma_sge {
	rte_iova_t addr; /**< The DMA operation address. */
	uint32_t length; /**< The DMA operation length. */
};

#include "rte_dmadev_core.h"

/**@{@name DMA operation flag
 * @see rte_dma_copy()
 * @see rte_dma_copy_sg()
 * @see rte_dma_fill()
 */
/** Fence flag.
 * The operation with this flag must be processed only after all previous
 * operations have completed.
 * If the specified DMA HW works in-order (i.e. it has an implicit fence
 * between operations), this flag can be a NOP.
 */
#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
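/*
 * Illustrative sketch only: the second copy below reads the output of the
 * first, so it carries the fence flag ('src', 'mid', 'dst' and 'len' are
 * hypothetical).
 *
 *	rte_dma_copy(dev_id, 0, src, mid, len, 0);
 *	rte_dma_copy(dev_id, 0, mid, dst, len,
 *		     RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
 */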
/** Submit flag.
 * The operation with this flag also issues the doorbell to the hardware,
 * triggering the processing of all enqueued jobs.
 */
#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
/** Hint to write data to low-level cache.
 * Used for performance optimization; this is only a hint, and there is no
 * capability bit for it. A driver should not return an error if this flag
 * is set.
 */
#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
/**@}*/

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Enqueue a copy operation onto the virtual DMA channel.
 *
 * This queues up a copy operation to be performed by hardware. If the 'flags'
 * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
 * begin this operation; otherwise the doorbell is not triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param src
 *   The address of the source buffer.
 * @param dst
 *   The address of the destination buffer.
 * @param length
 *   The length of the data to be copied.
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
__rte_experimental
static inline int
rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
	     uint32_t length, uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || length == 0)
		return -EINVAL;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
#endif

	return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
}
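
/*
 * Illustrative sketch only: handling a full ring when enqueuing ('src',
 * 'dst' and 'len' are hypothetical IOVA addresses/lengths).
 *
 *	int ret = rte_dma_copy(dev_id, 0, src, dst, len, 0);
 *	if (ret == -ENOSPC) {
 *		rte_dma_submit(dev_id, 0);	// push pending jobs, retry later
 *	} else if (ret >= 0) {
 *		uint16_t ring_idx = (uint16_t)ret;
 *		// use ring_idx to track per-operation metadata
 *	}
 */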

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
 *
 * This queues up a scatter-gather list copy operation to be performed by
 * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
 * doorbell is triggered to begin this operation; otherwise the doorbell is
 * not triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param src
 *   Pointer to the source scatter-gather entry array.
 * @param dst
 *   Pointer to the destination scatter-gather entry array.
 * @param nb_src
 *   The number of source scatter-gather entries.
 *   @see struct rte_dma_info::max_sges
 * @param nb_dst
 *   The number of destination scatter-gather entries.
 *   @see struct rte_dma_info::max_sges
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
__rte_experimental
static inline int
rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
		uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
	    nb_src == 0 || nb_dst == 0)
		return -EINVAL;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
#endif

	return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
			       nb_dst, flags);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Enqueue a fill operation onto the virtual DMA channel.
 *
 * This queues up a fill operation to be performed by hardware. If the 'flags'
 * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
 * begin this operation; otherwise the doorbell is not triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param pattern
 *   The pattern to populate the destination buffer with.
 * @param dst
 *   The address of the destination buffer.
 * @param length
 *   The length of the destination buffer.
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
__rte_experimental
static inline int
rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
	     rte_iova_t dst, uint32_t length, uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || length == 0)
		return -EINVAL;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
#endif

	return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
			    flags);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Trigger hardware to begin performing enqueued operations.
 *
 * This API is used to write the "doorbell" to the hardware to trigger it
 * to begin the operations previously enqueued by rte_dma_copy/fill().
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
__rte_experimental
static inline int
rte_dma_submit(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
#endif

	return (*obj->submit)(obj->dev_private, vchan);
}
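
/*
 * Illustrative sketch only: a two-source gather into one destination
 * ('iova_a', 'iova_b' and 'iova_c' are hypothetical IOVA addresses).
 *
 *	struct rte_dma_sge src[2] = {
 *		{ .addr = iova_a, .length = 64 },
 *		{ .addr = iova_b, .length = 64 },
 *	};
 *	struct rte_dma_sge dst[1] = {
 *		{ .addr = iova_c, .length = 128 },
 *	};
 *
 *	rte_dma_copy_sg(dev_id, 0, src, dst, 2, 1, RTE_DMA_OP_FLAG_SUBMIT);
 */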

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Return the number of operations that have been successfully completed.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param nb_cpls
 *   The maximum number of completed operations that can be processed.
 * @param[out] last_idx
 *   The last completed operation's ring_idx.
 *   If not required, NULL can be passed in.
 * @param[out] has_error
 *   Indicates if there was a transfer error.
 *   If not required, NULL can be passed in.
 *
 * @return
 *   The number of operations that successfully completed. This return value
 *   must be less than or equal to the value of nb_cpls.
 */
__rte_experimental
static inline uint16_t
rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	uint16_t idx;
	bool err;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
		return 0;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
#endif

	/* Ensure the pointer values are non-null to simplify drivers.
	 * In most cases these should be compile time evaluated, since this is
	 * an inline function.
	 * - If NULL is explicitly passed as parameter, then the compiler
	 *   knows the value is NULL.
	 * - If the address of a local variable is passed as parameter, then
	 *   the compiler knows it is non-NULL.
	 */
	if (last_idx == NULL)
		last_idx = &idx;
	if (has_error == NULL)
		has_error = &err;

	*has_error = false;
	return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
				 has_error);
}

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Return the number of operations that have been completed, whether they
 * succeeded or failed.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 * @param nb_cpls
 *   Indicates the size of the status array.
 * @param[out] last_idx
 *   The last completed operation's ring_idx.
 *   If not required, NULL can be passed in.
 * @param[out] status
 *   A pointer to an array of length 'nb_cpls' that holds the completion
 *   status code of each operation.
 *   @see enum rte_dma_status_code
 *
 * @return
 *   The number of operations that completed. This return value must be less
 *   than or equal to the value of nb_cpls.
 *   If this number is n (n > 0), then the first n values in the status array
 *   are also set.
 */
__rte_experimental
static inline uint16_t
rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
			 const uint16_t nb_cpls, uint16_t *last_idx,
			 enum rte_dma_status_code *status)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	uint16_t idx;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
		return 0;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
#endif

	if (last_idx == NULL)
		last_idx = &idx;

	return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
					last_idx, status);
}
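
/*
 * Illustrative sketch only: draining completions and, on error, retrieving
 * per-operation status ('BURST' is a hypothetical application constant).
 *
 *	enum rte_dma_status_code status[BURST];
 *	uint16_t last_idx, n;
 *	bool error = false;
 *
 *	n = rte_dma_completed(dev_id, 0, BURST, &last_idx, &error);
 *	// ... n ops succeeded: release their resources ...
 *	if (error)
 *		n = rte_dma_completed_status(dev_id, 0, BURST, &last_idx,
 *					     status);
 */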

/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Check remaining capacity in descriptor ring for the current burst.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of virtual DMA channel.
 *
 * @return
 *   - Remaining space in the descriptor ring for the current burst.
 *   - 0 on error.
 */
__rte_experimental
static inline uint16_t
rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id))
		return 0;
	RTE_FUNC_PTR_OR_ERR_RET(*obj->burst_capacity, 0);
#endif
	return (*obj->burst_capacity)(obj->dev_private, vchan);
}

#ifdef __cplusplus
}
#endif

#endif /* RTE_DMADEV_H */