/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 * Copyright(c) 2021 Marvell International Ltd
 * Copyright(c) 2021 SmartShare Systems
 */

#ifndef RTE_DMADEV_H
#define RTE_DMADEV_H

/**
 * @file rte_dmadev.h
 *
 * DMA (Direct Memory Access) device API.
 *
 * The DMA framework is built on the following model:
 *
 *     ---------------   ---------------       ---------------
 *     | virtual DMA |   | virtual DMA |       | virtual DMA |
 *     | channel     |   | channel     |       | channel     |
 *     ---------------   ---------------       ---------------
 *            |                |                      |
 *            ------------------                      |
 *                     |                              |
 *               ------------                    ------------
 *               |  dmadev  |                    |  dmadev  |
 *               ------------                    ------------
 *                     |                              |
 *            ------------------               ------------------
 *            | HW DMA channel |               | HW DMA channel |
 *            ------------------               ------------------
 *                     |                              |
 *                     --------------------------------
 *                                     |
 *                           ---------------------
 *                           | HW DMA Controller |
 *                           ---------------------
 *
 * A DMA controller may have multiple HW-DMA-channels (aka HW-DMA-queues);
 * each HW-DMA-channel should be represented by a dmadev.
 *
 * A dmadev may create multiple virtual DMA channels; each virtual DMA channel
 * represents a different transfer context. DMA operation requests must be
 * submitted to a virtual DMA channel. E.g. an application could create
 * virtual DMA channel 0 for memory-to-memory transfers and virtual DMA
 * channel 1 for memory-to-device transfers.
 *
 * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
 * and 'uint16_t vchan' as the virtual DMA channel identifier within one
 * dmadev.
 *
 * The functions exported by the dmadev API to set up a device designated by
 * its device identifier must be invoked in the following order:
 *     - rte_dma_configure()
 *     - rte_dma_vchan_setup()
 *     - rte_dma_start()
 *
 * Then, the application can invoke dataplane functions to process jobs.
 *
 * If the application wants to change the configuration (i.e. invoke
 * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
 * rte_dma_stop() first to stop the device, do the reconfiguration, and then
 * invoke rte_dma_start() again. The dataplane functions must not be invoked
 * while the device is stopped.
 *
 * Finally, an application can close a dmadev by invoking the rte_dma_close()
 * function.
 *
 * The dataplane APIs include two parts:
 * The first part is the submission of operation requests:
 *     - rte_dma_copy()
 *     - rte_dma_copy_sg()
 *     - rte_dma_fill()
 *     - rte_dma_submit()
 *
 * These APIs can work with different virtual DMA channels, each of which has
 * its own context.
 *
 * The first three APIs are used to submit an operation request to a virtual
 * DMA channel. If the submission is successful, a non-negative
 * ring_idx <= UINT16_MAX is returned; otherwise a negative number is
 * returned.
 *
 * The last API is used to issue a doorbell to the hardware; alternatively,
 * the flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT)
 * can do the same work.
 * @note When enqueuing a set of jobs to the device, having a separate submit
 * outside a loop makes for clearer code than having a check for the last
 * iteration inside the loop to set a special submit flag. However, for cases
 * where one item alone is to be submitted or there is a small set of jobs to
 * be submitted sequentially, having a submit flag provides a lower-overhead
 * way of doing the submission while still keeping the code clean.
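 *
 * For example, a minimal sketch of the submit-outside-the-loop pattern
 * (dev_id, vchan and the srcs/dsts/lens arrays are placeholders, assuming a
 * configured and started device):
 *
 * \code{.c}
 * for (i = 0; i < nb_jobs; i++)
 *     rte_dma_copy(dev_id, vchan, srcs[i], dsts[i], lens[i], 0);
 * rte_dma_submit(dev_id, vchan);
 * \endcode
 *
 * while a single job can be enqueued and submitted in one call:
 *
 * \code{.c}
 * rte_dma_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
 * \endcode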
 *
 * The second part is the obtaining of the result of requests:
 *     - rte_dma_completed()
 *         - return the number of operation requests completed successfully.
 *     - rte_dma_completed_status()
 *         - return the number of operation requests completed.
 *
 * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
 * the application does not need to invoke the above two completion APIs.
 *
 * About the ring_idx returned by the enqueue APIs (e.g. rte_dma_copy(),
 * rte_dma_fill()), the rules are as follows:
 *     - The ring_idx values of each virtual DMA channel are independent.
 *     - For a given virtual DMA channel, the ring_idx increments
 *       monotonically; when it reaches UINT16_MAX, it wraps back to zero.
 *     - This ring_idx can be used by applications to track per-operation
 *       metadata in an application-defined circular ring.
 *     - The initial ring_idx of a virtual DMA channel is zero; after the
 *       device is stopped, the ring_idx is reset to zero.
 *
 * One example:
 *     - step-1: start one dmadev
 *     - step-2: enqueue a copy operation, the ring_idx returned is 0
 *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
 *     - ...
 *     - step-101: stop the dmadev
 *     - step-102: start the dmadev
 *     - step-103: enqueue a copy operation, the ring_idx returned is 0
 *     - ...
 *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
 *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
 *     - ...
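 *
 * As a sketch of the metadata-tracking idiom above (assuming an
 * application-defined 'meta' array of UINT16_MAX + 1 entries, so that the
 * uint16_t ring_idx indexes it directly across wrap-around; 'job_cookie' is
 * a placeholder for per-job metadata):
 *
 * \code{.c}
 * ret = rte_dma_copy(dev_id, vchan, src, dst, len, 0);
 * if (ret >= 0)
 *     meta[(uint16_t)ret] = job_cookie;
 * // later, after gathering completions with an output last_idx:
 * // all jobs up to and including meta[last_idx] have completed
 * \endcode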
 *
 * The DMA operation addresses used in the enqueue APIs (i.e. rte_dma_copy(),
 * rte_dma_copy_sg(), rte_dma_fill()) are defined as rte_iova_t type.
 *
 * The dmadev supports two types of address: memory address and device
 * address.
 *
 * - memory address: the source and destination address of a memory-to-memory
 * transfer, the source address of a memory-to-device transfer, or the
 * destination address of a device-to-memory transfer.
 * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory
 * address can be any VA address, otherwise it must be an IOVA address.
 *
 * - device address: the source and destination address of a device-to-device
 * transfer, the source address of a device-to-memory transfer, or the
 * destination address of a memory-to-device transfer.
 *
 * Regarding MT-safety: all functions of the dmadev API implemented by a PMD
 * are lock-free and assume they are not invoked in parallel on different
 * logical cores to work on the same target dmadev object.
 * @note Different virtual DMA channels on the same dmadev *DO NOT* support
 * parallel invocation because these virtual DMA channels share the same
 * HW-DMA-channel.
 */

#include <stdint.h>

#include <rte_bitops.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Maximum number of devices if rte_dma_dev_max() is not called. */
#define RTE_DMADEV_DEFAULT_MAX 64

/**
 * Configure the maximum number of dmadevs.
 * @note This function can be invoked before the primary process rte_eal_init()
 * to change the maximum number of dmadevs. If not invoked, the maximum number
 * of dmadevs is RTE_DMADEV_DEFAULT_MAX.
 *
 * @param dev_max
 *   Maximum number of dmadevs.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_dev_max(size_t dev_max);

/**
 * Get the device identifier for the named DMA device.
 *
 * @param name
 *   DMA device name.
 *
 * @return
 *   Returns DMA device identifier on success.
 *   - <0: Failure to find named DMA device.
 */
int rte_dma_get_dev_id_by_name(const char *name);

/**
 * Check whether the dev_id is valid.
 *
 * @param dev_id
 *   DMA device index.
 *
 * @return
 *   true if the device index is valid, false otherwise.
 */
bool rte_dma_is_valid(int16_t dev_id);

/**
 * Get the total number of DMA devices that have been successfully
 * initialised.
 *
 * @return
 *   The total number of usable DMA devices.
 */
uint16_t rte_dma_count_avail(void);

/**
 * Iterates over valid dmadev instances.
 *
 * @param start_dev_id
 *   The id from which to start the search for the next valid dmadev.
 * @return
 *   Next valid dmadev id, -1 if there is none.
 */
int16_t rte_dma_next_dev(int16_t start_dev_id);

/** Utility macro to iterate over all available dmadevs */
#define RTE_DMA_FOREACH_DEV(p) \
	for (p = rte_dma_next_dev(0); \
	     p != -1; \
	     p = rte_dma_next_dev(p + 1))
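
/* A minimal sketch of the iteration macro (illustrative only; the count
 * gathered this way matches rte_dma_count_avail()):
 *
 * \code{.c}
 * int16_t id;
 * uint16_t n = 0;
 * RTE_DMA_FOREACH_DEV(id)
 *	n++;
 * // n now equals rte_dma_count_avail()
 * \endcode
 */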

/**@{@name DMA capability
 * @see struct rte_dma_info::dev_capa
 */
/** Support memory-to-memory transfer */
#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
/** Support memory-to-device transfer. */
#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
/** Support device-to-memory transfer. */
#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
/** Support device-to-device transfer. */
#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
/** Support SVA, which allows a VA to be used as the DMA address.
 * If the device supports SVA, the application can pass any VA address, e.g.
 * memory from rte_malloc(), rte_memzone(), malloc() or stack memory.
 * If the device does not support SVA, the application must pass an IOVA
 * address, e.g. one obtained from rte_malloc() or rte_memzone().
 */
#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
/** Support work in silent mode.
 * In this mode, the application is not required to invoke the
 * rte_dma_completed*() APIs.
 * @see struct rte_dma_conf::enable_silent
 */
#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
/** Supports error handling.
 *
 * With this bit set, invalid input addresses will be reported as operation
 * failures to the user, but other operations can continue.
 * Without this bit set, invalid data is not handled by either HW or driver,
 * so the user must ensure that all memory addresses are valid and accessible
 * by HW.
 */
#define RTE_DMA_CAPA_HANDLES_ERRORS	RTE_BIT64(6)
/** Support auto free of the source buffer once a mem-to-dev transfer is
 * completed.
 *
 * @note Even though the DMA driver has this capability, it may not support
 * all mempool drivers. If the mempool is not supported by the DMA driver,
 * rte_dma_vchan_setup() will fail.
 */
#define RTE_DMA_CAPA_M2D_AUTO_FREE	RTE_BIT64(7)

/** Support copy operation.
 * The ops capabilities start at bit index 32, leaving a gap between the
 * normal capabilities and the ops capabilities.
 */
#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
/** Support scatter-gather list copy operation. */
#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
/** Support fill operation. */
#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
/**@}*/

/**
 * A structure used to retrieve the information of a DMA device.
 *
 * @see rte_dma_info_get
 */
struct rte_dma_info {
	const char *dev_name; /**< Unique device name. */
	/** Device capabilities (RTE_DMA_CAPA_*). */
	uint64_t dev_capa;
	/** Maximum number of virtual DMA channels supported. */
	uint16_t max_vchans;
	/** Maximum allowed number of virtual DMA channel descriptors. */
	uint16_t max_desc;
	/** Minimum allowed number of virtual DMA channel descriptors. */
	uint16_t min_desc;
	/** Maximum number of source or destination scatter-gather entries
	 * supported.
	 * If the device does not support the COPY_SG capability, this value
	 * can be zero.
	 * If the device supports the COPY_SG capability, then the
	 * rte_dma_copy_sg() parameters nb_src/nb_dst must not exceed this
	 * value.
	 */
	uint16_t max_sges;
	/** NUMA node connection, -1 if unknown. */
	int16_t numa_node;
	/** Number of virtual DMA channels configured. */
	uint16_t nb_vchans;
};

/**
 * Retrieve information of a DMA device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param[out] dev_info
 *   A pointer to a structure of type *rte_dma_info* to be filled with the
 *   information of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
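
/* A minimal sketch of querying capabilities before use (dev_id is assumed
 * valid; printf() assumes <stdio.h>):
 *
 * \code{.c}
 * struct rte_dma_info info;
 * if (rte_dma_info_get(dev_id, &info) == 0 &&
 *		(info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) &&
 *		(info.dev_capa & RTE_DMA_CAPA_OPS_COPY))
 *	printf("%s: up to %u vchans\n", info.dev_name,
 *			(unsigned int)info.max_vchans);
 * \endcode
 */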

/**
 * A structure used to configure a DMA device.
 *
 * @see rte_dma_configure
 */
struct rte_dma_conf {
	/** The number of virtual DMA channels to set up for the DMA device.
	 * This value cannot be greater than the field 'max_vchans' of struct
	 * rte_dma_info as returned by rte_dma_info_get().
	 */
	uint16_t nb_vchans;
	/** Indicates whether to enable silent mode.
	 * false: default mode, true: silent mode.
	 * This value can be set to true only when the SILENT capability is
	 * supported.
	 *
	 * @see RTE_DMA_CAPA_SILENT
	 */
	bool enable_silent;
};

/**
 * Configure a DMA device.
 *
 * This function must be invoked before any other function in the API. It can
 * also be re-invoked when a device is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The DMA device configuration structure encapsulated into a rte_dma_conf
 *   object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);

/**
 * Start a DMA device.
 *
 * The device start step is the last one and consists of setting the DMA
 * to start accepting jobs.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_start(int16_t dev_id);

/**
 * Stop a DMA device.
 *
 * The device can be restarted with a call to rte_dma_start().
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_stop(int16_t dev_id);

/**
 * Close a DMA device.
 *
 * The device cannot be restarted after this call.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_close(int16_t dev_id);
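
/* A sketch of the overall device lifecycle described in the file-level
 * comment above (struct rte_dma_vchan_conf and rte_dma_vchan_setup() are
 * declared further below; the nb_desc value is an arbitrary example and must
 * lie within [min_desc, max_desc] of struct rte_dma_info):
 *
 * \code{.c}
 * struct rte_dma_conf conf = { .nb_vchans = 1 };
 * struct rte_dma_vchan_conf vconf = {
 *	.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *	.nb_desc = 1024,
 * };
 *
 * if (rte_dma_configure(dev_id, &conf) < 0 ||
 *		rte_dma_vchan_setup(dev_id, 0, &vconf) < 0 ||
 *		rte_dma_start(dev_id) < 0)
 *	return -1; // setup failed
 * // ... dataplane work ...
 * rte_dma_stop(dev_id);
 * rte_dma_close(dev_id);
 * \endcode
 */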

/**
 * DMA transfer direction defines.
 *
 * @see struct rte_dma_vchan_conf::direction
 */
enum rte_dma_direction {
	/** DMA transfer direction - from memory to memory.
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_MEM,
	/** DMA transfer direction - from memory to device.
	 * In a typical scenario, an SoC is installed in a host server as an
	 * iNIC through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from memory
	 * (the SoC's memory) to a device (the host's memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_MEM_TO_DEV,
	/** DMA transfer direction - from device to memory.
	 * In a typical scenario, an SoC is installed in a host server as an
	 * iNIC through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from a
	 * device (the host's memory) to memory (the SoC's memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_MEM,
	/** DMA transfer direction - from device to device.
	 * In a typical scenario, an SoC is installed in a host server as an
	 * iNIC through the PCIe interface. In this case, the SoC works in
	 * EP (endpoint) mode; it can initiate a DMA move request from a
	 * device (one host's memory) to another device (another host's
	 * memory).
	 *
	 * @see struct rte_dma_vchan_conf::direction
	 */
	RTE_DMA_DIR_DEV_TO_DEV,
};

/**
 * DMA access port type defines.
 *
 * @see struct rte_dma_port_param::port_type
 */
enum rte_dma_port_type {
	RTE_DMA_PORT_NONE,
	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
};

/**
 * A structure used to describe DMA access port parameters.
 *
 * @see struct rte_dma_vchan_conf::src_port
 * @see struct rte_dma_vchan_conf::dst_port
 */
struct rte_dma_port_param {
	/** The device access port type.
	 *
	 * @see enum rte_dma_port_type
	 */
	enum rte_dma_port_type port_type;
	union {
		/** PCIe access port parameters.
		 *
		 * The following model shows how the SoC's PCIe module
		 * connects to multiple PCIe hosts and multiple endpoints.
		 * The PCIe module has an integrated DMA controller.
		 *
		 * If the DMA wants to access the memory of host A, the
		 * request can be initiated by PF1 in core0, or by VF0 of
		 * PF0 in core0.
		 *
		 * \code{.unparsed}
		 * System Bus
		 *    |     ----------PCIe module----------
		 *    |     Bus
		 *    |     Interface
		 *    |     -----        ------------------
		 *    |     |   |        | PCIe Core0     |
		 *    |     |   |        |                |        -----------
		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
		 *    |     |   |--------|        |- VF-1 |--------| Root    |
		 *    |     |   |        |   PF-1         |        | Complex |
		 *    |     |   |        |   PF-2         |        -----------
		 *    |     |   |        ------------------
		 *    |     |   |
		 *    |     |   |        ------------------
		 *    |     |   |        | PCIe Core1     |
		 *    |     |   |        |                |        -----------
		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
		 *    |     |   |        |        |- VF-1 |        | Complex |
		 *    |     |   |        |   PF-2         |        -----------
		 *    |     |   |        ------------------
		 *    |     |   |
		 *    |     |   |        ------------------
		 *    |     |DMA|        |                |        ------
		 *    |     |   |        |                |--------| EP |
		 *    |     |   |--------| PCIe Core2     |        ------
		 *    |     |   |        |                |        ------
		 *    |     |   |        |                |--------| EP |
		 *    |     |   |        |                |        ------
		 *    |     -----        ------------------
		 *
		 * \endcode
		 *
		 * @note If some fields can not be supported by the
		 * hardware/driver, then the driver ignores those fields.
		 * Please check driver-specific documentation for limitations
		 * and capabilities.
		 */
		__extension__
		struct {
			uint64_t coreid : 4; /**< PCIe core id used. */
			uint64_t pfid : 8; /**< PF id used. */
			uint64_t vfen : 1; /**< VF enable bit. */
			uint64_t vfid : 16; /**< VF id used. */
			/** The pasid field in TLP packet. */
			uint64_t pasid : 20;
			/** The attributes field in TLP packet. */
			uint64_t attr : 3;
			/** The processing hint field in TLP packet. */
			uint64_t ph : 2;
			/** The steering tag field in TLP packet. */
			uint64_t st : 16;
		} pcie;
	};
	uint64_t reserved[2]; /**< Reserved for future fields. */
};

/**
 * A structure used for offload auto free params.
 */
struct rte_dma_auto_free_param {
	union {
		struct {
			/** Mempool from which the buffer is allocated.
			 * Mempool info is used for freeing the buffer by
			 * hardware.
			 *
			 * @note If the mempool is not supported by the DMA
			 * device, rte_dma_vchan_setup() will fail.
			 */
			struct rte_mempool *pool;
		} m2d;
	};
	/** Reserved for future fields. */
	uint64_t reserved[2];
};

/**
 * A structure used to configure a virtual DMA channel.
 *
 * @see rte_dma_vchan_setup
 */
struct rte_dma_vchan_conf {
	/** Transfer direction
	 *
	 * @see enum rte_dma_direction
	 */
	enum rte_dma_direction direction;
	/** Number of descriptors for the virtual DMA channel */
	uint16_t nb_desc;
	/** 1) Used to describe the device access port parameter in the
	 * device-to-memory transfer scenario.
	 * 2) Used to describe the source device access port parameter in the
	 * device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param src_port;
	/** 1) Used to describe the device access port parameter in the
	 * memory-to-device transfer scenario.
	 * 2) Used to describe the destination device access port parameter
	 * in the device-to-device transfer scenario.
	 *
	 * @see struct rte_dma_port_param
	 */
	struct rte_dma_port_param dst_port;
	/** Buffer parameters for auto freeing a buffer by hardware. To free
	 * the buffer by hardware, RTE_DMA_OP_FLAG_AUTO_FREE must be set
	 * while calling rte_dma_copy() and rte_dma_copy_sg().
	 *
	 * @see RTE_DMA_OP_FLAG_AUTO_FREE
	 * @see struct rte_dma_auto_free_param
	 */
	struct rte_dma_auto_free_param auto_free;
};

/**
 * Allocate and set up a virtual DMA channel.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel. The value must be in the
 *   range [0, nb_vchans - 1] previously supplied to rte_dma_configure().
 * @param conf
 *   The virtual DMA channel configuration structure encapsulated into a
 *   rte_dma_vchan_conf object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
			const struct rte_dma_vchan_conf *conf);
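
/* A sketch of a memory-to-device virtual channel set-up (the PCIe port
 * fields here are purely illustrative; which fields are honoured is
 * device-specific, per the note in struct rte_dma_port_param):
 *
 * \code{.c}
 * struct rte_dma_vchan_conf vconf = {
 *	.direction = RTE_DMA_DIR_MEM_TO_DEV,
 *	.nb_desc = 128,
 *	.dst_port = {
 *		.port_type = RTE_DMA_PORT_PCIE,
 *		.pcie = { .coreid = 0, .pfid = 1, },
 *	},
 * };
 * int ret = rte_dma_vchan_setup(dev_id, 0, &vconf);
 * // ret < 0: direction or port parameters not supported by this device
 * \endcode
 */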

/**
 * A structure used to retrieve statistics.
 *
 * @see rte_dma_stats_get
 */
struct rte_dma_stats {
	/** Count of operations which were submitted to hardware. */
	uint64_t submitted;
	/** Count of operations which were completed, including successful
	 * and failed completions.
	 */
	uint64_t completed;
	/** Count of operations which failed to complete. */
	uint64_t errors;
};

/**
 * Special ID, which is used to represent all virtual DMA channels.
 *
 * @see rte_dma_stats_get
 * @see rte_dma_stats_reset
 */
#define RTE_DMA_ALL_VCHAN	0xFFFFu

/**
 * Retrieve basic statistics of one or all virtual DMA channel(s).
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 *   If equal to RTE_DMA_ALL_VCHAN, statistics for all channels are returned.
 * @param[out] stats
 *   The basic statistics structure encapsulated into a rte_dma_stats
 *   object.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
		      struct rte_dma_stats *stats);

/**
 * Reset basic statistics of one or all virtual DMA channel(s).
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 *   If equal to RTE_DMA_ALL_VCHAN, statistics for all channels are reset.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
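
/* A sketch of polling and clearing per-channel statistics (printf() assumes
 * <stdio.h> and <inttypes.h>):
 *
 * \code{.c}
 * struct rte_dma_stats stats;
 * if (rte_dma_stats_get(dev_id, vchan, &stats) == 0)
 *	printf("submitted %" PRIu64 ", errors %" PRIu64 "\n",
 *			stats.submitted, stats.errors);
 * rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
 * \endcode
 */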

/**
 * Device virtual channel status.
 *
 * Enum with the options for the channel status: idle, active, or halted due
 * to error.
 * @see rte_dma_vchan_status
 */
enum rte_dma_vchan_status {
	RTE_DMA_VCHAN_IDLE,         /**< not processing, awaiting ops */
	RTE_DMA_VCHAN_ACTIVE,       /**< currently processing jobs */
	RTE_DMA_VCHAN_HALTED_ERROR, /**< not processing due to error, cannot accept new ops */
};

/**
 * Determine if all jobs have completed on a device channel.
 * This function is primarily designed for testing use, as it allows a
 * process to check if all jobs are completed without actually gathering
 * completions from those jobs.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param[out] status
 *   The vchan status.
 * @return
 *   0 - call completed successfully
 *   < 0 - error code indicating there was a problem calling the API
 */
int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);
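
/* A sketch of draining a channel before stopping the device (assuming the
 * device is not in silent mode; rte_dma_completed() is declared further
 * below):
 *
 * \code{.c}
 * enum rte_dma_vchan_status st;
 *
 * do {
 *	(void)rte_dma_completed(dev_id, vchan, 32, NULL, NULL);
 * } while (rte_dma_vchan_status(dev_id, vchan, &st) == 0 &&
 *		st == RTE_DMA_VCHAN_ACTIVE);
 * rte_dma_stop(dev_id);
 * \endcode
 */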

/**
 * Dump DMA device info.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param f
 *   The file to write the output to.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
int rte_dma_dump(int16_t dev_id, FILE *f);

/**
 * DMA transfer result status code defines.
 *
 * @see rte_dma_completed_status
 */
enum rte_dma_status_code {
	/** The operation completed successfully. */
	RTE_DMA_STATUS_SUCCESSFUL,
	/** The operation failed to complete due to a user abort.
	 * This is mainly used when processing dev_stop: the user can modify
	 * the descriptors (e.g. change one bit to tell hardware to abort the
	 * job), allowing outstanding requests to complete as much as
	 * possible and reducing the time needed to stop the device.
	 */
	RTE_DMA_STATUS_USER_ABORT,
	/** The operation failed to complete due to the following scenario:
	 * the jobs in a particular batch were not attempted because they
	 * appeared after a fence where a previous job failed. In some HW
	 * implementations it is possible for jobs from later batches to be
	 * completed nonetheless, so the status of the not-attempted jobs may
	 * be reported before the status of those newer completed jobs.
	 */
	RTE_DMA_STATUS_NOT_ATTEMPTED,
	/** The operation failed to complete due to an invalid source
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_SRC_ADDR,
	/** The operation failed to complete due to an invalid destination
	 * address.
	 */
	RTE_DMA_STATUS_INVALID_DST_ADDR,
	/** The operation failed to complete due to an invalid source or
	 * destination address; covers the case where an address error is
	 * known but it is not known which address is at fault.
	 */
	RTE_DMA_STATUS_INVALID_ADDR,
	/** The operation failed to complete due to an invalid length. */
	RTE_DMA_STATUS_INVALID_LENGTH,
	/** The operation failed to complete due to an invalid opcode.
	 * The DMA descriptor may have multiple formats, which are
	 * distinguished by the opcode field.
	 */
	RTE_DMA_STATUS_INVALID_OPCODE,
	/** The operation failed to complete due to a bus read error. */
	RTE_DMA_STATUS_BUS_READ_ERROR,
	/** The operation failed to complete due to a bus write error. */
	RTE_DMA_STATUS_BUS_WRITE_ERROR,
	/** The operation failed to complete due to a bus error; covers the
	 * case where a bus error is known but the direction is not.
	 */
	RTE_DMA_STATUS_BUS_ERROR,
	/** The operation failed to complete due to data poisoning. */
	RTE_DMA_STATUS_DATA_POISION,
	/** The operation failed to complete due to a descriptor read error. */
	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
	/** The operation failed to complete due to a device link error.
	 * Indicates a link error in the memory-to-device/device-to-memory/
	 * device-to-device transfer scenario.
	 */
	RTE_DMA_STATUS_DEV_LINK_ERROR,
	/** The operation failed to complete due to a lookup page fault. */
	RTE_DMA_STATUS_PAGE_FAULT,
	/** The operation failed to complete for an unknown reason.
	 * The initial value is 256, which reserves space for future errors.
	 */
	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
};

/**
 * A structure used to hold a scatter-gather DMA operation request entry.
 *
 * @see rte_dma_copy_sg
 */
struct rte_dma_sge {
	rte_iova_t addr; /**< The DMA operation address. */
	uint32_t length; /**< The DMA operation length. */
};

#ifdef __cplusplus
}
#endif

#include "rte_dmadev_core.h"
#include "rte_dmadev_trace_fp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**@{@name DMA operation flag
 * @see rte_dma_copy()
 * @see rte_dma_copy_sg()
 * @see rte_dma_fill()
 */
/** Fence flag.
 * It means the operation with this flag must be processed only after all
 * previous operations are completed.
 * If the given DMA HW works in-order (i.e. it has a default fence between
 * operations), this flag can be a no-op.
 */
#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
/** Submit flag.
 * It means the operation with this flag must issue a doorbell to the
 * hardware after the job is enqueued.
 */
#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
/** Hint to write data to low-level cache.
 * Used for performance optimization; this is just a hint and there is no
 * capability bit for it, so a driver should not return an error if this
 * flag is set.
 */
#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
/** Auto free buffer flag.
 * An operation with this flag must issue a command to the hardware to free
 * the DMA buffer after the DMA transfer is completed.
 *
 * @see struct rte_dma_vchan_conf::auto_free
 */
#define RTE_DMA_OP_FLAG_AUTO_FREE	RTE_BIT64(3)
/**@}*/

/**
 * Enqueue a copy operation onto the virtual DMA channel.
 *
 * This queues up a copy operation to be performed by hardware. If the
 * 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
 * triggered to begin this operation; otherwise the doorbell is not
 * triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param src
 *   The address of the source buffer.
 * @param dst
 *   The address of the destination buffer.
 * @param length
 *   The length of the data to be copied.
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
static inline int
rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
	     uint32_t length, uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	int ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || length == 0)
		return -EINVAL;
	if (*obj->copy == NULL)
		return -ENOTSUP;
#endif

	ret = (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
	rte_dma_trace_copy(dev_id, vchan, src, dst, length, flags, ret);

	return ret;
}
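
/* A sketch of an enqueue with back-off when the ring is full (-ENOSPC
 * assumes <errno.h>):
 *
 * \code{.c}
 * int idx = rte_dma_copy(dev_id, vchan, src, dst, len,
 *			  RTE_DMA_OP_FLAG_SUBMIT);
 * if (idx == -ENOSPC) {
 *	// descriptor ring full: gather completions, then retry
 * } else if (idx < 0) {
 *	// hard failure, e.g. invalid parameters
 * }
 * \endcode
 */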

/**
 * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
 *
 * This queues up a scatter-gather list copy operation to be performed by
 * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
 * doorbell is triggered to begin this operation; otherwise the doorbell is
 * not triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param src
 *   The pointer to the source scatter-gather entry array.
 * @param dst
 *   The pointer to the destination scatter-gather entry array.
 * @param nb_src
 *   The number of source scatter-gather entries.
 *   @see struct rte_dma_info::max_sges
 * @param nb_dst
 *   The number of destination scatter-gather entries.
 *   @see struct rte_dma_info::max_sges
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
static inline int
rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
		uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	int ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
	    nb_src == 0 || nb_dst == 0)
		return -EINVAL;
	if (*obj->copy_sg == NULL)
		return -ENOTSUP;
#endif

	ret = (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
			      nb_dst, flags);
	rte_dma_trace_copy_sg(dev_id, vchan, src, dst, nb_src, nb_dst, flags,
			      ret);

	return ret;
}
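
/* A sketch of a two-entry scatter-gather copy (iova_a/iova_b/iova_c are
 * placeholder addresses; nb_src/nb_dst must not exceed
 * struct rte_dma_info::max_sges):
 *
 * \code{.c}
 * struct rte_dma_sge src[2] = {
 *	{ .addr = iova_a, .length = 64 },
 *	{ .addr = iova_b, .length = 64 },
 * };
 * struct rte_dma_sge dst[1] = {
 *	{ .addr = iova_c, .length = 128 },
 * };
 * int idx = rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 1,
 *			     RTE_DMA_OP_FLAG_SUBMIT);
 * \endcode
 */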

/**
 * Enqueue a fill operation onto the virtual DMA channel.
 *
 * This queues up a fill operation to be performed by hardware. If the
 * 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
 * triggered to begin this operation; otherwise the doorbell is not
 * triggered.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param pattern
 *   The pattern to populate the destination buffer with.
 * @param dst
 *   The address of the destination buffer.
 * @param length
 *   The length of the destination buffer.
 * @param flags
 *   Flags for this operation.
 *   @see RTE_DMA_OP_FLAG_*
 *
 * @return
 *   - 0..UINT16_MAX: index of enqueued job.
 *   - -ENOSPC: if no space left to enqueue.
 *   - other values < 0 on failure.
 */
static inline int
rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
	     rte_iova_t dst, uint32_t length, uint64_t flags)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	int ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || length == 0)
		return -EINVAL;
	if (*obj->fill == NULL)
		return -ENOTSUP;
#endif

	ret = (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
			   flags);
	rte_dma_trace_fill(dev_id, vchan, pattern, dst, length, flags, ret);

	return ret;
}

/**
 * Trigger hardware to begin performing enqueued operations.
 *
 * Writes the "doorbell" to the hardware to trigger it
 * to begin the operations previously enqueued by rte_dma_copy/fill().
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 *
 * @return
 *   0 on success. Otherwise negative value is returned.
 */
static inline int
rte_dma_submit(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	int ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;
	if (*obj->submit == NULL)
		return -ENOTSUP;
#endif

	ret = (*obj->submit)(obj->dev_private, vchan);
	rte_dma_trace_submit(dev_id, vchan, ret);

	return ret;
}

/**
 * Return the number of operations that have been successfully completed.
 * Once an operation has been reported as completed, the results of that
 * operation will be visible to all cores on the system.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param nb_cpls
 *   The maximum number of completed operations that can be processed.
 * @param[out] last_idx
 *   The last completed operation's ring_idx.
 *   If not required, NULL can be passed in.
 * @param[out] has_error
 *   Indicates whether a transfer error occurred.
 *   If not required, NULL can be passed in.
 *
 * @return
 *   The number of operations that successfully completed. This return value
 *   must be less than or equal to the value of nb_cpls.
 */
static inline uint16_t
rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	uint16_t idx, ret;
	bool err;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
		return 0;
	if (*obj->completed == NULL)
		return 0;
#endif

	/* Ensure the pointer values are non-null to simplify drivers.
	 * In most cases these should be compile time evaluated, since this is
	 * an inline function.
	 * - If NULL is explicitly passed as parameter, then compiler knows the
	 *   value is NULL
	 * - If address of local variable is passed as parameter, then compiler
	 *   can know it's non-NULL.
	 */
	if (last_idx == NULL)
		last_idx = &idx;
	if (has_error == NULL)
		has_error = &err;

	*has_error = false;
	ret = (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
				has_error);
	rte_dma_trace_completed(dev_id, vchan, nb_cpls, last_idx, has_error,
				ret);

	return ret;
}
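
/* A sketch of the completion path: poll rte_dma_completed() on the fast
 * path and fall back to rte_dma_completed_status() (declared below) when an
 * error is flagged; the batch size of 32 is an arbitrary example:
 *
 * \code{.c}
 * uint16_t last_idx, n;
 * bool has_error = false;
 *
 * n = rte_dma_completed(dev_id, vchan, 32, &last_idx, &has_error);
 * // process the n successfully completed jobs
 * if (has_error) {
 *	enum rte_dma_status_code status[32];
 *	n = rte_dma_completed_status(dev_id, vchan, 32, &last_idx, status);
 *	// inspect status[0..n-1] for per-job result codes
 * }
 * \endcode
 */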

/**
 * Return the number of operations that have been completed; the result of
 * each operation may be success or failure.
 * Once an operation has been reported as completed successfully, the results
 * of that operation will be visible to all cores on the system.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 * @param nb_cpls
 *   Indicates the size of the status array.
 * @param[out] last_idx
 *   The last completed operation's ring_idx.
 *   If not required, NULL can be passed in.
 * @param[out] status
 *   A pointer to an array of length 'nb_cpls' that holds the completion
 *   status code of each operation.
 *   @see enum rte_dma_status_code
 *
 * @return
 *   The number of operations that completed. This return value must be less
 *   than or equal to the value of nb_cpls.
 *   If this number, n, is greater than zero, the first n entries of the
 *   status array are also set.
 */
static inline uint16_t
rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
			 const uint16_t nb_cpls, uint16_t *last_idx,
			 enum rte_dma_status_code *status)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	uint16_t idx, ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
		return 0;
	if (*obj->completed_status == NULL)
		return 0;
#endif

	if (last_idx == NULL)
		last_idx = &idx;

	ret = (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
				       last_idx, status);
	rte_dma_trace_completed_status(dev_id, vchan, nb_cpls, last_idx,
				       status, ret);

	return ret;
}

/**
 * Check the remaining capacity in the descriptor ring for the current burst.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param vchan
 *   The identifier of the virtual DMA channel.
 *
 * @return
 *   - Remaining space in the descriptor ring for the current burst.
 *   - 0 on error.
 */
static inline uint16_t
rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	uint16_t ret;

#ifdef RTE_DMADEV_DEBUG
	if (!rte_dma_is_valid(dev_id))
		return 0;
	if (*obj->burst_capacity == NULL)
		return 0;
#endif
	ret = (*obj->burst_capacity)(obj->dev_private, vchan);
	rte_dma_trace_burst_capacity(dev_id, vchan, ret);

	return ret;
}

#ifdef __cplusplus
}
#endif

#endif /* RTE_DMADEV_H */