/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 * Copyright 2015-2019 NXP
 *
 */
#ifndef _FSL_QBMAN_PORTAL_H
#define _FSL_QBMAN_PORTAL_H

#include <fsl_qbman_base.h>

#define SVR_LS1080A	0x87030000
#define SVR_LS2080A	0x87010000
#define SVR_LS2088A	0x87090000
#define SVR_LX2160A	0x87360000

/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;

/**
 * DOC - QBMan portal APIs to implement the following functions:
 * - Initialize and destroy the software portal object.
 * - Read and write software portal interrupt registers.
 * - Enqueue, including setting the enqueue descriptor and issuing the enqueue
 *   command.
 * - Dequeue, including setting the dequeue descriptor, issuing the dequeue
 *   command, parsing the dequeue response in DQRR and memory, and parsing the
 *   state change notifications.
 * - Release, including setting the release descriptor and issuing the buffer
 *   release command.
 * - Acquire, acquire buffers from the given buffer pool.
 * - FQ management.
 * - Channel management, enable/disable CDAN with or without context.
 */

/**
 * qbman_swp_init() - Create a functional object representing the given
 * QBMan portal descriptor.
 * @d: the given qbman swp descriptor.
 *
 * Return qbman_swp portal object for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 * QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed.
 */
void qbman_swp_finish(struct qbman_swp *p);

/**
 * qbman_swp_invalidate() - Invalidate the cache-enabled area of the QBMan
 * portal. This must be called if a portal is moved to another core, because
 * the QBMan portal area is non-coherent.
 * @p: the qbman_swp object to be invalidated.
 */
void qbman_swp_invalidate(struct qbman_swp *p);

/**
 * qbman_swp_get_desc() - Get the descriptor of the given portal object.
 * @p: the given portal object.
 *
 * Return the descriptor for this portal.
 */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
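/*
 * Example (illustrative sketch only, not part of the API): a portal is
 * typically brought up from a filled-in qbman_swp_desc and torn down with
 * qbman_swp_finish(). How the descriptor fields are populated depends on the
 * caller's platform setup and is assumed here.
 *
 *	struct qbman_swp_desc pd;   // filled in by the caller's portal setup
 *	struct qbman_swp *swp;
 *
 *	swp = qbman_swp_init(&pd);
 *	if (!swp)
 *		return -1;          // portal object could not be created
 *	...                         // enqueue/dequeue using 'swp'
 *	qbman_swp_finish(swp);
 */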
/**************/
/* Interrupts */
/**************/

/* EQCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
/* Enqueue command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
/* DQRR non-empty interrupt */
#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
/* RCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
/* Release command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
/* Volatile dequeue command interrupt */
#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)

/**
 * qbman_swp_interrupt_get_vanish() - Get the data in the software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_ISDR register.
 */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_vanish() - Set the data in the software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 * @mask: The value to set in the SWP_ISDR register.
 */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_read_status() - Get the data in the software portal
 * interrupt status register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_ISR register.
 */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_clear_status() - Clear the data in the software portal
 * interrupt status register.
 * @p: the given software portal object.
 * @mask: The value to write to the SWP_ISR register to clear the
 * corresponding status bits.
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_dqrr_thrshld_read_status() - Get the data in the software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);

/**
 * qbman_swp_dqrr_thrshld_write() - Set the data in the software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 * @mask: The value to set in the SWP_DQRR_ITR register.
 */
void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_intr_timeout_read_status() - Get the data in the software portal
 * interrupt time-out period register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);

/**
 * qbman_swp_intr_timeout_write() - Set the data in the software portal
 * interrupt time-out period register.
 * @p: the given software portal object.
 * @mask: The value to set in the SWP_ITPR register.
 */
void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_trigger() - Get the data in the software portal
 * interrupt enable register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_IER register.
 */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_trigger() - Set the data in the software portal
 * interrupt enable register.
 * @p: the given software portal object.
 * @mask: The value to set in the SWP_IER register.
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_inhibit() - Get the data in the software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 *
 * Return the settings in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_inhibit() - Set the data in the software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 * @inhibit: non-zero to inhibit the portal interrupts (written to the
 * SWP_IIR register).
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
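/*
 * Example (illustrative sketch only): a typical interrupt service routine
 * reads the status register, handles the sources it cares about, and then
 * acknowledges them. The handler name is a placeholder.
 *
 *	uint32_t status = qbman_swp_interrupt_read_status(swp);
 *
 *	if (status & QBMAN_SWP_INTERRUPT_DQRI)
 *		handle_dqrr_entries(swp);           // hypothetical helper
 *	qbman_swp_interrupt_clear_status(swp, status);
 */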
/************/
/* Dequeues */
/************/

/**
 * struct qbman_result - structure for qbman dequeue response and/or
 * notification.
 *
 * The 64-byte layout covers every possible qbman dequeue result and
 * notification; do not manipulate the fields directly.
 */
struct qbman_result {
	union {
		struct common {
			uint8_t verb;
			uint8_t reserved[63];
		} common;
		struct dq {
			uint8_t verb;
			uint8_t stat;
			__le16 seqnum;
			__le16 oprid;
			uint8_t reserved;
			uint8_t tok;
			__le32 fqid;
			uint32_t reserved2;
			__le32 fq_byte_cnt;
			__le32 fq_frm_cnt;
			__le64 fqd_ctx;
			uint8_t fd[32];
		} dq;
		struct scn {
			uint8_t verb;
			uint8_t stat;
			uint8_t state;
			uint8_t reserved;
			__le32 rid_tok;
			__le64 ctx;
		} scn;
		struct eq_resp {
			uint8_t verb;
			uint8_t dca;
			__le16 seqnum;
			__le16 oprid;
			uint8_t reserved;
			uint8_t rc;
			__le32 tgtid;
			__le32 tag;
			uint16_t qdbin;
			uint8_t qpri;
			uint8_t reserved1;
			__le32 fqid:24;
			__le32 rspid:8;
			__le64 rsp_addr;
			uint8_t fd[32];
		} eq_resp;
	};
};

/* TODO:
 * A DQRI interrupt can be generated when there are dequeue results on the
 * portal's DQRR (this mechanism does not deal with "pull" dequeues to
 * user-supplied 'storage' addresses). There are two parameters to this
 * interrupt source, one is a threshold and the other is a timeout. The
 * interrupt will fire if either the fill-level of the ring exceeds 'thresh',
 * or if the ring has been non-empty for longer than 'timeout' nanoseconds.
 * For timeout, an approximation to the desired nanosecond-granularity value is
 * made, so there are get and set APIs to allow the user to see what actual
 * timeout is set (compared to the timeout that was requested).
 */
int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);

/* ------------------- */
/* Push-mode dequeuing */
/* ------------------- */

/* The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. Channels are not specified by channel ID, but by the
 * index (from 0 to 15) that has been mapped to the desired channel.
 */

/**
 * qbman_swp_push_get() - Get the push dequeue setup.
 * @s: the software portal object.
 * @channel_idx: the channel index to query.
 * @enabled: returned boolean to show whether push dequeue is enabled for
 * the given channel.
 */
void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);

/**
 * qbman_swp_push_set() - Enable or disable push dequeue.
 * @s: the software portal object.
 * @channel_idx: the channel index.
 * @enable: enable or disable push dequeue.
 *
 * The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. Channels are not specified by channel ID, but by the
 * index (from 0 to 15) that has been mapped to the desired channel.
 */
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
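/*
 * Example (illustrative sketch only): enable push dequeue on the channel that
 * has been mapped to index 0, so that its dequeue results start appearing in
 * this portal's DQRR.
 *
 *	int enabled;
 *
 *	qbman_swp_push_set(swp, 0, 1);          // enable index 0
 *	qbman_swp_push_get(swp, 0, &enabled);   // 'enabled' is now non-zero
 */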
/* ------------------- */
/* Pull-mode dequeuing */
/* ------------------- */

/**
 * struct qbman_pull_desc - the structure for pull dequeue descriptor
 */
struct qbman_pull_desc {
	union {
		uint32_t dont_manipulate_directly[16];
		struct pull {
			uint8_t verb;
			uint8_t numf;
			uint8_t tok;
			uint8_t reserved;
			uint32_t dq_src;
			uint64_t rsp_addr;
			uint64_t rsp_addr_virt;
			uint8_t padding[40];
		} pull;
	};
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the pull dequeue descriptor to be cleared.
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d);

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage.
 * @d: the pull dequeue descriptor to be set.
 * @storage: the pointer to the memory where the dequeue result is stored.
 * @storage_phys: the physical address of the storage memory.
 * @stash: whether write allocate is enabled.
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to the DQRR. If 'storage' is non-NULL, then
 * results are produced to the given memory location (using the physical/DMA
 * address which the caller provides in 'storage_phys'), and 'stash' controls
 * whether or not those writes to main-memory express a cache-warming
 * attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 uint64_t storage_phys,
				 int stash);
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
 * @d: the pull dequeue descriptor to be set.
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
				   uint8_t numframes);
/**
 * qbman_pull_desc_set_token() - Set the dequeue token for the pull command.
 * @d: the dequeue descriptor.
 * @token: the token to be set.
 *
 * token is the value that shows up in the dequeue response that can be used
 * to detect when the results have been published. The easiest technique is to
 * zero the result "storage" before issuing a dequeue, and use any non-zero
 * 'token' value.
 */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);

/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */
/**
 * qbman_pull_desc_set_fq() - Set the fqid from which the dequeue command
 * dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @fqid: the frame queue index of the given FQ.
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);

/**
 * qbman_pull_desc_set_wq() - Set the wqid from which the dequeue command
 * dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @wqid: composed of channel id and wqid within the channel.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct);

/**
 * qbman_pull_desc_set_channel() - Set the channel id from which the dequeue
 * command dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @chid: the channel id to be dequeued.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct);

/**
 * qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after
 * dequeue.
 * @d: the pull dequeue descriptor to be set.
 * @rad: 1 = reschedule the FQ after dequeue.
 *	 0 = allow the FQ to remain active after dequeue.
 */
void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);

/**
 * qbman_swp_pull() - Issue the pull dequeue command.
 * @s: the software portal object.
 * @d: the pull dequeue descriptor which has been configured with the set of
 * qbman_pull_desc_set_*() calls.
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
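/*
 * Example (illustrative sketch only): issue a volatile (pull) dequeue of up
 * to 4 frames from one FQ into user-provided storage. The storage buffer and
 * its DMA address are assumptions about the caller's memory setup; results
 * can then be polled as described in the "Polling user-provided storage"
 * section below.
 *
 *	struct qbman_pull_desc pd;
 *	struct qbman_result *storage;       // caller-allocated result array
 *	uint64_t storage_phys;              // DMA address of 'storage'
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_numframes(&pd, 4);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -1;                  // portal busy, retry later
 */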
/* -------------------------------- */
/* Polling DQRR for dequeue results */
/* -------------------------------- */

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry.
 * @p: the software portal object.
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);

/**
 * qbman_swp_prefetch_dqrr_next() - Prefetch the next DQRR entry.
 * @s: the software portal object.
 */
void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 * qbman_swp_dqrr_next().
 * @s: the software portal object.
 * @dq: the DQRR entry to be consumed.
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);

/**
 * qbman_swp_dqrr_idx_consume() - Consume the DQRR entry at the given DQRR
 * index.
 * @s: the software portal object.
 * @dqrr_index: the DQRR index of the entry to be consumed.
 */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);

/**
 * qbman_get_dqrr_idx() - Get the dqrr index from the given dqrr entry.
 * @dqrr: the given dqrr object.
 *
 * Return dqrr index.
 */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);

/**
 * qbman_get_dqrr_from_idx() - Use the index to get the dqrr entry from the
 * given portal.
 * @s: the given portal.
 * @idx: the dqrr index.
 *
 * Return dqrr entry object.
 */
struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
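/*
 * Example (illustrative sketch only): drain whatever is currently available
 * in the portal's DQRR. The processing step is a placeholder; see the result
 * parsing helpers (qbman_result_is_DQ(), qbman_result_DQ_fd(), ...) declared
 * further below.
 *
 *	const struct qbman_result *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		process_result(dq);              // hypothetical helper
 *		qbman_swp_dqrr_consume(swp, dq); // hand the entry back to QBMan
 *	}
 */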
/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 * dq storage memory set in the pull dequeue command.
 * @s: the software portal object.
 * @dq: the dequeue result read from the memory.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format (whether or not that is the same as the little-endian format that
 * hardware DMA'd to the user's storage). As such, once the user has called
 * qbman_result_has_new_result() and been returned a valid dequeue result,
 * they should not call it again on the same memory location (except of course
 * if another dequeue command has been executed to produce a new result to that
 * location).
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
int qbman_result_has_new_result(struct qbman_swp *s,
				struct qbman_result *dq);

/**
 * qbman_check_command_complete() - Check if the previously issued dq command
 * has completed and the results are available in memory.
 * @dq: the dequeue result read from the memory.
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
int qbman_check_command_complete(struct qbman_result *dq);

int qbman_check_new_result(struct qbman_result *dq);

/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/**
 * qbman_result_is_DQ() - Check whether the dequeue result is a frame dequeue
 * response or not.
 * @dq: the dequeue result to be checked.
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications.
 */
int qbman_result_is_DQ(const struct qbman_result *dq);

/**
 * qbman_result_is_SCN() - Check whether the dequeue result is a notification
 * or not.
 * @dq: the dequeue result to be checked.
 *
 * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
 * notifications" of one type or another. Some APIs apply to all of them, of
 * the form qbman_result_SCN_***().
 */
static inline int qbman_result_is_SCN(const struct qbman_result *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* Recognise different notification types, only required if the user allows
 * for these to occur, and cares about them when they do.
 */

/**
 * qbman_result_is_FQDAN() - Check for FQ Data Availability Notification.
 * @dq: the qbman_result object.
 *
 * Return 1 if this is FQDAN.
 */
int qbman_result_is_FQDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CDAN() - Check for Channel Data Availability Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CDAN.
 */
int qbman_result_is_CDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CSCN() - Check for Congestion State Change Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CSCN.
 */
int qbman_result_is_CSCN(const struct qbman_result *dq);

/**
 * qbman_result_is_BPSCN() - Check for Buffer Pool State Change Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is BPSCN.
 */
int qbman_result_is_BPSCN(const struct qbman_result *dq);
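/*
 * Example (illustrative sketch only): after issuing a pull dequeue into
 * user-provided storage (see qbman_swp_pull() above), wait for QBMan to
 * publish the result and then inspect it. Checking every entry and the
 * VALIDFRAME/EXPIRED status bits is left out for brevity.
 *
 *	while (!qbman_check_command_complete(&storage[0]))
 *		;                              // or back off / do other work
 *
 *	if (qbman_result_is_DQ(&storage[0]))
 *		process_frame(&storage[0]);    // hypothetical helper
 */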
/**
 * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CGCU.
 */
int qbman_result_is_CGCU(const struct qbman_result *dq);

/* Frame queue state change notifications; (FQDAN in theory counts too as it
 * leaves a FQ parked, but it is primarily a data availability notification)
 */

/**
 * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRN.
 */
int qbman_result_is_FQRN(const struct qbman_result *dq);

/**
 * qbman_result_is_FQRNI() - Check for FQ Retirement Notification Immediate.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRNI.
 */
int qbman_result_is_FQRNI(const struct qbman_result *dq);

/**
 * qbman_result_is_FQPN() - Check for FQ Park Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQPN.
 */
int qbman_result_is_FQPN(const struct qbman_result *dq);

/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
 */
/* FQ empty */
#define QBMAN_DQ_STAT_FQEMPTY       0x80
/* FQ held active */
#define QBMAN_DQ_STAT_HELDACTIVE    0x40
/* FQ force eligible */
#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
/* Valid frame */
#define QBMAN_DQ_STAT_VALIDFRAME    0x10
/* FQ ODP enable */
#define QBMAN_DQ_STAT_ODPVALID      0x04
/* Volatile dequeue */
#define QBMAN_DQ_STAT_VOLATILE      0x02
/* volatile dequeue command is expired */
#define QBMAN_DQ_STAT_EXPIRED       0x01

#define QBMAN_EQCR_DCA_IDXMASK      0x0f
#define QBMAN_ENQUEUE_FLAG_DCA      (1ULL << 31)

/**
 * qbman_result_DQ_flags() - Get the STAT field of the dequeue response.
 * @dq: the dequeue result.
 *
 * Return the stat field.
 */
uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);

/**
 * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
 * command.
 * @dq: the dequeue result.
 *
 * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
 */
static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
{
	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
}

/**
 * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
 * completed.
 * @dq: the dequeue result.
 *
 * Return boolean.
 */
static inline int qbman_result_DQ_is_pull_complete(
					const struct qbman_result *dq)
{
	return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
}

/**
 * qbman_result_DQ_seqnum() - Get the seqnum field in the dequeue response.
 * seqnum is valid only if the VALIDFRAME flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return seqnum.
 */
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);

/**
 * qbman_result_DQ_odpid() - Get the odpid field in the dequeue response.
 * odpid is valid only if the ODPVALID flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return odpid.
 */
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqid() - Get the fqid in the dequeue response.
 * @dq: the dequeue result.
 *
 * Return fqid.
 */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_byte_count() - Get the byte count in the dequeue response.
 * @dq: the dequeue result.
 *
 * Return the byte count remaining in the FQ.
 */
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_frame_count() - Get the frame count in the dequeue response.
 * @dq: the dequeue result.
 *
 * Return the frame count remaining in the FQ.
 */
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqd_ctx() - Get the frame queue context in the dequeue
 * response.
 * @dq: the dequeue result.
 *
 * Return the frame queue context.
 */
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fd() - Get the frame descriptor in the dequeue response.
 * @dq: the dequeue result.
 *
 * Return the frame descriptor.
 */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);

/* State-change notifications (FQDAN/CDAN/CSCN/...). */

/**
 * qbman_result_SCN_state() - Get the state field in the state-change
 * notification.
 * @scn: the state change notification.
 *
 * Return the state in the notification.
 */
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);

/**
 * qbman_result_SCN_rid() - Get the resource id from the notification.
 * @scn: the state change notification.
 *
 * Return the resource id.
 */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);

/**
 * qbman_result_SCN_ctx() - Get the context from the notification.
 * @scn: the state change notification.
 *
 * Return the context.
 */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);

/* Type-specific "resource IDs". Mainly for illustration purposes, though it
 * also gives the appropriate type widths.
 */
/* Get the FQID from the FQDAN */
#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRN */
#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRNI */
#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQPN */
#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the channel ID from the CDAN */
#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
/* Get the CGID from the CSCN */
#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))

/**
 * qbman_result_bpscn_bpid() - Get the bpid from the BPSCN.
 * @scn: the state change notification.
 *
 * Return the buffer pool id.
 */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_has_free_bufs() - Check the BPSCN to see whether there
 * are free buffers in the pool.
 * @scn: the state change notification.
 *
 * Return non-zero if there are free buffers in the pool, 0 otherwise.
 */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_is_depleted() - Check the BPSCN to see whether the
 * buffer pool is depleted.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool depletion.
 */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
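/*
 * Example (illustrative sketch only): react to a buffer pool state change
 * notification by checking which pool it refers to and whether it has become
 * depleted. The refill helper is a placeholder.
 *
 *	if (qbman_result_is_BPSCN(dq)) {
 *		uint16_t bpid = qbman_result_bpscn_bpid(dq);
 *
 *		if (qbman_result_bpscn_is_depleted(dq))
 *			refill_buffer_pool(bpid);   // hypothetical helper
 *	}
 */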
/**
 * qbman_result_bpscn_is_surplus() - Check the BPSCN to see whether the buffer
 * pool is surplus or not.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool surplus.
 */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_ctx() - Get the BPSCN CTX from the BPSCN message.
 * @scn: the state change notification.
 *
 * Return the BPSCN context.
 */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);

/* Parsing CGCU */
/**
 * qbman_result_cgcu_cgid() - Check the CGCU resource id, i.e. the cgid.
 * @scn: the state change notification.
 *
 * Return the CGCU resource id.
 */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);

/**
 * qbman_result_cgcu_icnt() - Get the I_CNT from the CGCU.
 * @scn: the state change notification.
 *
 * Return the instantaneous count in the CGCU notification.
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);

/************/
/* Enqueues */
/************/
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
	union {
		uint32_t dont_manipulate_directly[8];
		struct eq {
			uint8_t verb;
			uint8_t dca;
			uint16_t seqnum;
			uint16_t orpid;
			uint16_t reserved1;
			uint32_t tgtid;
			uint32_t tag;
			uint16_t qdbin;
			uint8_t qpri;
			uint8_t reserved[3];
			uint8_t wae;
			uint8_t rspid;
			uint64_t rsp_addr;
		} eq;
	};
};

/**
 * struct qbman_eq_response - structure of enqueue response
 * @dont_manipulate_directly: the 16 32-bit words representing the whole
 * possible enqueue response.
 */
struct qbman_eq_response {
	uint32_t dont_manipulate_directly[16];
};

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the given enqueue descriptor.
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d);

/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 */

/**
 * qbman_eq_desc_set_no_orp() - Set the enqueue descriptor without orp.
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
/**
 * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor.
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 * @incomplete: indicates that other fragments using the same sequence number
 * are yet to be enqueued.
 */
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete);

/**
 * qbman_eq_desc_set_orp_hole() - Fill a hole in the order-restoration sequence
 * without any enqueue.
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);

/**
 * qbman_eq_desc_set_orp_nesn() - Advance NESN (Next Expected Sequence Number)
 * without any enqueue.
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);
/**
 * qbman_eq_desc_set_response() - Set the enqueue response info.
 * @d: the enqueue descriptor.
 * @storage_phys: the physical address of the enqueue response in memory.
 * @stash: indicate whether write allocation is enabled or not.
 *
 * In the case where an enqueue response is DMA'd, this determines where that
 * response should go. (The physical/DMA address is given for hardware's
 * benefit, but software should interpret it as a "struct qbman_eq_response"
 * data structure.) 'stash' controls whether or not the write to main-memory
 * expresses a cache-warming attribute.
 */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				uint64_t storage_phys,
				int stash);

/**
 * qbman_eq_desc_set_token() - Set the token for the enqueue command.
 * @d: the enqueue descriptor.
 * @token: the token to be set.
 *
 * token is the value that shows up in an enqueue response that can be used to
 * detect when the results have been published. The easiest technique is to
 * zero the result "storage" before issuing an enqueue, and use any non-zero
 * 'token' value.
 */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);

/**
 * Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note that none of these will have any effect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */
/**
 * qbman_eq_desc_set_fq() - Set the Frame Queue id for the enqueue command.
 * @d: the enqueue descriptor.
 * @fqid: the id of the frame queue to be enqueued to.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);

/**
 * qbman_eq_desc_set_qd() - Set the Queuing Destination for the enqueue
 * command.
 * @d: the enqueue descriptor.
 * @qdid: the id of the queuing destination to be enqueued to.
 * @qd_bin: the queuing destination bin.
 * @qd_prio: the queuing destination priority.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio);

/**
 * qbman_eq_desc_set_eqdi() - Enable/disable the EQDI interrupt.
 * @d: the enqueue descriptor.
 * @enable: boolean to enable/disable EQDI.
 *
 * Determines whether or not the portal's EQDI interrupt source should be
 * asserted after the enqueue command is completed.
 */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);

/**
 * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
 * @d: the enqueue descriptor.
 * @enable: enable/disable DCA mode.
 * @dqrr_idx: DCAP_CI, the DCAP consumer index.
 * @park: whether to park the FQ or not.
 *
 * Determines whether or not a portal DQRR entry should be consumed once the
 * enqueue command is completed.
 * (And if so, and the DQRR entry corresponds to a held-active
 * (order-preserving) FQ, whether the FQ should be parked instead of being
 * rescheduled.)
 */
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park);

/**
 * qbman_result_eqresp_fd() - Get the fd from the enqueue response.
 * @eqresp: enqueue response.
 *
 * Return the fd pointer.
 */
struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);

/**
 * qbman_result_eqresp_set_rspid() - Set the response id in the enqueue
 * response.
 * @eqresp: enqueue response.
 * @val: value to set as the response id.
 *
 * This value is set into the response id before the enqueue command, and gets
 * overwritten by QBMan once the enqueue command is complete.
 */
void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);

/**
 * qbman_result_eqresp_rspid() - Get the response id.
 * @eqresp: enqueue response.
 *
 * Return the response id.
 *
 * At the time of enqueue the user provides the response id. The response id
 * gets copied into the enqueue response to determine if the command has been
 * completed and the response has been updated.
 */
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);

/**
 * qbman_result_eqresp_rc() - Determine whether the enqueue command was
 * successful.
 * @eqresp: enqueue response.
 *
 * Return 0 when the command is successful.
 */
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);

/**
 * qbman_swp_enqueue() - Issue an enqueue command.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptor to be enqueued.
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd);
/**
 * qbman_swp_enqueue_multiple() - Enqueue multiple frames with the same
 * eq descriptor.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptors to be enqueued.
 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options.
 * @num_frames: the number of frames to be enqueued.
 *
 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct qbman_fd *fd,
			       uint32_t *flags,
			       int num_frames);

/**
 * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with the same
 * eq descriptor.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptors to be enqueued.
 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options.
 * @num_frames: the number of frames to be enqueued.
 *
 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
				  const struct qbman_eq_desc *d,
				  struct qbman_fd **fd,
				  uint32_t *flags,
				  int num_frames);
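/*
 * Example (illustrative sketch only): enqueue a single frame descriptor to a
 * frame queue without order restoration, retrying while the EQCR is busy.
 * 'fd' and 'fqid' are assumed to have been prepared by the caller.
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);   // no response DMA on success
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	while (qbman_swp_enqueue(swp, &ed, &fd) == -EBUSY)
 *		;                           // EQCR full, retry
 */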
/**
 * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with individual
 * eq descriptors.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptors.
 * @fd: the frame descriptors to be enqueued.
 * @num_frames: the number of frames to be enqueued.
 *
 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct qbman_fd *fd,
				    int num_frames);

/* TODO:
 * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
 * @s: the software portal.
 * @thresh: the threshold to trigger the EQRI interrupt.
 *
 * An EQRI interrupt can be generated when the fill-level of EQCR falls below
 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
 */
int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);

/*******************/
/* Buffer releases */
/*******************/
/**
 * struct qbman_release_desc - The structure for buffer release descriptor
 * @dont_manipulate_directly: the 16 32-bit words representing the whole
 * possible settings of the qbman release descriptor.
 */
struct qbman_release_desc {
	union {
		uint32_t dont_manipulate_directly[16];
		struct br {
			uint8_t verb;
			uint8_t reserved;
			uint16_t bpid;
			uint32_t reserved2;
			uint64_t buf[7];
		} br;
	};
};

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the qbman release descriptor.
 */
void qbman_release_desc_clear(struct qbman_release_desc *d);

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to.
 * @d: the qbman release descriptor.
 * @bpid: the buffer pool id.
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d: the qbman release descriptor.
 * @enable: whether to assert the RCDI interrupt source.
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

/**
 * qbman_swp_release() - Issue a buffer release command.
 * @s: the software portal object.
 * @d: the release descriptor.
 * @buffers: a pointer to the buffer addresses to be released.
 * @num_buffers: number of buffers to be released, must be less than 8.
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers);

/* TODO:
 * qbman_swp_release_thresh() - Set threshold for RCRI interrupt.
 * @s: the software portal.
 * @thresh: the threshold.
 *
 * An RCRI interrupt can be generated when the fill-level of RCR falls below
 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
 */
int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);

/*******************/
/* Buffer acquires */
/*******************/
/**
 * qbman_swp_acquire() - Issue a buffer acquire command.
 * @s: the software portal object.
 * @bpid: the buffer pool index.
 * @buffers: a pointer to where the acquired buffer addresses are returned.
 * @num_buffers: number of buffers to be acquired, must be less than 8.
 *
 * Return 0 for success, or negative error code if the acquire command
 * fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
		      unsigned int num_buffers);
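/*
 * Example (illustrative sketch only): release two buffers back to a buffer
 * pool and later re-acquire them. The buffer addresses and bpid are assumed
 * to come from the caller's pool setup.
 *
 *	struct qbman_release_desc rd;
 *	uint64_t bufs[2] = { buf0_phys, buf1_phys };
 *	uint64_t got[2];
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
 *		;                                  // RCR full, retry
 *
 *	if (qbman_swp_acquire(swp, bpid, got, 2) < 0)
 *		return -1;                         // acquire failed
 */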
/*****************/
/* FQ management */
/*****************/
/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
 * @s: the software portal object.
 * @fqid: the index of the frame queue to be scheduled.
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);

/**
 * qbman_swp_fq_force() - Force the FQ to the fully scheduled state.
 * @s: the software portal object.
 * @fqid: the index of the frame queue to be forced.
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);

/**
 * These functions change the FQ flow-control stuff between XON/XOFF. (The
 * default is XON.) This setting doesn't affect enqueues to the FQ, just
 * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
 * changed to XOFF after it had already become truly-scheduled to a channel, and
 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
 * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
 * return NULL.)
 */
/**
 * qbman_swp_fq_xon() - XON the frame queue.
 * @s: the software portal object.
 * @fqid: the index of the frame queue.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
/**
 * qbman_swp_fq_xoff() - XOFF the frame queue.
 * @s: the software portal object.
 * @fqid: the index of the frame queue.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);

/**********************/
/* Channel management */
/**********************/

/**
 * If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then these functions will be necessary.
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be re-enabled before they'll generate another. (The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * re-enable step.) Each function generates a distinct command to hardware, so
 * a combination function is provided if the user wishes to modify the
 * "context" (which shows up in each CDAN message) each time they re-enable,
 * as a single command to hardware.
 */
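/*
 * Example (illustrative sketch only): the usual CDAN cycle using the
 * functions declared below. The channel id, context value, and the pull
 * dequeue helper are placeholders.
 *
 *	qbman_swp_CDAN_set_context_enable(swp, ch_id, ctx);
 *	...                            // later, a CDAN arrives in the DQRR
 *	if (qbman_result_is_CDAN(dq)) {
 *		pull_dequeue_channel(swp, qbman_result_CDAN_cid(dq));
 *		qbman_swp_CDAN_enable(swp, ch_id);  // re-arm for the next CDAN
 *	}
 */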
/**
 * qbman_swp_CDAN_set_context() - Set the CDAN context.
 * @s: the software portal object.
 * @channelid: the channel index.
 * @ctx: the context to be set in CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx);

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);

/**
 * qbman_swp_CDAN_disable() - Disable CDAN for the channel.
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);

/**
 * qbman_swp_CDAN_set_context_enable() - Set the CDAN context and enable CDAN.
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 * @ctx: the context set in CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx);
#endif /* !_FSL_QBMAN_PORTAL_H */