/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019 NXP
 *
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#ifdef __cplusplus
extern "C" {
#endif

#include <dpaa_rbtree.h>
#include <rte_eventdev.h>

/* FQ lookups (turn this on for 64bit user-space) */
#ifdef RTE_ARCH_64
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
 */
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
#endif

/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;
enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
	qm_dc_portal_pme = 3
};

__rte_internal
u16 dpaa_get_qm_channel_caam(void);

__rte_internal
u16 dpaa_get_qm_channel_pool(void);

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
		      QM_PIRQ_MRI | QM_PIRQ_CCSCI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - dpaa_get_qm_channel_pool());
}

/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)

/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry;	/* MR (Message Ring) entries */
struct qm_mc_command;	/* MC (Management Command) command */
struct qm_mc_result;	/* MC result */

#define QM_FD_FORMAT_SG 0x4
#define QM_FD_FORMAT_LONG 0x2
#define QM_FD_FORMAT_COMPOUND 0x1
enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG 0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG QM_FD_FORMAT_SG
#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 dd:2;	/* dynamic debug */
			u8 liodn_offset:6;
			u8 bpid:8;	/* Buffer Pool ID */
			u8 eliodn_offset:4;
			u8 __reserved:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u8 liodn_offset:6;
			u8 dd:2;	/* dynamic debug */
			u8 bpid:8;	/* Buffer Pool ID */
			u8 __reserved:4;
			u8 eliodn_offset:4;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#endif
		};
		struct {
			u64 __notaddress:24;
			/* More efficient address accessor */
			u64 addr:40;
		};
		u64 opaque_addr;
	};
	/* The 'format' field indicates the interpretation of the remaining 29
	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
	 * other union elements. Note, union'd structs are difficult to use with
	 * static initialisation under gcc, in which case use the "opaque" form
	 * with one of the macros.
	 */
	union {
		/* For easier/faster copying of this part of the fd (eg. from a
		 * DQRR entry to an EQCR entry) copy 'opaque'
		 */
		u32 opaque;
		/* If 'format' is _contig or _sg, 20b length and 9b offset */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format format:3;
			u16 offset:9;
			u32 length20:20;
#else
			u32 length20:20;
			u16 offset:9;
			enum qm_fd_format format:3;
#endif
		};
		/* If 'format' is _contig_big or _sg_big, 29b length */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format1:3;
			u32 length29:29;
#else
			u32 length29:29;
			enum qm_fd_format _format1:3;
#endif
		};
		/* If 'format' is _compound, 29b "congestion weight" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			enum qm_fd_format _format2:3;
			u32 cong_weight:29;
#else
			u32 cong_weight:29;
			enum qm_fd_format _format2:3;
#endif
		};
	};
	union {
		u32 cmd;
		u32 status;
	};
} __rte_aligned(8);
#define QM_FD_DD_NULL 0x00
#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	do { \
		struct qm_fd *__fd931 = (fd); \
		__fd931->addr = v; \
	} while (0)

/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
			u32 addr_lo;	/* low 32-bits of 40-bit address */
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			u8 __reserved1[3];
#endif
		};
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:24;
			u64 addr:40;
#else
			u64 addr:40;
			u64 __notaddress:24;
#endif
		};
		u64 opaque;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 extension:1;	/* Extension bit */
			u32 final:1;		/* Final bit */
			u32 length:30;
#else
			u32 length:30;
			u32 final:1;		/* Final bit */
			u32 extension:1;	/* Extension bit */
#endif
		};
		u32 val;
	};
	u8 __reserved2;
	u8 bpid;
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved3:3;
			u16 offset:13;
#else
			u16 offset:13;
			u16 __reserved3:3;
#endif
		};
		u16 val_off;
	};
} __packed;
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	do { \
		struct qm_sg_entry *__sg931 = (sg); \
		__sg931->addr = v; \
	} while (0)
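
/*
 * Example (illustrative only): populating a simple contiguous FD and a final
 * s/g entry with the accessors above. 'buf_iova', 'len' and 'pool_bpid' are
 * assumed to come from the caller; conversion to hardware byte order (done
 * elsewhere in the driver) is omitted here.
 *
 *	struct qm_fd fd;
 *	struct qm_sg_entry sge;
 *
 *	memset(&fd, 0, sizeof(fd));
 *	qm_fd_addr_set64(&fd, buf_iova);	// 40-bit buffer address
 *	fd.format = qm_fd_contig;		// 20b length + 9b offset form
 *	fd.offset = 0;
 *	fd.length20 = len;
 *	fd.bpid = pool_bpid;			// buffer pool owning the buffer
 *
 *	memset(&sge, 0, sizeof(sge));
 *	qm_sg_entry_set64(&sge, buf_iova);
 *	sge.length = len;
 *	sge.final = 1;				// last entry of the s/g table
 */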

/* See 1.5.8.1: "Enqueue Command" */
struct __rte_aligned(8) qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u8 dca;
	u16 seqnum;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved3[32];
} __packed;


/* "Frame Dequeue Response" */
struct __rte_aligned(8) qm_dqrr_entry {
	u8 verb;
	u8 stat;
	u16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	u32 fqid;	/* 24-bit */
	u32 contextB;
	struct qm_fd fd; /* this has alignment 8 */
	u8 __reserved4[32];
};

#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */


/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
	union {
		struct {
			u8 verb;
			u8 dca;
			u16 seqnum;
			u8 rc;		/* Rejection Code */
			u32 orp:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __packed __rte_aligned(8) ern;
		struct {
			u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
			u8 __reserved1:4;
			enum qm_dc_portal portal:2;
#else
			enum qm_dc_portal portal:3;
			u8 __reserved1:3;
			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
#endif
			u16 __reserved2;
			u8 rc;		/* Rejection Code */
			u32 __reserved3:24;
			u32 fqid;	/* 24-bit */
			u32 tag;
			struct qm_fd fd; /* this has alignment 8 */
		} __packed __rte_aligned(8) dcern;
		struct {
			u8 verb;
			u8 fqs;		/* Frame Queue Status */
			u8 __reserved1[6];
			u32 fqid;	/* 24-bit */
			u32 contextB;
			u8 __reserved2[16];
		} __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
	};
	u8 __reserved2[32];
} __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT 0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
#define QM_MR_VERB_FQRNI 0x22
#define QM_MR_VERB_FQRL 0x23
#define QM_MR_VERB_FQPN 0x24
#define QM_MR_RC_MASK 0xf0 /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP 0x00
#define QM_MR_RC_WRED 0x10
#define QM_MR_RC_ERROR 0x20
#define QM_MR_RC_ORPWINDOW_EARLY 0x30
#define QM_MR_RC_ORPWINDOW_LATE 0x40
#define QM_MR_RC_FQ_TAILDROP 0x50
#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN 0x00
#define QM_MR_DCERN_COLOUR_YELLOW 0x01
#define QM_MR_DCERN_COLOUR_RED 0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
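
/*
 * Example (illustrative only): classifying a message-ring entry using the
 * verb constants above. The 'handle_*' helpers are hypothetical, not part of
 * this API.
 *
 *	static void classify_mr(const struct qm_mr_entry *msg)
 *	{
 *		u8 verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
 *
 *		if (!(verb & QM_MR_VERB_DC_ERN))
 *			handle_sw_ern(&msg->ern);	// s/w enqueue rejection
 *		else if (verb == QM_MR_VERB_DC_ERN)
 *			handle_dc_ern(&msg->dcern);	// rejection via a DCP
 *		else
 *			handle_fq_change(&msg->fq);	// FQRN/FQRNI/FQRL/FQPN
 *	}
 */
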
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, so it is factored out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also split out; the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 exclusive;
	u8 __reserved1:2;
	/* Numbers of cachelines */
	u8 annotation_cl:2;
	u8 data_cl:2;
	u8 context_cl:2;
#else
	u8 context_cl:2;
	u8 data_cl:2;
	u8 annotation_cl:2;
	u8 __reserved1:2;
	u8 exclusive;
#endif
} __packed;
struct qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved1:3;
	u16 mant:8;
	u16 exp:5;
#else
	u16 exp:5;
	u16 mant:8;
	u16 __reserved1:3;
#endif
} __packed;
struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 __reserved1:6;
#else
	u8 __reserved1:6;
	u8 oac:2; /* "Overhead Accounting Control" */
#endif
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
} __packed;
struct qm_fqd {
	union {
		u8 orpc;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1:2;
			u8 orprws:3;
			u8 oa:1;
			u8 olws:2;
#else
			u8 olws:2;
			u8 oa:1;
			u8 orprws:3;
			u8 __reserved1:2;
#endif
		} __packed;
	};
	u8 cgid;
	u16 fq_ctrl; /* See QM_FQCTRL_<...> */
	union {
		u16 dest_wq;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 channel:13; /* qm_channel */
			u16 wq:3;
#else
			u16 wq:3;
			u16 channel:13; /* qm_channel */
#endif
		} __packed dest;
	};
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 __reserved2:1;
	u16 ics_cred:15;
#else
	u16 __reserved2:1;
	u16 ics_cred:15;
#endif
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		uint16_t opaque_td;
		struct qm_fqd_taildrop td;
		struct qm_fqd_oac oac_init;
	};
	u32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		u64 opaque;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 hi;
			u32 lo;
#else
			u32 lo;
			u32 hi;
#endif
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			u16 context_hi;
			u32 context_lo;
#else
			u32 context_lo;
			u16 context_hi;
			struct qm_fqd_stashing stashing;
#endif
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
				      int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	if (val > 0xe0000000)
		return -ERANGE;
	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	td->exp = e;
	td->mant = val;
	return 0;
}

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}


/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
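
/*
 * Example (illustrative only): the taildrop threshold is stored as
 * mant * 2^exp. A request of 65536 bytes encodes exactly as mant=128, exp=9
 * (128 << 9 == 65536); values that are not representable are rounded down,
 * or up when 'roundup' is non-zero. 'fqd' is a caller-owned struct qm_fqd.
 *
 *	struct qm_fqd_taildrop td;
 *
 *	if (qm_fqd_taildrop_set(&td, 65536, 0) == 0)
 *		fqd.td = td;	// qm_fqd_taildrop_get(&td) == 65536
 */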

/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	union {
		u32 word;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 MA:8;
			u32 Mn:5;
			u32 SA:7; /* must be between 64-127 */
			u32 Sn:6;
			u32 Pn:6;
#else
			u32 Pn:6;
			u32 Sn:6;
			u32 SA:7; /* must be between 64-127 */
			u32 Mn:5;
			u32 MA:8;
#endif
		} __packed;
	};
} __packed;
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	union {
		u16 hword;
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 __reserved:3;
			u16 TA:8;
			u16 Tn:5;
#else
			u16 Tn:5;
			u16 TA:8;
			u16 __reserved:3;
#endif
		} __packed;
	};
} __packed;
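
/*
 * Example (illustrative only): encoding a WR_PARM with the formulas above.
 * MA=64, Mn=8 gives MaxTH = 64 * 2^8 = 16384; SA=64, Sn=12 gives
 * Slope = 64 / 2^12 = 1/64; Pn=3 gives MaxP = 4 * (3 + 1) = 16. Whether these
 * particular values suit a given deployment is not implied.
 *
 *	struct qm_cgr_wr_parm wr_g;
 *
 *	wr_g.word = 0;
 *	wr_g.MA = 64;
 *	wr_g.Mn = 8;
 *	wr_g.SA = 64;	// SA must be between 64-127
 *	wr_g.Sn = 12;
 *	wr_g.Pn = 3;
 */
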
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It is factored out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g; /* boolean, use QM_CGR_EN */
	u8 wr_en_y; /* boolean, use QM_CGR_EN */
	u8 wr_en_r; /* boolean, use QM_CGR_EN */
	u8 cscn_en; /* boolean, use QM_CGR_EN */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
			u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
#else
			u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
#endif
		};
		u32 cscn_targ; /* use QM_CGR_TARG_* */
	};
	u8 cstd_en; /* boolean, use QM_CGR_EN */
	u8 cs; /* boolean, only used in query response */
	union {
		struct qm_cgr_cs_thres cs_thres;
		/* use qm_cgr_cs_thres_set64() */
		u16 __cs_thres;
	};
	u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1 0x00100000 /*                      : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->Tn = e;
	th->TA = val;
	return 0;
}
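
/*
 * Example (illustrative only): a congestion-state threshold request of
 * 1 MiB (0x100000) encodes as TA=128, Tn=13, since 128 << 13 == 0x100000;
 * qm_cgr_cs_thres_get64() recovers the effective value.
 *
 *	struct qm_cgr_cs_thres th;
 *
 *	qm_cgr_cs_thres_set64(&th, 0x100000, 0);
 *	// th.TA == 128, th.Tn == 13, qm_cgr_cs_thres_get64(&th) == 0x100000
 */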

/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands " */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct qm_mcc_initfq {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	u32 fqid;	/* 24-bit */
	u16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
	u8 __reserved3[30];
} __packed;
struct qm_mcc_queryfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
struct qm_mcc_queryfq_np {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
struct qm_mcc_alterfq {
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2;
	u8 count;	/* number of consecutive FQID */
	u8 __reserved3[10];
	u32 context_b;	/* frame queue context b */
	u8 __reserved4[40];
} __packed;
struct qm_mcc_initcgr {
	u8 __reserved1;
	u16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved4[32];
} __packed;
struct qm_mcc_cgrtestwrite {
	u8 __reserved1[2];
	u8 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u8 __reserved2[23];
	u8 cgid;
	u8 __reserved3[32];
} __packed;
struct qm_mcc_querycgr {
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
} __packed;
struct qm_mcc_querycongestion {
	u8 __reserved[63];
} __packed;
struct qm_mcc_querywq {
	u8 __reserved;
	/* select channel if verb != QUERYWQ_DEDICATED */
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved1:3;
#else
			u16 __reserved1:3;
			u16 id:13; /* qm_channel */
#endif
		} __packed channel;
	};
	u8 __reserved2[60];
} __packed;

struct qm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct qm_mcc_initfq initfq;
		struct qm_mcc_queryfq queryfq;
		struct qm_mcc_queryfq_np queryfq_np;
		struct qm_mcc_alterfq alterfq;
		struct qm_mcc_initcgr initcgr;
		struct qm_mcc_cgrtestwrite cgrtestwrite;
		struct qm_mcc_querycgr querycgr;
		struct qm_mcc_querycongestion querycongestion;
		struct qm_mcc_querywq querywq;
	};
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
#define QM_INITFQ_WE_ORPC 0x0080
#define QM_INITFQ_WE_CGID 0x0040
#define QM_INITFQ_WE_FQCTRL 0x0020
#define QM_INITFQ_WE_DESTWQ 0x0010
#define QM_INITFQ_WE_ICSCRED 0x0008
#define QM_INITFQ_WE_TDTHRESH 0x0004
#define QM_INITFQ_WE_CONTEXTB 0x0002
#define QM_INITFQ_WE_CONTEXTA 0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G 0x0400
#define QM_CGR_WE_WR_PARM_Y 0x0200
#define QM_CGR_WE_WR_PARM_R 0x0100
#define QM_CGR_WE_WR_EN_G 0x0080
#define QM_CGR_WE_WR_EN_Y 0x0040
#define QM_CGR_WE_WR_EN_R 0x0020
#define QM_CGR_WE_CSCN_EN 0x0010
#define QM_CGR_WE_CSCN_TARG 0x0008
#define QM_CGR_WE_CSTD_EN 0x0004
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001

struct qm_mcr_initfq {
	u8 __reserved1[62];
} __packed;
struct qm_mcr_queryfq {
	u8 __reserved1[8];
	struct qm_fqd fqd; /* the FQD fields are here */
	u8 __reserved2[30];
} __packed;
struct qm_mcr_queryfq_np {
	u8 __reserved1;
	u8 state; /* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 __reserved2;
	u32 fqd_link:24;
	u16 __reserved3:2;
	u16 odp_seq:14;
	u16 __reserved4:2;
	u16 orp_nesn:14;
	u16 __reserved5:1;
	u16 orp_ea_hseq:15;
	u16 __reserved6:1;
	u16 orp_ea_tseq:15;
	u8 __reserved7;
	u32 orp_ea_hptr:24;
	u8 __reserved8;
	u32 orp_ea_tptr:24;
	u8 __reserved9;
	u32 pfdr_hptr:24;
	u8 __reserved10;
	u32 pfdr_tptr:24;
	u8 __reserved11[5];
	u8 __reserved12:7;
	u8 is:1;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
#else
	u8 __reserved2;
	u32 fqd_link:24;

	u16 odp_seq:14;
	u16 __reserved3:2;

	u16 orp_nesn:14;
	u16 __reserved4:2;

	u16 orp_ea_hseq:15;
	u16 __reserved5:1;

	u16 orp_ea_tseq:15;
	u16 __reserved6:1;

	u8 __reserved7;
	u32 orp_ea_hptr:24;

	u8 __reserved8;
	u32 orp_ea_tptr:24;

	u8 __reserved9;
	u32 pfdr_hptr:24;

	u8 __reserved10;
	u32 pfdr_tptr:24;

	u8 __reserved11[5];
	u8 is:1;
	u8 __reserved12:7;
	u16 ics_surp;
	u32 byte_cnt;
	u8 __reserved13;
	u32 frm_cnt:24;
	u32 __reserved14;
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 __reserved15;
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
#endif
} __packed;

struct qm_mcr_alterfq {
	u8 fqs; /* Frame Queue Status */
	u8 __reserved1[61];
} __packed;
struct qm_mcr_initcgr {
	u8 __reserved1[62];
} __packed;
struct qm_mcr_cgrtestwrite {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	u32 __reserved3:24;
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 __reserved4:24;
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u16 lgt; /* Last Group Tick */
	u16 wr_prob_g;
	u16 wr_prob_y;
	u16 wr_prob_r;
	u8 __reserved5[8];
} __packed;
struct qm_mcr_querycgr {
	u16 __reserved1;
	struct __qm_mc_cgr cgr; /* CGR fields */
	u8 __reserved2[3];
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved3:24;
			u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
			u32 i_bcnt_lo; /* low 32-bits of 40-bit */
#else
			u32 i_bcnt_lo; /* low 32-bits of 40-bit */
			u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
			u32 __reserved3:24;
#endif
		};
		u64 i_bcnt;
	};
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u32 __reserved4:24;
			u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
			u32 a_bcnt_lo; /* low 32-bits of 40-bit */
#else
			u32 a_bcnt_lo; /* low 32-bits of 40-bit */
			u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
			u32 __reserved4:24;
#endif
		};
		u64 a_bcnt;
	};
	union {
		u32 cscn_targ_swp[4];
		u8 __reserved5[16];
	};
} __packed;

struct __qm_mcr_querycongestion {
	u32 state[8];
};

struct qm_mcr_querycongestion {
	u8 __reserved[30];
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;
} __packed;
struct qm_mcr_querywq {
	union {
		u16 channel_wq; /* ignores wq (3 lsbits) */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u16 id:13; /* qm_channel */
			u16 __reserved:3;
#else
			u16 __reserved:3;
			u16 id:13; /* qm_channel */
#endif
		} __packed channel;
	};
	u8 __reserved[28];
	u32 wq_len[8];
} __packed;

struct qm_mc_result {
	u8 verb;
	u8 result;
	union {
		struct qm_mcr_initfq initfq;
		struct qm_mcr_queryfq queryfq;
		struct qm_mcr_queryfq_np queryfq_np;
		struct qm_mcr_alterfq alterfq;
		struct qm_mcr_initcgr initcgr;
		struct qm_mcr_cgrtestwrite cgrtestwrite;
		struct qm_mcr_querycgr querycgr;
		struct qm_mcr_querycongestion querycongestion;
		struct qm_mcr_querywq querywq;
	};
} __packed;

#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 * Eg.
 *   u8 cgr = [...];
 *   struct qm_mc_result *res = [...];
 *   printf("congestion group %d congestion state: %d\n", cgr,
 *	QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
 */
#define __CGR_WORD(num) (num >> 5)
#define __CGR_SHIFT(num) (num & 0x1f)
#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
					 u8 cgr)
{
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
}

/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	 */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};

typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);

typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
					struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr,
					void **bd);

/* This callback type is used when handling buffers in dpdk pull mode */
typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
					struct qm_dqrr_entry **dqrr,
					void **bufs,
					int num_bufs);

typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			   const struct qm_mr_entry *msg);

/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
			       const struct qm_mr_entry *msg);

/* This callback function will be used to free mbufs of ERN */
typedef uint16_t (*qman_cb_free_mbuf)(const struct qm_fd *fd);
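
/*
 * Example (illustrative only): a minimal qman_cb_dqrr handler. The frame
 * processing helper is hypothetical; real callbacks typically recover their
 * per-FQ state by casting 'fq', as described for struct qman_fq below.
 *
 *	static enum qman_cb_dqrr_result
 *	my_dqrr_cb(struct qman_portal *qm __rte_unused, struct qman_fq *fq,
 *		   const struct qm_dqrr_entry *dqrr)
 *	{
 *		process_frame(fq, &dqrr->fd);	// hypothetical helper
 *		return qman_cb_dqrr_consume;	// let the driver consume it
 *	}
 */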

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will
 * not be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};


/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	union { /* for dequeued frames */
		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
		qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
		qman_cb_dqrr dqrr;
	};
	qman_dpdk_cb_prepare dqrr_prepare;
	qman_cb_mr ern; /* for s/w ERNs */
	qman_cb_mr fqs; /* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;

	u32 fqid_le;
	u32 fqid;

	int q_fd;
	u16 ch_id;
	int8_t vsp_id;
	u8 cgr_groupid;
	u8 is_static:4;
	u8 qp_initialized:4;

	/* DPDK Interface */
	void *dpaa_intf;

	struct rte_event ev;
	/* affined portal in case of static queue */
	struct qman_portal *qp;
	struct dpaa_bp_info *bp_array;

	volatile unsigned long flags;

	enum qman_fq_state state;
	spinlock_t fqlock;

	struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	void **qman_fq_lookup_table;
	u32 key;
#endif
	u16 nb_desc;
	u16 resv;
	u64 offloads;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */

/* Flags to qman_destroy_fq() */
#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */

/* Flags from qman_fq_state() */
#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */

/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
 * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
 * any change here should be audited in PME.)
 */
#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
		(((u32)(p) << 2) & 0x00000f00)
#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
/* For the ORP-specific qman_enqueue_orp() variant;
 * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
 *   of a frame.
 */
#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
/* - this flag performs no enqueue but fills in an ORP sequence number that
 *   would otherwise block it (eg. if a frame has been dropped).
 */
#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
/* - this flag performs no enqueue but advances NESN to the given sequence
 *   number.
 */
#define QMAN_ENQUEUE_FLAG_NESN 0x04000000

/* Flags to qman_modify_cgr() */
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
#define QMAN_CGR_MODE_FRAME 0x00000001

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
__rte_internal
void qman_set_fq_lookup_table(void **table);
#endif

/**
 * qman_get_portal_index - get portal configuration index
 */
int qman_get_portal_index(void);

__rte_internal
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs);

/**
 * qman_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions). Returns zero for success, or
 * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
__rte_internal
int qman_irqsource_add(u32 bits);

/**
 * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
 * takes portal (fq specific) as input rather than using the thread affined
 * portal.
 */
__rte_internal
int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_irqsource_remove - remove processing sources from being interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions. Returns zero for success,
 * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
__rte_internal
int qman_irqsource_remove(u32 bits);

/**
 * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove, but it
 * takes portal (fq specific) as input rather than using the thread affined
 * portal.
 */
__rte_internal
int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
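
/*
 * Example (illustrative only): drive the slow-path sources by interrupt while
 * leaving DQRR processing to polling. Whether this split suits a given
 * deployment is not implied; error handling is omitted.
 *
 *	qman_irqsource_add(QM_PIRQ_SLOW);	// CSCI/EQCI/EQRI/MRI via IRQ
 *	// ...the fast path keeps calling qman_poll_dqrr()/qman_poll()...
 *
 *	// A DQRR callback that is about to return qman_cb_dqrr_stop from
 *	// interrupt context should drop DQRI first so the IRQ doesn't
 *	// reassert:
 *	qman_irqsource_remove(QM_PIRQ_DQRI);
 */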

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the cpu mask.
 */
u16 qman_affine_channel(int cpu);

__rte_internal
unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs, struct qman_portal *q);

/**
 * qman_set_vdq - Issue a volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @num: Number of Frames requested for volatile dequeue
 * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
 *
 * This function will issue a volatile dequeue command to the QMAN.
 */
__rte_internal
int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);

/**
 * qman_dequeue - Get the DQRR entry after volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 *
 * This function will return the DQRR entry after a volatile dequeue command
 * is issued. It will return NULL when there is no packet available on the
 * DQRR.
 */
__rte_internal
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);

/**
 * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @dq: DQRR entry to consume. This is the one which is provided by the
 *    'qbman_dequeue' command.
 *
 * This will consume the DQRR entry and make it available for the next
 * volatile dequeue.
 */
__rte_internal
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq);
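
/*
 * Example (illustrative only): pulling up to 4 frames with the volatile
 * dequeue helpers above, e.g. from a parked FQ. The frame-processing helper
 * is hypothetical and error handling is abbreviated.
 *
 *	struct qm_dqrr_entry *dq;
 *
 *	if (qman_set_vdq(fq, 4, QM_VDQCR_EXACT) == 0) {
 *		while ((dq = qman_dequeue(fq)) != NULL) {
 *			process_frame(fq, &dq->fd);	// hypothetical
 *			qman_dqrr_consume(fq, dq);
 *		}
 *		// NULL may also mean "not available yet"; real code may keep
 *		// polling until the VDQCR command expires.
 *	}
 */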

/**
 * qman_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * Ie. the value returned by qman_irqsource_get() should not include
 * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
 * this function will return -EINVAL, otherwise the return value is >=0 and
 * represents the number of DQRR entries processed.
 */
__rte_internal
int qman_poll_dqrr(unsigned int limit);

/**
 * qman_poll
 *
 * Dispatcher logic on a cpu can use this to trigger any maintenance of the
 * affine portal. There are two classes of portal processing in question;
 * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
 * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
 * thresholds, congestion state changes, etc). This function does whatever
 * processing is not triggered by interrupts.
 *
 * Note, if DQRR and some slow-path processing are poll-driven (rather than
 * interrupt-driven) then this function uses a heuristic to determine how often
 * to run slow-path processing - as slow-path processing introduces at least a
 * minimum latency each time it is run, whereas fast-path (DQRR) processing is
 * close to zero-cost if there is no work to be done.
 */
void qman_poll(void);

/**
 * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
 *
 * Disables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_stop_dequeues(void);

/**
 * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
 *
 * Enables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_start_dequeues(void);

/**
 * qman_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
__rte_internal
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);

/**
 * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Removes a set of pool channels from the portal's static dequeue command
 * register (SDQCR). The requested pools are limited to those the portal has
 * dequeue access to.
 */
void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);

/**
 * qman_static_dequeue_get - return the portal's current SDQCR
 *
 * Returns the portal's current static dequeue command register (SDQCR). The
 * entire register is returned, so if only the currently-enabled pool channels
 * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
 */
u32 qman_static_dequeue_get(struct qman_portal *qp);

/**
 * qman_dca - Perform a Discrete Consumption Acknowledgment
 * @dq: the DQRR entry to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca(const struct qm_dqrr_entry *dq, int park_request);

/**
 * qman_dca_index - Perform a Discrete Consumption Acknowledgment
 * @index: the DQRR index to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
__rte_internal
void qman_dca_index(u8 index, int park_request);
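
/*
 * Example (illustrative only): subscribing a portal to its default pool
 * channel via the SDQCR helpers above. 'portal' is assumed to be the caller's
 * affine portal object.
 *
 *	u16 chan = dpaa_get_qm_channel_pool();	// first pool channel
 *	u32 pools = QM_SDQCR_CHANNELS_POOL_CONV(chan);
 *
 *	qman_static_dequeue_add(pools, portal);
 *	// ...later, the same mask can be removed again:
 *	qman_static_dequeue_del(pools, portal);
 */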

/**
 * qman_eqcr_is_empty - Determine if portal's EQCR is empty
 *
 * For use in situations where a cpu-affine caller needs to determine when all
 * enqueues for the local portal have been processed by Qman but can't use the
 * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
 * The function forces tracking of EQCR consumption (which normally doesn't
 * happen until enqueue processing needs to find space to put new enqueue
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
 */
int qman_eqcr_is_empty(void);

/**
 * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
 * @handler: callback for processing DCP ERNs
 * @affine: whether this handler is specific to the locally affine portal
 *
 * If a hardware block's interface to Qman (ie. its direct-connect portal, or
 * DCP) is configured not to receive enqueue rejections, then any enqueues
 * through that DCP that are rejected will be sent to a given software portal.
 * If @affine is non-zero, then this handler will only be used for DCP ERNs
 * received on the portal affine to the current CPU. If multiple CPUs share a
 * portal and they all call this function, they will be setting the handler for
 * the same portal! If @affine is zero, then this handler will be global to all
 * portals handled by this instance of the driver. Only those portals that do
 * not have their own affine handler will use the global handler.
 */
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);

/* FQ management */
/* ------------- */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any contextB modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the contextB field is controlled by the driver and can't be
 * modified by the caller. If the AS_IS flag is specified, management commands
 * will be used to query state for frame queue @fqid and construct a frame
 * queue object based on that, rather than assuming/requiring that it be Out of
 * Service.
 */
__rte_internal
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 * @flags: bit-mask of QMAN_FQ_FREE_*** options
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
 * is specified, in which case it may also be in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq, u32 flags);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
__rte_internal
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_fq_state - Queries the state of a FQ object
 * @fq: the frame queue object to query
 * @state: pointer to state enum to return the FQ scheduling state
 * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
 *
 * Queries the state of the FQ object, without performing any h/w commands.
 * This captures the state, as seen by the driver, at the time the function
 * executes.
 */
__rte_internal
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);

/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 * initialised to a value used by the driver for demux.
 * - if context_b is initialised for demux, so is context_a in case stashing
 * is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
__rte_internal
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);

/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);
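
/*
 * Example (illustrative only): bringing up a s/w-consumed FQ with a
 * dynamically allocated FQID. 'my_dqrr_cb' is a user-supplied qman_cb_dqrr
 * handler; 'chan' and 'wq' are deployment-specific; error handling is
 * abbreviated and real code should allocate 'fq' cacheline-aligned.
 *
 *	struct qman_fq *fq = calloc(1, sizeof(*fq));
 *	struct qm_mcc_initfq opts;
 *
 *	fq->cb.dqrr = my_dqrr_cb;
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
 *		return -1;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
 *	opts.fqd.dest.channel = chan;
 *	opts.fqd.dest.wq = wq;
 *	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
 *	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */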
/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, taking it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
__rte_internal
int qman_retire_fq(struct qman_fq *fq, u32 *flags);

/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
 */
__rte_internal
int qman_oos_fq(struct qman_fq *fq);
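
/*
 * Example (editorial sketch): tearing down a software-consumed FQ. The retire
 * step may complete asynchronously (return value +1), in which case the
 * portal's message ring must keep being serviced (via the normal poll or
 * interrupt path) until the FQRN for this FQ has been handled, before the FQ
 * can be taken out of service. "my_fq" is the hypothetical object from the
 * earlier sketches.
 *
 *	u32 state_flags;
 *	int ret = qman_retire_fq(&my_fq, &state_flags);
 *
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0) {
 *		... asynchronous retirement: service the portal until the
 *		... FQRN has been processed, then continue.
 *	}
 *	ret = qman_oos_fq(&my_fq);
 *	if (ret)
 *		return ret;
 *	qman_destroy_fq(&my_fq, 0);
 */
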
/**
 * qman_fq_flow_control - Set the XON/XOFF state of a FQ
 * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
 * 'oos', 'retired' or 'parked' state
 * @xon: boolean to set fq in XON or XOFF state
 *
 * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
 * state, otherwise the IFSI interrupt will be asserted.
 */
int qman_fq_flow_control(struct qman_fq *fq, int xon);

/**
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

/**
 * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
 * if there are packets in the frame queue, or '0' if the frame queue is
 * empty.
 * @fq: the frame queue object to be queried
 */
int qman_query_fq_has_pkts(struct qman_fq *fq);

/**
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
 */
__rte_internal
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);

/**
 * qman_query_fq_frm_cnt - Queries the FQ frame count
 * @fq: the frame queue object to be queried
 * @frm_cnt: number of frames in the queue
 */
__rte_internal
int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);

/**
 * qman_query_wq - Queries work queue lengths
 * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
 * to this software portal. Otherwise, query length of WQs in the channel
 * specified in @wq.
 * @wq: storage for the queried WQ lengths. Also specifies the channel to
 * query if @query_dedicated is zero.
 */
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);

/**
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit-mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 *
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
 * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
 * "flags" retrieved from qman_fq_state().
 */
__rte_internal
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
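
/*
 * Example (editorial sketch): draining up to 16 frames from an unscheduled
 * (typically parked or retired) FQ, blocking until the resulting DQRR entries
 * have all been consumed. QM_VDQCR_NUMFRAMES_SET() encodes the 6-bit frame
 * count of the volatile dequeue command; the dequeued frames are delivered
 * through the FQ's dequeue callback as usual. "my_fq" is hypothetical.
 *
 *	int ret = qman_volatile_dequeue(&my_fq,
 *					QMAN_VOLATILE_FLAG_WAIT |
 *					QMAN_VOLATILE_FLAG_FINISH,
 *					QM_VDQCR_NUMFRAMES_SET(16));
 *	if (ret)
 *		return ret;
 */
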
/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 *
 * Fills an entry in the EQCR of the local portal to enqueue the frame
 * described by @fd. The descriptor details are copied from @fd to the EQCR
 * entry, the 'pid' field is ignored. The return value is non-zero on error,
 * such as ring full (and FLAG_WAIT not specified), congestion avoidance
 * (FLAG_WATCH_CGR specified), etc. If the ring is full and FLAG_WAIT is
 * specified, this function will block. If FLAG_INTERRUPT is set, the EQCI bit
 * of the portal interrupt will assert when Qman consumes the EQCR entry
 * (subject to "status disable", "enable", and "inhibit" registers). If
 * FLAG_DCA is set, Qman will perform an implied "discrete consumption
 * acknowledgment" on the dequeue ring's (DQRR) entry, at the ring index
 * specified by the FLAG_DCA_IDX(x) macro. (As an alternative to issuing
 * explicit DCA actions on DQRR entries, this implicit DCA can delay the
 * release of a "held active" frame queue corresponding to a DQRR entry until
 * Qman consumes the EQCR entry - providing order-preservation semantics in
 * packet-forwarding scenarios.) If FLAG_DCA is set, then FLAG_DCA_PARK can
 * also be set to imply that the DQRR consumption acknowledgment should "park
 * request" the "held active" frame queue. Ie. when the portal eventually
 * releases that frame queue, it will be left in the Parked state rather than
 * Tentatively Scheduled or Truly Scheduled. If the portal is watching
 * congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag is requested, and
 * the FQ is a member of a congestion group, then this function returns
 * -EAGAIN if the congestion group is currently congested. Note, this does not
 * eliminate ERNs, as the async interface means we can be sending enqueue
 * commands to an un-congested FQ that becomes congested before the enqueue
 * commands are processed, but it does minimise needless thrashing of an
 * already busy hardware resource by throttling many of the to-be-dropped
 * enqueues "at the source".
 */
__rte_internal
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);

__rte_internal
int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
		       int frames_to_send);
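
/*
 * Example (editorial sketch): enqueuing a previously prepared frame
 * descriptor "fd" with congestion watching enabled, and a bounded retry when
 * the ring is full or the congestion group is congested. The retry policy is
 * purely illustrative.
 *
 *	int retries = 100;
 *	int ret;
 *
 *	do {
 *		ret = qman_enqueue(&tx_fq, &fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
 *	} while (ret && --retries);
 *	if (ret)
 *		... give up: ring persistently full or CGR congested ...
 */
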
/**
 * qman_ern_poll_free - Polls the MR and calls the registered callback to free
 * mbufs when software ERNs are received.
 */
__rte_internal
void qman_ern_poll_free(void);

/**
 * qman_ern_register_cb - Register a callback function to free buffers.
 */
__rte_internal
void qman_ern_register_cb(qman_cb_free_mbuf cb);

/**
 * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
 * queues.
 * @fq[]: Array of frame queue objects to enqueue to
 * @fd: pointer to the first of the frame descriptors to be enqueued
 * @frames_to_send: number of frames to be sent.
 *
 * This API is similar to qman_enqueue_multi(), but each frame descriptor is
 * enqueued to its respective frame queue in @fq[].
 */
__rte_internal
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
		      u32 *flags, int frames_to_send);

typedef int (*qman_cb_precommit) (void *arg);

/**
 * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 * @orp: the frame queue object used as an order restoration point.
 * @orp_seqnum: the sequence number of this frame in the order restoration path
 *
 * Similar to qman_enqueue(), but with the addition of an Order Restoration
 * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
 * enqueue operation to employ order restoration. Each frame queue object acts
 * as an Order Definition Point (ODP) by providing each frame dequeued from it
 * with an incrementing sequence number; this value is generally ignored unless
 * that sequence of dequeued frames will need order restoration later. Each
 * frame queue object also encapsulates an Order Restoration Point (ORP), which
 * is a re-assembly context for re-ordering frames relative to their sequence
 * numbers as they are enqueued. The ORP does not have to be within the frame
 * queue that receives the enqueued frame, in fact it is usually the frame
 * queue from which the frames were originally dequeued. For the purposes of
 * order restoration, multiple frames (or "fragments") can be enqueued for a
 * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
 * enqueues except the final fragment of a given sequence number. Ordering
 * between sequence numbers is guaranteed, even if fragments of different
 * sequence numbers are interlaced with one another. Fragments of the same
 * sequence number will retain the order in which they are enqueued. If no
 * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
 * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
 * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
 * sequence number should become the ORP's "Next Expected Sequence Number".
 *
 * Side note: a frame queue object can be used purely as an ORP, without
 * carrying any frames at all. Care should be taken not to deallocate a frame
 * queue object that is being actively used as an ORP, as a future allocation
 * of the frame queue object may start using the internal ORP before the
 * previous use has finished.
 */
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum);
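
/*
 * Example (editorial sketch): order-restored enqueue of a frame that was
 * split into two fragments. Both fragments carry the sequence number that the
 * ODP assigned to the original dequeue; every fragment except the last sets
 * QMAN_ENQUEUE_FLAG_NLIS. "ingress_fq" (used here as the ORP), "frag0",
 * "frag1" and "seqnum" are hypothetical.
 *
 *	qman_enqueue_orp(&tx_fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS,
 *			 &ingress_fq, seqnum);
 *	qman_enqueue_orp(&tx_fq, &frag1, 0, &ingress_fq, seqnum);
 *
 * A frame dropped from the sequence would instead be signalled with
 * QMAN_ENQUEUE_FLAG_HOLE so the ORP does not stall on that sequence number.
 */
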
/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count FQIDs
 *
 * Returns the number of frame queues allocated, or a negative error code. If
 * @partial is non-zero, the allocation request may return a smaller range of
 * FQs than requested (though alignment will be as requested). If @partial is
 * zero, the return value will either be 'count' or negative.
 */
__rte_internal
int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_fqid(u32 *result)
{
	int ret = qman_alloc_fqid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
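
/*
 * Example (editorial sketch): allocating eight consecutive FQIDs aligned to a
 * multiple of eight, and releasing them again. With @partial set to zero the
 * call either returns the full count or fails.
 *
 *	u32 base_fqid;
 *	int n = qman_alloc_fqid_range(&base_fqid, 8, 8, 0);
 *
 *	if (n < 0)
 *		return n;
 *	... FQIDs base_fqid .. base_fqid + 7 now belong to the caller ...
 *	qman_release_fqid_range(base_fqid, 8);
 */
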
/**
 * qman_release_fqid_range - Release the specified range of frame queue IDs
 * @fqid: the base FQID of the range to deallocate
 * @count: the number of FQIDs in the range
 *
 * This function can also be used to seed the allocator with ranges of FQIDs
 * that it can subsequently allocate from.
 */
void qman_release_fqid_range(u32 fqid, unsigned int count);
static inline void qman_release_fqid(u32 fqid)
{
	qman_release_fqid_range(fqid, 1);
}

void qman_seed_fqid_range(u32 fqid, unsigned int count);

int qman_shutdown_fq(u32 fqid);

/**
 * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
 * @fqid: the base FQID of the range to reserve
 * @count: the number of FQIDs in the range
 */
__rte_internal
int qman_reserve_fqid_range(u32 fqid, unsigned int count);
static inline int qman_reserve_fqid(u32 fqid)
{
	return qman_reserve_fqid_range(fqid, 1);
}

/* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated range
 * @count: the number of pool-channel IDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count
 *
 * Returns the number of pool-channel IDs allocated, or a negative error code.
 * If @partial is non-zero, the allocation request may return a smaller range
 * than requested (though alignment will be as requested). If @partial is zero,
 * the return value will either be 'count' or negative.
 */
__rte_internal
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_pool(u32 *result)
{
	int ret = qman_alloc_pool_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}

/**
 * qman_release_pool_range - Release the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to deallocate
 * @count: the number of pool-channel IDs in the range
 */
void qman_release_pool_range(u32 id, unsigned int count);
static inline void qman_release_pool(u32 id)
{
	qman_release_pool_range(id, 1);
}

/**
 * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to reserve
 * @count: the number of pool-channel IDs in the range
 */
int qman_reserve_pool_range(u32 id, unsigned int count);
static inline int qman_reserve_pool(u32 id)
{
	return qman_reserve_pool_range(id, 1);
}

void qman_seed_pool_range(u32 id, unsigned int count);

/* CGR management */
/* -------------- */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the CPU on which this API is executed. If @opts is NULL
 * then only the callback (cgr->cb) function is registered. If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
__rte_internal
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
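
/*
 * Example (editorial sketch): registering a congestion group purely for
 * entry/exit notifications, with no hardware re-initialisation (@opts NULL).
 * The 'cgrid' and 'cb' member names and the callback signature are assumed
 * from "struct qman_cgr" and are shown for illustration only; "my_cgrid" is a
 * hypothetical CGR ID, eg. obtained via qman_alloc_cgrid().
 *
 *	static void my_cgr_cb(struct qman_portal *p, struct qman_cgr *cgr,
 *			      int congested)
 *	{
 *		... throttle or resume enqueues based on 'congested' ...
 *	}
 *
 *	struct qman_cgr cgr = {
 *		.cgrid = my_cgrid,
 *		.cb = my_cgr_cb,
 *	};
 *	int err = qman_create_cgr(&cgr, 0, NULL);
 *	...
 *	qman_delete_cgr(&cgr);
 */
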
/**
 * qman_create_cgr_to_dcp - Register a congestion group object to a DCP portal
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @dcp_portal: the DCP portal to which the cgr object is registered
 * @opts: optional state of CGR settings
 */
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts);

/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the CPU on which this
 * API is executed. This must be executed on the same affine portal on which
 * the CGR was created.
 */
__rte_internal
int qman_delete_cgr(struct qman_cgr *cgr);

/**
 * qman_modify_cgr - Modify CGR fields
 * @cgr: the 'cgr' object to modify
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: the CGR-modification settings
 *
 * The @opts parameter comes from the low-level portal API, and can be NULL.
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver, in particular the 'cgrid' field is ignored (this operation
 * only affects the given CGR object). If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
__rte_internal
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);

/**
 * qman_query_cgr - Queries CGR fields
 * @cgr: the 'cgr' object to query
 * @result: storage for the queried congestion group record
 */
int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);

/**
 * qman_query_congestion - Queries the state of all congestion groups
 * @congestion: storage for the queried state of all congestion groups
 */
int qman_query_congestion(struct qm_mcr_querycongestion *congestion);

/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count
 *
 * Returns the number of CGR IDs allocated, or a negative error code.
 * If @partial is non-zero, the allocation request may return a smaller range
 * than requested (though alignment will be as requested). If @partial is zero,
 * the return value will either be 'count' or negative.
 */
__rte_internal
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_cgrid(u32 *result)
{
	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}

/**
 * qman_release_cgrid_range - Release the specified range of CGR IDs
 * @id: the base CGR ID of the range to deallocate
 * @count: the number of CGR IDs in the range
 */
__rte_internal
void qman_release_cgrid_range(u32 id, unsigned int count);
static inline void qman_release_cgrid(u32 id)
{
	qman_release_cgrid_range(id, 1);
}

/**
 * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
 * @id: the base CGR ID of the range to reserve
 * @count: the number of CGR IDs in the range
 */
int qman_reserve_cgrid_range(u32 id, unsigned int count);
static inline int qman_reserve_cgrid(u32 id)
{
	return qman_reserve_cgrid_range(id, 1);
}

void qman_seed_cgrid_range(u32 id, unsigned int count);

/* Helpers */
/* ------- */
/**
 * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
 * @fq: the FQ object wrapping the FQID that will be initialised by other s/w
 *
 * In many situations, a FQID is provided for communication between s/w
 * entities, and whilst the consumer is responsible for initialising and
 * scheduling the FQ, the producer(s) generally create a wrapper FQ object
 * around it and only call qman_enqueue() (no FQ initialisation, scheduling,
 * etc). Ie:
 *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
 * However, data cannot be enqueued to the FQ until it is initialised out of
 * the OOS state - this function polls for that condition. It is particularly
 * useful for users of IPC functions - each endpoint's Rx FQ is the other
 * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ
 * object and then use this API on the (NO_MODIFY) Tx FQ object in order to
 * synchronise. The function returns zero for success, +1 if the FQ is still
 * in the OOS state, or negative if there was an error.
 */
static inline int qman_poll_fq_for_init(struct qman_fq *fq)
{
	struct qm_mcr_queryfq_np np;
	int err;

	err = qman_query_fq_np(fq, &np);
	if (err)
		return err;
	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
		return 1;
	return 0;
}

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x)
#else
#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)

static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = cpu_to_be64(sgentry->opaque);
	sgentry->val = cpu_to_be32(sgentry->val);
	sgentry->val_off = cpu_to_be16(sgentry->val_off);
}

static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = be64_to_cpu(sgentry->opaque);
	sgentry->val = be32_to_cpu(sgentry->val);
	sgentry->val_off = be16_to_cpu(sgentry->val_off);
}
#endif

#ifdef __cplusplus
}
#endif

#endif /* __FSL_QMAN_H */