/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include "qman.h"
#include <rte_branch_prediction.h>
#include <rte_dpaa_bus.h>
#include <rte_eventdev.h>
#include <rte_byteorder.h>

/* Compilation constants */
#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
/* maximum number of DQRR entries to process in qman_poll() */
#define FSL_QMAN_POLL_LIMIT 8

/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
 * inter-processor locking only. Note, FQLOCK() is always called either under a
 * local_irq_save() or from interrupt context - hence there's no need for irq
 * protection (and indeed, attempting to nest irq-protection doesn't work, as
 * the "irq en/disable" machinery isn't recursive...).
 */
#define FQLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_lock(&__fq478->fqlock); \
	} while (0)
#define FQUNLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_unlock(&__fq478->fqlock); \
	} while (0)

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	dpaa_set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	dpaa_clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	u32 slowpoll;	/* only used when interrupts are off */
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	int dqrr_disable_ref;
	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
	 * handler is called instead.
	 */
	qman_cb_dc_ern cb_dc_ern;
	/* When the cpu-affine portal is activated, this is non-NULL */
	const struct qm_portal_config *config;
	struct dpa_rbtree retire_table;
	char irqname[MAX_IRQNAME];
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	/* track if memory was allocated by the driver */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Keep a shadow copy of the DQRR on LE systems as the SW needs to
	 * do byte swaps of DQRR read only memory. First entry must be aligned
	 * to 2 ** 10 to ensure DQRR index calculations based shadow copy
	 * address (6 bits for address shift + 4 bits for the DQRR size).
	 */
	struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
		__attribute__((aligned(1024)));
#endif
};
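/*
 * Editorial sketch (not part of the driver): the aligned(1024) attribute
 * above lets DQRR_PTR2IDX()-style macros recover a ring index from a shadow
 * entry pointer with only a shift and a mask, assuming 64-byte entries and a
 * 16-deep ring (16 * 64 = 1024 bytes, hence the "6 + 4 bit" comment):
 *
 *	// hypothetical illustration, name is not from this file
 *	#define SHADOW_IDX(e)	(((uintptr_t)(e) >> 6) & (QM_DQRR_SIZE - 1))
 *
 * i.e. bits [9:6] of the entry address are the index, which only works if
 * shadow_dqrr[0] starts on a 1024-byte boundary.
 */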
/* Global handler for DCP ERNs. Used when the portal receiving the message does
 * not have a portal-specific handler.
 */
static qman_cb_dc_ern cb_dc_ern;

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);

static inline struct qman_portal *get_affine_portal(void)
{
	return &RTE_PER_LCORE(qman_affine_portal);
}

/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
 * retirement notifications (the fact they are sometimes h/w-consumed means
 * that contextB isn't always a s/w demux - and as we can't know which case it
 * is when looking at the notification, we have to use the slow lookup for all
 * of them). NB, it's possible to have multiple FQ objects refer to the same
 * FQID (though at most one of them should be the consumer), so this table
 * isn't for all FQs - FQs are added when retirement commands are issued, and
 * removed when they complete, which also massively reduces the size of this
 * table.
 */
IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
{
	int ret = fqtree_push(&p->retire_table, fq);

	if (ret)
		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
	return ret;
}

static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
{
	fqtree_del(&p->retire_table, fq);
}

static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
{
	return fqtree_find(&p->retire_table, fqid);
}

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
static void **qman_fq_lookup_table;
static size_t qman_fq_lookup_table_size;

int qman_setup_fq_lookup_table(size_t num_entries)
{
	num_entries++;
	/* Allocate 1 more entry since the first entry is not used */
	qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
	if (!qman_fq_lookup_table) {
		pr_err("QMan: Could not allocate fq lookup table\n");
		return -ENOMEM;
	}
	memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
	qman_fq_lookup_table_size = num_entries;
	pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
		 qman_fq_lookup_table,
		 (unsigned long)qman_fq_lookup_table_size);
	return 0;
}
/* global structure that maintains fq object mapping */
static DEFINE_SPINLOCK(fq_hash_table_lock);

static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
{
	u32 i;

	spin_lock(&fq_hash_table_lock);
	/* Can't use index zero because this has special meaning
	 * in context_b field.
	 */
	for (i = 1; i < qman_fq_lookup_table_size; i++) {
		if (qman_fq_lookup_table[i] == NULL) {
			*entry = i;
			qman_fq_lookup_table[i] = fq;
			spin_unlock(&fq_hash_table_lock);
			return 0;
		}
	}
	spin_unlock(&fq_hash_table_lock);
	return -ENOMEM;
}

static void clear_fq_table_entry(u32 entry)
{
	spin_lock(&fq_hash_table_lock);
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	qman_fq_lookup_table[entry] = NULL;
	spin_unlock(&fq_hash_table_lock);
}

static inline struct qman_fq *get_fq_table_entry(u32 entry)
{
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	return qman_fq_lookup_table[entry];
}
#endif

static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to HW format */
	fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
	fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
	fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
	fqd->context_b = cpu_to_be32(fqd->context_b);
	fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
	fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
}

static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to CPU format */
	fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
	fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
	fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
	fqd->context_b = be32_to_cpu(fqd->context_b);
	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
}

static inline void cpu_to_hw_fd(struct qm_fd *fd)
{
	fd->addr = cpu_to_be40(fd->addr);
	fd->status = cpu_to_be32(fd->status);
	fd->opaque = cpu_to_be32(fd->opaque);
}

static inline void hw_fd_to_cpu(struct qm_fd *fd)
{
	fd->addr = be40_to_cpu(fd->addr);
	fd->status = be32_to_cpu(fd->status);
	fd->opaque = be32_to_cpu(fd->opaque);
}
/* In the case that slow- and fast-path handling are both done by qman_poll()
 * (ie. because there is no interrupt handling), we ought to balance how often
 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
 * sources, so we call the fast poll 'n' times before calling the slow poll
 * once. The idle decrementer constant is used when the last slow-poll detected
 * no work to do, and the busy decrementer constant when the last slow-poll had
 * work to do.
 */
#define SLOW_POLL_IDLE 1000
#define SLOW_POLL_BUSY 10
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);

/* Portal interrupt handler */
static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	/*
	 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
	 * it could race against a Query Congestion State command also given
	 * as part of the handling of this interrupt source. We mustn't
	 * clear it a second time in this top-level function.
	 */
	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_isr_status_clear(&p->p, clear);
	return IRQ_HANDLED;
}

/* This inner version is used privately by qman_create_affine_portal(), as well
 * as by the exported qman_stop_dequeues().
 */
static inline void qman_stop_dequeues_ex(struct qman_portal *p)
{
	if (!(p->dqrr_disable_ref++))
		qm_dqrr_set_maxfill(&p->p, 0);
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const struct qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = mfatb();

		do {
			now = mfatb();
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	/* This use of 'register', as well as all other occurrences, is because
	 * it has been observed to generate much faster code with gcc than is
	 * otherwise the case.
	 */
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(EQCR_CI);
	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
			QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(EQCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(CFG) & 0x00ffffff) |
		(eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
		(eq_stash_prio << 26) |		/* QCSP_CFG: EP */
		((pmode & 0x3) << 24);		/* QCSP_CFG::EPM */
	qm_out(CFG, cfg);
	return 0;
}
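/*
 * Editorial sketch (not part of the driver): the "available" computation
 * above is plain ring arithmetic. With a ring of size S (a power of two),
 * producer index pi and consumer index ci, qm_cyc_diff(S, ci, pi) is the
 * number of entries produced but not yet confirmed consumed, so S - 1 - diff
 * slots can still be filled (one slot is kept free so that "full" and
 * "empty" can be told apart). For example, with S = 8, ci = 6 and pi = 1 the
 * cyclic difference is (1 - 6) mod 8 = 3, leaving 8 - 1 - 3 = 4 EQCR entries
 * available.
 */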
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi, ci;
	u32 cfg;

	/*
	 * Disable EQCI stashing because the QMan only
	 * presents the value it previously stashed to
	 * maintain coherency. Setting the stash threshold
	 * to 1 then 0 ensures that QMan has resynchronized
	 * its internal copy so that the portal is clean
	 * when it is reinitialized in the future
	 */
	cfg = (qm_in(CFG) & 0x0fffffff) |
		(1 << 28); /* QCSP_CFG: EST */
	qm_out(CFG, cfg);
	cfg &= 0x0fffffff; /* stash threshold = 0 */
	qm_out(CFG, cfg);

	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	/* Refresh EQCR CI cache value */
	qm_cl_invalidate(EQCR_CI);
	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!eqcr->busy);
#endif
	if (pi != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			__maybe_unused const struct qm_portal_config *config,
			enum qm_dqrr_dmode dmode,
			__maybe_unused enum qm_dqrr_pmode pmode,
			enum qm_dqrr_cmode cmode, u8 max_fill)
{
	register struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(DQRR_SDQCR, 0);
	qm_out(DQRR_VDQCR, 0);
	qm_out(DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(DQRR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dccivac(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) |			/* DP */
		((cmode & 3) << 16) |			/* DCM */
		0xa0 |					/* RE+SE */
		(0 ? 0x40 : 0) |			/* Ignore RP */
		(0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if ((dqrr->cmode != qm_dqrr_cdc) &&
	    (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline int qm_mr_init(struct qm_portal *portal,
			     __maybe_unused enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	register struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ?
			QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(MR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(CFG) & 0xfffff0ff) |
		((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(CFG, cfg);
	return 0;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	register struct qm_mr *mr = &portal->mr;
	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
#endif
	/* when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = MR_INC(res);
	}
	dcbit_ro(res);
}
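/*
 * Editorial sketch (not part of the driver): qm_mr_pvb_update() above is an
 * instance of "producer valid-bit" polling. Hardware toggles a valid bit in
 * the verb field every time it wraps the ring, so software can detect a
 * freshly produced entry without reading a producer-index register:
 *
 *	// hypothetical illustration of the same idea, names not from this file
 *	if ((readb(&entry->verb) & VBIT) == expected_vbit) {
 *		idx = (idx + 1) & (RING_SIZE - 1);
 *		if (!idx)
 *			expected_vbit ^= VBIT;	// flips on every ring wrap
 *		fill++;				// one more entry to consume
 *	}
 */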
static inline
struct qman_portal *qman_create_portal(
			struct qman_portal *portal,
			const struct qm_portal_config *c,
			const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	char buf[16];
	int ret;
	u32 isdr;

	p = &portal->p;

	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
		portal->use_eqcr_ci_stashing = 3;
	else
		portal->use_eqcr_ci_stashing =
					((qman_ip_rev >= QMAN_REV30) ? 1 : 0);

	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing, 1)) {
		pr_err("Qman EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		pr_err("Qman DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		pr_err("Qman MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		pr_err("Qman MC initialisation failed\n");
		goto fail_mc;
	}

	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, 0);
	qm_mr_set_ithresh(p, 0);
	qm_isr_set_iperiod(p, 0);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	portal->bits = 0;
	portal->slowpoll = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	portal->dqrr_disable_ref = 0;
	portal->cb_dc_ern = NULL;
	sprintf(buf, "qportal-%d", c->channel);
	dpa_rbtree_init(&portal->retire_table);
	isdr = 0xffffffff;
	qm_isr_disable_write(p, isdr);
	portal->irq_sources = 0;
	qm_isr_enable_write(p, portal->irq_sources);
	qm_isr_status_clear(p, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname,
			portal)) {
		pr_err("request_irq() failed\n");
		goto fail_irq;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_isr_disable_write(p, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		pr_err("Qman EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_isr_disable_write(p, isdr);
	if (qm_dqrr_current(p)) {
		pr_err("Qman DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		if (drain_mr_fqrni(p))
			goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_isr_disable_write(p, 0);
	qm_isr_uninhibit(p);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return portal;
fail_dqrr_mr_empty:
fail_eqcr_empty:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
	spin_lock_destroy(&portal->cgr_lock);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return NULL;
}
#define MAX_GLOBAL_PORTALS 8
static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
static int global_portals_used[MAX_GLOBAL_PORTALS];

static struct qman_portal *
qman_alloc_global_portal(void)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (global_portals_used[i] == 0) {
			global_portals_used[i] = 1;
			return &global_portals[i];
		}
	}
	pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);

	return NULL;
}

static int
qman_free_global_portal(struct qman_portal *portal)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (&global_portals[i] == portal) {
			global_portals_used[i] = 0;
			return 0;
		}
	}
	return -1;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs,
					      int alloc)
{
	struct qman_portal *res;
	struct qman_portal *portal;

	if (alloc)
		portal = qman_alloc_global_portal();
	else
		portal = get_affine_portal();

	/* A criterion for calling this function (from qman_driver.c) is that
	 * we're already affine to the cpu and won't schedule onto another cpu.
	 */

	res = qman_create_portal(portal, c, cgrs);
	if (res) {
		spin_lock(&affine_mask_lock);
		CPU_SET(c->cpu, &affine_mask);
		affine_channels[c->cpu] = c->channel;
		spin_unlock(&affine_mask_lock);
	}
	return res;
}

static inline
void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;

	spin_lock_destroy(&qm->cgr_lock);
}
const struct qm_portal_config *
qman_destroy_affine_portal(struct qman_portal *qp)
{
	/* We don't want to redirect if we're a slave, use "raw" */
	struct qman_portal *qm;
	const struct qm_portal_config *pcfg;
	int cpu;

	if (qp == NULL)
		qm = get_affine_portal();
	else
		qm = qp;
	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	CPU_CLR(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	qman_free_global_portal(qm);

	return pcfg;
}

int qman_get_portal_index(void)
{
	struct qman_portal *p = get_affine_portal();
	return p->config->index;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const struct qm_mr_entry *msg, u8 verb)
{
	FQLOCK(fq);
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		table_del_fq(p, fq);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
			    (fq->state == qman_fq_state_sched));
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
	FQUNLOCK(fq);
}
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	const struct qm_mr_entry *msg;
	struct qm_mr_entry swapped_msg;

	if (is & QM_PIRQ_CSCI) {
		struct qman_cgrs rr, c;
		struct qm_mc_result *mcr;
		struct qman_cgr *cgr;

		spin_lock(&p->cgr_lock);
		/*
		 * The CSCI bit must be cleared _before_ issuing the
		 * Query Congestion State command, to ensure that a long
		 * CGR State Change callback cannot miss an intervening
		 * state change.
		 */
		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
		qm_mc_start(&p->p);
		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
		while (!(mcr = qm_mc_result(&p->p)))
			cpu_relax();
		/* mask out the ones I'm not interested in */
		qman_cgrs_and(&rr, (const struct qman_cgrs *)
			      &mcr->querycongestion.state, &p->cgrs[0]);
		/* check previous snapshot for delta, enter/exit congestion */
		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
		/* update snapshot */
		qman_cgrs_cp(&p->cgrs[1], &rr);
		/* Invoke callback */
		list_for_each_entry(cgr, &p->cgr_cbs, node)
			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
		spin_unlock(&p->cgr_lock);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		struct qman_fq *fq;
		u8 verb, num = 0;
mr_loop:
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			goto mr_done;
		swapped_msg = *msg;
		hw_fd_to_cpu(&swapped_msg.ern.fd);
		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = table_find_fq(p,
						   be32_to_cpu(msg->fq.fqid));
				DPAA_BUG_ON(!fq);
				fq_state_change(p, fq, &swapped_msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
				fq = get_fq_table_entry(
					be32_to_cpu(msg->fq.contextB));
#else
				fq = (void *)(uintptr_t)
					be32_to_cpu(msg->fq.contextB);
#endif
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				if (p->cb_dc_ern)
					p->cb_dc_ern(p, msg);
				else if (cb_dc_ern)
					cb_dc_ern(p, msg);
				else {
					static int warn_once;

					if (!warn_once) {
						pr_crit("Leaking DCP ERNs!\n");
						warn_once = 1;
					}
				}
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
#else
			fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
#endif
			fq->cb.ern(p, fq, &swapped_msg);
		}
		num++;
		qm_mr_next(&p->p);
		goto mr_loop;
mr_done:
		qm_mr_cci_consume(&p->p, num);
	}
	/*
	 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
	 * processing. If that interrupt source has meanwhile been re-asserted,
	 * we mustn't clear it here (or in the top-level interrupt handler).
	 */
	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
}

/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	FQLOCK(fq);
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	FQUNLOCK(fq);
	wake_up(&affine_queue);
}
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe because:
 *
 *   (i) this clearing can only occur after qman_set_vdq() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against
 * the above potential conflicts, but that this function itself is not
 * re-entrant (this is because the function tracks one end of each FIFO in the
 * portal and we do *not* want to lock that). So the consequence is that it is
 * safe for user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif
	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (unlikely(!dq))
			break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		/* If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied and the index stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(dq->contextB);
#else
			fq = (void *)(uintptr_t)dq->contextB;
#endif
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
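/*
 * Editorial sketch (not part of the driver): a minimal DQRR callback, as
 * invoked from __poll_portal_fast() above, typically extracts the frame
 * descriptor and asks the driver to consume the ring entry. Hypothetical
 * example; process_fd() is caller-defined:
 *
 *	static enum qman_cb_dqrr_result
 *	my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *		   const struct qm_dqrr_entry *dqrr)
 *	{
 *		process_fd(&dqrr->fd);		// caller-defined handling
 *		return qman_cb_dqrr_consume;	// consume and keep polling
 *	}
 *
 * Returning qman_cb_dqrr_stop/qman_cb_dqrr_consume_stop ends the poll loop
 * early, and qman_cb_dqrr_defer leaves the entry for the caller to consume
 * later (e.g. via qman_dca()).
 */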
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
	}
	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
	return affine_channels[cpu];
}

unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs,
				 struct qman_portal *p)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif
	unsigned int rx_number = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (unlikely(!dq))
			break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		/* If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied and the index stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif

		/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		fq = get_fq_table_entry(dq->contextB);
#else
		fq = (void *)(uintptr_t)dq->contextB;
#endif
		/* Now let the callback do its stuff */
		res = fq->cb.dqrr_dpdk_cb(NULL, p, fq, dq, &bufs[rx_number]);
		rx_number++;
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		qm_dqrr_cdc_consume_1ptr(&p->p, dq, res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (likely(++limit < poll_limit));

	return limit;
}
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
	struct qman_portal *p = get_affine_portal();
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	struct qm_dqrr_entry *shadow;
#endif
	unsigned int rx_number = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		/*
		 * If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied and the index stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif

		/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		fq = get_fq_table_entry(dq->contextB);
#else
		fq = (void *)(uintptr_t)dq->contextB;
#endif
		/* Now let the callback do its stuff */
		res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
					  dq, &bufs[rx_number]);
		rx_number++;
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit);

	return limit;
}
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
{
	struct qman_portal *p = get_affine_portal();
	const struct qm_dqrr_entry *dq;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif

	qm_dqrr_pvb_update(&p->p);
	dq = qm_dqrr_current(&p->p);
	if (!dq)
		return NULL;

	if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
		/* Invalid DQRR - put the portal and consume the DQRR.
		 * Return NULL to user as no packet is seen.
		 */
		qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
		return NULL;
	}

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
	*shadow = *dq;
	dq = shadow;
	shadow->fqid = be32_to_cpu(shadow->fqid);
	shadow->contextB = be32_to_cpu(shadow->contextB);
	shadow->seqnum = be16_to_cpu(shadow->seqnum);
	hw_fd_to_cpu(&shadow->fd);
#endif

	if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
		fq_clear(fq, QMAN_FQ_STATE_NE);

	return (struct qm_dqrr_entry *)dq;
}

void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq)
{
	struct qman_portal *p = get_affine_portal();

	if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
		clear_vdqcr(p, fq);

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
	qm_dqrr_next(&p->p);
}

int qman_poll_dqrr(unsigned int limit)
{
	struct qman_portal *p = get_affine_portal();
	int ret;

	ret = __poll_portal_fast(p, limit);
	return ret;
}

void qman_poll(void)
{
	struct qman_portal *p = get_affine_portal();

	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
		if (!(p->slowpoll--)) {
			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
			u32 active = __poll_portal_slow(p, is);

			if (active) {
				qm_isr_status_clear(&p->p, active);
				p->slowpoll = SLOW_POLL_BUSY;
			} else
				p->slowpoll = SLOW_POLL_IDLE;
		}
	}
	if ((~p->irq_sources) & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
}

void qman_stop_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	qman_stop_dequeues_ex(p);
}

void qman_start_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	DPAA_ASSERT(p->dqrr_disable_ref > 0);
	if (!(--p->dqrr_disable_ref))
		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
}

void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr &= ~pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

u32 qman_static_dequeue_get(struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();
	return p->sdqcr;
}
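/*
 * Editorial sketch (not part of the driver): a typical run-to-completion
 * loop on an lcore that owns an affine portal combines the slow and fast
 * poll entry points above. Hypothetical example:
 *
 *	for (;;) {
 *		qman_poll();		// slow-path sources, self rate-limited
 *		qman_poll_dqrr(16);	// up to 16 DQRR entries per pass
 *	}
 *
 * or, using the pull-mode pair qman_dequeue()/qman_dqrr_consume():
 *
 *	struct qm_dqrr_entry *dq = qman_dequeue(fq);
 *	if (dq) {
 *		process_fd(&dq->fd);	// caller-defined handling
 *		qman_dqrr_consume(fq, dq);
 *	}
 */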
void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
{
	struct qman_portal *p = get_affine_portal();

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
}

void qman_dca_index(u8 index, int park_request)
{
	struct qman_portal *p = get_affine_portal();

	qm_dqrr_cdc_consume_1(&p->p, index, park_request);
}

/* Frame queue API */
static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	struct qm_fqd fqd;
	struct qm_mcr_queryfq_np np;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	spin_lock_init(&fq->fqlock);
	fq->fqid = fqid;
	fq->fqid_le = cpu_to_be32(fqid);
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
		pr_info("Find empty table entry failed\n");
		return -ENOMEM;
	}
#endif
	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
		return 0;
	/* Everything else is AS_IS support */
	p = get_affine_portal();
	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(&fqd);
	mcc = qm_mc_start(&p->p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	np = mcr->queryfq_np;
	/* Phew, have queryfq and queryfq_np results, stitch together
	 * the FQ object from those.
	 */
	fq->cgr_groupid = fqd.cgid;
	switch (np.state & QM_MCR_NP_STATE_MASK) {
	case QM_MCR_NP_STATE_OOS:
		break;
	case QM_MCR_NP_STATE_RETIRED:
		fq->state = qman_fq_state_retired;
		if (np.frm_cnt)
			fq_set(fq, QMAN_FQ_STATE_NE);
		break;
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
		fq->state = qman_fq_state_sched;
		if (np.state & QM_MCR_NP_STATE_R)
			fq_set(fq, QMAN_FQ_STATE_CHANGING);
		break;
	case QM_MCR_NP_STATE_PARKED:
		fq->state = qman_fq_state_parked;
		break;
	default:
		DPAA_ASSERT(NULL == "invalid FQ state");
	}
	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
		fq->state |= QMAN_FQ_STATE_CGR_EN;
	return 0;
err:
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
		qman_release_fqid(fqid);
	return -EIO;
}

void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
		/* Fallthrough */
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		clear_fq_table_entry(fq->key);
#endif
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}

void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
{
	if (state)
		*state = fq->state;
	if (flags)
		*flags = fq->flags;
}
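/*
 * Editorial sketch (not part of the driver): the usual pattern is
 * create-then-init, i.e. qman_create_fq() above followed by qman_init_fq()
 * below. Hypothetical example that takes a dynamically allocated FQID,
 * points it at work queue 3 of a caller-chosen channel, and schedules it;
 * my_dqrr_cb and my_channel are assumptions, not names from this file:
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	opts.fqd.dest.channel = my_channel;	// e.g. qman_affine_channel(-1)
 *	opts.fqd.dest.wq = 3;
 *
 *	if (!qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq))
 *		qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */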
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if ((fq->state != qman_fq_state_oos) &&
	    (fq->state != qman_fq_state_parked))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     ((fq->state != qman_fq_state_oos) &&
		      (fq->state != qman_fq_state_parked)))) {
		FQUNLOCK(fq);
		return -EBUSY;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	mcc->initfq.fqid = cpu_to_be32(fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		mcc->initfq.fqd.context_b = fq->key;
#else
		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
#endif
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			phys_fq = rte_mem_virt2iova(fq);
			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		mcc->initfq.fqd.dest.channel = p->config->channel;
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			mcc->initfq.fqd.dest.wq = 4;
		}
	}
	mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
	cpu_to_hw_fqd(&mcc->initfq.fqd);
	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		FQUNLOCK(fq);
		return -EIO;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;
	FQUNLOCK(fq);
	return 0;
}

int qman_schedule_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state != qman_fq_state_parked))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	FQUNLOCK(fq);

	return ret;
}
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int rval;
	u8 res;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_sched))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_retired) ||
		     (fq->state == qman_fq_state_oos))) {
		rval = -EBUSY;
		goto out;
	}
	rval = table_push_fq(p, fq);
	if (rval)
		goto out;
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (likely(res == QM_MCR_RESULT_OK)) {
		rval = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			struct qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			msg.fq.contextB = fq->key;
#else
			msg.fq.contextB = (u32)(uintptr_t)fq;
#endif
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		rval = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		rval = -EIO;
		table_del_fq(p, fq);
	}
out:
	FQUNLOCK(fq);
	return rval;
}

int qman_oos_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
		     (fq->state != qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	FQUNLOCK(fq);
	return ret;
}
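/*
 * Editorial sketch (not part of the driver): qman_retire_fq() and
 * qman_oos_fq() above are normally used as a teardown sequence. A hedged
 * outline, which ignores the "pending" case (return value 1) where the
 * caller must wait for the FQRN message before continuing:
 *
 *	u32 flags;
 *
 *	if (qman_retire_fq(&fq, &flags) == 0 &&	// retirement was immediate
 *	    !(flags & QMAN_FQ_STATE_NE)) {	// nothing left to drain
 *		qman_oos_fq(&fq);
 *		qman_destroy_fq(&fq, 0);
 *	}
 *
 * If QMAN_FQ_STATE_NE is set, the remaining frames are typically drained
 * (e.g. with a volatile dequeue) before the FQ is taken out of service.
 */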
int qman_fq_flow_control(struct qman_fq *fq, int xon)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;
	u8 myverb;

	if ((fq->state == qman_fq_state_oos) ||
	    (fq->state == qman_fq_state_retired) ||
	    (fq->state == qman_fq_state_parked))
		return -EINVAL;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_parked) ||
		     (fq->state == qman_fq_state_oos) ||
		     (fq->state == qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	mcc->alterfq.count = 0;
	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;

	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);

	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
out:
	FQUNLOCK(fq);
	return ret;
}

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(fqd);
	if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}

int qman_query_fq_has_pkts(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	int ret = 0;
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		ret = !!mcr->queryfq_np.frm_cnt;
	return ret;
}

int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK) {
		*np = mcr->queryfq_np;
		np->fqd_link = be24_to_cpu(np->fqd_link);
		np->odp_seq = be16_to_cpu(np->odp_seq);
		np->orp_nesn = be16_to_cpu(np->orp_nesn);
		np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
		np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
		np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
		np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
		np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
		np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
		np->ics_surp = be16_to_cpu(np->ics_surp);
		np->byte_cnt = be32_to_cpu(np->byte_cnt);
		np->frm_cnt = be24_to_cpu(np->frm_cnt);
		np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
		np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
		np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
		np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
		np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
	}
	if (res == QM_MCR_RESULT_ERR_FQID)
		return -ERANGE;
	else if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
np->od3_sfdr = be16_to_cpu(np->od3_sfdr); 1858 } 1859 if (res == QM_MCR_RESULT_ERR_FQID) 1860 return -ERANGE; 1861 else if (res != QM_MCR_RESULT_OK) 1862 return -EIO; 1863 return 0; 1864 } 1865 1866 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt) 1867 { 1868 struct qm_mc_command *mcc; 1869 struct qm_mc_result *mcr; 1870 struct qman_portal *p = get_affine_portal(); 1871 1872 mcc = qm_mc_start(&p->p); 1873 mcc->queryfq.fqid = cpu_to_be32(fq->fqid); 1874 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); 1875 while (!(mcr = qm_mc_result(&p->p))) 1876 cpu_relax(); 1877 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); 1878 1879 if (mcr->result == QM_MCR_RESULT_OK) 1880 *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt); 1881 else if (mcr->result == QM_MCR_RESULT_ERR_FQID) 1882 return -ERANGE; 1883 else if (mcr->result != QM_MCR_RESULT_OK) 1884 return -EIO; 1885 return 0; 1886 } 1887 1888 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) 1889 { 1890 struct qm_mc_command *mcc; 1891 struct qm_mc_result *mcr; 1892 struct qman_portal *p = get_affine_portal(); 1893 1894 u8 res, myverb; 1895 1896 myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED : 1897 QM_MCR_VERB_QUERYWQ; 1898 mcc = qm_mc_start(&p->p); 1899 mcc->querywq.channel.id = cpu_to_be16(wq->channel.id); 1900 qm_mc_commit(&p->p, myverb); 1901 while (!(mcr = qm_mc_result(&p->p))) 1902 cpu_relax(); 1903 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); 1904 res = mcr->result; 1905 if (res == QM_MCR_RESULT_OK) { 1906 int i, array_len; 1907 1908 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id); 1909 array_len = ARRAY_SIZE(mcr->querywq.wq_len); 1910 for (i = 0; i < array_len; i++) 1911 wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]); 1912 } 1913 if (res != QM_MCR_RESULT_OK) { 1914 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); 1915 return -EIO; 1916 } 1917 return 0; 1918 } 1919 1920 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, 1921 struct qm_mcr_cgrtestwrite *result) 1922 { 1923 struct qm_mc_command *mcc; 1924 struct qm_mc_result *mcr; 1925 struct qman_portal *p = get_affine_portal(); 1926 1927 u8 res; 1928 1929 mcc = qm_mc_start(&p->p); 1930 mcc->cgrtestwrite.cgid = cgr->cgrid; 1931 mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); 1932 mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; 1933 qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); 1934 while (!(mcr = qm_mc_result(&p->p))) 1935 cpu_relax(); 1936 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); 1937 res = mcr->result; 1938 if (res == QM_MCR_RESULT_OK) 1939 *result = mcr->cgrtestwrite; 1940 if (res != QM_MCR_RESULT_OK) { 1941 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); 1942 return -EIO; 1943 } 1944 return 0; 1945 } 1946 1947 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) 1948 { 1949 struct qm_mc_command *mcc; 1950 struct qm_mc_result *mcr; 1951 struct qman_portal *p = get_affine_portal(); 1952 u8 res; 1953 unsigned int i; 1954 1955 mcc = qm_mc_start(&p->p); 1956 mcc->querycgr.cgid = cgr->cgrid; 1957 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); 1958 while (!(mcr = qm_mc_result(&p->p))) 1959 cpu_relax(); 1960 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); 1961 res = mcr->result; 1962 if (res == QM_MCR_RESULT_OK) 1963 *cgrd = mcr->querycgr; 1964 if (res != QM_MCR_RESULT_OK) { 1965 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); 1966 return -EIO; 1967 } 1968 cgrd->cgr.wr_parm_g.word = 1969 be32_to_cpu(cgrd->cgr.wr_parm_g.word); 1970 
cgrd->cgr.wr_parm_y.word = 1971 be32_to_cpu(cgrd->cgr.wr_parm_y.word); 1972 cgrd->cgr.wr_parm_r.word = 1973 be32_to_cpu(cgrd->cgr.wr_parm_r.word); 1974 cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ); 1975 cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres); 1976 for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++) 1977 cgrd->cscn_targ_swp[i] = 1978 be32_to_cpu(cgrd->cscn_targ_swp[i]); 1979 return 0; 1980 } 1981 1982 int qman_query_congestion(struct qm_mcr_querycongestion *congestion) 1983 { 1984 struct qm_mc_result *mcr; 1985 struct qman_portal *p = get_affine_portal(); 1986 u8 res; 1987 unsigned int i; 1988 1989 qm_mc_start(&p->p); 1990 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); 1991 while (!(mcr = qm_mc_result(&p->p))) 1992 cpu_relax(); 1993 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 1994 QM_MCC_VERB_QUERYCONGESTION); 1995 res = mcr->result; 1996 if (res == QM_MCR_RESULT_OK) 1997 *congestion = mcr->querycongestion; 1998 if (res != QM_MCR_RESULT_OK) { 1999 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); 2000 return -EIO; 2001 } 2002 for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++) 2003 congestion->state.state[i] = 2004 be32_to_cpu(congestion->state.state[i]); 2005 return 0; 2006 } 2007 2008 int qman_set_vdq(struct qman_fq *fq, u16 num) 2009 { 2010 struct qman_portal *p = get_affine_portal(); 2011 uint32_t vdqcr; 2012 int ret = -EBUSY; 2013 2014 vdqcr = QM_VDQCR_EXACT; 2015 vdqcr |= QM_VDQCR_NUMFRAMES_SET(num); 2016 2017 if ((fq->state != qman_fq_state_parked) && 2018 (fq->state != qman_fq_state_retired)) { 2019 ret = -EINVAL; 2020 goto out; 2021 } 2022 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { 2023 ret = -EBUSY; 2024 goto out; 2025 } 2026 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; 2027 2028 if (!p->vdqcr_owned) { 2029 FQLOCK(fq); 2030 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) 2031 goto escape; 2032 fq_set(fq, QMAN_FQ_STATE_VDQCR); 2033 FQUNLOCK(fq); 2034 p->vdqcr_owned = fq; 2035 ret = 0; 2036 } 2037 escape: 2038 if (!ret) 2039 qm_dqrr_vdqcr_set(&p->p, vdqcr); 2040 2041 out: 2042 return ret; 2043 } 2044 2045 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, 2046 u32 vdqcr) 2047 { 2048 struct qman_portal *p; 2049 int ret = -EBUSY; 2050 2051 if ((fq->state != qman_fq_state_parked) && 2052 (fq->state != qman_fq_state_retired)) 2053 return -EINVAL; 2054 if (vdqcr & QM_VDQCR_FQID_MASK) 2055 return -EINVAL; 2056 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) 2057 return -EBUSY; 2058 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; 2059 2060 p = get_affine_portal(); 2061 2062 if (!p->vdqcr_owned) { 2063 FQLOCK(fq); 2064 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) 2065 goto escape; 2066 fq_set(fq, QMAN_FQ_STATE_VDQCR); 2067 FQUNLOCK(fq); 2068 p->vdqcr_owned = fq; 2069 ret = 0; 2070 } 2071 escape: 2072 if (ret) 2073 return ret; 2074 2075 /* VDQCR is set */ 2076 qm_dqrr_vdqcr_set(&p->p, vdqcr); 2077 return 0; 2078 } 2079 2080 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) 2081 { 2082 if (avail) 2083 qm_eqcr_cce_prefetch(&p->p); 2084 else 2085 qm_eqcr_cce_update(&p->p); 2086 } 2087 2088 int qman_eqcr_is_empty(void) 2089 { 2090 struct qman_portal *p = get_affine_portal(); 2091 u8 avail; 2092 2093 update_eqcr_ci(p, 0); 2094 avail = qm_eqcr_get_fill(&p->p); 2095 return (avail == 0); 2096 } 2097 2098 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine) 2099 { 2100 if (affine) { 2101 struct qman_portal *p = get_affine_portal(); 2102 2103 p->cb_dc_ern = handler; 2104 } else 2105 cb_dc_ern = handler; 2106 } 2107 2108 
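/*
 * Usage sketch (illustrative only, not part of the driver): how a caller
 * might use qman_volatile_dequeue() above to pull a bounded burst of frames
 * from a parked or retired FQ.  The helper name "example_drain_burst" is
 * hypothetical; the dequeued frames are delivered through the FQ's normal
 * DQRR callback when the caller next services its affine portal (e.g. via
 * qman_poll()).
 *
 *	static int example_drain_burst(struct qman_fq *fq)
 *	{
 *		// Returns 0 once the VDQCR has been issued, -EINVAL if the
 *		// FQ is neither parked nor retired, -EBUSY if a volatile
 *		// dequeue is already outstanding on this FQ or portal.
 *		return qman_volatile_dequeue(fq, 0,
 *					     QM_VDQCR_NUMFRAMES_SET(8) |
 *					     QM_VDQCR_EXACT);
 *	}
 */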
static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p, 2109 struct qman_fq *fq, 2110 const struct qm_fd *fd, 2111 u32 flags) 2112 { 2113 struct qm_eqcr_entry *eq; 2114 u8 avail; 2115 2116 if (p->use_eqcr_ci_stashing) { 2117 /* 2118 * The stashing case is easy, only update if we need to in 2119 * order to try and liberate ring entries. 2120 */ 2121 eq = qm_eqcr_start_stash(&p->p); 2122 } else { 2123 /* 2124 * The non-stashing case is harder, need to prefetch ahead of 2125 * time. 2126 */ 2127 avail = qm_eqcr_get_avail(&p->p); 2128 if (avail < 2) 2129 update_eqcr_ci(p, avail); 2130 eq = qm_eqcr_start_no_stash(&p->p); 2131 } 2132 2133 if (unlikely(!eq)) 2134 return NULL; 2135 2136 if (flags & QMAN_ENQUEUE_FLAG_DCA) 2137 eq->dca = QM_EQCR_DCA_ENABLE | 2138 ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? 2139 QM_EQCR_DCA_PARK : 0) | 2140 ((flags >> 8) & QM_EQCR_DCA_IDXMASK); 2141 eq->fqid = cpu_to_be32(fq->fqid); 2142 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 2143 eq->tag = cpu_to_be32(fq->key); 2144 #else 2145 eq->tag = cpu_to_be32((u32)(uintptr_t)fq); 2146 #endif 2147 eq->fd = *fd; 2148 cpu_to_hw_fd(&eq->fd); 2149 return eq; 2150 } 2151 2152 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) 2153 { 2154 struct qman_portal *p = get_affine_portal(); 2155 struct qm_eqcr_entry *eq; 2156 2157 eq = try_p_eq_start(p, fq, fd, flags); 2158 if (!eq) 2159 return -EBUSY; 2160 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ 2161 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | 2162 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); 2163 /* Factor the below out, it's used from qman_enqueue_orp() too */ 2164 return 0; 2165 } 2166 2167 int qman_enqueue_multi(struct qman_fq *fq, 2168 const struct qm_fd *fd, u32 *flags, 2169 int frames_to_send) 2170 { 2171 struct qman_portal *p = get_affine_portal(); 2172 struct qm_portal *portal = &p->p; 2173 2174 register struct qm_eqcr *eqcr = &portal->eqcr; 2175 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq; 2176 2177 u8 i = 0, diff, old_ci, sent = 0; 2178 2179 /* Update the available entries if no entry is free */ 2180 if (!eqcr->available) { 2181 old_ci = eqcr->ci; 2182 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); 2183 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); 2184 eqcr->available += diff; 2185 if (!diff) 2186 return 0; 2187 } 2188 2189 /* try to send as many frames as possible */ 2190 while (eqcr->available && frames_to_send--) { 2191 eq->fqid = fq->fqid_le; 2192 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 2193 eq->tag = cpu_to_be32(fq->key); 2194 #else 2195 eq->tag = cpu_to_be32((u32)(uintptr_t)fq); 2196 #endif 2197 eq->fd.opaque_addr = fd->opaque_addr; 2198 eq->fd.addr = cpu_to_be40(fd->addr); 2199 eq->fd.status = cpu_to_be32(fd->status); 2200 eq->fd.opaque = cpu_to_be32(fd->opaque); 2201 if (flags[i] & QMAN_ENQUEUE_FLAG_DCA) { 2202 eq->dca = QM_EQCR_DCA_ENABLE | 2203 ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK); 2204 } 2205 i++; 2206 eq = (void *)((unsigned long)(eq + 1) & 2207 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2208 eqcr->available--; 2209 sent++; 2210 fd++; 2211 } 2212 lwsync(); 2213 2214 /* In order for flushes to complete faster, all lines are recorded in 2215 * 32 bit word. 
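 * (That is: the verb bytes of every entry queued above are written in the
 * first loop below, and only afterwards are the corresponding cache lines
 * flushed in a second loop, so the dcbf flushes are not interleaved with
 * other stores.)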
2216 */ 2217 eq = eqcr->cursor; 2218 for (i = 0; i < sent; i++) { 2219 eq->__dont_write_directly__verb = 2220 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit; 2221 prev_eq = eq; 2222 eq = (void *)((unsigned long)(eq + 1) & 2223 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2224 if (unlikely((prev_eq + 1) != eq)) 2225 eqcr->vbit ^= QM_EQCR_VERB_VBIT; 2226 } 2227 2228 /* We need to flush all the lines but without load/store operations 2229 * between them 2230 */ 2231 eq = eqcr->cursor; 2232 for (i = 0; i < sent; i++) { 2233 dcbf(eq); 2234 eq = (void *)((unsigned long)(eq + 1) & 2235 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2236 } 2237 /* Update cursor for the next call */ 2238 eqcr->cursor = eq; 2239 return sent; 2240 } 2241 2242 int 2243 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd, 2244 int frames_to_send) 2245 { 2246 struct qman_portal *p = get_affine_portal(); 2247 struct qm_portal *portal = &p->p; 2248 2249 register struct qm_eqcr *eqcr = &portal->eqcr; 2250 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq; 2251 2252 u8 i, diff, old_ci, sent = 0; 2253 2254 /* Update the available entries if no entry is free */ 2255 if (!eqcr->available) { 2256 old_ci = eqcr->ci; 2257 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); 2258 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); 2259 eqcr->available += diff; 2260 if (!diff) 2261 return 0; 2262 } 2263 2264 /* try to send as many frames as possible */ 2265 while (eqcr->available && frames_to_send--) { 2266 eq->fqid = fq[sent]->fqid_le; 2267 eq->fd.opaque_addr = fd->opaque_addr; 2268 eq->fd.addr = cpu_to_be40(fd->addr); 2269 eq->fd.status = cpu_to_be32(fd->status); 2270 eq->fd.opaque = cpu_to_be32(fd->opaque); 2271 2272 eq = (void *)((unsigned long)(eq + 1) & 2273 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2274 eqcr->available--; 2275 sent++; 2276 fd++; 2277 } 2278 lwsync(); 2279 2280 /* In order for flushes to complete faster, all lines are recorded in 2281 * 32 bit word. 
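 * (Same two-pass scheme as in qman_enqueue_multi() above: write all the
 * verb bytes first, then flush the cache lines.)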
2282 */ 2283 eq = eqcr->cursor; 2284 for (i = 0; i < sent; i++) { 2285 eq->__dont_write_directly__verb = 2286 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit; 2287 prev_eq = eq; 2288 eq = (void *)((unsigned long)(eq + 1) & 2289 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2290 if (unlikely((prev_eq + 1) != eq)) 2291 eqcr->vbit ^= QM_EQCR_VERB_VBIT; 2292 } 2293 2294 /* We need to flush all the lines but without load/store operations 2295 * between them 2296 */ 2297 eq = eqcr->cursor; 2298 for (i = 0; i < sent; i++) { 2299 dcbf(eq); 2300 eq = (void *)((unsigned long)(eq + 1) & 2301 (~(unsigned long)(QM_EQCR_SIZE << 6))); 2302 } 2303 /* Update cursor for the next call */ 2304 eqcr->cursor = eq; 2305 return sent; 2306 } 2307 2308 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, 2309 struct qman_fq *orp, u16 orp_seqnum) 2310 { 2311 struct qman_portal *p = get_affine_portal(); 2312 struct qm_eqcr_entry *eq; 2313 2314 eq = try_p_eq_start(p, fq, fd, flags); 2315 if (!eq) 2316 return -EBUSY; 2317 /* Process ORP-specifics here */ 2318 if (flags & QMAN_ENQUEUE_FLAG_NLIS) 2319 orp_seqnum |= QM_EQCR_SEQNUM_NLIS; 2320 else { 2321 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; 2322 if (flags & QMAN_ENQUEUE_FLAG_NESN) 2323 orp_seqnum |= QM_EQCR_SEQNUM_NESN; 2324 else 2325 /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */ 2326 orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; 2327 } 2328 eq->seqnum = cpu_to_be16(orp_seqnum); 2329 eq->orp = cpu_to_be32(orp->fqid); 2330 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ 2331 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | 2332 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? 2333 0 : QM_EQCR_VERB_CMD_ENQUEUE) | 2334 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); 2335 2336 return 0; 2337 } 2338 2339 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, 2340 struct qm_mcc_initcgr *opts) 2341 { 2342 struct qm_mc_command *mcc; 2343 struct qm_mc_result *mcr; 2344 struct qman_portal *p = get_affine_portal(); 2345 2346 u8 res; 2347 u8 verb = QM_MCC_VERB_MODIFYCGR; 2348 2349 mcc = qm_mc_start(&p->p); 2350 if (opts) 2351 mcc->initcgr = *opts; 2352 mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask); 2353 mcc->initcgr.cgr.wr_parm_g.word = 2354 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word); 2355 mcc->initcgr.cgr.wr_parm_y.word = 2356 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word); 2357 mcc->initcgr.cgr.wr_parm_r.word = 2358 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word); 2359 mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ); 2360 mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres); 2361 2362 mcc->initcgr.cgid = cgr->cgrid; 2363 if (flags & QMAN_CGR_FLAG_USE_INIT) 2364 verb = QM_MCC_VERB_INITCGR; 2365 qm_mc_commit(&p->p, verb); 2366 while (!(mcr = qm_mc_result(&p->p))) 2367 cpu_relax(); 2368 2369 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); 2370 res = mcr->result; 2371 return (res == QM_MCR_RESULT_OK) ? 0 : -EIO; 2372 } 2373 2374 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \ 2375 QM_CHANNEL_SWPORTAL0)) 2376 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n)) 2377 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) 2378 2379 int qman_create_cgr(struct qman_cgr *cgr, u32 flags, 2380 struct qm_mcc_initcgr *opts) 2381 { 2382 struct qm_mcr_querycgr cgr_state; 2383 struct qm_mcc_initcgr local_opts; 2384 int ret; 2385 struct qman_portal *p; 2386 2387 /* We have to check that the provided CGRID is within the limits of the 2388 * data-structures, for obvious reasons. 
However we'll let h/w take 2389 * care of determining whether it's within the limits of what exists on 2390 * the SoC. 2391 */ 2392 if (cgr->cgrid >= __CGR_NUM) 2393 return -EINVAL; 2394 2395 p = get_affine_portal(); 2396 2397 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); 2398 cgr->chan = p->config->channel; 2399 spin_lock(&p->cgr_lock); 2400 2401 /* if no opts specified, just add it to the list */ 2402 if (!opts) 2403 goto add_list; 2404 2405 ret = qman_query_cgr(cgr, &cgr_state); 2406 if (ret) 2407 goto release_lock; 2408 if (opts) 2409 local_opts = *opts; 2410 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) 2411 local_opts.cgr.cscn_targ_upd_ctrl = 2412 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); 2413 else 2414 /* Overwrite TARG */ 2415 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | 2416 TARG_MASK(p); 2417 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; 2418 2419 /* send init if flags indicate so */ 2420 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) 2421 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); 2422 else 2423 ret = qman_modify_cgr(cgr, 0, &local_opts); 2424 if (ret) 2425 goto release_lock; 2426 add_list: 2427 list_add(&cgr->node, &p->cgr_cbs); 2428 2429 /* Determine if newly added object requires its callback to be called */ 2430 ret = qman_query_cgr(cgr, &cgr_state); 2431 if (ret) { 2432 /* we can't go back, so proceed and return success, but screen 2433 * and wail to the log file. 2434 */ 2435 pr_crit("CGR HW state partially modified\n"); 2436 ret = 0; 2437 goto release_lock; 2438 } 2439 if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1], 2440 cgr->cgrid)) 2441 cgr->cb(p, cgr, 1); 2442 release_lock: 2443 spin_unlock(&p->cgr_lock); 2444 return ret; 2445 } 2446 2447 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal, 2448 struct qm_mcc_initcgr *opts) 2449 { 2450 struct qm_mcc_initcgr local_opts; 2451 struct qm_mcr_querycgr cgr_state; 2452 int ret; 2453 2454 if ((qman_ip_rev & 0xFF00) < QMAN_REV30) { 2455 pr_warn("QMan version doesn't support CSCN => DCP portal\n"); 2456 return -EINVAL; 2457 } 2458 /* We have to check that the provided CGRID is within the limits of the 2459 * data-structures, for obvious reasons. However we'll let h/w take 2460 * care of determining whether it's within the limits of what exists on 2461 * the SoC. 
2462 */ 2463 if (cgr->cgrid >= __CGR_NUM) 2464 return -EINVAL; 2465 2466 ret = qman_query_cgr(cgr, &cgr_state); 2467 if (ret) 2468 return ret; 2469 2470 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); 2471 if (opts) 2472 local_opts = *opts; 2473 2474 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) 2475 local_opts.cgr.cscn_targ_upd_ctrl = 2476 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | 2477 QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal; 2478 else 2479 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | 2480 TARG_DCP_MASK(dcp_portal); 2481 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; 2482 2483 /* send init if flags indicate so */ 2484 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) 2485 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, 2486 &local_opts); 2487 else 2488 ret = qman_modify_cgr(cgr, 0, &local_opts); 2489 2490 return ret; 2491 } 2492 2493 int qman_delete_cgr(struct qman_cgr *cgr) 2494 { 2495 struct qm_mcr_querycgr cgr_state; 2496 struct qm_mcc_initcgr local_opts; 2497 int ret = 0; 2498 struct qman_cgr *i; 2499 struct qman_portal *p = get_affine_portal(); 2500 2501 if (cgr->chan != p->config->channel) { 2502 pr_crit("Attempting to delete cgr from different portal than" 2503 " it was create: create 0x%x, delete 0x%x\n", 2504 cgr->chan, p->config->channel); 2505 ret = -EINVAL; 2506 goto put_portal; 2507 } 2508 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); 2509 spin_lock(&p->cgr_lock); 2510 list_del(&cgr->node); 2511 /* 2512 * If there are no other CGR objects for this CGRID in the list, 2513 * update CSCN_TARG accordingly 2514 */ 2515 list_for_each_entry(i, &p->cgr_cbs, node) 2516 if ((i->cgrid == cgr->cgrid) && i->cb) 2517 goto release_lock; 2518 ret = qman_query_cgr(cgr, &cgr_state); 2519 if (ret) { 2520 /* add back to the list */ 2521 list_add(&cgr->node, &p->cgr_cbs); 2522 goto release_lock; 2523 } 2524 /* Overwrite TARG */ 2525 local_opts.we_mask = QM_CGR_WE_CSCN_TARG; 2526 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) 2527 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); 2528 else 2529 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & 2530 ~(TARG_MASK(p)); 2531 ret = qman_modify_cgr(cgr, 0, &local_opts); 2532 if (ret) 2533 /* add back to the list */ 2534 list_add(&cgr->node, &p->cgr_cbs); 2535 release_lock: 2536 spin_unlock(&p->cgr_lock); 2537 put_portal: 2538 return ret; 2539 } 2540 2541 int qman_shutdown_fq(u32 fqid) 2542 { 2543 struct qman_portal *p; 2544 struct qm_portal *low_p; 2545 struct qm_mc_command *mcc; 2546 struct qm_mc_result *mcr; 2547 u8 state; 2548 int orl_empty, fq_empty, drain = 0; 2549 u32 result; 2550 u32 channel, wq; 2551 u16 dest_wq; 2552 2553 p = get_affine_portal(); 2554 low_p = &p->p; 2555 2556 /* Determine the state of the FQID */ 2557 mcc = qm_mc_start(low_p); 2558 mcc->queryfq_np.fqid = cpu_to_be32(fqid); 2559 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP); 2560 while (!(mcr = qm_mc_result(low_p))) 2561 cpu_relax(); 2562 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); 2563 state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; 2564 if (state == QM_MCR_NP_STATE_OOS) 2565 return 0; /* Already OOS, no need to do anymore checks */ 2566 2567 /* Query which channel the FQ is using */ 2568 mcc = qm_mc_start(low_p); 2569 mcc->queryfq.fqid = cpu_to_be32(fqid); 2570 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ); 2571 while (!(mcr = qm_mc_result(low_p))) 2572 cpu_relax(); 2573 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); 2574 2575 /* Need to store these since the MCR gets reused */ 2576 dest_wq = 
be16_to_cpu(mcr->queryfq.fqd.dest_wq); 2577 channel = dest_wq & 0x7; 2578 wq = dest_wq >> 3; 2579 2580 switch (state) { 2581 case QM_MCR_NP_STATE_TEN_SCHED: 2582 case QM_MCR_NP_STATE_TRU_SCHED: 2583 case QM_MCR_NP_STATE_ACTIVE: 2584 case QM_MCR_NP_STATE_PARKED: 2585 orl_empty = 0; 2586 mcc = qm_mc_start(low_p); 2587 mcc->alterfq.fqid = cpu_to_be32(fqid); 2588 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE); 2589 while (!(mcr = qm_mc_result(low_p))) 2590 cpu_relax(); 2591 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 2592 QM_MCR_VERB_ALTER_RETIRE); 2593 result = mcr->result; /* Make a copy as we reuse MCR below */ 2594 2595 if (result == QM_MCR_RESULT_PENDING) { 2596 /* Need to wait for the FQRN in the message ring, which 2597 * will only occur once the FQ has been drained. In 2598 * order for the FQ to drain the portal needs to be set 2599 * to dequeue from the channel the FQ is scheduled on 2600 */ 2601 const struct qm_mr_entry *msg; 2602 const struct qm_dqrr_entry *dqrr = NULL; 2603 int found_fqrn = 0; 2604 __maybe_unused u16 dequeue_wq = 0; 2605 2606 /* Flag that we need to drain FQ */ 2607 drain = 1; 2608 2609 if (channel >= qm_channel_pool1 && 2610 channel < (u16)(qm_channel_pool1 + 15)) { 2611 /* Pool channel, enable the bit in the portal */ 2612 dequeue_wq = (channel - 2613 qm_channel_pool1 + 1) << 4 | wq; 2614 } else if (channel < qm_channel_pool1) { 2615 /* Dedicated channel */ 2616 dequeue_wq = wq; 2617 } else { 2618 pr_info("Cannot recover FQ 0x%x," 2619 " it is scheduled on channel 0x%x", 2620 fqid, channel); 2621 return -EBUSY; 2622 } 2623 /* Set the sdqcr to drain this channel */ 2624 if (channel < qm_channel_pool1) 2625 qm_dqrr_sdqcr_set(low_p, 2626 QM_SDQCR_TYPE_ACTIVE | 2627 QM_SDQCR_CHANNELS_DEDICATED); 2628 else 2629 qm_dqrr_sdqcr_set(low_p, 2630 QM_SDQCR_TYPE_ACTIVE | 2631 QM_SDQCR_CHANNELS_POOL_CONV 2632 (channel)); 2633 while (!found_fqrn) { 2634 /* Keep draining DQRR while checking the MR*/ 2635 qm_dqrr_pvb_update(low_p); 2636 dqrr = qm_dqrr_current(low_p); 2637 while (dqrr) { 2638 qm_dqrr_cdc_consume_1ptr( 2639 low_p, dqrr, 0); 2640 qm_dqrr_pvb_update(low_p); 2641 qm_dqrr_next(low_p); 2642 dqrr = qm_dqrr_current(low_p); 2643 } 2644 /* Process message ring too */ 2645 qm_mr_pvb_update(low_p); 2646 msg = qm_mr_current(low_p); 2647 while (msg) { 2648 if ((msg->verb & 2649 QM_MR_VERB_TYPE_MASK) 2650 == QM_MR_VERB_FQRN) 2651 found_fqrn = 1; 2652 qm_mr_next(low_p); 2653 qm_mr_cci_consume_to_current(low_p); 2654 qm_mr_pvb_update(low_p); 2655 msg = qm_mr_current(low_p); 2656 } 2657 cpu_relax(); 2658 } 2659 } 2660 if (result != QM_MCR_RESULT_OK && 2661 result != QM_MCR_RESULT_PENDING) { 2662 /* error */ 2663 pr_err("qman_retire_fq failed on FQ 0x%x," 2664 " result=0x%x\n", fqid, result); 2665 return -1; 2666 } 2667 if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { 2668 /* ORL had no entries, no need to wait until the 2669 * ERNs come in. 2670 */ 2671 orl_empty = 1; 2672 } 2673 /* Retirement succeeded, check to see if FQ needs 2674 * to be drained. 
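 * Draining is done below with volatile dequeue commands (three frames per
 * VDQCR) until a DQRR entry for this FQID reports FQ_EMPTY.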
2675 */ 2676 if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { 2677 /* FQ is Not Empty, drain using volatile DQ commands */ 2678 fq_empty = 0; 2679 do { 2680 const struct qm_dqrr_entry *dqrr = NULL; 2681 u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); 2682 2683 qm_dqrr_vdqcr_set(low_p, vdqcr); 2684 2685 /* Wait for a dequeue to occur */ 2686 while (dqrr == NULL) { 2687 qm_dqrr_pvb_update(low_p); 2688 dqrr = qm_dqrr_current(low_p); 2689 if (!dqrr) 2690 cpu_relax(); 2691 } 2692 /* Process the dequeues, making sure to 2693 * empty the ring completely. 2694 */ 2695 while (dqrr) { 2696 if (dqrr->fqid == fqid && 2697 dqrr->stat & QM_DQRR_STAT_FQ_EMPTY) 2698 fq_empty = 1; 2699 qm_dqrr_cdc_consume_1ptr(low_p, 2700 dqrr, 0); 2701 qm_dqrr_pvb_update(low_p); 2702 qm_dqrr_next(low_p); 2703 dqrr = qm_dqrr_current(low_p); 2704 } 2705 } while (fq_empty == 0); 2706 } 2707 qm_dqrr_sdqcr_set(low_p, 0); 2708 2709 /* Wait for the ORL to have been completely drained */ 2710 while (orl_empty == 0) { 2711 const struct qm_mr_entry *msg; 2712 2713 qm_mr_pvb_update(low_p); 2714 msg = qm_mr_current(low_p); 2715 while (msg) { 2716 if ((msg->verb & QM_MR_VERB_TYPE_MASK) == 2717 QM_MR_VERB_FQRL) 2718 orl_empty = 1; 2719 qm_mr_next(low_p); 2720 qm_mr_cci_consume_to_current(low_p); 2721 qm_mr_pvb_update(low_p); 2722 msg = qm_mr_current(low_p); 2723 } 2724 cpu_relax(); 2725 } 2726 mcc = qm_mc_start(low_p); 2727 mcc->alterfq.fqid = cpu_to_be32(fqid); 2728 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS); 2729 while (!(mcr = qm_mc_result(low_p))) 2730 cpu_relax(); 2731 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 2732 QM_MCR_VERB_ALTER_OOS); 2733 if (mcr->result != QM_MCR_RESULT_OK) { 2734 pr_err( 2735 "OOS after drain Failed on FQID 0x%x, result 0x%x\n", 2736 fqid, mcr->result); 2737 return -1; 2738 } 2739 return 0; 2740 2741 case QM_MCR_NP_STATE_RETIRED: 2742 /* Send OOS Command */ 2743 mcc = qm_mc_start(low_p); 2744 mcc->alterfq.fqid = cpu_to_be32(fqid); 2745 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS); 2746 while (!(mcr = qm_mc_result(low_p))) 2747 cpu_relax(); 2748 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 2749 QM_MCR_VERB_ALTER_OOS); 2750 if (mcr->result) { 2751 pr_err("OOS Failed on FQID 0x%x\n", fqid); 2752 return -1; 2753 } 2754 return 0; 2755 2756 } 2757 return -1; 2758 } 2759
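/*
 * Usage sketch (illustrative only, not part of the driver): a typical FQ
 * teardown using the API above.  The helper name "example_stop_fq" is
 * hypothetical; cleanup paths that only have a FQID and can tolerate
 * blocking may instead call qman_shutdown_fq() above, which performs the
 * retire/drain/OOS sequence in one call.
 *
 *	static int example_stop_fq(struct qman_fq *fq)
 *	{
 *		u32 flags;
 *		int ret = qman_retire_fq(fq, &flags);
 *
 *		if (ret < 0)
 *			return ret;	// retirement could not be issued
 *		if (ret > 0) {
 *			// Retirement is pending: the owner must keep
 *			// servicing its affine portal so the FQRN message is
 *			// processed and the FQ reaches the retired state,
 *			// then retry qman_oos_fq() later.
 *			return -EAGAIN;
 *		}
 *		return qman_oos_fq(fq);	// immediate retirement: go OOS now
 *	}
 */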