1d81734caSHemant Agrawal /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) 2c47ff048SShreyansh Jain * 3c47ff048SShreyansh Jain * Copyright 2008-2016 Freescale Semiconductor Inc. 4c9fd1acdSGagandeep Singh * Copyright 2017,2019-2024 NXP 5c47ff048SShreyansh Jain * 6c47ff048SShreyansh Jain */ 7c47ff048SShreyansh Jain 8c47ff048SShreyansh Jain #include "qman.h" 9c47ff048SShreyansh Jain #include <rte_branch_prediction.h> 10a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h> 1143797e7bSSunil Kumar Kori #include <rte_eventdev.h> 1243797e7bSSunil Kumar Kori #include <rte_byteorder.h> 13c47ff048SShreyansh Jain 148c83f28cSHemant Agrawal #include <dpaa_bits.h> 158c83f28cSHemant Agrawal 16c47ff048SShreyansh Jain /* Compilation constants */ 17c47ff048SShreyansh Jain #define DQRR_MAXFILL 15 18c47ff048SShreyansh Jain #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ 19c47ff048SShreyansh Jain #define IRQNAME "QMan portal %d" 20c47ff048SShreyansh Jain #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ 21c47ff048SShreyansh Jain /* maximum number of DQRR entries to process in qman_poll() */ 22c47ff048SShreyansh Jain #define FSL_QMAN_POLL_LIMIT 8 23c47ff048SShreyansh Jain 24c47ff048SShreyansh Jain /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about 25*68508c18SHemant Agrawal * inter-processor locking only. 26c47ff048SShreyansh Jain */ 27*68508c18SHemant Agrawal #define FQLOCK(fq) fq_lock(fq) 28*68508c18SHemant Agrawal #define FQUNLOCK(fq) fq_unlock(fq) 29c47ff048SShreyansh Jain 309124e65dSGagandeep Singh static qman_cb_free_mbuf qman_free_mbuf_cb; 319124e65dSGagandeep Singh 32c47ff048SShreyansh Jain static inline void fq_set(struct qman_fq *fq, u32 mask) 33c47ff048SShreyansh Jain { 34c47ff048SShreyansh Jain dpaa_set_bits(mask, &fq->flags); 35c47ff048SShreyansh Jain } 36c47ff048SShreyansh Jain 37c47ff048SShreyansh Jain static inline void fq_clear(struct qman_fq *fq, u32 mask) 38c47ff048SShreyansh Jain { 39c47ff048SShreyansh Jain dpaa_clear_bits(mask, &fq->flags); 40c47ff048SShreyansh Jain } 41c47ff048SShreyansh Jain 42c47ff048SShreyansh Jain static inline int fq_isset(struct qman_fq *fq, u32 mask) 43c47ff048SShreyansh Jain { 44c47ff048SShreyansh Jain return fq->flags & mask; 45c47ff048SShreyansh Jain } 46c47ff048SShreyansh Jain 47*68508c18SHemant Agrawal static inline void fq_lock(struct qman_fq *fq) 48*68508c18SHemant Agrawal __rte_exclusive_lock_function(&fq->fqlock) 49*68508c18SHemant Agrawal __rte_no_thread_safety_analysis 50*68508c18SHemant Agrawal { 51*68508c18SHemant Agrawal if (fq_isset(fq, QMAN_FQ_FLAG_LOCKED)) 52*68508c18SHemant Agrawal spin_lock(&fq->fqlock); 53*68508c18SHemant Agrawal } 54*68508c18SHemant Agrawal 55*68508c18SHemant Agrawal static inline void fq_unlock(struct qman_fq *fq) 56*68508c18SHemant Agrawal __rte_unlock_function(&fq->fqlock) 57*68508c18SHemant Agrawal __rte_no_thread_safety_analysis 58*68508c18SHemant Agrawal { 59*68508c18SHemant Agrawal if (fq_isset(fq, QMAN_FQ_FLAG_LOCKED)) 60*68508c18SHemant Agrawal spin_unlock(&fq->fqlock); 61*68508c18SHemant Agrawal } 62*68508c18SHemant Agrawal 63c47ff048SShreyansh Jain static inline int fq_isclear(struct qman_fq *fq, u32 mask) 64c47ff048SShreyansh Jain { 65c47ff048SShreyansh Jain return !(fq->flags & mask); 66c47ff048SShreyansh Jain } 67c47ff048SShreyansh Jain 68c47ff048SShreyansh Jain struct qman_portal { 69c47ff048SShreyansh Jain struct qm_portal p; 70c47ff048SShreyansh Jain /* PORTAL_BITS_*** - dynamic, strictly internal */ 71c47ff048SShreyansh Jain unsigned long bits; 
72c47ff048SShreyansh Jain 	/* interrupt sources processed by portal_isr(), configurable */
73c47ff048SShreyansh Jain 	unsigned long irq_sources;
74c47ff048SShreyansh Jain 	u32 use_eqcr_ci_stashing;
75c47ff048SShreyansh Jain 	/* only 1 volatile dequeue at a time */
76c47ff048SShreyansh Jain 	struct qman_fq *vdqcr_owned;
77c47ff048SShreyansh Jain 	u32 sdqcr;
78c47ff048SShreyansh Jain 	int dqrr_disable_ref;
79c47ff048SShreyansh Jain 	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
80c47ff048SShreyansh Jain 	 * handler is called instead.
81c47ff048SShreyansh Jain 	 */
82c47ff048SShreyansh Jain 	qman_cb_dc_ern cb_dc_ern;
83c47ff048SShreyansh Jain 	/* When the cpu-affine portal is activated, this is non-NULL */
84c47ff048SShreyansh Jain 	const struct qm_portal_config *config;
85c47ff048SShreyansh Jain 	struct dpa_rbtree retire_table;
86c47ff048SShreyansh Jain 	char irqname[MAX_IRQNAME];
87c47ff048SShreyansh Jain 	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
88c47ff048SShreyansh Jain 	struct qman_cgrs *cgrs;
89c47ff048SShreyansh Jain 	/* linked-list of CSCN handlers. */
90c47ff048SShreyansh Jain 	struct list_head cgr_cbs;
91c47ff048SShreyansh Jain 	/* list lock */
92c47ff048SShreyansh Jain 	spinlock_t cgr_lock;
93c47ff048SShreyansh Jain 	/* track if memory was allocated by the driver */
94c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
95c47ff048SShreyansh Jain 	/* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
96c47ff048SShreyansh Jain 	 * byte-swap the read-only DQRR memory. The first entry must be aligned
97c47ff048SShreyansh Jain 	 * to 2 ** 10 so that DQRR index calculations can be based on the
98c47ff048SShreyansh Jain 	 * shadow copy address (6 bits for the address shift + 4 bits for the
99c47ff048SShreyansh Jain 	 * DQRR size).
99c47ff048SShreyansh Jain 	 */
10027595cd8STyler Retzlaff 	alignas(1024) struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE];
101c47ff048SShreyansh Jain #endif
102c47ff048SShreyansh Jain };
103c47ff048SShreyansh Jain 
104c47ff048SShreyansh Jain /* Global handler for DCP ERNs. Used when the portal receiving the message does
105c47ff048SShreyansh Jain  * not have a portal-specific handler.
106c47ff048SShreyansh Jain  */
107c47ff048SShreyansh Jain static qman_cb_dc_ern cb_dc_ern;
108c47ff048SShreyansh Jain 
109c47ff048SShreyansh Jain static cpumask_t affine_mask;
110c47ff048SShreyansh Jain static DEFINE_SPINLOCK(affine_mask_lock);
111c47ff048SShreyansh Jain static u16 affine_channels[NR_CPUS];
112c47ff048SShreyansh Jain static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
113c47ff048SShreyansh Jain 
114c47ff048SShreyansh Jain static inline struct qman_portal *get_affine_portal(void)
115c47ff048SShreyansh Jain {
116c47ff048SShreyansh Jain 	return &RTE_PER_LCORE(qman_affine_portal);
117c47ff048SShreyansh Jain }
118c47ff048SShreyansh Jain 
119c47ff048SShreyansh Jain /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
120c47ff048SShreyansh Jain  * retirement notifications (the fact they are sometimes h/w-consumed means that
121c47ff048SShreyansh Jain  * contextB isn't always a s/w demux - and as we can't know which case it is
122c47ff048SShreyansh Jain  * when looking at the notification, we have to use the slow lookup for all of
123c47ff048SShreyansh Jain  * them). 
NB, it's possible to have multiple FQ objects refer to the same FQID 124c47ff048SShreyansh Jain * (though at most one of them should be the consumer), so this table isn't for 125c47ff048SShreyansh Jain * all FQs - FQs are added when retirement commands are issued, and removed when 126c47ff048SShreyansh Jain * they complete, which also massively reduces the size of this table. 127c47ff048SShreyansh Jain */ 128c47ff048SShreyansh Jain IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid); 129c47ff048SShreyansh Jain /* 130c47ff048SShreyansh Jain * This is what everything can wait on, even if it migrates to a different cpu 131c47ff048SShreyansh Jain * to the one whose affine portal it is waiting on. 132c47ff048SShreyansh Jain */ 133c47ff048SShreyansh Jain static DECLARE_WAIT_QUEUE_HEAD(affine_queue); 134c47ff048SShreyansh Jain 135c47ff048SShreyansh Jain static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) 136c47ff048SShreyansh Jain { 137c47ff048SShreyansh Jain int ret = fqtree_push(&p->retire_table, fq); 138c47ff048SShreyansh Jain 139c47ff048SShreyansh Jain if (ret) 140c47ff048SShreyansh Jain pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); 141c47ff048SShreyansh Jain return ret; 142c47ff048SShreyansh Jain } 143c47ff048SShreyansh Jain 144c47ff048SShreyansh Jain static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) 145c47ff048SShreyansh Jain { 146c47ff048SShreyansh Jain fqtree_del(&p->retire_table, fq); 147c47ff048SShreyansh Jain } 148c47ff048SShreyansh Jain 149c47ff048SShreyansh Jain static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) 150c47ff048SShreyansh Jain { 151c47ff048SShreyansh Jain return fqtree_find(&p->retire_table, fqid); 152c47ff048SShreyansh Jain } 153c47ff048SShreyansh Jain 154847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 155847ee3bdSShreyansh Jain static void **qman_fq_lookup_table; 156847ee3bdSShreyansh Jain static size_t qman_fq_lookup_table_size; 157847ee3bdSShreyansh Jain 158847ee3bdSShreyansh Jain int qman_setup_fq_lookup_table(size_t num_entries) 159847ee3bdSShreyansh Jain { 160847ee3bdSShreyansh Jain num_entries++; 161847ee3bdSShreyansh Jain /* Allocate 1 more entry since the first entry is not used */ 162847ee3bdSShreyansh Jain qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *))); 163847ee3bdSShreyansh Jain if (!qman_fq_lookup_table) { 164847ee3bdSShreyansh Jain pr_err("QMan: Could not allocate fq lookup table\n"); 165847ee3bdSShreyansh Jain return -ENOMEM; 166847ee3bdSShreyansh Jain } 167847ee3bdSShreyansh Jain memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *)); 168847ee3bdSShreyansh Jain qman_fq_lookup_table_size = num_entries; 169847ee3bdSShreyansh Jain pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n", 170847ee3bdSShreyansh Jain qman_fq_lookup_table, 171847ee3bdSShreyansh Jain (unsigned long)qman_fq_lookup_table_size); 172847ee3bdSShreyansh Jain return 0; 173847ee3bdSShreyansh Jain } 174847ee3bdSShreyansh Jain 1754bbc759fSAkhil Goyal void qman_set_fq_lookup_table(void **fq_table) 1764bbc759fSAkhil Goyal { 1774bbc759fSAkhil Goyal qman_fq_lookup_table = fq_table; 1784bbc759fSAkhil Goyal } 1794bbc759fSAkhil Goyal 180847ee3bdSShreyansh Jain /* global structure that maintains fq object mapping */ 181847ee3bdSShreyansh Jain static DEFINE_SPINLOCK(fq_hash_table_lock); 182847ee3bdSShreyansh Jain 183847ee3bdSShreyansh Jain static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) 184847ee3bdSShreyansh Jain { 185847ee3bdSShreyansh Jain u32 i; 
186847ee3bdSShreyansh Jain 187847ee3bdSShreyansh Jain spin_lock(&fq_hash_table_lock); 188847ee3bdSShreyansh Jain /* Can't use index zero because this has special meaning 189847ee3bdSShreyansh Jain * in context_b field. 190847ee3bdSShreyansh Jain */ 191847ee3bdSShreyansh Jain for (i = 1; i < qman_fq_lookup_table_size; i++) { 192847ee3bdSShreyansh Jain if (qman_fq_lookup_table[i] == NULL) { 193847ee3bdSShreyansh Jain *entry = i; 194847ee3bdSShreyansh Jain qman_fq_lookup_table[i] = fq; 195847ee3bdSShreyansh Jain spin_unlock(&fq_hash_table_lock); 196847ee3bdSShreyansh Jain return 0; 197847ee3bdSShreyansh Jain } 198847ee3bdSShreyansh Jain } 199847ee3bdSShreyansh Jain spin_unlock(&fq_hash_table_lock); 200847ee3bdSShreyansh Jain return -ENOMEM; 201847ee3bdSShreyansh Jain } 202847ee3bdSShreyansh Jain 203847ee3bdSShreyansh Jain static void clear_fq_table_entry(u32 entry) 204847ee3bdSShreyansh Jain { 205847ee3bdSShreyansh Jain spin_lock(&fq_hash_table_lock); 206847ee3bdSShreyansh Jain DPAA_BUG_ON(entry >= qman_fq_lookup_table_size); 207847ee3bdSShreyansh Jain qman_fq_lookup_table[entry] = NULL; 208847ee3bdSShreyansh Jain spin_unlock(&fq_hash_table_lock); 209847ee3bdSShreyansh Jain } 210847ee3bdSShreyansh Jain 211847ee3bdSShreyansh Jain static inline struct qman_fq *get_fq_table_entry(u32 entry) 212847ee3bdSShreyansh Jain { 213847ee3bdSShreyansh Jain DPAA_BUG_ON(entry >= qman_fq_lookup_table_size); 214847ee3bdSShreyansh Jain return qman_fq_lookup_table[entry]; 215847ee3bdSShreyansh Jain } 216847ee3bdSShreyansh Jain #endif 217847ee3bdSShreyansh Jain 218c47ff048SShreyansh Jain static inline void cpu_to_hw_fqd(struct qm_fqd *fqd) 219c47ff048SShreyansh Jain { 220c47ff048SShreyansh Jain /* Byteswap the FQD to HW format */ 221c47ff048SShreyansh Jain fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl); 222c47ff048SShreyansh Jain fqd->dest_wq = cpu_to_be16(fqd->dest_wq); 223c47ff048SShreyansh Jain fqd->ics_cred = cpu_to_be16(fqd->ics_cred); 224c47ff048SShreyansh Jain fqd->context_b = cpu_to_be32(fqd->context_b); 225c47ff048SShreyansh Jain fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque); 226c47ff048SShreyansh Jain fqd->opaque_td = cpu_to_be16(fqd->opaque_td); 227c47ff048SShreyansh Jain } 228c47ff048SShreyansh Jain 229c47ff048SShreyansh Jain static inline void hw_fqd_to_cpu(struct qm_fqd *fqd) 230c47ff048SShreyansh Jain { 231c47ff048SShreyansh Jain /* Byteswap the FQD to CPU format */ 232c47ff048SShreyansh Jain fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl); 233c47ff048SShreyansh Jain fqd->dest_wq = be16_to_cpu(fqd->dest_wq); 234c47ff048SShreyansh Jain fqd->ics_cred = be16_to_cpu(fqd->ics_cred); 235c47ff048SShreyansh Jain fqd->context_b = be32_to_cpu(fqd->context_b); 236c47ff048SShreyansh Jain fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque); 237c47ff048SShreyansh Jain } 238c47ff048SShreyansh Jain 239c47ff048SShreyansh Jain static inline void cpu_to_hw_fd(struct qm_fd *fd) 240c47ff048SShreyansh Jain { 241c47ff048SShreyansh Jain fd->addr = cpu_to_be40(fd->addr); 242c47ff048SShreyansh Jain fd->status = cpu_to_be32(fd->status); 243c47ff048SShreyansh Jain fd->opaque = cpu_to_be32(fd->opaque); 244c47ff048SShreyansh Jain } 245c47ff048SShreyansh Jain 246c47ff048SShreyansh Jain static inline void hw_fd_to_cpu(struct qm_fd *fd) 247c47ff048SShreyansh Jain { 248c47ff048SShreyansh Jain fd->addr = be40_to_cpu(fd->addr); 249c47ff048SShreyansh Jain fd->status = be32_to_cpu(fd->status); 250c47ff048SShreyansh Jain fd->opaque = be32_to_cpu(fd->opaque); 251c47ff048SShreyansh Jain } 252c47ff048SShreyansh Jain 
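/*
 * Illustrative sketch only (not driver code): a consumer of the helpers
 * above typically copies a frame descriptor out of the read-only ring
 * memory before converting it, e.g.
 *
 *	struct qm_fd fd = entry->fd;	// 'entry' is a hypothetical DQRR entry
 *	hw_fd_to_cpu(&fd);		// addr/status/opaque now in host order
 */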
253c47ff048SShreyansh Jain /* In the case that slow- and fast-path handling are both done by qman_poll() 254c47ff048SShreyansh Jain * (ie. because there is no interrupt handling), we ought to balance how often 255c47ff048SShreyansh Jain * we do the fast-path poll versus the slow-path poll. We'll use two decrementer 256c47ff048SShreyansh Jain * sources, so we call the fast poll 'n' times before calling the slow poll 257c47ff048SShreyansh Jain * once. The idle decrementer constant is used when the last slow-poll detected 258c47ff048SShreyansh Jain * no work to do, and the busy decrementer constant when the last slow-poll had 259c47ff048SShreyansh Jain * work to do. 260c47ff048SShreyansh Jain */ 261c47ff048SShreyansh Jain #define SLOW_POLL_IDLE 1000 262c47ff048SShreyansh Jain #define SLOW_POLL_BUSY 10 263c47ff048SShreyansh Jain static u32 __poll_portal_slow(struct qman_portal *p, u32 is); 264c47ff048SShreyansh Jain static inline unsigned int __poll_portal_fast(struct qman_portal *p, 265c47ff048SShreyansh Jain unsigned int poll_limit); 266c47ff048SShreyansh Jain 267c47ff048SShreyansh Jain /* Portal interrupt handler */ 268c47ff048SShreyansh Jain static irqreturn_t portal_isr(__always_unused int irq, void *ptr) 269c47ff048SShreyansh Jain { 270c47ff048SShreyansh Jain struct qman_portal *p = ptr; 271c47ff048SShreyansh Jain /* 272c47ff048SShreyansh Jain * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because 273c47ff048SShreyansh Jain * it could race against a Query Congestion State command also given 274c47ff048SShreyansh Jain * as part of the handling of this interrupt source. We mustn't 275c47ff048SShreyansh Jain * clear it a second time in this top-level function. 276c47ff048SShreyansh Jain */ 277c47ff048SShreyansh Jain u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & 278c47ff048SShreyansh Jain ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI)); 279c47ff048SShreyansh Jain u32 is = qm_isr_status_read(&p->p) & p->irq_sources; 280c47ff048SShreyansh Jain /* DQRR-handling if it's interrupt-driven */ 281c47ff048SShreyansh Jain if (is & QM_PIRQ_DQRI) 282c47ff048SShreyansh Jain __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT); 283c47ff048SShreyansh Jain /* Handling of anything else that's interrupt-driven */ 284c47ff048SShreyansh Jain clear |= __poll_portal_slow(p, is); 285c47ff048SShreyansh Jain qm_isr_status_clear(&p->p, clear); 286c47ff048SShreyansh Jain return IRQ_HANDLED; 287c47ff048SShreyansh Jain } 288c47ff048SShreyansh Jain 289c47ff048SShreyansh Jain /* This inner version is used privately by qman_create_affine_portal(), as well 290c47ff048SShreyansh Jain * as by the exported qman_stop_dequeues(). 
291c47ff048SShreyansh Jain */ 292c47ff048SShreyansh Jain static inline void qman_stop_dequeues_ex(struct qman_portal *p) 293c47ff048SShreyansh Jain { 294c47ff048SShreyansh Jain if (!(p->dqrr_disable_ref++)) 295c47ff048SShreyansh Jain qm_dqrr_set_maxfill(&p->p, 0); 296c47ff048SShreyansh Jain } 297c47ff048SShreyansh Jain 298b292acc3SGagandeep Singh static inline void qm_mr_pvb_update(struct qm_portal *portal) 299b292acc3SGagandeep Singh { 300b292acc3SGagandeep Singh register struct qm_mr *mr = &portal->mr; 301b292acc3SGagandeep Singh const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); 302b292acc3SGagandeep Singh 303b292acc3SGagandeep Singh #ifdef RTE_LIBRTE_DPAA_HWDEBUG 304b292acc3SGagandeep Singh DPAA_ASSERT(mr->pmode == qm_mr_pvb); 305b292acc3SGagandeep Singh #endif 306b292acc3SGagandeep Singh /* when accessing 'verb', use __raw_readb() to ensure that compiler 307b292acc3SGagandeep Singh * inlining doesn't try to optimise out "excess reads". 308b292acc3SGagandeep Singh */ 309b292acc3SGagandeep Singh if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { 310b292acc3SGagandeep Singh mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); 311b292acc3SGagandeep Singh if (!mr->pi) 312b292acc3SGagandeep Singh mr->vbit ^= QM_MR_VERB_VBIT; 313b292acc3SGagandeep Singh mr->fill++; 314b292acc3SGagandeep Singh res = MR_INC(res); 315b292acc3SGagandeep Singh } 316b292acc3SGagandeep Singh dcbit_ro(res); 317b292acc3SGagandeep Singh } 318b292acc3SGagandeep Singh 319c47ff048SShreyansh Jain static int drain_mr_fqrni(struct qm_portal *p) 320c47ff048SShreyansh Jain { 321c47ff048SShreyansh Jain const struct qm_mr_entry *msg; 322c47ff048SShreyansh Jain loop: 323b292acc3SGagandeep Singh qm_mr_pvb_update(p); 324c47ff048SShreyansh Jain msg = qm_mr_current(p); 325c47ff048SShreyansh Jain if (!msg) { 326c47ff048SShreyansh Jain /* 327c47ff048SShreyansh Jain * if MR was full and h/w had other FQRNI entries to produce, we 328c47ff048SShreyansh Jain * need to allow it time to produce those entries once the 329c47ff048SShreyansh Jain * existing entries are consumed. A worst-case situation 330c47ff048SShreyansh Jain * (fully-loaded system) means h/w sequencers may have to do 3-4 331c47ff048SShreyansh Jain * other things before servicing the portal's MR pump, each of 332c47ff048SShreyansh Jain * which (if slow) may take ~50 qman cycles (which is ~200 333c47ff048SShreyansh Jain * processor cycles). So rounding up and then multiplying this 334c47ff048SShreyansh Jain * worst-case estimate by a factor of 10, just to be 335c47ff048SShreyansh Jain * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume 336c47ff048SShreyansh Jain * one entry at a time, so h/w has an opportunity to produce new 337c47ff048SShreyansh Jain * entries well before the ring has been fully consumed, so 338c47ff048SShreyansh Jain * we're being *really* paranoid here. 
339c47ff048SShreyansh Jain */ 340c47ff048SShreyansh Jain u64 now, then = mfatb(); 341c47ff048SShreyansh Jain 342c47ff048SShreyansh Jain do { 343c47ff048SShreyansh Jain now = mfatb(); 344c47ff048SShreyansh Jain } while ((then + 10000) > now); 345b292acc3SGagandeep Singh qm_mr_pvb_update(p); 346c47ff048SShreyansh Jain msg = qm_mr_current(p); 347c47ff048SShreyansh Jain if (!msg) 348c47ff048SShreyansh Jain return 0; 349c47ff048SShreyansh Jain } 350dd6f8d71SAndy Green if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { 351c47ff048SShreyansh Jain /* We aren't draining anything but FQRNIs */ 352dd6f8d71SAndy Green pr_err("Found verb 0x%x in MR\n", msg->ern.verb); 353c47ff048SShreyansh Jain return -1; 354c47ff048SShreyansh Jain } 355c47ff048SShreyansh Jain qm_mr_next(p); 356c47ff048SShreyansh Jain qm_mr_cci_consume(p, 1); 357c47ff048SShreyansh Jain goto loop; 358c47ff048SShreyansh Jain } 359c47ff048SShreyansh Jain 360c47ff048SShreyansh Jain static inline int qm_eqcr_init(struct qm_portal *portal, 361c47ff048SShreyansh Jain enum qm_eqcr_pmode pmode, 362c47ff048SShreyansh Jain unsigned int eq_stash_thresh, 363c47ff048SShreyansh Jain int eq_stash_prio) 364c47ff048SShreyansh Jain { 365c47ff048SShreyansh Jain /* This use of 'register', as well as all other occurrences, is because 366c47ff048SShreyansh Jain * it has been observed to generate much faster code with gcc than is 367c47ff048SShreyansh Jain * otherwise the case. 368c47ff048SShreyansh Jain */ 369c47ff048SShreyansh Jain register struct qm_eqcr *eqcr = &portal->eqcr; 370c47ff048SShreyansh Jain u32 cfg; 371c47ff048SShreyansh Jain u8 pi; 372c47ff048SShreyansh Jain 373c47ff048SShreyansh Jain eqcr->ring = portal->addr.ce + QM_CL_EQCR; 374c47ff048SShreyansh Jain eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); 375c47ff048SShreyansh Jain qm_cl_invalidate(EQCR_CI); 376c47ff048SShreyansh Jain pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); 377c47ff048SShreyansh Jain eqcr->cursor = eqcr->ring + pi; 378c47ff048SShreyansh Jain eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? 379c47ff048SShreyansh Jain QM_EQCR_VERB_VBIT : 0; 380c47ff048SShreyansh Jain eqcr->available = QM_EQCR_SIZE - 1 - 381c47ff048SShreyansh Jain qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); 382c47ff048SShreyansh Jain eqcr->ithresh = qm_in(EQCR_ITR); 383c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 384c47ff048SShreyansh Jain eqcr->busy = 0; 385c47ff048SShreyansh Jain eqcr->pmode = pmode; 386c47ff048SShreyansh Jain #endif 387c47ff048SShreyansh Jain cfg = (qm_in(CFG) & 0x00ffffff) | 388c47ff048SShreyansh Jain (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ 389c47ff048SShreyansh Jain (eq_stash_prio << 26) | /* QCSP_CFG: EP */ 390c47ff048SShreyansh Jain ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ 391c47ff048SShreyansh Jain qm_out(CFG, cfg); 392c47ff048SShreyansh Jain return 0; 393c47ff048SShreyansh Jain } 394c47ff048SShreyansh Jain 395c47ff048SShreyansh Jain static inline void qm_eqcr_finish(struct qm_portal *portal) 396c47ff048SShreyansh Jain { 397c47ff048SShreyansh Jain register struct qm_eqcr *eqcr = &portal->eqcr; 398c47ff048SShreyansh Jain u8 pi, ci; 399c47ff048SShreyansh Jain u32 cfg; 400c47ff048SShreyansh Jain 401c47ff048SShreyansh Jain /* 402c47ff048SShreyansh Jain * Disable EQCI stashing because the QMan only 403c47ff048SShreyansh Jain * presents the value it previously stashed to 404c47ff048SShreyansh Jain * maintain coherency. 
Setting the stash threshold
405c47ff048SShreyansh Jain 	 * to 1 then 0 ensures that QMan has resynchronized
406c47ff048SShreyansh Jain 	 * its internal copy so that the portal is clean
407c47ff048SShreyansh Jain 	 * when it is reinitialized in the future.
408c47ff048SShreyansh Jain 	 */
409c47ff048SShreyansh Jain 	cfg = (qm_in(CFG) & 0x0fffffff) |
410c47ff048SShreyansh Jain 		(1 << 28); /* QCSP_CFG: EST */
411c47ff048SShreyansh Jain 	qm_out(CFG, cfg);
412c47ff048SShreyansh Jain 	cfg &= 0x0fffffff; /* stash threshold = 0 */
413c47ff048SShreyansh Jain 	qm_out(CFG, cfg);
414c47ff048SShreyansh Jain 
415c47ff048SShreyansh Jain 	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
416c47ff048SShreyansh Jain 	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
417c47ff048SShreyansh Jain 
418c47ff048SShreyansh Jain 	/* Refresh EQCR CI cache value */
419c47ff048SShreyansh Jain 	qm_cl_invalidate(EQCR_CI);
420c47ff048SShreyansh Jain 	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
421c47ff048SShreyansh Jain 
422996672d3SFerruh Yigit #ifdef RTE_LIBRTE_DPAA_HWDEBUG
423c47ff048SShreyansh Jain 	DPAA_ASSERT(!eqcr->busy);
424996672d3SFerruh Yigit #endif
425c47ff048SShreyansh Jain 	if (pi != EQCR_PTR2IDX(eqcr->cursor))
426c47ff048SShreyansh Jain 		pr_crit("losing uncommitted EQCR entries\n");
427c47ff048SShreyansh Jain 	if (ci != eqcr->ci)
428c47ff048SShreyansh Jain 		pr_crit("missing existing EQCR completions\n");
429c47ff048SShreyansh Jain 	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
430c47ff048SShreyansh Jain 		pr_crit("EQCR destroyed unquiesced\n");
431c47ff048SShreyansh Jain }
432c47ff048SShreyansh Jain 
433c47ff048SShreyansh Jain static inline int qm_dqrr_init(struct qm_portal *portal,
434c47ff048SShreyansh Jain 			__maybe_unused const struct qm_portal_config *config,
435c47ff048SShreyansh Jain 			enum qm_dqrr_dmode dmode,
436c47ff048SShreyansh Jain 			__maybe_unused enum qm_dqrr_pmode pmode,
437c47ff048SShreyansh Jain 			enum qm_dqrr_cmode cmode, u8 max_fill)
438c47ff048SShreyansh Jain {
439c47ff048SShreyansh Jain 	register struct qm_dqrr *dqrr = &portal->dqrr;
440c47ff048SShreyansh Jain 	u32 cfg;
441c47ff048SShreyansh Jain 
442c47ff048SShreyansh Jain 	/* Make sure the DQRR will be idle when we enable */
443c47ff048SShreyansh Jain 	qm_out(DQRR_SDQCR, 0);
444c47ff048SShreyansh Jain 	qm_out(DQRR_VDQCR, 0);
445c47ff048SShreyansh Jain 	qm_out(DQRR_PDQCR, 0);
446c47ff048SShreyansh Jain 	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
447c47ff048SShreyansh Jain 	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
448c47ff048SShreyansh Jain 	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
449c47ff048SShreyansh Jain 	dqrr->cursor = dqrr->ring + dqrr->ci;
450c47ff048SShreyansh Jain 	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
451c47ff048SShreyansh Jain 	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? 
452c47ff048SShreyansh Jain QM_DQRR_VERB_VBIT : 0; 453c47ff048SShreyansh Jain dqrr->ithresh = qm_in(DQRR_ITR); 454c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 455c47ff048SShreyansh Jain dqrr->dmode = dmode; 456c47ff048SShreyansh Jain dqrr->pmode = pmode; 457c47ff048SShreyansh Jain dqrr->cmode = cmode; 458c47ff048SShreyansh Jain #endif 459c47ff048SShreyansh Jain /* Invalidate every ring entry before beginning */ 460c47ff048SShreyansh Jain for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++) 461c47ff048SShreyansh Jain dccivac(qm_cl(dqrr->ring, cfg)); 462c47ff048SShreyansh Jain cfg = (qm_in(CFG) & 0xff000f00) | 463c47ff048SShreyansh Jain ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ 464c47ff048SShreyansh Jain ((dmode & 1) << 18) | /* DP */ 465c47ff048SShreyansh Jain ((cmode & 3) << 16) | /* DCM */ 466c47ff048SShreyansh Jain 0xa0 | /* RE+SE */ 467c47ff048SShreyansh Jain (0 ? 0x40 : 0) | /* Ignore RP */ 468c47ff048SShreyansh Jain (0 ? 0x10 : 0); /* Ignore SP */ 469c47ff048SShreyansh Jain qm_out(CFG, cfg); 470c47ff048SShreyansh Jain qm_dqrr_set_maxfill(portal, max_fill); 471c47ff048SShreyansh Jain return 0; 472c47ff048SShreyansh Jain } 473c47ff048SShreyansh Jain 474c47ff048SShreyansh Jain static inline void qm_dqrr_finish(struct qm_portal *portal) 475c47ff048SShreyansh Jain { 476c47ff048SShreyansh Jain __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; 477c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 478c47ff048SShreyansh Jain if ((dqrr->cmode != qm_dqrr_cdc) && 479c47ff048SShreyansh Jain (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) 480c47ff048SShreyansh Jain pr_crit("Ignoring completed DQRR entries\n"); 481c47ff048SShreyansh Jain #endif 482c47ff048SShreyansh Jain } 483c47ff048SShreyansh Jain 484c47ff048SShreyansh Jain static inline int qm_mr_init(struct qm_portal *portal, 485c47ff048SShreyansh Jain __maybe_unused enum qm_mr_pmode pmode, 486c47ff048SShreyansh Jain enum qm_mr_cmode cmode) 487c47ff048SShreyansh Jain { 488c47ff048SShreyansh Jain register struct qm_mr *mr = &portal->mr; 489c47ff048SShreyansh Jain u32 cfg; 490c47ff048SShreyansh Jain 491c47ff048SShreyansh Jain mr->ring = portal->addr.ce + QM_CL_MR; 492c47ff048SShreyansh Jain mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); 493c47ff048SShreyansh Jain mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); 494c47ff048SShreyansh Jain mr->cursor = mr->ring + mr->ci; 495c47ff048SShreyansh Jain mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); 496c47ff048SShreyansh Jain mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? 
QM_MR_VERB_VBIT : 0;
497c47ff048SShreyansh Jain 	mr->ithresh = qm_in(MR_ITR);
498c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG
499c47ff048SShreyansh Jain 	mr->pmode = pmode;
500c47ff048SShreyansh Jain 	mr->cmode = cmode;
501c47ff048SShreyansh Jain #endif
502c47ff048SShreyansh Jain 	cfg = (qm_in(CFG) & 0xfffff0ff) |
503c47ff048SShreyansh Jain 		((cmode & 1) << 8);	/* QCSP_CFG:MM */
504c47ff048SShreyansh Jain 	qm_out(CFG, cfg);
505c47ff048SShreyansh Jain 	return 0;
506c47ff048SShreyansh Jain }
507c47ff048SShreyansh Jain 
508b9c94167SNipun Gupta struct qman_portal *
509b9c94167SNipun Gupta qman_init_portal(struct qman_portal *portal,
510c47ff048SShreyansh Jain 		 const struct qm_portal_config *c,
511c47ff048SShreyansh Jain 		 const struct qman_cgrs *cgrs)
512c47ff048SShreyansh Jain {
513c47ff048SShreyansh Jain 	struct qm_portal *p;
514c47ff048SShreyansh Jain 	char buf[16];
515c47ff048SShreyansh Jain 	int ret;
516c47ff048SShreyansh Jain 	u32 isdr;
517c47ff048SShreyansh Jain 
518c47ff048SShreyansh Jain 	p = &portal->p;
519c47ff048SShreyansh Jain 
520b9c94167SNipun Gupta 	if (!c)
521b9c94167SNipun Gupta 		c = portal->config;
522b9c94167SNipun Gupta 
523679e7278SNipun Gupta 	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
524679e7278SNipun Gupta 		portal->use_eqcr_ci_stashing = 3;
525679e7278SNipun Gupta 	else
526679e7278SNipun Gupta 		portal->use_eqcr_ci_stashing =
527679e7278SNipun Gupta 					((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
528679e7278SNipun Gupta 
529c47ff048SShreyansh Jain 	/*
530c47ff048SShreyansh Jain 	 * prep the low-level portal struct with the mapped addresses from the
531c47ff048SShreyansh Jain 	 * config, everything that follows depends on it and "config" is more
532c47ff048SShreyansh Jain 	 * for (de)reference
533c47ff048SShreyansh Jain 	 */
534c47ff048SShreyansh Jain 	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
535c47ff048SShreyansh Jain 	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
536c47ff048SShreyansh Jain 	/*
537c47ff048SShreyansh Jain 	 * If CI-stashing is used, the current defaults use a threshold of 3,
538c47ff048SShreyansh Jain 	 * and stash with higher-than-DQRR priority. 
539c47ff048SShreyansh Jain */ 540c47ff048SShreyansh Jain if (qm_eqcr_init(p, qm_eqcr_pvb, 541679e7278SNipun Gupta portal->use_eqcr_ci_stashing, 1)) { 542c47ff048SShreyansh Jain pr_err("Qman EQCR initialisation failed\n"); 543c47ff048SShreyansh Jain goto fail_eqcr; 544c47ff048SShreyansh Jain } 545c47ff048SShreyansh Jain if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb, 546c47ff048SShreyansh Jain qm_dqrr_cdc, DQRR_MAXFILL)) { 547c47ff048SShreyansh Jain pr_err("Qman DQRR initialisation failed\n"); 548c47ff048SShreyansh Jain goto fail_dqrr; 549c47ff048SShreyansh Jain } 550c47ff048SShreyansh Jain if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) { 551c47ff048SShreyansh Jain pr_err("Qman MR initialisation failed\n"); 552c47ff048SShreyansh Jain goto fail_mr; 553c47ff048SShreyansh Jain } 554c47ff048SShreyansh Jain if (qm_mc_init(p)) { 555c47ff048SShreyansh Jain pr_err("Qman MC initialisation failed\n"); 556c47ff048SShreyansh Jain goto fail_mc; 557c47ff048SShreyansh Jain } 558c47ff048SShreyansh Jain 559c47ff048SShreyansh Jain /* static interrupt-gating controls */ 560c47ff048SShreyansh Jain qm_dqrr_set_ithresh(p, 0); 561c47ff048SShreyansh Jain qm_mr_set_ithresh(p, 0); 562c47ff048SShreyansh Jain qm_isr_set_iperiod(p, 0); 563c47ff048SShreyansh Jain portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); 564c47ff048SShreyansh Jain if (!portal->cgrs) 565c47ff048SShreyansh Jain goto fail_cgrs; 566c47ff048SShreyansh Jain /* initial snapshot is no-depletion */ 567c47ff048SShreyansh Jain qman_cgrs_init(&portal->cgrs[1]); 568c47ff048SShreyansh Jain if (cgrs) 569c47ff048SShreyansh Jain portal->cgrs[0] = *cgrs; 570c47ff048SShreyansh Jain else 571c47ff048SShreyansh Jain /* if the given mask is NULL, assume all CGRs can be seen */ 572c47ff048SShreyansh Jain qman_cgrs_fill(&portal->cgrs[0]); 573c47ff048SShreyansh Jain INIT_LIST_HEAD(&portal->cgr_cbs); 574c47ff048SShreyansh Jain spin_lock_init(&portal->cgr_lock); 575c47ff048SShreyansh Jain portal->bits = 0; 576c47ff048SShreyansh Jain portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | 577c47ff048SShreyansh Jain QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | 578c47ff048SShreyansh Jain QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; 579c47ff048SShreyansh Jain portal->dqrr_disable_ref = 0; 580c47ff048SShreyansh Jain portal->cb_dc_ern = NULL; 581c47ff048SShreyansh Jain sprintf(buf, "qportal-%d", c->channel); 582c47ff048SShreyansh Jain dpa_rbtree_init(&portal->retire_table); 583c47ff048SShreyansh Jain isdr = 0xffffffff; 584c47ff048SShreyansh Jain qm_isr_disable_write(p, isdr); 585c47ff048SShreyansh Jain portal->irq_sources = 0; 586c47ff048SShreyansh Jain qm_isr_enable_write(p, portal->irq_sources); 587c47ff048SShreyansh Jain qm_isr_status_clear(p, 0xffffffff); 588c47ff048SShreyansh Jain snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); 589c47ff048SShreyansh Jain if (request_irq(c->irq, portal_isr, 0, portal->irqname, 590c47ff048SShreyansh Jain portal)) { 591c47ff048SShreyansh Jain pr_err("request_irq() failed\n"); 592c47ff048SShreyansh Jain goto fail_irq; 593c47ff048SShreyansh Jain } 594c47ff048SShreyansh Jain 595c47ff048SShreyansh Jain /* Need EQCR to be empty before continuing */ 596c47ff048SShreyansh Jain isdr &= ~QM_PIRQ_EQCI; 597c47ff048SShreyansh Jain qm_isr_disable_write(p, isdr); 598c47ff048SShreyansh Jain ret = qm_eqcr_get_fill(p); 599c47ff048SShreyansh Jain if (ret) { 600c47ff048SShreyansh Jain pr_err("Qman EQCR unclean\n"); 601c47ff048SShreyansh Jain goto fail_eqcr_empty; 602c47ff048SShreyansh Jain } 603c47ff048SShreyansh Jain 
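	/* EQCR is now known-empty; next, unmask DQRR and MR status so any
	 * stale entries in those rings become visible and can be drained
	 * below before the portal is declared usable.
	 */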
isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
604c47ff048SShreyansh Jain 	qm_isr_disable_write(p, isdr);
605c47ff048SShreyansh Jain 	if (qm_dqrr_current(p)) {
606c47ff048SShreyansh Jain 		pr_err("Qman DQRR unclean\n");
607c47ff048SShreyansh Jain 		qm_dqrr_cdc_consume_n(p, 0xffff);
608c47ff048SShreyansh Jain 	}
609c47ff048SShreyansh Jain 	if (qm_mr_current(p)) {
610c47ff048SShreyansh Jain 		/* special handling, drain just in case it's a few FQRNIs */
611c47ff048SShreyansh Jain 		if (drain_mr_fqrni(p))
612c47ff048SShreyansh Jain 			goto fail_dqrr_mr_empty;
613c47ff048SShreyansh Jain 	}
614c47ff048SShreyansh Jain 	/* Success */
615c47ff048SShreyansh Jain 	portal->config = c;
616c47ff048SShreyansh Jain 	qm_isr_disable_write(p, 0);
617c47ff048SShreyansh Jain 	qm_isr_uninhibit(p);
618c47ff048SShreyansh Jain 	/* Write a sane SDQCR */
619c47ff048SShreyansh Jain 	qm_dqrr_sdqcr_set(p, portal->sdqcr);
620c47ff048SShreyansh Jain 	return portal;
621c47ff048SShreyansh Jain fail_dqrr_mr_empty:
622c47ff048SShreyansh Jain fail_eqcr_empty:
623c47ff048SShreyansh Jain 	free_irq(c->irq, portal);
624c47ff048SShreyansh Jain fail_irq:
625c47ff048SShreyansh Jain 	kfree(portal->cgrs);
626c47ff048SShreyansh Jain 	spin_lock_destroy(&portal->cgr_lock);
627c47ff048SShreyansh Jain fail_cgrs:
628c47ff048SShreyansh Jain 	qm_mc_finish(p);
629c47ff048SShreyansh Jain fail_mc:
630c47ff048SShreyansh Jain 	qm_mr_finish(p);
631c47ff048SShreyansh Jain fail_mr:
632c47ff048SShreyansh Jain 	qm_dqrr_finish(p);
633c47ff048SShreyansh Jain fail_dqrr:
634c47ff048SShreyansh Jain 	qm_eqcr_finish(p);
635c47ff048SShreyansh Jain fail_eqcr:
636c47ff048SShreyansh Jain 	return NULL;
637c47ff048SShreyansh Jain }
638c47ff048SShreyansh Jain 
6399d32ef0fSHemant Agrawal #define MAX_GLOBAL_PORTALS 8
6409d32ef0fSHemant Agrawal static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
6417d9c4dfaSHemant Agrawal static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
6429d32ef0fSHemant Agrawal 
643b9c94167SNipun Gupta struct qman_portal *
644b9c94167SNipun Gupta qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
6459d32ef0fSHemant Agrawal {
6469d32ef0fSHemant Agrawal 	unsigned int i;
6479d32ef0fSHemant Agrawal 
6489d32ef0fSHemant Agrawal 	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
649b9c94167SNipun Gupta 		if (rte_atomic16_test_and_set(&global_portals_used[i])) {
650b9c94167SNipun Gupta 			global_portals[i].config = q_pcfg;
6519d32ef0fSHemant Agrawal 			return &global_portals[i];
6529d32ef0fSHemant Agrawal 		}
653b9c94167SNipun Gupta 	}
6549d32ef0fSHemant Agrawal 	pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
6559d32ef0fSHemant Agrawal 
6569d32ef0fSHemant Agrawal 	return NULL;
6579d32ef0fSHemant Agrawal }
6589d32ef0fSHemant Agrawal 
659b9c94167SNipun Gupta int
6609d32ef0fSHemant Agrawal qman_free_global_portal(struct qman_portal *portal)
6619d32ef0fSHemant Agrawal {
6629d32ef0fSHemant Agrawal 	unsigned int i;
6639d32ef0fSHemant Agrawal 
6649d32ef0fSHemant Agrawal 	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
6659d32ef0fSHemant Agrawal 		if (&global_portals[i] == portal) {
66629d5d681SNipun Gupta 			rte_atomic16_clear(&global_portals_used[i]);
6679d32ef0fSHemant Agrawal 			return 0;
6689d32ef0fSHemant Agrawal 		}
6699d32ef0fSHemant Agrawal 	}
6709d32ef0fSHemant Agrawal 	return -1;
6719d32ef0fSHemant Agrawal }
6729d32ef0fSHemant Agrawal 
673b1b5d6c9SNipun Gupta void
674b1b5d6c9SNipun Gupta qman_portal_uninhibit_isr(struct qman_portal *portal)
675b1b5d6c9SNipun Gupta {
676b1b5d6c9SNipun Gupta 	qm_isr_uninhibit(&portal->p);
677b1b5d6c9SNipun Gupta }
678b1b5d6c9SNipun Gupta 
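/*
 * Illustrative lifecycle sketch (error handling omitted): users of a
 * dedicated channel take a slot from the table above and initialise it,
 * whereas per-lcore users go through qman_create_affine_portal() below.
 *
 *	struct qman_portal *qp = qman_alloc_global_portal(q_pcfg);
 *	if (qp)
 *		qp = qman_init_portal(qp, q_pcfg, NULL);
 *	...
 *	qman_destroy_affine_portal(qp);
 */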
679c47ff048SShreyansh Jain struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
680b9c94167SNipun Gupta 					      const struct qman_cgrs *cgrs)
681c47ff048SShreyansh Jain {
682c47ff048SShreyansh Jain 	struct qman_portal *res;
683b9c94167SNipun Gupta 	struct qman_portal *portal = get_affine_portal();
6849d32ef0fSHemant Agrawal 
685c47ff048SShreyansh Jain 	/* A criterion for calling this function (from qman_driver.c) is that
686c47ff048SShreyansh Jain 	 * we're already affine to the cpu and won't schedule onto another cpu.
687c47ff048SShreyansh Jain 	 */
688b9c94167SNipun Gupta 	res = qman_init_portal(portal, c, cgrs);
689c47ff048SShreyansh Jain 	if (res) {
690c47ff048SShreyansh Jain 		spin_lock(&affine_mask_lock);
691c47ff048SShreyansh Jain 		CPU_SET(c->cpu, &affine_mask);
692c47ff048SShreyansh Jain 		affine_channels[c->cpu] =
693c47ff048SShreyansh Jain 			c->channel;
694c47ff048SShreyansh Jain 		spin_unlock(&affine_mask_lock);
695c47ff048SShreyansh Jain 	}
696c47ff048SShreyansh Jain 	return res;
697c47ff048SShreyansh Jain }
698c47ff048SShreyansh Jain 
699c47ff048SShreyansh Jain static inline
700c47ff048SShreyansh Jain void qman_destroy_portal(struct qman_portal *qm)
701c47ff048SShreyansh Jain {
702c47ff048SShreyansh Jain 	const struct qm_portal_config *pcfg;
703c47ff048SShreyansh Jain 
704c47ff048SShreyansh Jain 	/* Stop dequeues on the portal */
705c47ff048SShreyansh Jain 	qm_dqrr_sdqcr_set(&qm->p, 0);
706c47ff048SShreyansh Jain 
707c47ff048SShreyansh Jain 	/*
708c47ff048SShreyansh Jain 	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
709c47ff048SShreyansh Jain 	 * something related to QM_PIRQ_EQCI, this may need fixing.
710c47ff048SShreyansh Jain 	 * Also, due to the prefetching model used for CI updates in the enqueue
711c47ff048SShreyansh Jain 	 * path, this update will only invalidate the CI cacheline *after*
712c47ff048SShreyansh Jain 	 * working on it, so we need to call this twice to ensure a full update
713c47ff048SShreyansh Jain 	 * irrespective of where the enqueue processing was at when the teardown
714c47ff048SShreyansh Jain 	 * began. 
715c47ff048SShreyansh Jain */ 716c47ff048SShreyansh Jain qm_eqcr_cce_update(&qm->p); 717c47ff048SShreyansh Jain qm_eqcr_cce_update(&qm->p); 718c47ff048SShreyansh Jain pcfg = qm->config; 719c47ff048SShreyansh Jain 720c47ff048SShreyansh Jain free_irq(pcfg->irq, qm); 721c47ff048SShreyansh Jain 722c47ff048SShreyansh Jain kfree(qm->cgrs); 723c47ff048SShreyansh Jain qm_mc_finish(&qm->p); 724c47ff048SShreyansh Jain qm_mr_finish(&qm->p); 725c47ff048SShreyansh Jain qm_dqrr_finish(&qm->p); 726c47ff048SShreyansh Jain qm_eqcr_finish(&qm->p); 727c47ff048SShreyansh Jain 728c47ff048SShreyansh Jain qm->config = NULL; 729c47ff048SShreyansh Jain 730c47ff048SShreyansh Jain spin_lock_destroy(&qm->cgr_lock); 731c47ff048SShreyansh Jain } 732c47ff048SShreyansh Jain 7339d32ef0fSHemant Agrawal const struct qm_portal_config * 7349d32ef0fSHemant Agrawal qman_destroy_affine_portal(struct qman_portal *qp) 735c47ff048SShreyansh Jain { 736c47ff048SShreyansh Jain /* We don't want to redirect if we're a slave, use "raw" */ 7379d32ef0fSHemant Agrawal struct qman_portal *qm; 738c47ff048SShreyansh Jain const struct qm_portal_config *pcfg; 739c47ff048SShreyansh Jain int cpu; 740c47ff048SShreyansh Jain 7419d32ef0fSHemant Agrawal if (qp == NULL) 7429d32ef0fSHemant Agrawal qm = get_affine_portal(); 7439d32ef0fSHemant Agrawal else 7449d32ef0fSHemant Agrawal qm = qp; 745c47ff048SShreyansh Jain pcfg = qm->config; 746c47ff048SShreyansh Jain cpu = pcfg->cpu; 747c47ff048SShreyansh Jain 748c47ff048SShreyansh Jain qman_destroy_portal(qm); 749c47ff048SShreyansh Jain 750c47ff048SShreyansh Jain spin_lock(&affine_mask_lock); 751c47ff048SShreyansh Jain CPU_CLR(cpu, &affine_mask); 752c47ff048SShreyansh Jain spin_unlock(&affine_mask_lock); 7539d32ef0fSHemant Agrawal 7549d32ef0fSHemant Agrawal qman_free_global_portal(qm); 7559d32ef0fSHemant Agrawal 756c47ff048SShreyansh Jain return pcfg; 757c47ff048SShreyansh Jain } 758c47ff048SShreyansh Jain 759c47ff048SShreyansh Jain int qman_get_portal_index(void) 760c47ff048SShreyansh Jain { 761c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 762c47ff048SShreyansh Jain return p->config->index; 763c47ff048SShreyansh Jain } 764c47ff048SShreyansh Jain 765c47ff048SShreyansh Jain /* Inline helper to reduce nesting in __poll_portal_slow() */ 766c47ff048SShreyansh Jain static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, 767c47ff048SShreyansh Jain const struct qm_mr_entry *msg, u8 verb) 768c47ff048SShreyansh Jain { 769c47ff048SShreyansh Jain FQLOCK(fq); 770c47ff048SShreyansh Jain switch (verb) { 771c47ff048SShreyansh Jain case QM_MR_VERB_FQRL: 772c47ff048SShreyansh Jain DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); 773c47ff048SShreyansh Jain fq_clear(fq, QMAN_FQ_STATE_ORL); 774c47ff048SShreyansh Jain table_del_fq(p, fq); 775c47ff048SShreyansh Jain break; 776c47ff048SShreyansh Jain case QM_MR_VERB_FQRN: 777c47ff048SShreyansh Jain DPAA_ASSERT((fq->state == qman_fq_state_parked) || 778c47ff048SShreyansh Jain (fq->state == qman_fq_state_sched)); 779c47ff048SShreyansh Jain DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); 780c47ff048SShreyansh Jain fq_clear(fq, QMAN_FQ_STATE_CHANGING); 781c47ff048SShreyansh Jain if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) 782c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_NE); 783c47ff048SShreyansh Jain if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) 784c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_ORL); 785c47ff048SShreyansh Jain else 786c47ff048SShreyansh Jain table_del_fq(p, fq); 787c47ff048SShreyansh Jain fq->state = qman_fq_state_retired; 
788c47ff048SShreyansh Jain break; 789c47ff048SShreyansh Jain case QM_MR_VERB_FQPN: 790c47ff048SShreyansh Jain DPAA_ASSERT(fq->state == qman_fq_state_sched); 791c47ff048SShreyansh Jain DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); 792c47ff048SShreyansh Jain fq->state = qman_fq_state_parked; 793c47ff048SShreyansh Jain } 794c47ff048SShreyansh Jain FQUNLOCK(fq); 795c47ff048SShreyansh Jain } 796c47ff048SShreyansh Jain 7979124e65dSGagandeep Singh void 7989124e65dSGagandeep Singh qman_ern_register_cb(qman_cb_free_mbuf cb) 7999124e65dSGagandeep Singh { 8009124e65dSGagandeep Singh qman_free_mbuf_cb = cb; 8019124e65dSGagandeep Singh } 8029124e65dSGagandeep Singh 8039124e65dSGagandeep Singh 8049124e65dSGagandeep Singh void 8059124e65dSGagandeep Singh qman_ern_poll_free(void) 8069124e65dSGagandeep Singh { 8079124e65dSGagandeep Singh struct qman_portal *p = get_affine_portal(); 8089124e65dSGagandeep Singh u8 verb, num = 0; 8099124e65dSGagandeep Singh const struct qm_mr_entry *msg; 8109124e65dSGagandeep Singh const struct qm_fd *fd; 8119124e65dSGagandeep Singh struct qm_mr_entry swapped_msg; 8129124e65dSGagandeep Singh 8139124e65dSGagandeep Singh qm_mr_pvb_update(&p->p); 8149124e65dSGagandeep Singh msg = qm_mr_current(&p->p); 8159124e65dSGagandeep Singh 8169124e65dSGagandeep Singh while (msg != NULL) { 8179124e65dSGagandeep Singh swapped_msg = *msg; 8189124e65dSGagandeep Singh hw_fd_to_cpu(&swapped_msg.ern.fd); 8199124e65dSGagandeep Singh verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK; 8209124e65dSGagandeep Singh fd = &swapped_msg.ern.fd; 8219124e65dSGagandeep Singh 8229124e65dSGagandeep Singh if (unlikely(verb & 0x20)) { 8230fcdbde0SHemant Agrawal pr_warn("HW ERN notification, Nothing to do\n"); 8249124e65dSGagandeep Singh } else { 8259124e65dSGagandeep Singh if ((fd->bpid & 0xff) != 0xff) 8269124e65dSGagandeep Singh qman_free_mbuf_cb(fd); 8279124e65dSGagandeep Singh } 8289124e65dSGagandeep Singh 8299124e65dSGagandeep Singh num++; 8309124e65dSGagandeep Singh qm_mr_next(&p->p); 8319124e65dSGagandeep Singh qm_mr_pvb_update(&p->p); 8329124e65dSGagandeep Singh msg = qm_mr_current(&p->p); 8339124e65dSGagandeep Singh } 8349124e65dSGagandeep Singh 8359124e65dSGagandeep Singh qm_mr_cci_consume(&p->p, num); 8369124e65dSGagandeep Singh } 8379124e65dSGagandeep Singh 838c47ff048SShreyansh Jain static u32 __poll_portal_slow(struct qman_portal *p, u32 is) 839c47ff048SShreyansh Jain { 840c47ff048SShreyansh Jain const struct qm_mr_entry *msg; 841c47ff048SShreyansh Jain struct qm_mr_entry swapped_msg; 842c47ff048SShreyansh Jain 843c47ff048SShreyansh Jain if (is & QM_PIRQ_CSCI) { 844c47ff048SShreyansh Jain struct qman_cgrs rr, c; 845c47ff048SShreyansh Jain struct qm_mc_result *mcr; 846c47ff048SShreyansh Jain struct qman_cgr *cgr; 847c47ff048SShreyansh Jain 848c47ff048SShreyansh Jain spin_lock(&p->cgr_lock); 849c47ff048SShreyansh Jain /* 850c47ff048SShreyansh Jain * The CSCI bit must be cleared _before_ issuing the 851c47ff048SShreyansh Jain * Query Congestion State command, to ensure that a long 852c47ff048SShreyansh Jain * CGR State Change callback cannot miss an intervening 853c47ff048SShreyansh Jain * state change. 
854c47ff048SShreyansh Jain 	 */
855c47ff048SShreyansh Jain 		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
856c47ff048SShreyansh Jain 		qm_mc_start(&p->p);
857c47ff048SShreyansh Jain 		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
858c47ff048SShreyansh Jain 		while (!(mcr = qm_mc_result(&p->p)))
859c47ff048SShreyansh Jain 			cpu_relax();
860c47ff048SShreyansh Jain 		/* mask out the ones I'm not interested in */
861c47ff048SShreyansh Jain 		qman_cgrs_and(&rr, (const struct qman_cgrs *)
862c47ff048SShreyansh Jain 			      &mcr->querycongestion.state, &p->cgrs[0]);
863c47ff048SShreyansh Jain 		/* check previous snapshot for delta, enter/exit congestion */
864c47ff048SShreyansh Jain 		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
865c47ff048SShreyansh Jain 		/* update snapshot */
866c47ff048SShreyansh Jain 		qman_cgrs_cp(&p->cgrs[1], &rr);
867c47ff048SShreyansh Jain 		/* Invoke callback */
868c47ff048SShreyansh Jain 		list_for_each_entry(cgr, &p->cgr_cbs, node)
869c47ff048SShreyansh Jain 			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
870c47ff048SShreyansh Jain 				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
871c47ff048SShreyansh Jain 		spin_unlock(&p->cgr_lock);
872c47ff048SShreyansh Jain 	}
873c47ff048SShreyansh Jain 
874c47ff048SShreyansh Jain 	if (is & QM_PIRQ_EQRI) {
875c47ff048SShreyansh Jain 		qm_eqcr_cce_update(&p->p);
876c47ff048SShreyansh Jain 		qm_eqcr_set_ithresh(&p->p, 0);
877c47ff048SShreyansh Jain 		wake_up(&affine_queue);
878c47ff048SShreyansh Jain 	}
879c47ff048SShreyansh Jain 
880c47ff048SShreyansh Jain 	if (is & QM_PIRQ_MRI) {
881c47ff048SShreyansh Jain 		struct qman_fq *fq;
882c47ff048SShreyansh Jain 		u8 verb, num = 0;
883c47ff048SShreyansh Jain mr_loop:
884c47ff048SShreyansh Jain 		qm_mr_pvb_update(&p->p);
885c47ff048SShreyansh Jain 		msg = qm_mr_current(&p->p);
886c47ff048SShreyansh Jain 		if (!msg)
887c47ff048SShreyansh Jain 			goto mr_done;
888c47ff048SShreyansh Jain 		swapped_msg = *msg;
889c47ff048SShreyansh Jain 		hw_fd_to_cpu(&swapped_msg.ern.fd);
890dd6f8d71SAndy Green 		verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
891c47ff048SShreyansh Jain 		/* The message is a software ERN iff the 0x20 bit is clear */
892c47ff048SShreyansh Jain 		if (verb & 0x20) {
893c47ff048SShreyansh Jain 			switch (verb) {
894c47ff048SShreyansh Jain 			case QM_MR_VERB_FQRNI:
895c47ff048SShreyansh Jain 				/* nada, we drop FQRNIs on the floor */
896c47ff048SShreyansh Jain 				break;
897c47ff048SShreyansh Jain 			case QM_MR_VERB_FQRN:
898c47ff048SShreyansh Jain 			case QM_MR_VERB_FQRL:
899c47ff048SShreyansh Jain 				/* Lookup in the retirement table */
900c47ff048SShreyansh Jain 				fq = table_find_fq(p,
901c47ff048SShreyansh Jain 						   be32_to_cpu(msg->fq.fqid));
9023bcd5b31SGagandeep Singh 				DPAA_BUG_ON(fq == NULL);
903c47ff048SShreyansh Jain 				fq_state_change(p, fq, &swapped_msg, verb);
904c47ff048SShreyansh Jain 				if (fq->cb.fqs)
905c47ff048SShreyansh Jain 					fq->cb.fqs(p, fq, &swapped_msg);
906c47ff048SShreyansh Jain 				break;
907c47ff048SShreyansh Jain 			case QM_MR_VERB_FQPN:
908c47ff048SShreyansh Jain 				/* Parked */
909847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
910fa362ceaSNipun Gupta 				fq = get_fq_table_entry(msg->fq.contextB);
911847ee3bdSShreyansh Jain #else
912fa362ceaSNipun Gupta 				fq = (void *)(uintptr_t)msg->fq.contextB;
913847ee3bdSShreyansh Jain #endif
9143bcd5b31SGagandeep Singh 				DPAA_BUG_ON(fq == NULL);
915c47ff048SShreyansh Jain 				fq_state_change(p, fq, msg, verb);
916c47ff048SShreyansh Jain 				if (fq->cb.fqs)
917c47ff048SShreyansh Jain 					fq->cb.fqs(p, fq, &swapped_msg);
918c47ff048SShreyansh Jain 				break;
919c47ff048SShreyansh Jain 			case QM_MR_VERB_DC_ERN:
920c47ff048SShreyansh Jain 				/* DCP ERN */ 
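				/* Dispatch order: a portal-specific handler
				 * takes precedence over the global cb_dc_ern;
				 * if neither is registered the ERN is dropped
				 * (with a one-time warning below).
				 */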
921c47ff048SShreyansh Jain 				if (p->cb_dc_ern)
922c47ff048SShreyansh Jain 					p->cb_dc_ern(p, msg);
923c47ff048SShreyansh Jain 				else if (cb_dc_ern)
924c47ff048SShreyansh Jain 					cb_dc_ern(p, msg);
925c47ff048SShreyansh Jain 				else {
926c47ff048SShreyansh Jain 					static int warn_once;
927c47ff048SShreyansh Jain 
928c47ff048SShreyansh Jain 					if (!warn_once) {
929c47ff048SShreyansh Jain 						pr_crit("Leaking DCP ERNs!\n");
930c47ff048SShreyansh Jain 						warn_once = 1;
931c47ff048SShreyansh Jain 					}
932c47ff048SShreyansh Jain 				}
933c47ff048SShreyansh Jain 				break;
934c47ff048SShreyansh Jain 			default:
935c47ff048SShreyansh Jain 				pr_crit("Invalid MR verb 0x%02x\n", verb);
936c47ff048SShreyansh Jain 			}
937c47ff048SShreyansh Jain 		} else {
938c47ff048SShreyansh Jain 			/* It's a software ERN */
939847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
940847ee3bdSShreyansh Jain 			fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
941847ee3bdSShreyansh Jain #else
942c47ff048SShreyansh Jain 			fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
943847ee3bdSShreyansh Jain #endif
944c47ff048SShreyansh Jain 			fq->cb.ern(p, fq, &swapped_msg);
945c47ff048SShreyansh Jain 		}
946c47ff048SShreyansh Jain 		num++;
947c47ff048SShreyansh Jain 		qm_mr_next(&p->p);
948c47ff048SShreyansh Jain 		goto mr_loop;
949c47ff048SShreyansh Jain mr_done:
950c47ff048SShreyansh Jain 		qm_mr_cci_consume(&p->p, num);
951c47ff048SShreyansh Jain 	}
952c47ff048SShreyansh Jain 	/*
953c47ff048SShreyansh Jain 	 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
954c47ff048SShreyansh Jain 	 * processing. If that interrupt source has meanwhile been re-asserted,
955c47ff048SShreyansh Jain 	 * we mustn't clear it here (or in the top-level interrupt handler).
956c47ff048SShreyansh Jain 	 */
957c47ff048SShreyansh Jain 	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
958c47ff048SShreyansh Jain }
959c47ff048SShreyansh Jain 
960c47ff048SShreyansh Jain /*
961c47ff048SShreyansh Jain  * remove some slowish-path stuff from the "fast path" and make sure it isn't
962c47ff048SShreyansh Jain  * inlined.
963c47ff048SShreyansh Jain  */
964c47ff048SShreyansh Jain static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
965c47ff048SShreyansh Jain {
966c47ff048SShreyansh Jain 	p->vdqcr_owned = NULL;
967c47ff048SShreyansh Jain 	FQLOCK(fq);
968c47ff048SShreyansh Jain 	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
969c47ff048SShreyansh Jain 	FQUNLOCK(fq);
970c47ff048SShreyansh Jain 	wake_up(&affine_queue);
971c47ff048SShreyansh Jain }
972c47ff048SShreyansh Jain 
973c47ff048SShreyansh Jain /*
974c47ff048SShreyansh Jain  * The only states that would conflict with other things if they ran at the
975c47ff048SShreyansh Jain  * same time on the same cpu are:
976c47ff048SShreyansh Jain  *
977c47ff048SShreyansh Jain  * (i) setting/clearing vdqcr_owned, and
978c47ff048SShreyansh Jain  * (ii) clearing the NE (Not Empty) flag.
979c47ff048SShreyansh Jain  *
980c47ff048SShreyansh Jain  * Both are safe, because:
981c47ff048SShreyansh Jain  *
982c47ff048SShreyansh Jain  * (i) this clearing can only occur after qman_set_vdq() has set the
983c47ff048SShreyansh Jain  *     vdqcr_owned field (which it does before setting VDQCR), and
984c47ff048SShreyansh Jain  *     qman_volatile_dequeue() blocks interrupts and preemption while this is
985c47ff048SShreyansh Jain  *     done so that we can't interfere.
986c47ff048SShreyansh Jain  * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
987c47ff048SShreyansh Jain  *     with (i) that API prevents us from interfering until it's safe. 
988c47ff048SShreyansh Jain  *
989c47ff048SShreyansh Jain  * The good thing is that qman_set_vdq() and qman_retire_fq() run far
990c47ff048SShreyansh Jain  * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
991c47ff048SShreyansh Jain  * advantage comes from this function not having to "lock" anything at all.
992c47ff048SShreyansh Jain  *
993c47ff048SShreyansh Jain  * Note also that the callbacks are invoked at points which are safe against the
994c47ff048SShreyansh Jain  * above potential conflicts, but that this function itself is not re-entrant
995c47ff048SShreyansh Jain  * (this is because the function tracks one end of each FIFO in the portal and
996c47ff048SShreyansh Jain  * we do *not* want to lock that). So the consequence is that it is safe for
997c47ff048SShreyansh Jain  * user callbacks to call into any QMan API.
998c47ff048SShreyansh Jain  */
999c47ff048SShreyansh Jain static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1000c47ff048SShreyansh Jain 					      unsigned int poll_limit)
1001c47ff048SShreyansh Jain {
1002c47ff048SShreyansh Jain 	const struct qm_dqrr_entry *dq;
1003c47ff048SShreyansh Jain 	struct qman_fq *fq;
1004c47ff048SShreyansh Jain 	enum qman_cb_dqrr_result res;
1005c47ff048SShreyansh Jain 	unsigned int limit = 0;
1006c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1007c47ff048SShreyansh Jain 	struct qm_dqrr_entry *shadow;
1008c47ff048SShreyansh Jain #endif
1009c47ff048SShreyansh Jain 	do {
1010c47ff048SShreyansh Jain 		qm_dqrr_pvb_update(&p->p);
1011c47ff048SShreyansh Jain 		dq = qm_dqrr_current(&p->p);
10128dc88183SNipun Gupta 		if (unlikely(!dq))
1013c47ff048SShreyansh Jain 			break;
1014c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1015c47ff048SShreyansh Jain 	/* If running on an LE system, the fields of the
1016c47ff048SShreyansh Jain 	 * dequeue entry must be byte-swapped. Because the
1017c47ff048SShreyansh Jain 	 * QMan HW will ignore writes, the DQRR entry is
1018c47ff048SShreyansh Jain 	 * copied and the index is stored within the copy.
1019c47ff048SShreyansh Jain 	 */
1020c47ff048SShreyansh Jain 		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1021c47ff048SShreyansh Jain 		*shadow = *dq;
1022c47ff048SShreyansh Jain 		dq = shadow;
1023c47ff048SShreyansh Jain 		shadow->fqid = be32_to_cpu(shadow->fqid);
1024c47ff048SShreyansh Jain 		shadow->seqnum = be16_to_cpu(shadow->seqnum);
1025c47ff048SShreyansh Jain 		hw_fd_to_cpu(&shadow->fd);
1026c47ff048SShreyansh Jain #endif
1027c47ff048SShreyansh Jain 
1028c47ff048SShreyansh Jain 		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1029c47ff048SShreyansh Jain 			/*
1030c47ff048SShreyansh Jain 			 * VDQCR: don't trust context_b as the FQ may have
1031c47ff048SShreyansh Jain 			 * been configured for h/w consumption and we're
1032c47ff048SShreyansh Jain 			 * draining it post-retirement.
1033c47ff048SShreyansh Jain 			 */
1034c47ff048SShreyansh Jain 			fq = p->vdqcr_owned;
1035c47ff048SShreyansh Jain 	/*
1036c47ff048SShreyansh Jain 	 * We only set QMAN_FQ_STATE_NE when retiring, so we
1037c47ff048SShreyansh Jain 	 * only need to check for clearing it when doing
1038c47ff048SShreyansh Jain 	 * volatile dequeues. It's one less thing to check
1039c47ff048SShreyansh Jain 	 * in the critical path (SDQCR). 
1040c47ff048SShreyansh Jain 	 */
1041c47ff048SShreyansh Jain 			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1042c47ff048SShreyansh Jain 				fq_clear(fq, QMAN_FQ_STATE_NE);
1043c47ff048SShreyansh Jain 	/*
1044c47ff048SShreyansh Jain 	 * This is duplicated from the SDQCR code, but we
1045c47ff048SShreyansh Jain 	 * have stuff to do before *and* after this callback,
1046c47ff048SShreyansh Jain 	 * and we don't want multiple if()s in the critical
1047c47ff048SShreyansh Jain 	 * path (SDQCR).
1048c47ff048SShreyansh Jain 	 */
1049c47ff048SShreyansh Jain 			res = fq->cb.dqrr(p, fq, dq);
1050c47ff048SShreyansh Jain 			if (res == qman_cb_dqrr_stop)
1051c47ff048SShreyansh Jain 				break;
1052c47ff048SShreyansh Jain 			/* Check for VDQCR completion */
1053c47ff048SShreyansh Jain 			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1054c47ff048SShreyansh Jain 				clear_vdqcr(p, fq);
1055c47ff048SShreyansh Jain 		} else {
1056c47ff048SShreyansh Jain 			/* SDQCR: context_b points to the FQ */
1057847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1058847ee3bdSShreyansh Jain 			fq = get_fq_table_entry(dq->contextB);
1059847ee3bdSShreyansh Jain #else
1060c47ff048SShreyansh Jain 			fq = (void *)(uintptr_t)dq->contextB;
1061847ee3bdSShreyansh Jain #endif
1062c47ff048SShreyansh Jain 			/* Now let the callback do its stuff */
1063c47ff048SShreyansh Jain 			res = fq->cb.dqrr(p, fq, dq);
1064c47ff048SShreyansh Jain 			/*
1065c47ff048SShreyansh Jain 			 * The callback can request that we exit without
1066c47ff048SShreyansh Jain 			 * consuming this entry or advancing.
1067c47ff048SShreyansh Jain 			 */
1068c47ff048SShreyansh Jain 			if (res == qman_cb_dqrr_stop)
1069c47ff048SShreyansh Jain 				break;
1070c47ff048SShreyansh Jain 		}
1071c47ff048SShreyansh Jain 		/* Interpret 'dq' from a driver perspective. */
1072c47ff048SShreyansh Jain 		/*
1073c47ff048SShreyansh Jain 		 * Parking isn't possible unless HELDACTIVE was set. NB,
1074c47ff048SShreyansh Jain 		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1075c47ff048SShreyansh Jain 		 * check for HELDACTIVE to cover both.
1076c47ff048SShreyansh Jain 		 */
1077c47ff048SShreyansh Jain 		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1078c47ff048SShreyansh Jain 			    (res != qman_cb_dqrr_park));
1079c47ff048SShreyansh Jain 		/* just means "skip it, I'll consume it myself later on" */
1080c47ff048SShreyansh Jain 		if (res != qman_cb_dqrr_defer)
1081c47ff048SShreyansh Jain 			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1082c47ff048SShreyansh Jain 						 res == qman_cb_dqrr_park);
1083c47ff048SShreyansh Jain 		/* Move forward */
1084c47ff048SShreyansh Jain 		qm_dqrr_next(&p->p);
1085c47ff048SShreyansh Jain 		/*
1086c47ff048SShreyansh Jain 		 * Entry processed and consumed, increment our counter. The
1087c47ff048SShreyansh Jain 		 * callback can request that we exit after consuming the
1088c47ff048SShreyansh Jain 		 * entry, and we also exit if we reach our processing limit,
1089c47ff048SShreyansh Jain 		 * so loop back only if neither of these conditions is met. 
1090c47ff048SShreyansh Jain */ 1091c47ff048SShreyansh Jain } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop); 1092c47ff048SShreyansh Jain 1093c47ff048SShreyansh Jain return limit; 1094c47ff048SShreyansh Jain } 1095c47ff048SShreyansh Jain 10968e253882SHemant Agrawal int qman_irqsource_add(u32 bits) 10978e253882SHemant Agrawal { 10988e253882SHemant Agrawal struct qman_portal *p = get_affine_portal(); 10998e253882SHemant Agrawal 11008e253882SHemant Agrawal bits = bits & QM_PIRQ_VISIBLE; 11018e253882SHemant Agrawal 11028e253882SHemant Agrawal /* Clear any previously remaining interrupt conditions in 11038e253882SHemant Agrawal * QCSP_ISR. This prevents raising a false interrupt when 11048e253882SHemant Agrawal * interrupt conditions are enabled in QCSP_IER. 11058e253882SHemant Agrawal */ 11068e253882SHemant Agrawal qm_isr_status_clear(&p->p, bits); 11078e253882SHemant Agrawal dpaa_set_bits(bits, &p->irq_sources); 11088e253882SHemant Agrawal qm_isr_enable_write(&p->p, p->irq_sources); 11098e253882SHemant Agrawal 1110b1b5d6c9SNipun Gupta return 0; 1111b1b5d6c9SNipun Gupta } 1112b1b5d6c9SNipun Gupta 1113b1b5d6c9SNipun Gupta int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits) 1114b1b5d6c9SNipun Gupta { 1115b1b5d6c9SNipun Gupta bits = bits & QM_PIRQ_VISIBLE; 1116b1b5d6c9SNipun Gupta 1117b1b5d6c9SNipun Gupta /* Clear any previously remaining interrupt conditions in 1118b1b5d6c9SNipun Gupta * QCSP_ISR. This prevents raising a false interrupt when 1119b1b5d6c9SNipun Gupta * interrupt conditions are enabled in QCSP_IER. 1120b1b5d6c9SNipun Gupta */ 1121b1b5d6c9SNipun Gupta qm_isr_status_clear(&p->p, bits); 1122b1b5d6c9SNipun Gupta dpaa_set_bits(bits, &p->irq_sources); 1123b1b5d6c9SNipun Gupta qm_isr_enable_write(&p->p, p->irq_sources); 11248e253882SHemant Agrawal 11258e253882SHemant Agrawal return 0; 11268e253882SHemant Agrawal } 11278e253882SHemant Agrawal 11288e253882SHemant Agrawal int qman_irqsource_remove(u32 bits) 11298e253882SHemant Agrawal { 11308e253882SHemant Agrawal struct qman_portal *p = get_affine_portal(); 11318e253882SHemant Agrawal u32 ier; 11328e253882SHemant Agrawal 11338e253882SHemant Agrawal /* Our interrupt handler only processes+clears status register bits that 11348e253882SHemant Agrawal * are in p->irq_sources. As we're trimming that mask, if one of them 11358e253882SHemant Agrawal * were to assert in the status register just before we remove it from 11368e253882SHemant Agrawal * the enable register, there would be an interrupt-storm when we 11378e253882SHemant Agrawal * release the IRQ lock. So we wait for the enable register update to 11388e253882SHemant Agrawal * take effect in h/w (by reading it back) and then clear all other bits 11398e253882SHemant Agrawal * in the status register. Ie. we clear them from ISR once it's certain 11408e253882SHemant Agrawal * IER won't allow them to reassert. 11418e253882SHemant Agrawal */ 11428e253882SHemant Agrawal 11438e253882SHemant Agrawal bits &= QM_PIRQ_VISIBLE; 11448e253882SHemant Agrawal dpaa_clear_bits(bits, &p->irq_sources); 11458e253882SHemant Agrawal qm_isr_enable_write(&p->p, p->irq_sources); 11468e253882SHemant Agrawal ier = qm_isr_enable_read(&p->p); 11478e253882SHemant Agrawal /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a 11488e253882SHemant Agrawal * data-dependency, ie. to protect against re-ordering. 
11498e253882SHemant Agrawal */ 11508e253882SHemant Agrawal qm_isr_status_clear(&p->p, ~ier); 11518e253882SHemant Agrawal return 0; 11528e253882SHemant Agrawal } 11538e253882SHemant Agrawal 1154b1b5d6c9SNipun Gupta int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits) 1155b1b5d6c9SNipun Gupta { 1156b1b5d6c9SNipun Gupta u32 ier; 1157b1b5d6c9SNipun Gupta 1158b1b5d6c9SNipun Gupta /* Our interrupt handler only processes+clears status register bits that 1159b1b5d6c9SNipun Gupta * are in p->irq_sources. As we're trimming that mask, if one of them 1160b1b5d6c9SNipun Gupta * were to assert in the status register just before we remove it from 1161b1b5d6c9SNipun Gupta * the enable register, there would be an interrupt-storm when we 1162b1b5d6c9SNipun Gupta * release the IRQ lock. So we wait for the enable register update to 1163b1b5d6c9SNipun Gupta * take effect in h/w (by reading it back) and then clear all other bits 1164b1b5d6c9SNipun Gupta * in the status register. Ie. we clear them from ISR once it's certain 1165b1b5d6c9SNipun Gupta * IER won't allow them to reassert. 1166b1b5d6c9SNipun Gupta */ 1167b1b5d6c9SNipun Gupta 1168b1b5d6c9SNipun Gupta bits &= QM_PIRQ_VISIBLE; 1169b1b5d6c9SNipun Gupta dpaa_clear_bits(bits, &p->irq_sources); 1170b1b5d6c9SNipun Gupta qm_isr_enable_write(&p->p, p->irq_sources); 1171b1b5d6c9SNipun Gupta ier = qm_isr_enable_read(&p->p); 1172b1b5d6c9SNipun Gupta /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a 1173b1b5d6c9SNipun Gupta * data-dependency, ie. to protect against re-ordering. 1174b1b5d6c9SNipun Gupta */ 1175b1b5d6c9SNipun Gupta qm_isr_status_clear(&p->p, ~ier); 1176b1b5d6c9SNipun Gupta return 0; 1177b1b5d6c9SNipun Gupta } 1178b1b5d6c9SNipun Gupta 1179c47ff048SShreyansh Jain u16 qman_affine_channel(int cpu) 1180c47ff048SShreyansh Jain { 1181c47ff048SShreyansh Jain if (cpu < 0) { 1182c47ff048SShreyansh Jain struct qman_portal *portal = get_affine_portal(); 1183c47ff048SShreyansh Jain 1184c47ff048SShreyansh Jain cpu = portal->config->cpu; 1185c47ff048SShreyansh Jain } 1186c47ff048SShreyansh Jain DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask)); 1187c47ff048SShreyansh Jain return affine_channels[cpu]; 1188c47ff048SShreyansh Jain } 1189c47ff048SShreyansh Jain 1190f5648825SHemant Agrawal unsigned int qman_portal_poll_rx(unsigned int poll_limit, 1191f5648825SHemant Agrawal void **bufs, 1192f5648825SHemant Agrawal struct qman_portal *p) 1193f5648825SHemant Agrawal { 1194b9083ea5SNipun Gupta struct qm_portal *portal = &p->p; 1195b9083ea5SNipun Gupta register struct qm_dqrr *dqrr = &portal->dqrr; 1196b9083ea5SNipun Gupta struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE]; 11979abdad12SHemant Agrawal struct qman_fq *fq; 1198b9083ea5SNipun Gupta unsigned int limit = 0, rx_number = 0; 1199b9083ea5SNipun Gupta uint32_t consume = 0; 1200f5648825SHemant Agrawal 1201f5648825SHemant Agrawal do { 1202f5648825SHemant Agrawal qm_dqrr_pvb_update(&p->p); 1203b9083ea5SNipun Gupta if (!dqrr->fill) 1204f5648825SHemant Agrawal break; 1205b9083ea5SNipun Gupta 1206b9083ea5SNipun Gupta dq[rx_number] = dqrr->cursor; 1207b9083ea5SNipun Gupta dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1); 1208b9083ea5SNipun Gupta /* Prefetch the next DQRR entry */ 1209b9083ea5SNipun Gupta rte_prefetch0(dqrr->cursor); 1210b9083ea5SNipun Gupta 1211f5648825SHemant Agrawal #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 1212f5648825SHemant Agrawal /* If running on an LE system the fields of the 1213f5648825SHemant Agrawal * dequeue entry must be swapped.
Because the 1214f5648825SHemant Agrawal * QMan HW will ignore writes, the DQRR entry is 1215f5648825SHemant Agrawal * copied and the index stored within the copy 1216f5648825SHemant Agrawal */ 1217b9083ea5SNipun Gupta shadow[rx_number] = 1218b9083ea5SNipun Gupta &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])]; 1219b9083ea5SNipun Gupta shadow[rx_number]->fd.opaque_addr = 1220b9083ea5SNipun Gupta dq[rx_number]->fd.opaque_addr; 1221b9083ea5SNipun Gupta shadow[rx_number]->fd.addr = 1222b9083ea5SNipun Gupta be40_to_cpu(dq[rx_number]->fd.addr); 1223b9083ea5SNipun Gupta shadow[rx_number]->fd.opaque = 1224b9083ea5SNipun Gupta be32_to_cpu(dq[rx_number]->fd.opaque); 1225b9083ea5SNipun Gupta #else 1226aecb4351SHemant Agrawal shadow[rx_number] = dq[rx_number]; 1227f5648825SHemant Agrawal #endif 1228f5648825SHemant Agrawal 1229f5648825SHemant Agrawal /* SDQCR: context_b points to the FQ */ 1230f5648825SHemant Agrawal #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1231fa362ceaSNipun Gupta fq = qman_fq_lookup_table[dq[rx_number]->contextB]; 1232f5648825SHemant Agrawal #else 1233fa362ceaSNipun Gupta fq = (void *)dq[rx_number]->contextB; 1234f5648825SHemant Agrawal #endif 12359abdad12SHemant Agrawal if (fq->cb.dqrr_prepare) 12369abdad12SHemant Agrawal fq->cb.dqrr_prepare(shadow[rx_number], 1237b9083ea5SNipun Gupta &bufs[rx_number]); 1238f5648825SHemant Agrawal 1239b9083ea5SNipun Gupta consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number]))); 1240b9083ea5SNipun Gupta rx_number++; 1241b9083ea5SNipun Gupta --dqrr->fill; 1242b9083ea5SNipun Gupta } while (++limit < poll_limit); 1243b9083ea5SNipun Gupta 1244b9083ea5SNipun Gupta if (rx_number) 12459abdad12SHemant Agrawal fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number); 1246b9083ea5SNipun Gupta 1247b9083ea5SNipun Gupta /* Consume all the DQRR entries together */ 1248b9083ea5SNipun Gupta qm_out(DQRR_DCAP, (1 << 8) | consume); 1249b9083ea5SNipun Gupta 1250b9083ea5SNipun Gupta return rx_number; 1251f5648825SHemant Agrawal } 1252f5648825SHemant Agrawal 12538e253882SHemant Agrawal void qman_clear_irq(void) 12548e253882SHemant Agrawal { 12558e253882SHemant Agrawal struct qman_portal *p = get_affine_portal(); 12568e253882SHemant Agrawal u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & 12578e253882SHemant Agrawal ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI)); 12588e253882SHemant Agrawal qm_isr_status_clear(&p->p, clear); 12598e253882SHemant Agrawal } 12608e253882SHemant Agrawal 126143797e7bSSunil Kumar Kori u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit, 126243797e7bSSunil Kumar Kori void **bufs) 126343797e7bSSunil Kumar Kori { 126443797e7bSSunil Kumar Kori const struct qm_dqrr_entry *dq; 126543797e7bSSunil Kumar Kori struct qman_fq *fq; 126643797e7bSSunil Kumar Kori enum qman_cb_dqrr_result res; 126743797e7bSSunil Kumar Kori unsigned int limit = 0; 126843797e7bSSunil Kumar Kori struct qman_portal *p = get_affine_portal(); 126943797e7bSSunil Kumar Kori #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 127043797e7bSSunil Kumar Kori struct qm_dqrr_entry *shadow; 127143797e7bSSunil Kumar Kori #endif 127243797e7bSSunil Kumar Kori unsigned int rx_number = 0; 127343797e7bSSunil Kumar Kori 127443797e7bSSunil Kumar Kori do { 127543797e7bSSunil Kumar Kori qm_dqrr_pvb_update(&p->p); 127643797e7bSSunil Kumar Kori dq = qm_dqrr_current(&p->p); 127743797e7bSSunil Kumar Kori if (!dq) 127843797e7bSSunil Kumar Kori break; 127943797e7bSSunil Kumar Kori #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 128043797e7bSSunil Kumar Kori /* 128143797e7bSSunil Kumar Kori * If running on an LE system the fields of the
128243797e7bSSunil Kumar Kori * dequeue entry must be swapped. Because the 128343797e7bSSunil Kumar Kori * QMan HW will ignore writes, the DQRR entry is 128443797e7bSSunil Kumar Kori * copied and the index stored within the copy 128543797e7bSSunil Kumar Kori */ 128643797e7bSSunil Kumar Kori shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)]; 128743797e7bSSunil Kumar Kori *shadow = *dq; 128843797e7bSSunil Kumar Kori dq = shadow; 128943797e7bSSunil Kumar Kori shadow->fqid = be32_to_cpu(shadow->fqid); 129043797e7bSSunil Kumar Kori shadow->seqnum = be16_to_cpu(shadow->seqnum); 129143797e7bSSunil Kumar Kori hw_fd_to_cpu(&shadow->fd); 129243797e7bSSunil Kumar Kori #endif 129343797e7bSSunil Kumar Kori 129443797e7bSSunil Kumar Kori /* SDQCR: context_b points to the FQ */ 129543797e7bSSunil Kumar Kori #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 129643797e7bSSunil Kumar Kori fq = get_fq_table_entry(dq->contextB); 129743797e7bSSunil Kumar Kori #else 129843797e7bSSunil Kumar Kori fq = (void *)(uintptr_t)dq->contextB; 129943797e7bSSunil Kumar Kori #endif 130043797e7bSSunil Kumar Kori /* Now let the callback do its stuff */ 130143797e7bSSunil Kumar Kori res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq, 130243797e7bSSunil Kumar Kori dq, &bufs[rx_number]); 130343797e7bSSunil Kumar Kori rx_number++; 130443797e7bSSunil Kumar Kori /* Interpret 'dq' from a driver perspective. */ 130543797e7bSSunil Kumar Kori /* 130643797e7bSSunil Kumar Kori * Parking isn't possible unless HELDACTIVE was set. NB, 130743797e7bSSunil Kumar Kori * FORCEELIGIBLE implies HELDACTIVE, so we only need to 130843797e7bSSunil Kumar Kori * check for HELDACTIVE to cover both. 130943797e7bSSunil Kumar Kori */ 131043797e7bSSunil Kumar Kori DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || 131143797e7bSSunil Kumar Kori (res != qman_cb_dqrr_park)); 131243797e7bSSunil Kumar Kori if (res != qman_cb_dqrr_defer) 131343797e7bSSunil Kumar Kori qm_dqrr_cdc_consume_1ptr(&p->p, dq, 131443797e7bSSunil Kumar Kori res == qman_cb_dqrr_park); 131543797e7bSSunil Kumar Kori /* Move forward */ 131643797e7bSSunil Kumar Kori qm_dqrr_next(&p->p); 131743797e7bSSunil Kumar Kori /* 131843797e7bSSunil Kumar Kori * Entry processed and consumed, increment our counter. The 131943797e7bSSunil Kumar Kori * callback can request that we exit after consuming the 132043797e7bSSunil Kumar Kori * entry, and we also exit if we reach our processing limit, 132143797e7bSSunil Kumar Kori * so loop back only if neither of these conditions is met. 132243797e7bSSunil Kumar Kori */ 132343797e7bSSunil Kumar Kori } while (++limit < poll_limit); 132443797e7bSSunil Kumar Kori 132543797e7bSSunil Kumar Kori return limit; 132643797e7bSSunil Kumar Kori } 132743797e7bSSunil Kumar Kori 1328c47ff048SShreyansh Jain struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq) 1329c47ff048SShreyansh Jain { 1330c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1331c47ff048SShreyansh Jain const struct qm_dqrr_entry *dq; 1332c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 1333c47ff048SShreyansh Jain struct qm_dqrr_entry *shadow; 1334c47ff048SShreyansh Jain #endif 1335c47ff048SShreyansh Jain 1336c47ff048SShreyansh Jain qm_dqrr_pvb_update(&p->p); 1337c47ff048SShreyansh Jain dq = qm_dqrr_current(&p->p); 1338c47ff048SShreyansh Jain if (!dq) 1339c47ff048SShreyansh Jain return NULL; 1340c47ff048SShreyansh Jain 1341c47ff048SShreyansh Jain if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) { 1342c47ff048SShreyansh Jain /* Invalid DQRR - put the portal and consume the DQRR.
1343c47ff048SShreyansh Jain * Return NULL to user as no packet is seen. 1344c47ff048SShreyansh Jain */ 1345c47ff048SShreyansh Jain qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq); 1346c47ff048SShreyansh Jain return NULL; 1347c47ff048SShreyansh Jain } 1348c47ff048SShreyansh Jain 1349c47ff048SShreyansh Jain #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 1350c47ff048SShreyansh Jain shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)]; 1351c47ff048SShreyansh Jain *shadow = *dq; 1352c47ff048SShreyansh Jain dq = shadow; 1353c47ff048SShreyansh Jain shadow->fqid = be32_to_cpu(shadow->fqid); 1354c47ff048SShreyansh Jain shadow->seqnum = be16_to_cpu(shadow->seqnum); 1355c47ff048SShreyansh Jain hw_fd_to_cpu(&shadow->fd); 1356c47ff048SShreyansh Jain #endif 1357c47ff048SShreyansh Jain 1358c47ff048SShreyansh Jain if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) 1359c47ff048SShreyansh Jain fq_clear(fq, QMAN_FQ_STATE_NE); 1360c47ff048SShreyansh Jain 1361c47ff048SShreyansh Jain return (struct qm_dqrr_entry *)dq; 1362c47ff048SShreyansh Jain } 1363c47ff048SShreyansh Jain 1364c47ff048SShreyansh Jain void qman_dqrr_consume(struct qman_fq *fq, 1365c47ff048SShreyansh Jain struct qm_dqrr_entry *dq) 1366c47ff048SShreyansh Jain { 1367c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1368c47ff048SShreyansh Jain 1369c47ff048SShreyansh Jain if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) 1370c47ff048SShreyansh Jain clear_vdqcr(p, fq); 1371c47ff048SShreyansh Jain 1372c47ff048SShreyansh Jain qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0); 1373c47ff048SShreyansh Jain qm_dqrr_next(&p->p); 1374c47ff048SShreyansh Jain } 1375c47ff048SShreyansh Jain 1376c47ff048SShreyansh Jain void qman_stop_dequeues(void) 1377c47ff048SShreyansh Jain { 1378c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1379c47ff048SShreyansh Jain 1380c47ff048SShreyansh Jain qman_stop_dequeues_ex(p); 1381c47ff048SShreyansh Jain } 1382c47ff048SShreyansh Jain 1383c47ff048SShreyansh Jain void qman_start_dequeues(void) 1384c47ff048SShreyansh Jain { 1385c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1386c47ff048SShreyansh Jain 1387c47ff048SShreyansh Jain DPAA_ASSERT(p->dqrr_disable_ref > 0); 1388c47ff048SShreyansh Jain if (!(--p->dqrr_disable_ref)) 1389c47ff048SShreyansh Jain qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL); 1390c47ff048SShreyansh Jain } 1391c47ff048SShreyansh Jain 13929d32ef0fSHemant Agrawal void qman_static_dequeue_add(u32 pools, struct qman_portal *qp) 1393c47ff048SShreyansh Jain { 13949d32ef0fSHemant Agrawal struct qman_portal *p = qp ? qp : get_affine_portal(); 1395c47ff048SShreyansh Jain 1396c47ff048SShreyansh Jain pools &= p->config->pools; 1397c47ff048SShreyansh Jain p->sdqcr |= pools; 1398c47ff048SShreyansh Jain qm_dqrr_sdqcr_set(&p->p, p->sdqcr); 1399c47ff048SShreyansh Jain } 1400c47ff048SShreyansh Jain 14019d32ef0fSHemant Agrawal void qman_static_dequeue_del(u32 pools, struct qman_portal *qp) 1402c47ff048SShreyansh Jain { 14039d32ef0fSHemant Agrawal struct qman_portal *p = qp ? qp : get_affine_portal(); 1404c47ff048SShreyansh Jain 1405c47ff048SShreyansh Jain pools &= p->config->pools; 1406c47ff048SShreyansh Jain p->sdqcr &= ~pools; 1407c47ff048SShreyansh Jain qm_dqrr_sdqcr_set(&p->p, p->sdqcr); 1408c47ff048SShreyansh Jain } 1409c47ff048SShreyansh Jain 14109d32ef0fSHemant Agrawal u32 qman_static_dequeue_get(struct qman_portal *qp) 1411c47ff048SShreyansh Jain { 14129d32ef0fSHemant Agrawal struct qman_portal *p = qp ? 
qp : get_affine_portal(); 1413c47ff048SShreyansh Jain return p->sdqcr; 1414c47ff048SShreyansh Jain } 1415c47ff048SShreyansh Jain 141643797e7bSSunil Kumar Kori void qman_dca(const struct qm_dqrr_entry *dq, int park_request) 1417c47ff048SShreyansh Jain { 1418c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1419c47ff048SShreyansh Jain 1420c47ff048SShreyansh Jain qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); 1421c47ff048SShreyansh Jain } 1422c47ff048SShreyansh Jain 142343797e7bSSunil Kumar Kori void qman_dca_index(u8 index, int park_request) 142443797e7bSSunil Kumar Kori { 142543797e7bSSunil Kumar Kori struct qman_portal *p = get_affine_portal(); 142643797e7bSSunil Kumar Kori 142743797e7bSSunil Kumar Kori qm_dqrr_cdc_consume_1(&p->p, index, park_request); 142843797e7bSSunil Kumar Kori } 142943797e7bSSunil Kumar Kori 1430c47ff048SShreyansh Jain /* Frame queue API */ 1431c47ff048SShreyansh Jain static const char *mcr_result_str(u8 result) 1432c47ff048SShreyansh Jain { 1433c47ff048SShreyansh Jain switch (result) { 1434c47ff048SShreyansh Jain case QM_MCR_RESULT_NULL: 1435c47ff048SShreyansh Jain return "QM_MCR_RESULT_NULL"; 1436c47ff048SShreyansh Jain case QM_MCR_RESULT_OK: 1437c47ff048SShreyansh Jain return "QM_MCR_RESULT_OK"; 1438c47ff048SShreyansh Jain case QM_MCR_RESULT_ERR_FQID: 1439c47ff048SShreyansh Jain return "QM_MCR_RESULT_ERR_FQID"; 1440c47ff048SShreyansh Jain case QM_MCR_RESULT_ERR_FQSTATE: 1441c47ff048SShreyansh Jain return "QM_MCR_RESULT_ERR_FQSTATE"; 1442c47ff048SShreyansh Jain case QM_MCR_RESULT_ERR_NOTEMPTY: 1443c47ff048SShreyansh Jain return "QM_MCR_RESULT_ERR_NOTEMPTY"; 1444c47ff048SShreyansh Jain case QM_MCR_RESULT_PENDING: 1445c47ff048SShreyansh Jain return "QM_MCR_RESULT_PENDING"; 1446c47ff048SShreyansh Jain case QM_MCR_RESULT_ERR_BADCOMMAND: 1447c47ff048SShreyansh Jain return "QM_MCR_RESULT_ERR_BADCOMMAND"; 1448c47ff048SShreyansh Jain } 1449c47ff048SShreyansh Jain return "<unknown MCR result>"; 1450c47ff048SShreyansh Jain } 1451c47ff048SShreyansh Jain 1452c47ff048SShreyansh Jain int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) 1453c47ff048SShreyansh Jain { 1454c47ff048SShreyansh Jain struct qm_fqd fqd; 1455c47ff048SShreyansh Jain struct qm_mcr_queryfq_np np; 1456c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1457c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1458c47ff048SShreyansh Jain struct qman_portal *p; 1459c47ff048SShreyansh Jain 1460c47ff048SShreyansh Jain if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { 1461c47ff048SShreyansh Jain int ret = qman_alloc_fqid(&fqid); 1462c47ff048SShreyansh Jain 1463c47ff048SShreyansh Jain if (ret) 1464c47ff048SShreyansh Jain return ret; 1465c47ff048SShreyansh Jain } 1466c47ff048SShreyansh Jain spin_lock_init(&fq->fqlock); 1467c47ff048SShreyansh Jain fq->fqid = fqid; 14688dc88183SNipun Gupta fq->fqid_le = cpu_to_be32(fqid); 1469c47ff048SShreyansh Jain fq->flags = flags; 1470c47ff048SShreyansh Jain fq->state = qman_fq_state_oos; 1471c47ff048SShreyansh Jain fq->cgr_groupid = 0; 1472847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1473847ee3bdSShreyansh Jain if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) { 1474847ee3bdSShreyansh Jain pr_info("Find empty table entry failed\n"); 1475847ee3bdSShreyansh Jain return -ENOMEM; 1476847ee3bdSShreyansh Jain } 14774bbc759fSAkhil Goyal fq->qman_fq_lookup_table = qman_fq_lookup_table; 1478847ee3bdSShreyansh Jain #endif 1479c47ff048SShreyansh Jain if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) 1480c47ff048SShreyansh Jain return 0; 
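/*
 * Usage sketch (illustrative only - my_dqrr_cb is a hypothetical,
 * caller-supplied DQRR callback): the common non-AS_IS path above is a
 * pure software operation, no management command is issued, and the FQ
 * is typically initialised and scheduled afterwards:
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq) == 0)
 *		qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
 */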
1481c47ff048SShreyansh Jain /* Everything else is AS_IS support */ 1482c47ff048SShreyansh Jain p = get_affine_portal(); 1483c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1484c47ff048SShreyansh Jain mcc->queryfq.fqid = cpu_to_be32(fqid); 1485c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); 1486c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1487c47ff048SShreyansh Jain cpu_relax(); 1488c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); 1489c47ff048SShreyansh Jain if (mcr->result != QM_MCR_RESULT_OK) { 1490c47ff048SShreyansh Jain pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); 1491c47ff048SShreyansh Jain goto err; 1492c47ff048SShreyansh Jain } 1493c47ff048SShreyansh Jain fqd = mcr->queryfq.fqd; 1494c47ff048SShreyansh Jain hw_fqd_to_cpu(&fqd); 1495c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1496c47ff048SShreyansh Jain mcc->queryfq_np.fqid = cpu_to_be32(fqid); 1497c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); 1498c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1499c47ff048SShreyansh Jain cpu_relax(); 1500c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); 1501c47ff048SShreyansh Jain if (mcr->result != QM_MCR_RESULT_OK) { 1502c47ff048SShreyansh Jain pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); 1503c47ff048SShreyansh Jain goto err; 1504c47ff048SShreyansh Jain } 1505c47ff048SShreyansh Jain np = mcr->queryfq_np; 1506c47ff048SShreyansh Jain /* Phew, have queryfq and queryfq_np results, stitch together 1507c47ff048SShreyansh Jain * the FQ object from those. 1508c47ff048SShreyansh Jain */ 1509c47ff048SShreyansh Jain fq->cgr_groupid = fqd.cgid; 1510c47ff048SShreyansh Jain switch (np.state & QM_MCR_NP_STATE_MASK) { 1511c47ff048SShreyansh Jain case QM_MCR_NP_STATE_OOS: 1512c47ff048SShreyansh Jain break; 1513c47ff048SShreyansh Jain case QM_MCR_NP_STATE_RETIRED: 1514c47ff048SShreyansh Jain fq->state = qman_fq_state_retired; 1515c47ff048SShreyansh Jain if (np.frm_cnt) 1516c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_NE); 1517c47ff048SShreyansh Jain break; 1518c47ff048SShreyansh Jain case QM_MCR_NP_STATE_TEN_SCHED: 1519c47ff048SShreyansh Jain case QM_MCR_NP_STATE_TRU_SCHED: 1520c47ff048SShreyansh Jain case QM_MCR_NP_STATE_ACTIVE: 1521c47ff048SShreyansh Jain fq->state = qman_fq_state_sched; 1522c47ff048SShreyansh Jain if (np.state & QM_MCR_NP_STATE_R) 1523c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_CHANGING); 1524c47ff048SShreyansh Jain break; 1525c47ff048SShreyansh Jain case QM_MCR_NP_STATE_PARKED: 1526c47ff048SShreyansh Jain fq->state = qman_fq_state_parked; 1527c47ff048SShreyansh Jain break; 1528c47ff048SShreyansh Jain default: 1529c47ff048SShreyansh Jain DPAA_ASSERT(NULL == "invalid FQ state"); 1530c47ff048SShreyansh Jain } 1531c47ff048SShreyansh Jain if (fqd.fq_ctrl & QM_FQCTRL_CGE) 1532c47ff048SShreyansh Jain fq->state |= QMAN_FQ_STATE_CGR_EN; 1533c47ff048SShreyansh Jain return 0; 1534c47ff048SShreyansh Jain err: 1535c47ff048SShreyansh Jain if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) 1536c47ff048SShreyansh Jain qman_release_fqid(fqid); 1537c47ff048SShreyansh Jain return -EIO; 1538c47ff048SShreyansh Jain } 1539c47ff048SShreyansh Jain 1540c47ff048SShreyansh Jain void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) 1541c47ff048SShreyansh Jain { 1542c47ff048SShreyansh Jain /* 1543c47ff048SShreyansh Jain * We don't need to lock the FQ as it is a pre-condition that the FQ be 1544c47ff048SShreyansh Jain 
* quiesced. Instead, run some checks. 1545c47ff048SShreyansh Jain */ 1546c47ff048SShreyansh Jain switch (fq->state) { 1547c47ff048SShreyansh Jain case qman_fq_state_parked: 1548c47ff048SShreyansh Jain DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); 1549996672d3SFerruh Yigit /* Fallthrough */ 1550c47ff048SShreyansh Jain case qman_fq_state_oos: 1551c47ff048SShreyansh Jain if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) 1552c47ff048SShreyansh Jain qman_release_fqid(fq->fqid); 1553847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1554847ee3bdSShreyansh Jain clear_fq_table_entry(fq->key); 1555847ee3bdSShreyansh Jain #endif 1556c47ff048SShreyansh Jain return; 1557c47ff048SShreyansh Jain default: 1558c47ff048SShreyansh Jain break; 1559c47ff048SShreyansh Jain } 1560c47ff048SShreyansh Jain DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); 1561c47ff048SShreyansh Jain } 1562c47ff048SShreyansh Jain 1563c47ff048SShreyansh Jain u32 qman_fq_fqid(struct qman_fq *fq) 1564c47ff048SShreyansh Jain { 1565c47ff048SShreyansh Jain return fq->fqid; 1566c47ff048SShreyansh Jain } 1567c47ff048SShreyansh Jain 1568c47ff048SShreyansh Jain void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) 1569c47ff048SShreyansh Jain { 1570c47ff048SShreyansh Jain if (state) 1571c47ff048SShreyansh Jain *state = fq->state; 1572c47ff048SShreyansh Jain if (flags) 1573c47ff048SShreyansh Jain *flags = fq->flags; 1574c47ff048SShreyansh Jain } 1575c47ff048SShreyansh Jain 1576c47ff048SShreyansh Jain int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) 1577c47ff048SShreyansh Jain { 1578c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1579c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1580c47ff048SShreyansh Jain struct qman_portal *p; 1581c47ff048SShreyansh Jain 1582c47ff048SShreyansh Jain u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
1583c47ff048SShreyansh Jain QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; 1584c47ff048SShreyansh Jain 1585c47ff048SShreyansh Jain if ((fq->state != qman_fq_state_oos) && 1586c47ff048SShreyansh Jain (fq->state != qman_fq_state_parked)) 1587c47ff048SShreyansh Jain return -EINVAL; 1588c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 1589c47ff048SShreyansh Jain if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) 1590c47ff048SShreyansh Jain return -EINVAL; 1591c47ff048SShreyansh Jain #endif 1592c47ff048SShreyansh Jain if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { 1593c47ff048SShreyansh Jain /* And can't be set at the same time as TDTHRESH */ 1594c47ff048SShreyansh Jain if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) 1595c47ff048SShreyansh Jain return -EINVAL; 1596c47ff048SShreyansh Jain } 1597c47ff048SShreyansh Jain /* Issue an INITFQ_[PARKED|SCHED] management command */ 1598c47ff048SShreyansh Jain p = get_affine_portal(); 1599c47ff048SShreyansh Jain FQLOCK(fq); 1600c47ff048SShreyansh Jain if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || 1601c47ff048SShreyansh Jain ((fq->state != qman_fq_state_oos) && 1602c47ff048SShreyansh Jain (fq->state != qman_fq_state_parked)))) { 1603c47ff048SShreyansh Jain FQUNLOCK(fq); 1604c47ff048SShreyansh Jain return -EBUSY; 1605c47ff048SShreyansh Jain } 1606c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1607c47ff048SShreyansh Jain if (opts) 1608c47ff048SShreyansh Jain mcc->initfq = *opts; 1609c47ff048SShreyansh Jain mcc->initfq.fqid = cpu_to_be32(fq->fqid); 1610c47ff048SShreyansh Jain mcc->initfq.count = 0; 1611c47ff048SShreyansh Jain /* 1612c47ff048SShreyansh Jain * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a 1613c47ff048SShreyansh Jain * demux pointer. Otherwise, the caller-provided value is allowed to 1614c47ff048SShreyansh Jain * stand, don't overwrite it. 1615c47ff048SShreyansh Jain */ 1616c47ff048SShreyansh Jain if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { 1617c47ff048SShreyansh Jain dma_addr_t phys_fq; 1618c47ff048SShreyansh Jain 1619c47ff048SShreyansh Jain mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; 1620847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1621fa362ceaSNipun Gupta mcc->initfq.fqd.context_b = cpu_to_be32(fq->key); 1622847ee3bdSShreyansh Jain #else 1623c47ff048SShreyansh Jain mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq; 1624847ee3bdSShreyansh Jain #endif 1625c47ff048SShreyansh Jain /* 1626c47ff048SShreyansh Jain * and the physical address - NB, if the user wasn't trying to 1627c47ff048SShreyansh Jain * set CONTEXTA, clear the stashing settings. 
1628c47ff048SShreyansh Jain */ 1629c47ff048SShreyansh Jain if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { 1630c47ff048SShreyansh Jain mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; 1631c47ff048SShreyansh Jain memset(&mcc->initfq.fqd.context_a, 0, 1632c47ff048SShreyansh Jain sizeof(mcc->initfq.fqd.context_a)); 1633c47ff048SShreyansh Jain } else { 163462196f4eSThomas Monjalon phys_fq = rte_mem_virt2iova(fq); 1635c47ff048SShreyansh Jain qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); 1636c47ff048SShreyansh Jain } 1637c47ff048SShreyansh Jain } 1638c47ff048SShreyansh Jain if (flags & QMAN_INITFQ_FLAG_LOCAL) { 1639c47ff048SShreyansh Jain mcc->initfq.fqd.dest.channel = p->config->channel; 1640c47ff048SShreyansh Jain if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { 1641c47ff048SShreyansh Jain mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; 1642c47ff048SShreyansh Jain mcc->initfq.fqd.dest.wq = 4; 1643c47ff048SShreyansh Jain } 1644c47ff048SShreyansh Jain } 1645c47ff048SShreyansh Jain mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask); 1646c47ff048SShreyansh Jain cpu_to_hw_fqd(&mcc->initfq.fqd); 1647c47ff048SShreyansh Jain qm_mc_commit(&p->p, myverb); 1648c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1649c47ff048SShreyansh Jain cpu_relax(); 1650c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); 1651c47ff048SShreyansh Jain res = mcr->result; 1652c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 1653c47ff048SShreyansh Jain FQUNLOCK(fq); 1654c47ff048SShreyansh Jain return -EIO; 1655c47ff048SShreyansh Jain } 1656c47ff048SShreyansh Jain if (opts) { 1657c47ff048SShreyansh Jain if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { 1658c47ff048SShreyansh Jain if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) 1659c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_CGR_EN); 1660c47ff048SShreyansh Jain else 1661c47ff048SShreyansh Jain fq_clear(fq, QMAN_FQ_STATE_CGR_EN); 1662c47ff048SShreyansh Jain } 1663c47ff048SShreyansh Jain if (opts->we_mask & QM_INITFQ_WE_CGID) 1664c47ff048SShreyansh Jain fq->cgr_groupid = opts->fqd.cgid; 1665c47ff048SShreyansh Jain } 1666c47ff048SShreyansh Jain fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
1667c47ff048SShreyansh Jain qman_fq_state_sched : qman_fq_state_parked; 1668c47ff048SShreyansh Jain FQUNLOCK(fq); 1669c47ff048SShreyansh Jain return 0; 1670c47ff048SShreyansh Jain } 1671c47ff048SShreyansh Jain 1672c47ff048SShreyansh Jain int qman_schedule_fq(struct qman_fq *fq) 1673c47ff048SShreyansh Jain { 1674c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1675c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1676c47ff048SShreyansh Jain struct qman_portal *p; 1677c47ff048SShreyansh Jain 1678c47ff048SShreyansh Jain int ret = 0; 1679c47ff048SShreyansh Jain u8 res; 1680c47ff048SShreyansh Jain 1681c47ff048SShreyansh Jain if (fq->state != qman_fq_state_parked) 1682c47ff048SShreyansh Jain return -EINVAL; 1683c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 1684c47ff048SShreyansh Jain if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) 1685c47ff048SShreyansh Jain return -EINVAL; 1686c47ff048SShreyansh Jain #endif 1687c47ff048SShreyansh Jain /* Issue an ALTERFQ_SCHED management command */ 1688c47ff048SShreyansh Jain p = get_affine_portal(); 1689c47ff048SShreyansh Jain 1690c47ff048SShreyansh Jain FQLOCK(fq); 1691c47ff048SShreyansh Jain if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || 1692c47ff048SShreyansh Jain (fq->state != qman_fq_state_parked))) { 1693c47ff048SShreyansh Jain ret = -EBUSY; 1694c47ff048SShreyansh Jain goto out; 1695c47ff048SShreyansh Jain } 1696c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1697c47ff048SShreyansh Jain mcc->alterfq.fqid = cpu_to_be32(fq->fqid); 1698c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); 1699c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1700c47ff048SShreyansh Jain cpu_relax(); 1701c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); 1702c47ff048SShreyansh Jain res = mcr->result; 1703c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 1704c47ff048SShreyansh Jain ret = -EIO; 1705c47ff048SShreyansh Jain goto out; 1706c47ff048SShreyansh Jain } 1707c47ff048SShreyansh Jain fq->state = qman_fq_state_sched; 1708c47ff048SShreyansh Jain out: 1709c47ff048SShreyansh Jain FQUNLOCK(fq); 1710c47ff048SShreyansh Jain 1711c47ff048SShreyansh Jain return ret; 1712c47ff048SShreyansh Jain } 1713c47ff048SShreyansh Jain 1714c47ff048SShreyansh Jain int qman_retire_fq(struct qman_fq *fq, u32 *flags) 1715c47ff048SShreyansh Jain { 1716c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1717c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1718c47ff048SShreyansh Jain struct qman_portal *p; 1719c47ff048SShreyansh Jain 1720c47ff048SShreyansh Jain int rval; 1721c47ff048SShreyansh Jain u8 res; 1722c47ff048SShreyansh Jain 1723c9fd1acdSGagandeep Singh /* Queue is already in retired or OOS state */ 1724c47ff048SShreyansh Jain if ((fq->state != qman_fq_state_parked) && 1725c47ff048SShreyansh Jain (fq->state != qman_fq_state_sched)) 1726c9fd1acdSGagandeep Singh return 0; 1727c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 1728c47ff048SShreyansh Jain if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) 1729c47ff048SShreyansh Jain return -EINVAL; 1730c47ff048SShreyansh Jain #endif 1731c47ff048SShreyansh Jain p = get_affine_portal(); 1732c47ff048SShreyansh Jain 1733c47ff048SShreyansh Jain FQLOCK(fq); 1734c47ff048SShreyansh Jain if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || 1735c47ff048SShreyansh Jain (fq->state == qman_fq_state_retired) || 1736c47ff048SShreyansh Jain (fq->state == qman_fq_state_oos))) { 1737c47ff048SShreyansh Jain rval = -EBUSY; 1738c47ff048SShreyansh Jain goto out;
1739c47ff048SShreyansh Jain } 1740c47ff048SShreyansh Jain rval = table_push_fq(p, fq); 1741c47ff048SShreyansh Jain if (rval) 1742c47ff048SShreyansh Jain goto out; 1743c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1744c47ff048SShreyansh Jain mcc->alterfq.fqid = cpu_to_be32(fq->fqid); 1745c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); 1746c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1747c47ff048SShreyansh Jain cpu_relax(); 1748c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); 1749c47ff048SShreyansh Jain res = mcr->result; 1750c47ff048SShreyansh Jain /* 1751c47ff048SShreyansh Jain * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, 1752c47ff048SShreyansh Jain * and defer the flags until FQRNI or FQRN (respectively) show up. But 1753c47ff048SShreyansh Jain * "Friendly" is to process OK immediately, and not set CHANGING. We do 1754c47ff048SShreyansh Jain * friendly, otherwise the caller doesn't necessarily have a fully 1755c47ff048SShreyansh Jain * "retired" FQ on return even if the retirement was immediate. However 1756c47ff048SShreyansh Jain * this does mean some code duplication between here and 1757c47ff048SShreyansh Jain * fq_state_change(). 1758c47ff048SShreyansh Jain */ 1759c47ff048SShreyansh Jain if (likely(res == QM_MCR_RESULT_OK)) { 1760c47ff048SShreyansh Jain rval = 0; 1761c47ff048SShreyansh Jain /* Process 'fq' right away, we'll ignore FQRNI */ 1762c47ff048SShreyansh Jain if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) 1763c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_NE); 1764c47ff048SShreyansh Jain if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) 1765c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_ORL); 1766c47ff048SShreyansh Jain else 1767c47ff048SShreyansh Jain table_del_fq(p, fq); 1768c47ff048SShreyansh Jain if (flags) 1769c47ff048SShreyansh Jain *flags = fq->flags; 1770c47ff048SShreyansh Jain fq->state = qman_fq_state_retired; 1771c47ff048SShreyansh Jain if (fq->cb.fqs) { 1772c47ff048SShreyansh Jain /* 1773c47ff048SShreyansh Jain * Another issue with supporting "immediate" retirement 1774c47ff048SShreyansh Jain * is that we're forced to drop FQRNIs, because by the 1775c47ff048SShreyansh Jain * time they're seen it may already be "too late" (the 1776c47ff048SShreyansh Jain * fq may have been OOS'd and free()'d already). But if 1777c47ff048SShreyansh Jain * the upper layer wants a callback whether it's 1778c47ff048SShreyansh Jain * immediate or not, we have to fake an "MR" entry to 1779c47ff048SShreyansh Jain * look like an FQRNI...
1780c47ff048SShreyansh Jain */ 1781c47ff048SShreyansh Jain struct qm_mr_entry msg; 1782c47ff048SShreyansh Jain 1783dd6f8d71SAndy Green msg.ern.verb = QM_MR_VERB_FQRNI; 1784c47ff048SShreyansh Jain msg.fq.fqs = mcr->alterfq.fqs; 1785c47ff048SShreyansh Jain msg.fq.fqid = fq->fqid; 1786847ee3bdSShreyansh Jain #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP 1787847ee3bdSShreyansh Jain msg.fq.contextB = fq->key; 1788847ee3bdSShreyansh Jain #else 1789c47ff048SShreyansh Jain msg.fq.contextB = (u32)(uintptr_t)fq; 1790847ee3bdSShreyansh Jain #endif 1791c47ff048SShreyansh Jain fq->cb.fqs(p, fq, &msg); 1792c47ff048SShreyansh Jain } 1793c47ff048SShreyansh Jain } else if (res == QM_MCR_RESULT_PENDING) { 1794c47ff048SShreyansh Jain rval = 1; 1795c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_CHANGING); 1796c47ff048SShreyansh Jain } else { 1797c47ff048SShreyansh Jain rval = -EIO; 1798c47ff048SShreyansh Jain table_del_fq(p, fq); 1799c47ff048SShreyansh Jain } 1800c47ff048SShreyansh Jain out: 1801c47ff048SShreyansh Jain FQUNLOCK(fq); 1802b292acc3SGagandeep Singh /* Draining FQRNIs, if any */ 1803b292acc3SGagandeep Singh drain_mr_fqrni(&p->p); 1804c47ff048SShreyansh Jain return rval; 1805c47ff048SShreyansh Jain } 1806c47ff048SShreyansh Jain 1807c47ff048SShreyansh Jain int qman_oos_fq(struct qman_fq *fq) 1808c47ff048SShreyansh Jain { 1809c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1810c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1811c47ff048SShreyansh Jain struct qman_portal *p; 1812c47ff048SShreyansh Jain 1813c47ff048SShreyansh Jain int ret = 0; 1814c47ff048SShreyansh Jain u8 res; 1815c47ff048SShreyansh Jain 1816c47ff048SShreyansh Jain if (fq->state != qman_fq_state_retired) 1817c47ff048SShreyansh Jain return -EINVAL; 1818c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 1819c47ff048SShreyansh Jain if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) 1820c47ff048SShreyansh Jain return -EINVAL; 1821c47ff048SShreyansh Jain #endif 1822c47ff048SShreyansh Jain p = get_affine_portal(); 1823c47ff048SShreyansh Jain FQLOCK(fq); 1824c47ff048SShreyansh Jain if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || 1825c47ff048SShreyansh Jain (fq->state != qman_fq_state_retired))) { 1826c47ff048SShreyansh Jain ret = -EBUSY; 1827c47ff048SShreyansh Jain goto out; 1828c47ff048SShreyansh Jain } 1829c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1830c47ff048SShreyansh Jain mcc->alterfq.fqid = cpu_to_be32(fq->fqid); 1831c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); 1832c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1833c47ff048SShreyansh Jain cpu_relax(); 1834c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); 1835c47ff048SShreyansh Jain res = mcr->result; 1836c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 1837c47ff048SShreyansh Jain ret = -EIO; 1838c47ff048SShreyansh Jain goto out; 1839c47ff048SShreyansh Jain } 1840c47ff048SShreyansh Jain fq->state = qman_fq_state_oos; 1841c47ff048SShreyansh Jain out: 1842c47ff048SShreyansh Jain FQUNLOCK(fq); 1843c47ff048SShreyansh Jain return ret; 1844c47ff048SShreyansh Jain } 1845c47ff048SShreyansh Jain 1846c47ff048SShreyansh Jain int qman_fq_flow_control(struct qman_fq *fq, int xon) 1847c47ff048SShreyansh Jain { 1848c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1849c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1850c47ff048SShreyansh Jain struct qman_portal *p; 1851c47ff048SShreyansh Jain 1852c47ff048SShreyansh Jain int ret = 0; 1853c47ff048SShreyansh Jain u8 res; 1854c47ff048SShreyansh Jain u8 
myverb; 1855c47ff048SShreyansh Jain 1856c47ff048SShreyansh Jain if ((fq->state == qman_fq_state_oos) || 1857c47ff048SShreyansh Jain (fq->state == qman_fq_state_retired) || 1858c47ff048SShreyansh Jain (fq->state == qman_fq_state_parked)) 1859c47ff048SShreyansh Jain return -EINVAL; 1860c47ff048SShreyansh Jain 1861c47ff048SShreyansh Jain #ifdef RTE_LIBRTE_DPAA_HWDEBUG 1862c47ff048SShreyansh Jain if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) 1863c47ff048SShreyansh Jain return -EINVAL; 1864c47ff048SShreyansh Jain #endif 1865c47ff048SShreyansh Jain /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */ 1866c47ff048SShreyansh Jain p = get_affine_portal(); 1867c47ff048SShreyansh Jain FQLOCK(fq); 1868c47ff048SShreyansh Jain if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || 1869c47ff048SShreyansh Jain (fq->state == qman_fq_state_parked) || 1870c47ff048SShreyansh Jain (fq->state == qman_fq_state_oos) || 1871c47ff048SShreyansh Jain (fq->state == qman_fq_state_retired))) { 1872c47ff048SShreyansh Jain ret = -EBUSY; 1873c47ff048SShreyansh Jain goto out; 1874c47ff048SShreyansh Jain } 1875c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1876c47ff048SShreyansh Jain mcc->alterfq.fqid = fq->fqid; 1877c47ff048SShreyansh Jain mcc->alterfq.count = 0; 1878c47ff048SShreyansh Jain myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF; 1879c47ff048SShreyansh Jain 1880c47ff048SShreyansh Jain qm_mc_commit(&p->p, myverb); 1881c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1882c47ff048SShreyansh Jain cpu_relax(); 1883c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); 1884c47ff048SShreyansh Jain 1885c47ff048SShreyansh Jain res = mcr->result; 1886c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 1887c47ff048SShreyansh Jain ret = -EIO; 1888c47ff048SShreyansh Jain goto out; 1889c47ff048SShreyansh Jain } 1890c47ff048SShreyansh Jain out: 1891c47ff048SShreyansh Jain FQUNLOCK(fq); 1892c47ff048SShreyansh Jain return ret; 1893c47ff048SShreyansh Jain } 1894c47ff048SShreyansh Jain 1895c47ff048SShreyansh Jain int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) 1896c47ff048SShreyansh Jain { 1897c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1898c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1899c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1900c47ff048SShreyansh Jain 1901c47ff048SShreyansh Jain u8 res; 1902c47ff048SShreyansh Jain 1903c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1904c47ff048SShreyansh Jain mcc->queryfq.fqid = cpu_to_be32(fq->fqid); 1905c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); 1906c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1907c47ff048SShreyansh Jain cpu_relax(); 1908c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); 1909c47ff048SShreyansh Jain res = mcr->result; 1910c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) 1911c47ff048SShreyansh Jain *fqd = mcr->queryfq.fqd; 1912c47ff048SShreyansh Jain hw_fqd_to_cpu(fqd); 1913c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) 1914c47ff048SShreyansh Jain return -EIO; 1915c47ff048SShreyansh Jain return 0; 1916c47ff048SShreyansh Jain } 1917c47ff048SShreyansh Jain 1918c47ff048SShreyansh Jain int qman_query_fq_has_pkts(struct qman_fq *fq) 1919c47ff048SShreyansh Jain { 1920c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1921c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1922c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1923c47ff048SShreyansh Jain
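/*
 * Note: this helper only reports whether QUERYFQ_NP sees a non-zero
 * frame count; if the management command fails it falls through and
 * returns 0 ("no packets") rather than an error code, so callers cannot
 * distinguish an empty FQ from a failed query.
 */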
1924c47ff048SShreyansh Jain int ret = 0; 1925c47ff048SShreyansh Jain u8 res; 1926c47ff048SShreyansh Jain 1927c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1928c47ff048SShreyansh Jain mcc->queryfq.fqid = cpu_to_be32(fq->fqid); 1929c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); 1930c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1931c47ff048SShreyansh Jain cpu_relax(); 1932c47ff048SShreyansh Jain res = mcr->result; 1933c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) 1934c47ff048SShreyansh Jain ret = !!mcr->queryfq_np.frm_cnt; 1935c47ff048SShreyansh Jain return ret; 1936c47ff048SShreyansh Jain } 1937c47ff048SShreyansh Jain 1938c47ff048SShreyansh Jain int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) 1939c47ff048SShreyansh Jain { 1940c47ff048SShreyansh Jain struct qm_mc_command *mcc; 1941c47ff048SShreyansh Jain struct qm_mc_result *mcr; 1942c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 1943c47ff048SShreyansh Jain 1944c47ff048SShreyansh Jain u8 res; 1945c47ff048SShreyansh Jain 1946c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 1947c47ff048SShreyansh Jain mcc->queryfq.fqid = cpu_to_be32(fq->fqid); 1948c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); 1949c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 1950c47ff048SShreyansh Jain cpu_relax(); 1951c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); 1952c47ff048SShreyansh Jain res = mcr->result; 1953c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) { 1954c47ff048SShreyansh Jain *np = mcr->queryfq_np; 1955c47ff048SShreyansh Jain np->fqd_link = be24_to_cpu(np->fqd_link); 1956c47ff048SShreyansh Jain np->odp_seq = be16_to_cpu(np->odp_seq); 1957c47ff048SShreyansh Jain np->orp_nesn = be16_to_cpu(np->orp_nesn); 1958c47ff048SShreyansh Jain np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq); 1959c47ff048SShreyansh Jain np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq); 1960c47ff048SShreyansh Jain np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr); 1961c47ff048SShreyansh Jain np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr); 1962c47ff048SShreyansh Jain np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr); 1963c47ff048SShreyansh Jain np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr); 1964c47ff048SShreyansh Jain np->ics_surp = be16_to_cpu(np->ics_surp); 1965c47ff048SShreyansh Jain np->byte_cnt = be32_to_cpu(np->byte_cnt); 1966c47ff048SShreyansh Jain np->frm_cnt = be24_to_cpu(np->frm_cnt); 1967c47ff048SShreyansh Jain np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr); 1968c47ff048SShreyansh Jain np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr); 1969c47ff048SShreyansh Jain np->od1_sfdr = be16_to_cpu(np->od1_sfdr); 1970c47ff048SShreyansh Jain np->od2_sfdr = be16_to_cpu(np->od2_sfdr); 1971c47ff048SShreyansh Jain np->od3_sfdr = be16_to_cpu(np->od3_sfdr); 1972c47ff048SShreyansh Jain } 1973c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_ERR_FQID) 1974c47ff048SShreyansh Jain return -ERANGE; 1975c47ff048SShreyansh Jain else if (res != QM_MCR_RESULT_OK) 1976c47ff048SShreyansh Jain return -EIO; 1977c47ff048SShreyansh Jain return 0; 1978c47ff048SShreyansh Jain } 1979c47ff048SShreyansh Jain 198006268e2cSHemant Agrawal int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt) 198106268e2cSHemant Agrawal { 198206268e2cSHemant Agrawal struct qm_mc_command *mcc; 198306268e2cSHemant Agrawal struct qm_mc_result *mcr; 198406268e2cSHemant Agrawal struct qman_portal *p = get_affine_portal(); 198506268e2cSHemant Agrawal 198606268e2cSHemant Agrawal mcc = 
qm_mc_start(&p->p); 198706268e2cSHemant Agrawal mcc->queryfq.fqid = cpu_to_be32(fq->fqid); 198806268e2cSHemant Agrawal qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); 198906268e2cSHemant Agrawal while (!(mcr = qm_mc_result(&p->p))) 199006268e2cSHemant Agrawal cpu_relax(); 199106268e2cSHemant Agrawal DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); 199206268e2cSHemant Agrawal 199306268e2cSHemant Agrawal if (mcr->result == QM_MCR_RESULT_OK) 199406268e2cSHemant Agrawal *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt); 199506268e2cSHemant Agrawal else if (mcr->result == QM_MCR_RESULT_ERR_FQID) 199606268e2cSHemant Agrawal return -ERANGE; 199706268e2cSHemant Agrawal else if (mcr->result != QM_MCR_RESULT_OK) 199806268e2cSHemant Agrawal return -EIO; 199906268e2cSHemant Agrawal return 0; 200006268e2cSHemant Agrawal } 200106268e2cSHemant Agrawal 2002c47ff048SShreyansh Jain int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) 2003c47ff048SShreyansh Jain { 2004c47ff048SShreyansh Jain struct qm_mc_command *mcc; 2005c47ff048SShreyansh Jain struct qm_mc_result *mcr; 2006c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 2007c47ff048SShreyansh Jain 2008c47ff048SShreyansh Jain u8 res, myverb; 2009c47ff048SShreyansh Jain 2010c47ff048SShreyansh Jain myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED : 2011c47ff048SShreyansh Jain QM_MCR_VERB_QUERYWQ; 2012c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 2013c47ff048SShreyansh Jain mcc->querywq.channel.id = cpu_to_be16(wq->channel.id); 2014c47ff048SShreyansh Jain qm_mc_commit(&p->p, myverb); 2015c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 2016c47ff048SShreyansh Jain cpu_relax(); 2017c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); 2018c47ff048SShreyansh Jain res = mcr->result; 2019c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) { 2020c47ff048SShreyansh Jain int i, array_len; 2021c47ff048SShreyansh Jain 2022c47ff048SShreyansh Jain wq->channel.id = be16_to_cpu(mcr->querywq.channel.id); 2023c47ff048SShreyansh Jain array_len = ARRAY_SIZE(mcr->querywq.wq_len); 2024c47ff048SShreyansh Jain for (i = 0; i < array_len; i++) 2025c47ff048SShreyansh Jain wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]); 2026c47ff048SShreyansh Jain } 2027c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 2028c47ff048SShreyansh Jain pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); 2029c47ff048SShreyansh Jain return -EIO; 2030c47ff048SShreyansh Jain } 2031c47ff048SShreyansh Jain return 0; 2032c47ff048SShreyansh Jain } 2033c47ff048SShreyansh Jain 2034c47ff048SShreyansh Jain int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, 2035c47ff048SShreyansh Jain struct qm_mcr_cgrtestwrite *result) 2036c47ff048SShreyansh Jain { 2037c47ff048SShreyansh Jain struct qm_mc_command *mcc; 2038c47ff048SShreyansh Jain struct qm_mc_result *mcr; 2039c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 2040c47ff048SShreyansh Jain 2041c47ff048SShreyansh Jain u8 res; 2042c47ff048SShreyansh Jain 2043c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 2044c47ff048SShreyansh Jain mcc->cgrtestwrite.cgid = cgr->cgrid; 2045c47ff048SShreyansh Jain mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); 2046c47ff048SShreyansh Jain mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; 2047c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); 2048c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 2049c47ff048SShreyansh Jain cpu_relax(); 2050c47ff048SShreyansh Jain 
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); 2051c47ff048SShreyansh Jain res = mcr->result; 2052c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) 2053c47ff048SShreyansh Jain *result = mcr->cgrtestwrite; 2054c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 2055c47ff048SShreyansh Jain pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); 2056c47ff048SShreyansh Jain return -EIO; 2057c47ff048SShreyansh Jain } 2058c47ff048SShreyansh Jain return 0; 2059c47ff048SShreyansh Jain } 2060c47ff048SShreyansh Jain 2061c47ff048SShreyansh Jain int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) 2062c47ff048SShreyansh Jain { 2063c47ff048SShreyansh Jain struct qm_mc_command *mcc; 2064c47ff048SShreyansh Jain struct qm_mc_result *mcr; 2065c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 2066c47ff048SShreyansh Jain u8 res; 2067c47ff048SShreyansh Jain unsigned int i; 2068c47ff048SShreyansh Jain 2069c47ff048SShreyansh Jain mcc = qm_mc_start(&p->p); 2070c47ff048SShreyansh Jain mcc->querycgr.cgid = cgr->cgrid; 2071c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); 2072c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 2073c47ff048SShreyansh Jain cpu_relax(); 2074c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); 2075c47ff048SShreyansh Jain res = mcr->result; 2076c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) 2077c47ff048SShreyansh Jain *cgrd = mcr->querycgr; 2078c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 2079c47ff048SShreyansh Jain pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); 2080c47ff048SShreyansh Jain return -EIO; 2081c47ff048SShreyansh Jain } 2082c47ff048SShreyansh Jain cgrd->cgr.wr_parm_g.word = 2083c47ff048SShreyansh Jain be32_to_cpu(cgrd->cgr.wr_parm_g.word); 2084c47ff048SShreyansh Jain cgrd->cgr.wr_parm_y.word = 2085c47ff048SShreyansh Jain be32_to_cpu(cgrd->cgr.wr_parm_y.word); 2086c47ff048SShreyansh Jain cgrd->cgr.wr_parm_r.word = 2087c47ff048SShreyansh Jain be32_to_cpu(cgrd->cgr.wr_parm_r.word); 2088c47ff048SShreyansh Jain cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ); 2089c47ff048SShreyansh Jain cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres); 2090c47ff048SShreyansh Jain for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++) 2091c47ff048SShreyansh Jain cgrd->cscn_targ_swp[i] = 2092c47ff048SShreyansh Jain be32_to_cpu(cgrd->cscn_targ_swp[i]); 2093c47ff048SShreyansh Jain return 0; 2094c47ff048SShreyansh Jain } 2095c47ff048SShreyansh Jain 2096c47ff048SShreyansh Jain int qman_query_congestion(struct qm_mcr_querycongestion *congestion) 2097c47ff048SShreyansh Jain { 2098c47ff048SShreyansh Jain struct qm_mc_result *mcr; 2099c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 2100c47ff048SShreyansh Jain u8 res; 2101c47ff048SShreyansh Jain unsigned int i; 2102c47ff048SShreyansh Jain 2103c47ff048SShreyansh Jain qm_mc_start(&p->p); 2104c47ff048SShreyansh Jain qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); 2105c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(&p->p))) 2106c47ff048SShreyansh Jain cpu_relax(); 2107c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 2108c47ff048SShreyansh Jain QM_MCC_VERB_QUERYCONGESTION); 2109c47ff048SShreyansh Jain res = mcr->result; 2110c47ff048SShreyansh Jain if (res == QM_MCR_RESULT_OK) 2111c47ff048SShreyansh Jain *congestion = mcr->querycongestion; 2112c47ff048SShreyansh Jain if (res != QM_MCR_RESULT_OK) { 2113c47ff048SShreyansh Jain 
pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); 2114c47ff048SShreyansh Jain return -EIO; 2115c47ff048SShreyansh Jain } 2116c47ff048SShreyansh Jain for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++) 2117c47ff048SShreyansh Jain congestion->state.state[i] = 2118c47ff048SShreyansh Jain be32_to_cpu(congestion->state.state[i]); 2119c47ff048SShreyansh Jain return 0; 2120c47ff048SShreyansh Jain } 2121c47ff048SShreyansh Jain 2122f40d5a53SNipun Gupta int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags) 2123c47ff048SShreyansh Jain { 2124c47ff048SShreyansh Jain struct qman_portal *p = get_affine_portal(); 2125c47ff048SShreyansh Jain uint32_t vdqcr; 2126c47ff048SShreyansh Jain int ret = -EBUSY; 2127c47ff048SShreyansh Jain 2128f40d5a53SNipun Gupta vdqcr = vdqcr_flags; 2129c47ff048SShreyansh Jain vdqcr |= QM_VDQCR_NUMFRAMES_SET(num); 2130c47ff048SShreyansh Jain 2131c47ff048SShreyansh Jain if ((fq->state != qman_fq_state_parked) && 2132c47ff048SShreyansh Jain (fq->state != qman_fq_state_retired)) { 2133c47ff048SShreyansh Jain ret = -EINVAL; 2134c47ff048SShreyansh Jain goto out; 2135c47ff048SShreyansh Jain } 2136c47ff048SShreyansh Jain if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { 2137c47ff048SShreyansh Jain ret = -EBUSY; 2138c47ff048SShreyansh Jain goto out; 2139c47ff048SShreyansh Jain } 2140c47ff048SShreyansh Jain vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; 2141c47ff048SShreyansh Jain 2142c47ff048SShreyansh Jain if (!p->vdqcr_owned) { 2143c47ff048SShreyansh Jain FQLOCK(fq); 2144c7c3a329SHemant Agrawal if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { 2145c7c3a329SHemant Agrawal FQUNLOCK(fq); 2146c47ff048SShreyansh Jain goto escape; 2147c7c3a329SHemant Agrawal } 2148c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_VDQCR); 2149c47ff048SShreyansh Jain FQUNLOCK(fq); 2150c47ff048SShreyansh Jain p->vdqcr_owned = fq; 2151c47ff048SShreyansh Jain ret = 0; 2152c47ff048SShreyansh Jain } 2153c47ff048SShreyansh Jain escape: 2154c47ff048SShreyansh Jain if (!ret) 2155c47ff048SShreyansh Jain qm_dqrr_vdqcr_set(&p->p, vdqcr); 2156c47ff048SShreyansh Jain 2157c47ff048SShreyansh Jain out: 2158c47ff048SShreyansh Jain return ret; 2159c47ff048SShreyansh Jain } 2160c47ff048SShreyansh Jain 2161c47ff048SShreyansh Jain int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, 2162c47ff048SShreyansh Jain u32 vdqcr) 2163c47ff048SShreyansh Jain { 2164c47ff048SShreyansh Jain struct qman_portal *p; 2165c47ff048SShreyansh Jain int ret = -EBUSY; 2166c47ff048SShreyansh Jain 2167c47ff048SShreyansh Jain if ((fq->state != qman_fq_state_parked) && 2168c47ff048SShreyansh Jain (fq->state != qman_fq_state_retired)) 2169c47ff048SShreyansh Jain return -EINVAL; 2170c47ff048SShreyansh Jain if (vdqcr & QM_VDQCR_FQID_MASK) 2171c47ff048SShreyansh Jain return -EINVAL; 2172c47ff048SShreyansh Jain if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) 2173c47ff048SShreyansh Jain return -EBUSY; 2174c47ff048SShreyansh Jain vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; 2175c47ff048SShreyansh Jain 2176c47ff048SShreyansh Jain p = get_affine_portal(); 2177c47ff048SShreyansh Jain 2178c47ff048SShreyansh Jain if (!p->vdqcr_owned) { 2179c47ff048SShreyansh Jain FQLOCK(fq); 2180c7c3a329SHemant Agrawal if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { 2181c7c3a329SHemant Agrawal FQUNLOCK(fq); 2182c47ff048SShreyansh Jain goto escape; 2183c7c3a329SHemant Agrawal } 2184c47ff048SShreyansh Jain fq_set(fq, QMAN_FQ_STATE_VDQCR); 2185c47ff048SShreyansh Jain FQUNLOCK(fq); 2186c47ff048SShreyansh Jain p->vdqcr_owned = fq; 2187c47ff048SShreyansh Jain ret = 0; 

static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_eqcr_is_empty(void)
{
	struct qman_portal *p = get_affine_portal();
	u8 avail;

	update_eqcr_ci(p, 0);
	avail = qm_eqcr_get_fill(&p->p);
	return (avail == 0);
}

void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
{
	if (affine) {
		struct qman_portal *p = get_affine_portal();

		p->cb_dc_ern = handler;
	} else
		cb_dc_ern = handler;
}
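
/*
 * Common EQCR-entry acquisition path for the enqueue variants below: grab
 * the next free EQCR entry (stashed or prefetched, depending on the portal
 * configuration), fill in the DCA field, FQID, tag and frame descriptor
 * (converted to big endian), and leave the verb byte for the commit.
 * Returns NULL if the ring is full.
 */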
static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
						   struct qman_fq *fq,
						   const struct qm_fd *fd,
						   u32 flags)
{
	struct qm_eqcr_entry *eq;
	u8 avail;

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		return NULL;

	if (flags & QMAN_ENQUEUE_FLAG_DCA)
		eq->dca = QM_EQCR_DCA_ENABLE |
			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
				QM_EQCR_DCA_PARK : 0) |
			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
	eq->fqid = cpu_to_be32(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	eq->tag = cpu_to_be32(fq->key);
#else
	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
	eq->fd = *fd;
	cpu_to_hw_fd(&eq->fd);
	return eq;
}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	/* Factor the below out, it's used from qman_enqueue_orp() too */
	return 0;
}
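
/*
 * Illustrative caller sketch (not from this file): qman_enqueue() returns
 * -EBUSY when no EQCR entry is free, so a simple caller can spin until the
 * consumer index advances:
 *
 *	while (qman_enqueue(fq, &fd, 0) != 0)
 *		cpu_relax();
 */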

int qman_enqueue_multi(struct qman_fq *fq,
		       const struct qm_fd *fd, u32 *flags,
		       int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i = 0, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq->fqid_le;
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);
		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
			eq->dca = QM_EQCR_DCA_ENABLE |
				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
		}
		i++;
		/* Wrap the cursor within the 64B-entry EQCR ring */
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* Write the verb byte of each recorded entry in a second pass, so
	 * that the cache-line flushes below complete faster.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
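
/*
 * Illustrative caller sketch (not part of this file): qman_enqueue_multi()
 * can legitimately send fewer frames than requested when the EQCR runs out
 * of free entries, so callers are expected to retry the unsent tail:
 *
 *	int sent = 0;
 *	while (sent < n)
 *		sent += qman_enqueue_multi(fq, &fds[sent], NULL, n - sent);
 *
 * (A real caller would bound the retry loop; this is a sketch only.)
 */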

int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
		      u32 *flags, int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i = 0, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq[sent]->fqid_le;
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);
		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
			eq->dca = QM_EQCR_DCA_ENABLE |
				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
		}
		i++;

		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* Write the verb byte of each recorded entry in a second pass, so
	 * that the cache-line flushes below complete faster.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them.
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}

int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Process ORP-specifics here */
	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
	else {
		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
		if (flags & QMAN_ENQUEUE_FLAG_NESN)
			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
		else
			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
	}
	eq->seqnum = cpu_to_be16(orp_seqnum);
	eq->orp = cpu_to_be32(orp->fqid);
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
			0 : QM_EQCR_VERB_CMD_ENQUEUE) |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));

	return 0;
}
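
/*
 * Order-restoration sketch (illustrative, not from this file): frames that
 * were dequeued out of order are re-enqueued through an ORP frame queue
 * with their original sequence numbers, letting QMan reinsert them in
 * order. "seq" below is a hypothetical per-frame value kept by the caller.
 *
 *	while (qman_enqueue_orp(dest_fq, &fd, 0, orp_fq, seq))
 *		cpu_relax();
 */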

int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;
	u8 verb = QM_MCC_VERB_MODIFYCGR;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
	mcc->initcgr.cgr.wr_parm_g.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
	mcc->initcgr.cgr.wr_parm_y.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
	mcc->initcgr.cgr.wr_parm_r.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
	mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
	mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);

	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	res = mcr->result;
	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
}

#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
					QM_CHANNEL_SWPORTAL0))
#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
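
/*
 * Sketch of building a CGR configuration (illustrative; the we_mask and
 * enable bit names are assumed to come from the QMan headers this file
 * already includes):
 *
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CSCN_EN;	// write-enable CSCN field only
 *	opts.cgr.cscn_en = QM_CGR_EN;		// generate state-change events
 *
 * Pass &opts with QMAN_CGR_FLAG_USE_INIT to initialise the CGR from
 * scratch, or without it to modify the existing CGR in place.
 */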

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret;
	struct qman_portal *p;

	/* We have to check that the provided CGRID is within the limits of
	 * the data-structures, for obvious reasons. However we'll let h/w
	 * take care of determining whether it's within the limits of what
	 * exists on the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	/* if no opts specified, just add it to the list */
	if (!opts)
		goto add_list;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		goto release_lock;
	if (opts)
		local_opts = *opts;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
	else
		/* Overwrite TARG */
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
			TARG_MASK(p);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		goto release_lock;
add_list:
	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success, but
		 * scream and wail to the log file.
		 */
		pr_crit("CGR HW state partially modified\n");
		ret = 0;
		goto release_lock;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
							      cgr->cgrid))
		cgr->cb(p, cgr, 1);
release_lock:
	spin_unlock(&p->cgr_lock);
	return ret;
}
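
/*
 * Illustrative registration sketch (names other than the qman_* API are
 * hypothetical): a driver embeds a struct qman_cgr, points .cb at its
 * congestion-state-change handler and registers it on the current portal:
 *
 *	static struct qman_cgr my_cgr = {
 *		.cgrid = MY_CGRID,		// hypothetical CGR id
 *		.cb = my_cscn_handler,		// hypothetical callback
 *	};
 *
 *	ret = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 *
 * qman_delete_cgr() must later be called from the same portal, as the
 * channel check below enforces.
 */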

int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts)
{
	struct qm_mcc_initcgr local_opts;
	struct qm_mcr_querycgr cgr_state;
	int ret;

	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
		return -EINVAL;
	}
	/* We have to check that the provided CGRID is within the limits of
	 * the data-structures, for obvious reasons. However we'll let h/w
	 * take care of determining whether it's within the limits of what
	 * exists on the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		return ret;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	if (opts)
		local_opts = *opts;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
			QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
			TARG_DCP_MASK(dcp_portal);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
				      &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);

	return ret;
}

int qman_delete_cgr(struct qman_cgr *cgr)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		pr_crit("Attempting to delete cgr from a different portal than"
			" it was created on: created 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock(&p->cgr_lock);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly.
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if ((i->cgrid == cgr->cgrid) && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
			~(TARG_MASK(p));
	ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock(&p->cgr_lock);
put_portal:
	return ret;
}
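
/*
 * qman_shutdown_fq() below recovers a frame queue regardless of its
 * current state. In rough outline (a summary of the code that follows,
 * not an additional contract):
 *
 *	1. Query the FQ's state; if it is already OOS there is nothing to do.
 *	2. If scheduled/parked, issue ALTER_RETIRE and, when retirement is
 *	   pending, drain the FQ via SDQCR/VDQCR until the FQRN appears.
 *	3. Wait for the ORL to drain (FQRL message), then issue ALTER_OOS.
 */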

int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct qm_portal *low_p;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	u8 state;
	int orl_empty, fq_empty, drain = 0;
	u32 result;
	u32 channel, wq;
	u16 dest_wq;

	p = get_affine_portal();
	low_p = &p->p;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(low_p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		return 0; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(low_p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);

	/* Need to store these since the MCR gets reused */
	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
	channel = dest_wq & 0x7;
	wq = dest_wq >> 3;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		result = mcr->result; /* Make a copy as we reuse MCR below */

		if (result == QM_MCR_RESULT_PENDING) {
			/* Need to wait for the FQRN in the message ring,
			 * which will only occur once the FQ has been drained.
			 * In order for the FQ to drain, the portal needs to
			 * be set to dequeue from the channel the FQ is
			 * scheduled on.
			 */
			const struct qm_mr_entry *msg;
			const struct qm_dqrr_entry *dqrr = NULL;
			int found_fqrn = 0;
			__maybe_unused u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < (u16)(qm_channel_pool1 + 15)) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				pr_info("Cannot recover FQ 0x%x,"
					" it is scheduled on channel 0x%x",
					fqid, channel);
				return -EBUSY;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			while (!found_fqrn) {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_pvb_update(low_p);
				dqrr = qm_dqrr_current(low_p);
				while (dqrr) {
					qm_dqrr_cdc_consume_1ptr(
						low_p, dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
				/* Process message ring too */
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
				while (msg) {
					if ((msg->ern.verb &
					     QM_MR_VERB_TYPE_MASK)
					    == QM_MR_VERB_FQRN)
						found_fqrn = 1;
					qm_mr_next(low_p);
					qm_mr_cci_consume_to_current(low_p);
					qm_mr_pvb_update(low_p);
					msg = qm_mr_current(low_p);
				}
				cpu_relax();
			}
		}
		if (result != QM_MCR_RESULT_OK &&
		    result != QM_MCR_RESULT_PENDING) {
			/* error */
			pr_err("qman_retire_fq failed on FQ 0x%x,"
			       " result=0x%x\n", fqid, result);
			return -1;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/* ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/* Retirement succeeded, check to see if FQ needs
		 * to be drained.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			fq_empty = 0;
			do {
				const struct qm_dqrr_entry *dqrr = NULL;
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(low_p, vdqcr);

				/* Wait for a dequeue to occur */
				while (dqrr == NULL) {
					qm_dqrr_pvb_update(low_p);
					dqrr = qm_dqrr_current(low_p);
					if (!dqrr)
						cpu_relax();
				}
				/* Process the dequeues, making sure to
				 * empty the ring completely.
				 */
				while (dqrr) {
					if (dqrr->fqid == fqid &&
					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
						fq_empty = 1;
					qm_dqrr_cdc_consume_1ptr(low_p,
								 dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
			} while (fq_empty == 0);
		}
		qm_dqrr_sdqcr_set(low_p, 0);

		/* Wait for the ORL to have been completely drained */
		while (orl_empty == 0) {
			const struct qm_mr_entry *msg;

			qm_mr_pvb_update(low_p);
			msg = qm_mr_current(low_p);
			while (msg) {
				if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
				    QM_MR_VERB_FQRL)
					orl_empty = 1;
				qm_mr_next(low_p);
				qm_mr_cci_consume_to_current(low_p);
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
			}
			cpu_relax();
		}
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err(
			"OOS after drain failed on FQID 0x%x, result 0x%x\n",
			       fqid, mcr->result);
			return -1;
		}
		return 0;
"OOS after drain Failed on FQID 0x%x, result 0x%x\n", 2854c47ff048SShreyansh Jain fqid, mcr->result); 2855c47ff048SShreyansh Jain return -1; 2856c47ff048SShreyansh Jain } 2857c47ff048SShreyansh Jain return 0; 2858c47ff048SShreyansh Jain 2859c47ff048SShreyansh Jain case QM_MCR_NP_STATE_RETIRED: 2860c47ff048SShreyansh Jain /* Send OOS Command */ 2861c47ff048SShreyansh Jain mcc = qm_mc_start(low_p); 2862c47ff048SShreyansh Jain mcc->alterfq.fqid = cpu_to_be32(fqid); 2863c47ff048SShreyansh Jain qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS); 2864c47ff048SShreyansh Jain while (!(mcr = qm_mc_result(low_p))) 2865c47ff048SShreyansh Jain cpu_relax(); 2866c47ff048SShreyansh Jain DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == 2867c47ff048SShreyansh Jain QM_MCR_VERB_ALTER_OOS); 2868c47ff048SShreyansh Jain if (mcr->result) { 2869c47ff048SShreyansh Jain pr_err("OOS Failed on FQID 0x%x\n", fqid); 2870c47ff048SShreyansh Jain return -1; 2871c47ff048SShreyansh Jain } 2872c47ff048SShreyansh Jain return 0; 2873c47ff048SShreyansh Jain 2874c47ff048SShreyansh Jain } 2875c47ff048SShreyansh Jain return -1; 2876c47ff048SShreyansh Jain } 2877