/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium Inc. 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"

struct octeontx_pko_iomem {
    uint8_t *va;
    phys_addr_t iova;
    size_t size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}

struct octeontx_pko_fc_ctl_s {
    int64_t buf_cnt;
    int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};

struct octeontx_pkovf {
    uint8_t *bar0;
    uint8_t *bar2;
    uint16_t domain;
    uint16_t vfid;
};

struct octeontx_pko_vf_ctl_s {
    rte_spinlock_t lock;

    struct octeontx_pko_iomem fc_iomem;
    struct octeontx_pko_fc_ctl_s *fc_ctl;
    struct octeontx_pkovf pko[PKO_VF_MAX];
    struct {
        uint64_t chanid;
    } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;

static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
    int vf_ix;

    vf_ix = txq / PKO_VF_NUM_DQ;
    return pko_vf_ctl.pko[vf_ix].bar0;
}

static int
octeontx_pko_dq_gdq(uint16_t txq)
{
    return txq % PKO_VF_NUM_DQ;
}

/**
 * Open a PKO DQ.
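 *
 * Programs the initial flow-control count for the DQ, selects packet-count
 * (DEPTH) reporting via the watermark control register, issues the
 * PKO_DQOP_E::OPEN command through an LDADD to the DQ open register and
 * clears SW_XOFF. Returns the DQ depth reported by the hardware on success,
 * -EINVAL on bad parameters or -EIO if the open command fails.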
 */
static inline int
octeontx_pko_dq_open(uint16_t txq)
{
    unsigned int reg_off;
    uint8_t *vf_bar0;
    uint64_t rtn;
    int gdq;

    vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
    gdq = octeontx_pko_dq_gdq(txq);

    if (unlikely(gdq < 0 || vf_bar0 == NULL))
        return -EINVAL;

    *(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
        PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

    rte_wmb();

    octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
             vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

    /* Set the register to return descriptor (packet) count as DEPTH */
    /* KIND=1, NCB_QUERY_RSP=0 */
    octeontx_write64(1ull << PKO_DQ_KIND_BIT,
             vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));

    reg_off = PKO_VF_DQ_OP_OPEN(gdq);

    rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

    /* PKO_DQOP_E::OPEN */
    if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
        return -EIO;

    switch (rtn >> PKO_DQ_STATUS_BIT) {
    case 0xC: /* DQALREADYCREATED */
    case 0x0: /* PASS */
        break;
    default:
        return -EIO;
    }

    /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
    octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

    return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}

/**
 * Close a PKO DQ
 * Flush all packets pending.
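 *
 * Issues the PKO_DQOP_E::CLOSE command and returns the depth still reported
 * for the DQ; callers are expected to drain pending packets first (see
 * octeontx_pko_dq_drain()).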
 */
static inline int
octeontx_pko_dq_close(uint16_t txq)
{
    unsigned int reg_off;
    uint8_t *vf_bar0;
    uint64_t rtn;
    int res;

    vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
    res = octeontx_pko_dq_gdq(txq);

    if (unlikely(res < 0 || vf_bar0 == NULL))
        return -EINVAL;

    reg_off = PKO_VF_DQ_OP_CLOSE(res);

    rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

    /* PKO_DQOP_E::CLOSE */
    if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
        return -EIO;

    switch (rtn >> PKO_DQ_STATUS_BIT) {
    case 0xD: /* DQNOTCREATED */
    case 0x0: /* PASS */
        break;
    default:
        return -EIO;
    }

    res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
    return res;
}

/* Flush all packets pending on a DQ */
static inline int
octeontx_pko_dq_drain(uint16_t txq)
{
    unsigned int gdq;
    uint8_t *vf_bar0;
    uint64_t reg;
    int res, timo = PKO_DQ_DRAIN_TO;

    vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
    res = octeontx_pko_dq_gdq(txq);
    gdq = res;

    /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
    octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
    /* Wait until buffers leave DQs */
    reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
    while (reg && timo > 0) {
        rte_delay_us(100);
        timo--;
        reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
    }
    /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
    octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

    return reg;
}

static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
                 unsigned int dq_num, unsigned int dq_from)
{
    unsigned int dq, dq_cnt;
    unsigned int dq_base;

    dq_cnt = 0;
    dq = dq_from;
    while (dq < RTE_DIM(ctl->dq_map)) {
        dq_base = dq;
        dq_cnt = 0;
        /* Check the bound before dereferencing dq_map[dq] to avoid an
         * out-of-bounds read on the last iteration.
         */
        while (dq < RTE_DIM(ctl->dq_map) &&
               ctl->dq_map[dq].chanid == ~chanid) {
            dq_cnt++;
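            /* Stop as soon as dq_num consecutive DQs matching this
             * channel id have been found, starting at dq_base.
             */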
            if (dq_cnt == dq_num)
                return dq_base;
            dq++;
        }
        dq++;
    }
    return -1;
}

static inline void
octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
                 unsigned int dq_base, unsigned int dq_num)
{
    unsigned int dq, dq_cnt;

    dq_cnt = 0;
    while (dq_cnt < dq_num) {
        dq = dq_base + dq_cnt;

        octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
                 chanid);

        ctl->dq_map[dq].chanid = ~chanid;
        dq_cnt++;
    }
}

static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
              unsigned int dq_num, uint64_t chanid)
{
    const uint64_t null_chanid = ~0ull;
    int dq;

    rte_spinlock_lock(&ctl->lock);

    dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
    if (dq < 0 || (unsigned int)dq != dq_base) {
        rte_spinlock_unlock(&ctl->lock);
        return -1;
    }
    octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

    rte_spinlock_unlock(&ctl->lock);

    return 0;
}

static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
    const uint64_t null_chanid = ~0ull;
    unsigned int dq = 0, dq_cnt = 0;

    rte_spinlock_lock(&ctl->lock);
    while (dq < RTE_DIM(ctl->dq_map)) {
        if (ctl->dq_map[dq].chanid == ~chanid) {
            ctl->dq_map[dq].chanid = ~null_chanid;
            dq_cnt++;
        }
        dq++;
    }
    rte_spinlock_unlock(&ctl->lock);

    return dq_cnt > 0 ? 0 : -EINVAL;
}
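
/*
 * Bind dq_num contiguous DQs starting at dq_base to channel chanid.
 * Returns 0 on success, -1 if the requested range is not entirely free.
 */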
int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
    struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
    int res;

    res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
    if (res < 0)
        return -1;

    return 0;
}

int
octeontx_pko_channel_close(int chanid)
{
    struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
    int res;

    res = octeontx_pko_dq_free(ctl, chanid);
    if (res < 0)
        return -1;

    return 0;
}

static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
    unsigned int dq_vf;
    unsigned int dq, dq_cnt;

    dq_cnt = 0;
    dq = 0;
    while (dq < RTE_DIM(ctl->dq_map)) {
        dq_vf = dq / PKO_VF_NUM_DQ;

        if (!ctl->pko[dq_vf].bar0) {
            dq += PKO_VF_NUM_DQ;
            continue;
        }

        if (ctl->dq_map[dq].chanid != ~chanid) {
            dq++;
            continue;
        }

        if (octeontx_pko_dq_open(dq) < 0)
            break;

        dq_cnt++;
        dq++;
    }

    return dq_cnt;
}

int
octeontx_pko_channel_start(int chanid)
{
    struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
    int dq_cnt;

    dq_cnt = octeontx_pko_chan_start(ctl, chanid);
    if (dq_cnt < 0)
        return -1;

    return dq_cnt;
}

static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
    unsigned int dq, dq_cnt, dq_vf;
    int res;

    dq_cnt = 0;
    dq = 0;
    while (dq < RTE_DIM(ctl->dq_map)) {
        dq_vf = dq / PKO_VF_NUM_DQ;

        if (!ctl->pko[dq_vf].bar0) {
            dq += PKO_VF_NUM_DQ;
            continue;
        }

        if (ctl->dq_map[dq].chanid != ~chanid) {
            dq++;
            continue;
        }
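
        /* This DQ belongs to the channel: drain pending packets,
         * then close it.
         */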
        res = octeontx_pko_dq_drain(dq);
        if (res > 0)
            octeontx_log_err("draining DQ%d, buffers left: %x",
                     dq, res);

        res = octeontx_pko_dq_close(dq);
        if (res < 0)
            octeontx_log_err("closing DQ%d failed", dq);

        dq_cnt++;
        dq++;
    }
    return dq_cnt;
}

int
octeontx_pko_channel_stop(int chanid)
{
    struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

    octeontx_pko_chan_stop(ctl, chanid);
    return 0;
}

static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
               void *out, size_t out_elem_size,
               size_t dq_num, octeontx_pko_dq_getter_t getter)
{
    octeontx_dq_t curr;
    unsigned int dq_vf;
    unsigned int dq;

    RTE_SET_USED(out_elem_size);
    memset(&curr, 0, sizeof(octeontx_dq_t));

    dq_vf = dq_num / PKO_VF_NUM_DQ;
    dq = dq_num % PKO_VF_NUM_DQ;

    if (!ctl->pko[dq_vf].bar0)
        return -EINVAL;

    if (ctl->dq_map[dq_num].chanid != ~chanid)
        return -EINVAL;

    uint8_t *iter = (uint8_t *)out;
    curr.lmtline_va = ctl->pko[dq_vf].bar2;
    curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
        + PKO_VF_DQ_OP_SEND((dq), 0));
    curr.fc_status_va = ctl->fc_ctl + dq;

    octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
             curr.lmtline_va, curr.ioreg_va,
             curr.fc_status_va);

    getter(&curr, (void *)iter);
    return 0;
}

int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
                   size_t dq_num, octeontx_pko_dq_getter_t getter)
{
    struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
    int dq_cnt;

    dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
                        dq_num, getter);
    if (dq_cnt < 0)
        return -1;

    return dq_cnt;
}

int
octeontx_pko_vf_count(void)
{
    int vf_cnt;
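
    /* Assumes VF ids are assigned contiguously from 0: pkovf_probe()
     * stores each VF at pko[vfid], so counting the run of populated
     * slots gives the number of probed VFs.
     */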
    vf_cnt = 0;
    while (pko_vf_ctl.pko[vf_cnt].bar0)
        vf_cnt++;

    return vf_cnt;
}

int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
    int dq_ix;
    uint64_t reg;
    uint8_t *vf_bar0;
    size_t vf_idx;
    size_t fc_mem_size;

    fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
            pko_vf_count * PKO_VF_NUM_DQ;

    pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
    if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
        octeontx_log_err("fc_iomem: not enough memory");
        return -ENOMEM;
    }

    pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2phy((void *)
                    pko_vf_ctl.fc_iomem.va);
    pko_vf_ctl.fc_iomem.size = fc_mem_size;

    pko_vf_ctl.fc_ctl =
        (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;
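
    /*
     * Each DQ owns one octeontx_pko_fc_ctl_s slot in this region; the
     * slot is exported to users through fc_status_va (see
     * octeontx_pko_channel_query()). The loop below programs the
     * 128-byte aligned IOVA of each VF's slice into PKO_VF_DQ_FC_CONFIG
     * together with the HYST_BITS, STRIDE and ENABLE fields.
     */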

    /* Configure Flow-Control feature for all DQs of open VFs */
    for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
        dq_ix = vf_idx * PKO_VF_NUM_DQ;

        vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

        reg = (pko_vf_ctl.fc_iomem.iova +
               (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
        reg |=          /* BASE */
            (0x2 << 3) |    /* HYST_BITS */
            (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
            (0x1 << 0); /* ENABLE */

        octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);

        octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
                 vf_bar0, (int)vf_idx, reg);
    }
    return 0;
}

void
octeontx_pko_fc_free(void)
{
    rte_free(pko_vf_ctl.fc_iomem.va);
}

static void
octeontx_pkovf_setup(void)
{
    static bool init_once;

    if (!init_once) {
        unsigned int i;

        rte_spinlock_init(&pko_vf_ctl.lock);

        pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
        pko_vf_ctl.fc_ctl = NULL;

        for (i = 0; i < PKO_VF_MAX; i++) {
            pko_vf_ctl.pko[i].bar0 = NULL;
            pko_vf_ctl.pko[i].bar2 = NULL;
            pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
            pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
        }

        for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
            pko_vf_ctl.dq_map[i].chanid = 0;

        init_once = true;
    }
}

/* PKOVF pcie device */
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
    uint64_t val;
    uint16_t vfid;
    uint16_t domain;
    uint8_t *bar0;
    uint8_t *bar2;
    struct octeontx_pkovf *res;

    RTE_SET_USED(pci_drv);

    /* For secondary processes, the primary has done all the work */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    if (pci_dev->mem_resource[0].addr == NULL ||
        pci_dev->mem_resource[2].addr == NULL) {
        octeontx_log_err("Empty bars %p %p",
                 pci_dev->mem_resource[0].addr,
                 pci_dev->mem_resource[2].addr);
        return -ENODEV;
    }
    bar0 = pci_dev->mem_resource[0].addr;
    bar2 = pci_dev->mem_resource[2].addr;

    octeontx_pkovf_setup();

    /* get vfid and domain */
    val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
    domain = (val >> 7) & 0xffff;
    vfid = (val >> 23) & 0xffff;

    if (unlikely(vfid >= PKO_VF_MAX)) {
        octeontx_log_err("pko: Invalid vfid %d", vfid);
        return -EINVAL;
    }

    res = &pko_vf_ctl.pko[vfid];
    res->vfid = vfid;
    res->domain = domain;
    res->bar0 = bar0;
    res->bar2 = bar2;

    octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
    return 0;
}

#define PCI_VENDOR_ID_CAVIUM            0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF   0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
    {
        RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
                PCI_DEVICE_ID_OCTEONTX_PKO_VF)
    },
    {
        .vendor_id = 0,
    },
};

static struct rte_pci_driver pci_pkovf = {
    .id_table = pci_pkovf_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
    .probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);