/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"

/**
 * Allocate a chunk of memory. The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	return rte_zmalloc(NULL, size, 0);
}

/**
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	rte_free(addr);
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  __rte_unused const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;	/* skip RSS header */

	/*
	 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type ==
		     FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		/* do nothing */
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *msg = (const void *)rsp;

		t4_handle_fw_rpl(q->adapter, msg->data);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (const void *)rsp;

		filter_rpl(q->adapter, p);
	} else {
		dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
			opcode);
	}
out:
	return 0;
}

/**
 * Setup sge control queues to pass control information.
 */
int setup_sge_ctrl_txq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0, i = 0;

	for_each_port(adapter, i) {
		char name[RTE_ETH_NAME_MAX_LEN];
		struct sge_ctrl_txq *q = &s->ctrlq[i];

		q->q.size = 1024;
		err = t4_sge_alloc_ctrl_txq(adapter, q,
					    adapter->eth_dev, i,
					    s->fw_evtq.cntxt_id,
					    rte_socket_id());
		if (err) {
			dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
				err);
			goto out;
		}
		snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
		q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
						     RTE_CACHE_LINE_SIZE,
						     RTE_MBUF_PRIV_ALIGN,
						     RTE_MBUF_DEFAULT_BUF_SIZE,
						     SOCKET_ID_ANY);
		if (!q->mb_pool) {
			dev_err(adapter, "Can't create ctrl pool for port: %d",
				i);
			err = -ENOMEM;
			goto out;
		}
	}
	return 0;
out:
	t4_free_sge_resources(adapter);
	return err;
}

/**
 * cxgbe_poll_for_completion: Poll rxq for completion
 * @q: rxq to poll
 * @us: microseconds to delay
 * @cnt: number of times to poll
 * @c: completion to check for 'done' status
 *
 * Polls the rxq for replies until the completion is marked done or the
 * poll count expires.
 */
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
			      unsigned int cnt, struct t4_completion *c)
{
	unsigned int i;
	unsigned int work_done, budget = 4;

	if (!c)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		cxgbe_poll(q, NULL, budget, &work_done);
		t4_os_lock(&c->lock);
		if (c->done) {
			t4_os_unlock(&c->lock);
			return 0;
		}
		t4_os_unlock(&c->lock);
		udelay(us);
	}
	return -ETIMEDOUT;
}

int setup_sge_fwevtq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0;
	int msi_idx = 0;

	err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
			       msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
			       rte_socket_id());
	return err;
}
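
/*
 * Return the index of the SGE holdoff timer value in s->timer_val[]
 * that is closest to the requested time. closest_thres() below does
 * the same for the interrupt packet-count thresholds in
 * s->counter_val[].
 */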
static int closest_timer(const struct sge *s, int time)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt)
{
	struct adapter *adap = q->adapter;
	unsigned int timer_val;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    V_FW_PARAMS_PARAM_X(
			    FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
				closest_timer(&adap->sge, us);

	if ((us | cnt) == 0)
		q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
	else
		q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
				 V_QINTR_CNT_EN(cnt > 0);
	return 0;
}
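
/*
 * For example, cfg_queues() below initializes the Ethernet Rx response
 * queues with init_rspq(adap, &r->rspq, 5, 32, ...), i.e. the hold-off
 * timer value closest to 5us combined with the packet-count threshold
 * closest to 32.
 */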

/**
 * Free TID tables.
 */
static void tid_free(struct tid_info *t)
{
	if (t->tid_tab) {
		if (t->ftid_bmap)
			rte_bitmap_free(t->ftid_bmap);

		if (t->ftid_bmap_array)
			t4_os_free(t->ftid_bmap_array);

		t4_os_free(t->tid_tab);
	}

	memset(t, 0, sizeof(struct tid_info));
}

/**
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int ftid_bmap_size;
	unsigned int max_ftids = t->nftids;

	ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       max_ftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_os_alloc(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->ftid_tab = (struct filter_entry *)&t->tid_tab[t->ntids];
	t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
	if (!t->ftid_bmap_array) {
		tid_free(t);
		return -ENOMEM;
	}

	t4_os_lock_init(&t->ftid_lock);
	t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
				       ftid_bmap_size);
	if (!t->ftid_bmap) {
		tid_free(t);
		return -ENOMEM;
	}

	return 0;
}

static inline bool is_x_1g_port(const struct link_config *lc)
{
	return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}

static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}

inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
		      unsigned int us, unsigned int cnt,
		      unsigned int size, unsigned int iqe_size)
{
	q->adapter = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

int cfg_queue_count(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int max_queues = s->max_ethqsets / adap->params.nports;

	if ((eth_dev->data->nb_rx_queues < 1) ||
	    (eth_dev->data->nb_tx_queues < 1))
		return -EINVAL;

	if ((eth_dev->data->nb_rx_queues > max_queues) ||
	    (eth_dev->data->nb_tx_queues > max_queues))
		return -EINVAL;

	if (eth_dev->data->nb_rx_queues > pi->rss_size)
		return -EINVAL;

	/* We must configure RSS, since config has changed */
	pi->flags &= ~PORT_RSS_DONE;

	pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
	pi->n_tx_qsets = eth_dev->data->nb_tx_queues;

	return 0;
}
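
/*
 * Set up the default queue configuration: distribute the available
 * queue sets across the 1G/10G ports and initialize the default
 * response queue, free list and Tx queue sizes. cfg_queue_count()
 * above later applies the per-port queue counts actually requested by
 * the application.
 */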
void cfg_queues(struct rte_eth_dev *eth_dev)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i, nb_ports = 0, qidx = 0;
	unsigned int q_per_port = 0;

	if (!(adap->flags & CFG_QUEUES)) {
		for_each_port(adap, i) {
			struct port_info *tpi = adap2pinfo(adap, i);

			nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
				    is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
		}

		/*
		 * By default, allocate up to one queue set per core for
		 * each 1G/10G port.
		 */
		if (nb_ports)
			q_per_port = (MAX_ETH_QSETS -
				      (adap->params.nports - nb_ports)) /
				     nb_ports;

		if (q_per_port > config->lcore_count)
			q_per_port = config->lcore_count;

		for_each_port(adap, i) {
			struct port_info *pi = adap2pinfo(adap, i);

			pi->first_qset = qidx;

			/* Initially n_rx_qsets == n_tx_qsets */
			pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
					  is_x_1g_port(&pi->link_cfg)) ?
					 q_per_port : 1;
			pi->n_tx_qsets = pi->n_rx_qsets;

			if (pi->n_rx_qsets > pi->rss_size)
				pi->n_rx_qsets = pi->rss_size;

			qidx += pi->n_rx_qsets;
		}

		s->max_ethqsets = qidx;

		for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
			struct sge_eth_rxq *r = &s->ethrxq[i];

			init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
			r->usembufs = 1;
			r->fl.size = (r->usembufs ? 1024 : 72);
		}

		for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
			s->ethtxq[i].q.size = 1024;

		init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
		adap->flags |= CFG_QUEUES;
	}
}

void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
{
	t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
				 &pi->stats_base);
}

void cxgbe_stats_reset(struct port_info *pi)
{
	t4_clr_port_stats(pi->adapter, pi->tx_chan);
}

static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base;

	/* For T5, only relative offset inside the PCIe BAR is passed */
	mem_win0_base = MEMWIN0_BASE;

	/*
	 * Set up memory window for accessing adapter memory ranges. (Read
	 * back MA register to ensure that changes propagate before we attempt
	 * to use the new values.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					 MEMWIN_NIC),
		     mem_win0_base | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					MEMWIN_NIC));
}

int init_rss(struct adapter *adap)
{
	unsigned int i;

	if (is_pf4(adap)) {
		int err;

		err = t4_init_rss_mode(adap, adap->mbox);
		if (err)
			return err;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
		if (!pi->rss)
			return -ENOMEM;

		pi->rss_hf = CXGBE_RSS_HF_ALL;
	}
	return 0;
}

/**
 * Dump basic information about the adapter.
 */
void print_adapter_info(struct adapter *adap)
{
	/**
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(adap);
}

void print_port_info(struct adapter *adap)
{
	int i;
	char buf[80];
	struct rte_pci_addr *loc = &adap->pdev->addr;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);
		char *bufp = buf;

		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
			bufp += sprintf(bufp, "100M/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
			bufp += sprintf(bufp, "1G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
			bufp += sprintf(bufp, "25G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
			bufp += sprintf(bufp, "40G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
			bufp += sprintf(bufp, "50G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
			bufp += sprintf(bufp, "100G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s",
			t4_get_port_type_description(
				(enum fw_port_type)pi->port_type));

		dev_info(adap,
			 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
			 loc->domain, loc->bus, loc->devid, loc->function,
			 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
			 (adap->flags & USING_MSIX) ? " MSI-X" :
			 (adap->flags & USING_MSI) ? " MSI" : "");
	}
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
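
/*
 * Device arguments are supplied as comma-separated key=value pairs
 * appended to the PCI device string, e.g. (address purely
 * illustrative) "-w 02:00.4,keep_ovlan=1". check_devargs_handler()
 * above only accepts the value "1"; any other value leaves the
 * corresponding feature disabled.
 */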

static void configure_vlan_types(struct adapter *adapter)
{
	struct rte_pci_device *pdev = adapter->pdev;
	int i;

	for_each_port(adapter, i) {
		/* OVLAN Type 0x88a8 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x88a8));
		/* OVLAN Type 0x9100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x9100));
		/* OVLAN Type 0x8100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x8100));

		/* IVLAN 0x8100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
				 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
				 V_IVLAN_ETYPE(0x8100));

		t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_OVLAN_EN2 | F_IVLAN_EN,
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_OVLAN_EN2 | F_IVLAN_EN);
	}

	if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
		t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
				       V_RM_OVLAN(1), V_RM_OVLAN(0));
}
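
/*
 * Enable PCIe extended tags for the function and adjust the chip's
 * maximum/minimum outstanding tag settings (the T6 register layout
 * differs from earlier chips).
 */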
static void configure_pcie_ext_tag(struct adapter *adapter)
{
	u16 v;
	int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

	if (!pos)
		return;

	if (pos > 0) {
		t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
		v |= PCI_EXP_DEVCTL_EXT_TAG;
		t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
		if (is_t6(adapter->params.chip)) {
			t4_set_reg_field(adapter, A_PCIE_CFG2,
					 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
					 V_T6_TOTMAXTAG(7));
			t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
					 V_T6_MINTAG(M_T6_MINTAG),
					 V_T6_MINTAG(8));
		} else {
			t4_set_reg_field(adapter, A_PCIE_CFG2,
					 V_TOTMAXTAG(M_TOTMAXTAG),
					 V_TOTMAXTAG(3));
			t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
					 V_MINTAG(M_MINTAG),
					 V_MINTAG(8));
		}
	}
}

/*
 * Tweak configuration based on system architecture, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	u8 rx_dma_offset;

	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
				    T5_LAST_REV);

	/*
	 * Keep the chip default offset to deliver Ingress packets into our
	 * DMA buffers to zero
	 */
	rx_dma_offset = 0;
	t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
			 V_PKTSHIFT(rx_dma_offset));

	t4_set_reg_field(adapter, A_SGE_FLM_CFG,
			 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
			 V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

	t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
			 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

	t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
			 V_IDMAARBROUNDROBIN(1U));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
			       F_CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	int cfg_addr;
	char config_name[20];

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  F_PIORSTMODE | F_PIORST);
		if (ret < 0) {
			dev_warn(adapter, "Firmware reset failed, error %d\n",
				 -ret);
			goto bye;
		}
	}

	cfg_addr = t4_flash_cfg_addr(adapter);
	if (cfg_addr < 0) {
		ret = cfg_addr;
		dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
			 -ret);
		goto bye;
	}

	strcpy(config_name, "On Flash");
	mtype = FW_MEMTYPE_CF_FLASH;
	maddr = cfg_addr;

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File. We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
			    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
			    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
			    FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	/*
	 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware. A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		dev_info(adapter, "%s: Going for embedded config in firmware..\n",
			 __func__);

		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		strcpy(config_name, "Firmware Default");
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = be32_to_cpu(caps_cmd.finiver);
	finicsum = be32_to_cpu(caps_cmd.finicsum);
	cfcsum = be32_to_cpu(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * If we're a pure NIC driver then disable all offloading facilities.
	 * This will allow the firmware to optimize aspects of the hardware
	 * configuration which will result in improved performance.
	 */
	caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
					  FW_CAPS_CONFIG_NIC_ETHOFLD));
	caps_cmd.toecaps = 0;
	caps_cmd.iscsicaps = 0;
	caps_cmd.rdmacaps = 0;
	caps_cmd.fcoecaps = 0;

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0) {
		dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Tweak configuration based on system architecture, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0) {
		dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
		goto bye;
	}

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0) {
		dev_warn(adapter, "Initializing Firmware failed, error %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	dev_info(adapter,
		 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ... (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter, "\"%s\" configuration file error %d\n",
			 config_name, -ret);

	dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
	return ret;
}

static int adap_init0(struct adapter *adap)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret = 0;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	int reset = 1;
	int mbox = adap->mbox;

	/*
	 * Contact FW, advertising Master capability.
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap, "%s: could not connect to FW, error %d\n",
			__func__, -ret);
		goto bye;
	}

	CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
			 adap->mbox, ret);

	if (ret == mbox)
		adap->flags |= MASTER_PF;

	if (state == DEV_STATE_INIT) {
		/*
		 * Force halt and reset FW because a previous instance may have
		 * exited abnormally without properly shutting down
		 */
		ret = t4_fw_halt(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to halt. Exit.\n");
			goto bye;
		}

		ret = t4_fw_restart(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to restart. Exit.\n");
			goto bye;
		}
		state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
	}

	t4_get_version_info(adap);

	ret = t4_get_core_clock(adap, &adap->params.vpd);
	if (ret < 0) {
		dev_err(adap, "%s: could not get core clock, error %d\n",
			__func__, -ret);
		goto bye;
	}

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters. Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap, "Coming up as %s: Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap,
				"No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
			goto bye;
		}
	}
	if (ret < 0) {
		dev_err(adap, "could not initialize adapter, error %d\n", -ret);
		goto bye;
	}

	/* Find out what ports are available to us. */
	v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0) {
		dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
			__func__, ret);
		goto bye;
	}

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
		  adap->params.nports);

	/*
	 * Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0) {
		dev_err(adap, "t4_sge_init failed with error %d\n",
			-ret);
		goto bye;
	}

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	 V_FW_PARAMS_PARAM_Y(0) | \
	 V_FW_PARAMS_PARAM_Z(0))
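
	/*
	 * FW_PARAM_DEV()/FW_PARAM_PFVF() build the 32-bit parameter
	 * mnemonics expected by t4_query_params(): a device-global
	 * parameter for the former and a per-PF/VF parameter (with the
	 * Y/Z indices zeroed) for the latter.
	 */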
	params[0] = FW_PARAM_PFVF(FILTER_START);
	params[1] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->tids.ftid_base = val[0];
	adap->tids.nftids = val[1] - val[0] + 1;

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	/* query tid-related parameters */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		goto bye;
	adap->tids.ntids = val[0];

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages. Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability. Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * The MTU/MSS Table is initialized by now, so load their values. If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/*
		 * The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons. For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header. In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8. So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8. On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	t4_init_tp_params(adap);
	configure_pcie_ext_tag(adap);
	configure_vlan_types(adap);

	adap->params.drv_memwin = MEMWIN_NIC;
	adap->flags |= FW_OK;
	dev_debug(adap, "%s: returning zero..\n", __func__);
	return 0;

	/*
	 * Something bad happened. If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/**
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 *
 * This is the OS-dependent handler for port module changes. It is
 * invoked when a port module is removed or inserted for any OS-specific
 * processing.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct port_info *pi = adap2pinfo(adap, port_id);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
			 mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adap, "Port%d: unsupported port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adap, "Port%d: unknown port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adap, "Port%d: transceiver module error\n",
			 pi->port_id);
	else
		dev_info(adap, "Port%d: unknown module type %d inserted\n",
			 pi->port_id, pi->mod_type);
}
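
/*
 * force_linkup - report the link as up regardless of its actual state
 * @adap: the adapter
 *
 * Only meaningful for the VF driver (the PF driver always returns
 * false): when the CXGBE_DEVARG_FORCE_LINK_UP devarg is supplied,
 * link_start() marks the port's link status as up without waiting for
 * a link event from firmware.
 */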
inline bool force_linkup(struct adapter *adap)
{
	struct rte_pci_device *pdev = adap->pdev;

	if (is_pf4(adap))
		return false;	/* force_linkup not required for pf driver */
	if (!cxgbe_get_devargs(pdev->device.devargs,
			       CXGBE_DEVARG_FORCE_LINK_UP))
		return false;
	return true;
}

/**
 * link_start - enable a port
 * @pi: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
int link_start(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	int ret;
	unsigned int mtu;

	mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
	      (ETHER_HDR_LEN + ETHER_CRC_LEN);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
			    -1, 1, true);
	if (ret == 0) {
		ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
				    pi->xact_addr_filt,
				    (u8 *)&pi->eth_dev->data->mac_addrs[0],
				    true, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0 && is_pf4(adapter))
		ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/*
		 * Enabling a Virtual Interface can result in an interrupt
		 * during the processing of the VI Enable command and, in some
		 * paths, result in an attempt to issue another command in the
		 * interrupt context. Thus, we disable interrupts during the
		 * course of the VI Enable command ...
		 */
		ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
					  true, true, false);
	}

	if (ret == 0 && force_linkup(adapter))
		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	return ret;
}

/**
 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
 * @pi: the port
 * @rss_hf: Hash configuration to apply
 */
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	u64 flags = 0;
	u16 rss;
	int err;

	/* Should never be called before setting up sge eth rx queues */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		dev_err(adapter, "%s: No RXQs available on port %d\n",
			__func__, pi->port_id);
		return -EINVAL;
	}

	/* Don't allow unsupported hash functions */
	if (rss_hf & ~CXGBE_RSS_HF_ALL)
		return -EINVAL;

	if (rss_hf & ETH_RSS_IPV4)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	if (rss_hf & ETH_RSS_IPV6)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rxq[0].rspq.abs_id;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed. We'll
	 * use our first ingress queue ...
	 */
	err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
			       flags, rss);
	return err;
}

/**
 * cxgbe_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	/* Should never be called before setting up sge eth rx queues */
	BUG_ON(!(adapter->flags & FULL_INIT_DONE));

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	rte_free(rss);
	return err;
}

/**
 * setup_rss - configure RSS
 * @pi: the port
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.
 */
int setup_rss(struct port_info *pi)
{
	int j, err;
	struct adapter *adapter = pi->adapter;

	dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
		  __func__, pi->rss_size, pi->n_rx_qsets);

	if (!(pi->flags & PORT_RSS_DONE)) {
		if (adapter->flags & FULL_INIT_DONE) {
			/* Fill default values with equal distribution */
			for (j = 0; j < pi->rss_size; j++)
				pi->rss[j] = j % pi->n_rx_qsets;

			err = cxgbe_write_rss(pi, pi->rss);
			if (err)
				return err;

			err = cxgbe_write_rss_conf(pi, pi->rss_hf);
			if (err)
				return err;
			pi->flags |= PORT_RSS_DONE;
		}
	}
	return 0;
}

/*
 * Enable interrupt generation for the given Rx response queue.
 */
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
					  T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
		     V_SEINTARM(q->intr_params) |
		     V_INGRESSQID(q->cntxt_id));
}

void cxgbe_enable_rx_queues(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i;

	for (i = 0; i < pi->n_rx_qsets; i++)
		enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
}

/**
 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @speed_caps: Device Info Speed Capabilities
 *
 * Translate a Firmware Port Capabilities specification to Device Info
 * Speed Capabilities.
 */
static void fw_caps_to_speed_caps(enum fw_port_type port_type,
				  unsigned int fw_caps,
				  u32 *speed_caps)
{
#define SET_SPEED(__speed_name) \
	do { \
		*speed_caps |= ETH_LINK_ ## __speed_name; \
	} while (0)

#define FW_CAPS_TO_SPEED(__fw_name) \
	do { \
		if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
			SET_SPEED(__fw_name); \
	} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		FW_CAPS_TO_SPEED(SPEED_100M);
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KR:
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
		SET_SPEED(SPEED_1G);
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_SPEED(SPEED_40G);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
	case FW_PORT_TYPE_KR_SFP28:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		FW_CAPS_TO_SPEED(SPEED_25G);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_SPEED(SPEED_50G);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		FW_CAPS_TO_SPEED(SPEED_25G);
		FW_CAPS_TO_SPEED(SPEED_40G);
		FW_CAPS_TO_SPEED(SPEED_50G);
		FW_CAPS_TO_SPEED(SPEED_100G);
		break;

	default:
		break;
	}

#undef FW_CAPS_TO_SPEED
#undef SET_SPEED
}

/**
 * cxgbe_get_speed_caps - Fetch supported speed capabilities
 * @pi: Underlying port's info
 * @speed_caps: Device Info speed capabilities
 *
 * Fetch supported speed capabilities of the underlying port.
 */
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
	*speed_caps = 0;

	fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
			      speed_caps);

	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
		*speed_caps |= ETH_LINK_SPEED_FIXED;
}

/**
 * cxgbe_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 */
int cxgbe_up(struct adapter *adap)
{
	enable_rx(adap, &adap->sge.fw_evtq);
	t4_sge_tx_monitor_start(adap);
	if (is_pf4(adap))
		t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;

	/* TODO: deadman watchdog ?? */
	return 0;
}

/*
 * Close the port
 */
int cxgbe_down(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	int err = 0;

	err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
	if (err) {
		dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
		return err;
	}

	t4_reset_link_config(adapter, pi->pidx);
	return 0;
}

/*
 * Release resources when all the ports have been stopped.
 */
void cxgbe_close(struct adapter *adapter)
{
	struct port_info *pi;
	int i;

	if (adapter->flags & FULL_INIT_DONE) {
		if (is_pf4(adapter))
			t4_intr_disable(adapter);
		tid_free(&adapter->tids);
		t4_sge_tx_monitor_stop(adapter);
		t4_free_sge_resources(adapter);
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox,
					   adapter->pf, 0, pi->viid);
			rte_free(pi->eth_dev->data->mac_addrs);
			/* Skip first port since it'll be freed by DPDK stack */
			if (i) {
				rte_free(pi->eth_dev->data->dev_private);
				rte_eth_dev_release_port(pi->eth_dev);
			}
		}
		adapter->flags &= ~FULL_INIT_DONE;
	}

	if (is_pf4(adapter) && (adapter->flags & FW_OK))
		t4_fw_bye(adapter, adapter->mbox);
}
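
/**
 * cxgbe_probe - initialize the adapter and allocate its ports
 * @adapter: the adapter to probe
 *
 * Brings up the connection to firmware, maps the BAR2 doorbell region
 * on T5/T6 chips, allocates an ethdev and port_info for every port on
 * the adapter, and sets up the default queue and RSS configuration.
 */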
int cxgbe_probe(struct adapter *adapter)
{
	struct port_info *pi;
	int chip;
	int func, i;
	int err = 0;
	u32 whoami;

	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	chip = t4_get_chip_type(adapter,
				CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
	if (chip < 0)
		return chip;

	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
	       G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	adapter->mbox = func;
	adapter->pf = func;

	t4_os_lock_init(&adapter->mbox_lock);
	TAILQ_INIT(&adapter->mbox_list);
	t4_os_lock_init(&adapter->win0_lock);

	err = t4_prep_adapter(adapter);
	if (err)
		return err;

	setup_memwin(adapter);
	err = adap_init0(adapter);
	if (err) {
		dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
			__func__, err);
		goto out_free;
	}

	if (!is_t4(adapter->params.chip)) {
		/*
		 * The userspace doorbell BAR is split evenly into doorbell
		 * regions, each associated with an egress queue. If this
		 * per-queue region is large enough (at least UDBS_SEG_SIZE)
		 * then it can be used to submit a tx work request with an
		 * implied doorbell. Enable write combining on the BAR if
		 * there is room for such work requests.
		 */
		int s_qpp, qpp, num_seg;

		s_qpp = (S_QUEUESPERPAGEPF0 +
			 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
			 adapter->pf);
		qpp = 1 << ((t4_read_reg(adapter,
					 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
			    & M_QUEUESPERPAGEPF0);
		num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
		if (qpp > num_seg)
			dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");

		adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
		if (!adapter->bar2) {
			dev_err(adapter, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free;
		}
		t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
			     V_STATMODE(0));
	}

	for_each_port(adapter, i) {
		const unsigned int numa_node = rte_socket_id();
		char name[RTE_ETH_NAME_MAX_LEN];
		struct rte_eth_dev *eth_dev;

		snprintf(name, sizeof(name), "%s_%d",
			 adapter->pdev->device.name, i);

		if (i == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = adapter->eth_dev;
			goto allocate_mac;
		}

		/*
		 * now do all data allocation - for eth_dev structure,
		 * and internal (private) data for the remaining ports
		 */

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev)
			goto out_free;

		eth_dev->data->dev_private =
			rte_zmalloc_socket(name, sizeof(struct port_info),
					   RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private)
			goto out_free;

allocate_mac:
		pi = (struct port_info *)eth_dev->data->dev_private;
		adapter->port[i] = pi;
		pi->eth_dev = eth_dev;
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		pi->pidx = i;

		pi->eth_dev->device = &adapter->pdev->device;
		pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
		pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
		pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);

		pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
							   ETHER_ADDR_LEN, 0);
		if (!pi->eth_dev->data->mac_addrs) {
			dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
				__func__);
			err = -1;
			goto out_free;
		}

		if (i > 0) {
			/* First port will be notified by upper layer */
			rte_eth_dev_probing_finish(eth_dev);
		}
	}

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
		if (err) {
			dev_err(adapter, "%s: t4_port_init failed with err %d\n",
				__func__, err);
			goto out_free;
		}
	}

	cfg_queues(adapter->eth_dev);

	print_adapter_info(adapter);
	print_port_info(adapter);

	if (tid_init(&adapter->tids) < 0) {
		/* Disable filtering support */
		dev_warn(adapter, "could not allocate TID table, "
			 "filter support disabled. Continuing\n");
	}

	err = init_rss(adapter);
	if (err)
		goto out_free;

	return 0;

out_free:
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		if (pi->viid != 0)
			t4_free_vi(adapter, adapter->mbox, adapter->pf,
				   0, pi->viid);
		/* Skip first port since it'll be de-allocated by DPDK */
		if (i == 0)
			continue;
		if (pi->eth_dev) {
			if (pi->eth_dev->data->dev_private)
				rte_free(pi->eth_dev->data->dev_private);
			rte_eth_dev_release_port(pi->eth_dev);
		}
	}

	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->mbox);
	return -err;
}