/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2017 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          __rte_unused const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++; /* skip RSS header */

        /*
         * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type ==
                     FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adapter,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                /* do nothing */
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *msg = (const void *)rsp;

                t4_handle_fw_rpl(q->adapter, msg->data);
        } else {
                dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
                        opcode);
        }
out:
        return 0;
}
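
/*
 * Allocate and initialize the firmware event queue and bind it to
 * fwevtq_handler() above. No free list or packet mempool is attached
 * (the NULL arguments): this queue carries only firmware control
 * messages, never packet payloads.
 */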
int setup_sge_fwevtq(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int err = 0;
        int msi_idx = 0;

        err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
                               msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
                               rte_socket_id());
        return err;
}

static int closest_timer(const struct sge *s, int time)
{
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
                               unsigned int cnt)
{
        struct adapter *adap = q->adapter;
        unsigned int timer_val;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            V_FW_PARAMS_PARAM_X(
                            FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
                                closest_timer(&adap->sge, us);

        if ((us | cnt) == 0)
                q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
        else
                q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
                                 V_QINTR_CNT_EN(cnt > 0);
        return 0;
}
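
/*
 * Example usage of cxgb4_set_rspq_intr_params() above: the per-queue
 * defaults programmed by cfg_queues() below are equivalent to
 *
 *	cxgb4_set_rspq_intr_params(q, 5, 32);
 *
 * i.e. hold off interrupts until roughly 5us have elapsed or 32 entries
 * have accumulated. Both values are rounded to the nearest holdoff
 * timer/threshold actually supported by the hardware, via
 * closest_timer()/closest_thres().
 */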
static inline bool is_x_1g_port(const struct link_config *lc)
{
        return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
}

static inline bool is_x_10g_port(const struct link_config *lc)
{
        unsigned int speeds, high_speeds;

        speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
        high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);

        return high_speeds != 0;
}

inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
                      unsigned int us, unsigned int cnt,
                      unsigned int size, unsigned int iqe_size)
{
        q->adapter = adap;
        cxgb4_set_rspq_intr_params(q, us, cnt);
        q->iqe_len = iqe_size;
        q->size = size;
}

int cfg_queue_count(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int max_queues = s->max_ethqsets / adap->params.nports;

        if ((eth_dev->data->nb_rx_queues < 1) ||
            (eth_dev->data->nb_tx_queues < 1))
                return -EINVAL;

        if ((eth_dev->data->nb_rx_queues > max_queues) ||
            (eth_dev->data->nb_tx_queues > max_queues))
                return -EINVAL;

        if (eth_dev->data->nb_rx_queues > pi->rss_size)
                return -EINVAL;

        /* We must configure RSS, since config has changed */
        pi->flags &= ~PORT_RSS_DONE;

        pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
        pi->n_tx_qsets = eth_dev->data->nb_tx_queues;

        return 0;
}
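
/*
 * Distribute the available queue sets across all ports at probe time.
 * Guarded by the CFG_QUEUES flag, so repeated calls are no-ops.
 */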
void cfg_queues(struct rte_eth_dev *eth_dev)
{
        struct rte_config *config = rte_eal_get_configuration();
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int i, nb_ports = 0, qidx = 0;
        unsigned int q_per_port = 0;

        if (!(adap->flags & CFG_QUEUES)) {
                for_each_port(adap, i) {
                        struct port_info *tpi = adap2pinfo(adap, i);

                        nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
                                    is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
                }

                /*
                 * By default, allocate up to one queue set per lcore for
                 * each 1G/10G port.
                 */
                if (nb_ports)
                        q_per_port = (MAX_ETH_QSETS -
                                      (adap->params.nports - nb_ports)) /
                                     nb_ports;

                if (q_per_port > config->lcore_count)
                        q_per_port = config->lcore_count;

                for_each_port(adap, i) {
                        struct port_info *pi = adap2pinfo(adap, i);

                        pi->first_qset = qidx;

                        /* Initially n_rx_qsets == n_tx_qsets */
                        pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
                                          is_x_1g_port(&pi->link_cfg)) ?
                                         q_per_port : 1;
                        pi->n_tx_qsets = pi->n_rx_qsets;

                        if (pi->n_rx_qsets > pi->rss_size)
                                pi->n_rx_qsets = pi->rss_size;

                        qidx += pi->n_rx_qsets;
                }

                s->max_ethqsets = qidx;

                for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                        struct sge_eth_rxq *r = &s->ethrxq[i];

                        init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
                        r->usembufs = 1;
                        r->fl.size = (r->usembufs ? 1024 : 72);
                }

                for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
                        s->ethtxq[i].q.size = 1024;

                init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
                adap->flags |= CFG_QUEUES;
        }
}

void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
{
        t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
                                 &pi->stats_base);
}

void cxgbe_stats_reset(struct port_info *pi)
{
        t4_clr_port_stats(pi->adapter, pi->tx_chan);
}

static void setup_memwin(struct adapter *adap)
{
        u32 mem_win0_base;

        /* For T5, only relative offset inside the PCIe BAR is passed */
        mem_win0_base = MEMWIN0_BASE;

        /*
         * Set up memory window for accessing adapter memory ranges. (Read
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
        t4_write_reg(adap,
                     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
                                         MEMWIN_NIC),
                     mem_win0_base | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
        t4_read_reg(adap,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
                                        MEMWIN_NIC));
}

static int init_rss(struct adapter *adap)
{
        unsigned int i;
        int err;

        err = t4_init_rss_mode(adap, adap->mbox);
        if (err)
                return err;

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
                if (!pi->rss)
                        return -ENOMEM;
        }
        return 0;
}

/**
 * Dump basic information about the adapter.
 */
static void print_adapter_info(struct adapter *adap)
{
        /* Hardware/Firmware/etc. Version/Revision IDs */
        t4_dump_version_info(adap);
}

static void print_port_info(struct adapter *adap)
{
        int i;
        char buf[80];
        struct rte_pci_addr *loc = &adap->pdev->addr;

        for_each_port(adap, i) {
                const struct port_info *pi = &adap->port[i];
                char *bufp = buf;

                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                        bufp += sprintf(bufp, "100M/");
                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
                        bufp += sprintf(bufp, "1G/");
                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                        bufp += sprintf(bufp, "10G/");
                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
                        bufp += sprintf(bufp, "25G/");
                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
                        bufp += sprintf(bufp, "40G/");
                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
                        bufp += sprintf(bufp, "100G/");
                if (bufp != buf)
                        --bufp;
                sprintf(bufp, "BASE-%s",
                        t4_get_port_type_description(
                                (enum fw_port_type)pi->port_type));

                dev_info(adap,
                         " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
                         loc->domain, loc->bus, loc->devid, loc->function,
                         CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                         (adap->flags & USING_MSIX) ? " MSI-X" :
                         (adap->flags & USING_MSI) ? " MSI" : "");
        }
}

/*
 * Enable PCIe Extended Tags, so more transactions can be outstanding at
 * once, and raise the chip's total/minimum tag limits accordingly (T6
 * uses different field encodings than T4/T5).
 */
static void configure_pcie_ext_tag(struct adapter *adapter)
{
        u16 v;
        int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

        if (!pos)
                return;

        t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
        v |= PCI_EXP_DEVCTL_EXT_TAG;
        t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
        if (is_t6(adapter->params.chip)) {
                t4_set_reg_field(adapter, A_PCIE_CFG2,
                                 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
                                 V_T6_TOTMAXTAG(7));
                t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
                                 V_T6_MINTAG(M_T6_MINTAG),
                                 V_T6_MINTAG(8));
        } else {
                t4_set_reg_field(adapter, A_PCIE_CFG2,
                                 V_TOTMAXTAG(M_TOTMAXTAG),
                                 V_TOTMAXTAG(3));
                t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
                                 V_MINTAG(M_MINTAG),
                                 V_MINTAG(8));
        }
}

/*
 * Tweak configuration based on system architecture, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
        u8 rx_dma_offset;

        /*
         * Fix up various Host-Dependent Parameters like Page Size, Cache
         * Line Size, etc. The firmware default is for a 4KB Page Size and
         * 64B Cache Line Size ...
         */
        t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
                                    T5_LAST_REV);

        /*
         * Keep the chip default offset to deliver Ingress packets into our
         * DMA buffers to zero
         */
        rx_dma_offset = 0;
        t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
                         V_PKTSHIFT(rx_dma_offset));

        t4_set_reg_field(adapter, A_SGE_FLM_CFG,
                         V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
                         V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

        t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
                         V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

        t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
                         V_IDMAARBROUNDROBIN(1U));

        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
        t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
                               F_CSUM_HAS_PSEUDO_HDR, 0);

        return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 * The sequence is: optionally reset the chip, locate the Configuration
 * File in flash, have the firmware parse it via a CAPS_CONFIG command
 * (falling back to the firmware-embedded config if flash has none), trim
 * the advertised capabilities down to pure-NIC mode and write them back,
 * and finally tell the firmware to initialize itself.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
        struct fw_caps_config_cmd caps_cmd;
        unsigned long mtype = 0, maddr = 0;
        u32 finiver, finicsum, cfcsum;
        int ret;
        int config_issued = 0;
        int cfg_addr;
        char config_name[20];

        /*
         * Reset device if necessary.
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  F_PIORSTMODE | F_PIORST);
                if (ret < 0) {
                        dev_warn(adapter, "Firmware reset failed, error %d\n",
                                 -ret);
                        goto bye;
                }
        }

        cfg_addr = t4_flash_cfg_addr(adapter);
        if (cfg_addr < 0) {
                ret = cfg_addr;
                dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
                         -ret);
                goto bye;
        }

        strcpy(config_name, "On Flash");
        mtype = FW_MEMTYPE_CF_FLASH;
        maddr = cfg_addr;

        /*
         * Issue a Capability Configuration command to the firmware to get it
         * to parse the Configuration File. We don't use t4_fw_config_file()
         * because we want the ability to modify various features after we've
         * processed the configuration file ...
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 =
                cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
                            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
                            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
                            FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        /*
         * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
         * Configuration File in FLASH), our last-gasp effort is to use the
         * Firmware Configuration File which is embedded in the firmware. A
         * very few early versions of the firmware didn't have one embedded
         * but we can ignore those.
         */
        if (ret == -ENOENT) {
                dev_info(adapter, "%s: Going for embedded config in firmware..\n",
                         __func__);

                memset(&caps_cmd, 0, sizeof(caps_cmd));
                caps_cmd.op_to_write =
                        cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                    F_FW_CMD_REQUEST | F_FW_CMD_READ);
                caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
                                 sizeof(caps_cmd), &caps_cmd);
                strcpy(config_name, "Firmware Default");
        }

        config_issued = 1;
        if (ret < 0)
                goto bye;

        finiver = be32_to_cpu(caps_cmd.finiver);
        finicsum = be32_to_cpu(caps_cmd.finicsum);
        cfcsum = be32_to_cpu(caps_cmd.cfcsum);
        if (finicsum != cfcsum)
                dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
                         finicsum, cfcsum);

        /*
         * If we're a pure NIC driver then disable all offloading facilities.
         * This will allow the firmware to optimize aspects of the hardware
         * configuration which will result in improved performance.
         */
        caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
                                          FW_CAPS_CONFIG_NIC_ETHOFLD));
        caps_cmd.toecaps = 0;
        caps_cmd.iscsicaps = 0;
        caps_cmd.rdmacaps = 0;
        caps_cmd.fcoecaps = 0;

        /*
         * And now tell the firmware to use the configuration we just loaded.
         */
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         NULL);
        if (ret < 0) {
                dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
                         -ret);
                goto bye;
        }

        /*
         * Tweak configuration based on system architecture, etc.
         */
        ret = adap_init0_tweaks(adapter);
        if (ret < 0) {
                dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
                goto bye;
        }

        /*
         * And finally tell the firmware to initialize itself using the
         * parameters from the Configuration File.
         */
        ret = t4_fw_initialize(adapter, adapter->mbox);
        if (ret < 0) {
                dev_warn(adapter, "Initializing Firmware failed, error %d\n",
                         -ret);
                goto bye;
        }

        /*
         * Return successfully and note that we're operating with parameters
         * not supplied by the driver, rather than from hard-wired
         * initialization constants buried in the driver.
         */
        dev_info(adapter,
                 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);

        return 0;

        /*
         * Something bad happened. Return the error ... (If the "error"
         * is that there's no Configuration File on the adapter we don't
         * want to issue a warning since this is fairly common.)
         */
bye:
        if (config_issued && ret != -ENOENT)
                dev_warn(adapter, "\"%s\" configuration file error %d\n",
                         config_name, -ret);

        dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
        return ret;
}
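
/*
 * Phase 0 of adapter initialization: contact the firmware, negotiate
 * master status, load the device configuration, and cache the basic
 * operating parameters (port vector, SGE/TP settings, MTU table) in the
 * adapter structure.
 */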
static int adap_init0(struct adapter *adap)
{
        int ret = 0;
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
        int reset = 1;
        int mbox = adap->mbox;

        /*
         * Contact FW, advertising Master capability.
         */
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
        if (ret < 0) {
                dev_err(adap, "%s: could not connect to FW, error %d\n",
                        __func__, -ret);
                goto bye;
        }

        CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
                         adap->mbox, ret);

        if (ret == mbox)
                adap->flags |= MASTER_PF;

        if (state == DEV_STATE_INIT) {
                /*
                 * Force halt and reset FW because a previous instance may have
                 * exited abnormally without properly shutting down
                 */
                ret = t4_fw_halt(adap, adap->mbox, reset);
                if (ret < 0) {
                        dev_err(adap, "Failed to halt. Exit.\n");
                        goto bye;
                }

                ret = t4_fw_restart(adap, adap->mbox, reset);
                if (ret < 0) {
                        dev_err(adap, "Failed to restart. Exit.\n");
                        goto bye;
                }
                state = (enum dev_state)((unsigned int)state & ~DEV_STATE_INIT);
        }

        t4_get_version_info(adap);

        ret = t4_get_core_clock(adap, &adap->params.vpd);
        if (ret < 0) {
                dev_err(adap, "%s: could not get core clock, error %d\n",
                        __func__, -ret);
                goto bye;
        }

        /*
         * If the firmware is initialized already (and we're not forcing a
         * master initialization), note that we're living with existing
         * adapter parameters. Otherwise, it's time to try initializing the
         * adapter ...
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap, "Coming up as %s: Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
        } else {
                dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

                ret = adap_init0_config(adap, reset);
                if (ret == -ENOENT) {
                        dev_err(adap,
                                "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
                        goto bye;
                }
        }
        if (ret < 0) {
                dev_err(adap, "could not initialize adapter, error %d\n", -ret);
                goto bye;
        }

        /* Find out what ports are available to us. */
        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
        if (ret < 0) {
                dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
                        __func__, ret);
                goto bye;
        }

        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;

        dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
                  adap->params.nports);
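
        /*
         * port_vec is a bitmap of the ports visible to this PF. For
         * instance (hypothetical values), a 2-port adapter reports
         * port_vec = 0x3, giving nports = hweight32(0x3) = 2.
         */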
        /*
         * Give the SGE code a chance to pull in anything that it needs ...
         * Note that this must be called after we retrieve our VPD parameters
         * in order to know how to convert core ticks to seconds, etc.
         */
        ret = t4_sge_init(adap);
        if (ret < 0) {
                dev_err(adap, "t4_sge_init failed with error %d\n",
                        -ret);
                goto bye;
        }

        /*
         * Grab some of our basic operating parameters.
         */
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))

        /*
         * If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages. Older
         * firmware won't understand this and we'll just get
         * unencapsulated messages ...
         */
        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        val[0] = 1;
        (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

        /*
         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
         * capability. Earlier versions of the firmware didn't have the
         * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
         * permission to use ULPTX MEMWRITE DSGL.
         */
        if (is_t4(adap->params.chip)) {
                adap->params.ulptx_memwrite_dsgl = false;
        } else {
                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
                ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
                                      1, params, val);
                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
        }

        /*
         * The MTU/MSS Table is initialized by now, so load its values. If
         * we're initializing the adapter, then we'll make any modifications
         * we want to the MTU/MSS Table and also initialize the congestion
         * parameters.
         */
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        if (state != DEV_STATE_INIT) {
                int i;

                /*
                 * The default MTU Table contains values 1492 and 1500.
                 * However, for TCP, it's better to have two values which are
                 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
                 * This allows us to have a TCP Data Payload which is a
                 * multiple of 8 regardless of what combination of TCP Options
                 * are in use (always a multiple of 4 bytes) which is
                 * important for performance reasons. For instance, if no
                 * options are in use, then we have a 20-byte IP header and a
                 * 20-byte TCP header. In this case, a 1500-byte MSS would
                 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
                 * which is not a multiple of 8. So using an MSS of 1488 in
                 * this case results in a TCP Data Payload of 1448 bytes which
                 * is a multiple of 8. On the other hand, if 12-byte TCP Time
                 * Stamps have been negotiated, then an MTU of 1500 bytes
                 * results in a TCP Data Payload of 1448 bytes which, as
                 * above, is a multiple of 8 bytes ...
                 */
                for (i = 0; i < NMTUS; i++)
                        if (adap->params.mtus[i] == 1492) {
                                adap->params.mtus[i] = 1488;
                                break;
                        }

                t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                             adap->params.b_wnd);
        }
        t4_init_sge_params(adap);
        t4_init_tp_params(adap);
        configure_pcie_ext_tag(adap);

        adap->params.drv_memwin = MEMWIN_NIC;
        adap->flags |= FW_OK;
        dev_debug(adap, "%s: returning zero..\n", __func__);
        return 0;

        /*
         * Something bad happened. If a command timed out or failed with EIO,
         * the firmware is not operating within its spec or something
         * catastrophic happened to the HW/FW; stop issuing commands.
         */
bye:
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
}
/**
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 *
 * This is the OS-dependent handler for port module changes. It is
 * invoked when a port module is removed or inserted for any OS-specific
 * processing.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char * const mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct port_info *pi = &adap->port[port_id];

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
                         mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                dev_info(adap, "Port%d: unsupported port module inserted\n",
                         pi->port_id);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                dev_info(adap, "Port%d: unknown port module inserted\n",
                         pi->port_id);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                dev_info(adap, "Port%d: transceiver module error\n",
                         pi->port_id);
        else
                dev_info(adap, "Port%d: unknown module type %d inserted\n",
                         pi->port_id, pi->mod_type);
}

/**
 * link_start - enable a port
 * @pi: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
int link_start(struct port_info *pi)
{
        struct adapter *adapter = pi->adapter;
        int ret;
        unsigned int mtu;

        mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
              (ETHER_HDR_LEN + ETHER_CRC_LEN);

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
                            -1, 1, true);
        if (ret == 0) {
                ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
                                    pi->xact_addr_filt,
                                    (u8 *)&pi->eth_dev->data->mac_addrs[0],
                                    true, true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                /*
                 * Enabling a Virtual Interface can result in an interrupt
                 * during the processing of the VI Enable command and, in some
                 * paths, result in an attempt to issue another command in the
                 * interrupt context. Thus, we disable interrupts during the
                 * course of the VI Enable command ...
                 */
                ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
                                          true, true, false);
        }
        return ret;
}
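
/*
 * The RSS setup implemented by cxgb4_write_rss()/setup_rss() below, with
 * hypothetical sizes: given pi->rss_size = 64 and pi->n_rx_qsets = 4,
 * setup_rss() fills pi->rss with 0,1,2,3,0,1,2,3,... and
 * cxgb4_write_rss() translates each queue index into the absolute
 * response queue id of the corresponding ethrxq.
 */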
/**
 * cxgb4_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        /* Should never be called before setting up sge eth rx queues */
        BUG_ON(!(adapter->flags & FULL_INIT_DONE));

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /*
         * If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed. We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
                                       F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
                                       F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
                                       F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN |
                                       F_FW_RSS_VI_CONFIG_CMD_UDPEN,
                                       rss[0]);
        rte_free(rss);
        return err;
}

/**
 * setup_rss - configure RSS
 * @pi: the port to configure
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.
 */
int setup_rss(struct port_info *pi)
{
        int j, err;
        struct adapter *adapter = pi->adapter;

        dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
                  __func__, pi->rss_size, pi->n_rx_qsets);

        if (!(pi->flags & PORT_RSS_DONE)) {
                if (adapter->flags & FULL_INIT_DONE) {
                        /* Fill default values with equal distribution */
                        for (j = 0; j < pi->rss_size; j++)
                                pi->rss[j] = j % pi->n_rx_qsets;

                        err = cxgb4_write_rss(pi, pi->rss);
                        if (err)
                                return err;
                        pi->flags |= PORT_RSS_DONE;
                }
        }
        return 0;
}

/*
 * Arm an Rx response queue's holdoff timer and enable interrupt
 * generation for it.
 */
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
                     V_SEINTARM(q->intr_params) |
                     V_INGRESSQID(q->cntxt_id));
}
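
/*
 * Arm the holdoff timer/interrupt state of every Rx response queue
 * belonging to the given port.
 */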
void cxgbe_enable_rx_queues(struct port_info *pi)
{
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int i;

        for (i = 0; i < pi->n_rx_qsets; i++)
                enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
}

/**
 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @speed_caps: Device Info Speed Capabilities
 *
 * Translate a Firmware Port Capabilities specification to Device Info
 * Speed Capabilities.
 */
static void fw_caps_to_speed_caps(enum fw_port_type port_type,
                                  unsigned int fw_caps,
                                  u32 *speed_caps)
{
#define SET_SPEED(__speed_name) \
        do { \
                *speed_caps |= ETH_LINK_ ## __speed_name; \
        } while (0)

#define FW_CAPS_TO_SPEED(__fw_name) \
        do { \
                if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
                        SET_SPEED(__fw_name); \
        } while (0)

        switch (port_type) {
        case FW_PORT_TYPE_BT_SGMII:
        case FW_PORT_TYPE_BT_XFI:
        case FW_PORT_TYPE_BT_XAUI:
                FW_CAPS_TO_SPEED(SPEED_100M);
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);
                break;

        case FW_PORT_TYPE_KX4:
        case FW_PORT_TYPE_KX:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_QSFP_10G:
        case FW_PORT_TYPE_QSA:
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);
                break;

        case FW_PORT_TYPE_KR:
                SET_SPEED(SPEED_10G);
                break;

        case FW_PORT_TYPE_BP_AP:
        case FW_PORT_TYPE_BP4_AP:
                SET_SPEED(SPEED_1G);
                SET_SPEED(SPEED_10G);
                break;

        case FW_PORT_TYPE_BP40_BA:
        case FW_PORT_TYPE_QSFP:
                SET_SPEED(SPEED_40G);
                break;

        case FW_PORT_TYPE_CR_QSFP:
        case FW_PORT_TYPE_SFP28:
        case FW_PORT_TYPE_KR_SFP28:
                FW_CAPS_TO_SPEED(SPEED_1G);
                FW_CAPS_TO_SPEED(SPEED_10G);
                FW_CAPS_TO_SPEED(SPEED_25G);
                break;

        case FW_PORT_TYPE_CR2_QSFP:
                SET_SPEED(SPEED_50G);
                break;

        case FW_PORT_TYPE_KR4_100G:
        case FW_PORT_TYPE_CR4_QSFP:
                FW_CAPS_TO_SPEED(SPEED_25G);
                FW_CAPS_TO_SPEED(SPEED_40G);
                FW_CAPS_TO_SPEED(SPEED_100G);
                break;

        default:
                break;
        }

#undef FW_CAPS_TO_SPEED
#undef SET_SPEED
}

/**
 * cxgbe_get_speed_caps - Fetch supported speed capabilities
 * @pi: Underlying port's info
 * @speed_caps: Device Info speed capabilities
 *
 * Fetch supported speed capabilities of the underlying port.
 */
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
        *speed_caps = 0;

        fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
                              speed_caps);

        if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
                *speed_caps |= ETH_LINK_SPEED_FIXED;
}

/**
 * cxgbe_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 */
int cxgbe_up(struct adapter *adap)
{
        enable_rx(adap, &adap->sge.fw_evtq);
        t4_sge_tx_monitor_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;

        /* TODO: deadman watchdog ?? */
        return 0;
}

/*
 * Close the port
 */
int cxgbe_down(struct port_info *pi)
{
        struct adapter *adapter = pi->adapter;
        int err = 0;

        err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
        if (err) {
                dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
                return err;
        }

        t4_reset_link_config(adapter, pi->port_id);
        return 0;
}
/*
 * Release resources when all the ports have been stopped.
 */
void cxgbe_close(struct adapter *adapter)
{
        struct port_info *pi;
        int i;

        if (adapter->flags & FULL_INIT_DONE) {
                t4_intr_disable(adapter);
                t4_sge_tx_monitor_stop(adapter);
                t4_free_sge_resources(adapter);
                for_each_port(adapter, i) {
                        pi = adap2pinfo(adapter, i);
                        if (pi->viid != 0)
                                t4_free_vi(adapter, adapter->mbox,
                                           adapter->pf, 0, pi->viid);
                        rte_free(pi->eth_dev->data->mac_addrs);
                }
                adapter->flags &= ~FULL_INIT_DONE;
        }

        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->mbox);
}

int cxgbe_probe(struct adapter *adapter)
{
        struct port_info *pi;
        int chip;
        int func, i;
        int err = 0;
        u32 whoami;

        whoami = t4_read_reg(adapter, A_PL_WHOAMI);
        chip = t4_get_chip_type(adapter,
                                CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
        if (chip < 0)
                return chip;

        func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
               G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

        adapter->mbox = func;
        adapter->pf = func;

        t4_os_lock_init(&adapter->mbox_lock);
        TAILQ_INIT(&adapter->mbox_list);

        err = t4_prep_adapter(adapter);
        if (err)
                return err;

        setup_memwin(adapter);
        err = adap_init0(adapter);
        if (err) {
                dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
                        __func__, err);
                goto out_free;
        }

        if (!is_t4(adapter->params.chip)) {
                /*
                 * The userspace doorbell BAR is split evenly into doorbell
                 * regions, each associated with an egress queue. If this
                 * per-queue region is large enough (at least UDBS_SEG_SIZE)
                 * then it can be used to submit a tx work request with an
                 * implied doorbell. Enable write combining on the BAR if
                 * there is room for such work requests.
                 */
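                /*
                 * For instance (assuming 4KB pages and a 128-byte
                 * UDBS_SEG_SIZE), num_seg below is 4096 / 128 = 32, so the
                 * check warns whenever the chip is configured for more than
                 * 32 egress queues per page, i.e. whenever a queue's
                 * doorbell region would be smaller than one segment.
                 */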
                int s_qpp, qpp, num_seg;

                s_qpp = (S_QUEUESPERPAGEPF0 +
                         (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
                         adapter->pf);
                qpp = 1 << ((t4_read_reg(adapter,
                                         A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
                             s_qpp) & M_QUEUESPERPAGEPF0);
                num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
                if (qpp > num_seg)
                        dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");

                adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
                if (!adapter->bar2) {
                        dev_err(adapter, "cannot map device bar2 region\n");
                        err = -ENOMEM;
                        goto out_free;
                }
                t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
                             V_STATMODE(0));
        }

        for_each_port(adapter, i) {
                char name[RTE_ETH_NAME_MAX_LEN];
                struct rte_eth_dev_data *data = NULL;
                const unsigned int numa_node = rte_socket_id();

                pi = &adapter->port[i];
                pi->adapter = adapter;
                pi->xact_addr_filt = -1;
                pi->port_id = i;

                snprintf(name, sizeof(name), "cxgbe%d",
                         adapter->eth_dev->data->port_id + i);

                if (i == 0) {
                        /* First port is already allocated by DPDK */
                        pi->eth_dev = adapter->eth_dev;
                        goto allocate_mac;
                }

                /*
                 * now do all data allocation - for eth_dev structure,
                 * and internal (private) data for the remaining ports
                 */

                /* reserve an ethdev entry */
                pi->eth_dev = rte_eth_dev_allocate(name);
                if (!pi->eth_dev)
                        goto out_free;

                data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
                if (!data)
                        goto out_free;

                data->port_id = adapter->eth_dev->data->port_id + i;

                pi->eth_dev->data = data;

allocate_mac:
                pi->eth_dev->device = &adapter->pdev->device;
                pi->eth_dev->data->dev_private = pi;
                pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
                pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
                pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

                rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);

                pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
                                                           ETHER_ADDR_LEN, 0);
                if (!pi->eth_dev->data->mac_addrs) {
                        dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
                                __func__);
                        err = -1;
                        goto out_free;
                }
        }

        if (adapter->flags & FW_OK) {
                err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
                if (err) {
                        dev_err(adapter, "%s: t4_port_init failed with err %d\n",
                                __func__, err);
                        goto out_free;
                }
        }

        cfg_queues(adapter->eth_dev);

        print_adapter_info(adapter);
        print_port_info(adapter);

        err = init_rss(adapter);
        if (err)
                goto out_free;

        return 0;

out_free:
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                if (pi->viid != 0)
                        t4_free_vi(adapter, adapter->mbox, adapter->pf,
                                   0, pi->viid);
                /* Skip first port since it'll be de-allocated by DPDK */
                if (i == 0)
                        continue;
                if (pi->eth_dev->data)
                        rte_free(pi->eth_dev->data);
        }

        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->mbox);
        return -err;
}