1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2018 Chelsio Communications. 3 * All rights reserved. 4 */ 5 6 #include <sys/queue.h> 7 #include <stdio.h> 8 #include <errno.h> 9 #include <stdint.h> 10 #include <string.h> 11 #include <unistd.h> 12 #include <stdarg.h> 13 #include <inttypes.h> 14 #include <netinet/in.h> 15 16 #include <rte_byteorder.h> 17 #include <rte_common.h> 18 #include <rte_cycles.h> 19 #include <rte_interrupts.h> 20 #include <rte_log.h> 21 #include <rte_debug.h> 22 #include <rte_pci.h> 23 #include <rte_atomic.h> 24 #include <rte_branch_prediction.h> 25 #include <rte_memory.h> 26 #include <rte_tailq.h> 27 #include <rte_eal.h> 28 #include <rte_alarm.h> 29 #include <rte_ether.h> 30 #include <rte_ethdev_driver.h> 31 #include <rte_ethdev_pci.h> 32 #include <rte_random.h> 33 #include <rte_dev.h> 34 #include <rte_kvargs.h> 35 36 #include "common.h" 37 #include "t4_regs.h" 38 #include "t4_msg.h" 39 #include "cxgbe.h" 40 41 #define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan" 42 43 /* 44 * Response queue handler for the FW event queue. 45 */ 46 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, 47 __rte_unused const struct pkt_gl *gl) 48 { 49 u8 opcode = ((const struct rss_header *)rsp)->opcode; 50 51 rsp++; /* skip RSS header */ 52 53 /* 54 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 55 */ 56 if (unlikely(opcode == CPL_FW4_MSG && 57 ((const struct cpl_fw4_msg *)rsp)->type == 58 FW_TYPE_RSSCPL)) { 59 rsp++; 60 opcode = ((const struct rss_header *)rsp)->opcode; 61 rsp++; 62 if (opcode != CPL_SGE_EGR_UPDATE) { 63 dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n", 64 opcode); 65 goto out; 66 } 67 } 68 69 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 70 /* do nothing */ 71 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { 72 const struct cpl_fw6_msg *msg = (const void *)rsp; 73 74 t4_handle_fw_rpl(q->adapter, msg->data); 75 } else { 76 dev_err(adapter, "unexpected CPL %#x on FW event queue\n", 77 opcode); 78 } 79 out: 80 return 0; 81 } 82 83 int setup_sge_fwevtq(struct adapter *adapter) 84 { 85 struct sge *s = &adapter->sge; 86 int err = 0; 87 int msi_idx = 0; 88 89 err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev, 90 msi_idx, NULL, fwevtq_handler, -1, NULL, 0, 91 rte_socket_id()); 92 return err; 93 } 94 95 static int closest_timer(const struct sge *s, int time) 96 { 97 unsigned int i, match = 0; 98 int delta, min_delta = INT_MAX; 99 100 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { 101 delta = time - s->timer_val[i]; 102 if (delta < 0) 103 delta = -delta; 104 if (delta < min_delta) { 105 min_delta = delta; 106 match = i; 107 } 108 } 109 return match; 110 } 111 112 static int closest_thres(const struct sge *s, int thres) 113 { 114 unsigned int i, match = 0; 115 int delta, min_delta = INT_MAX; 116 117 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { 118 delta = thres - s->counter_val[i]; 119 if (delta < 0) 120 delta = -delta; 121 if (delta < min_delta) { 122 min_delta = delta; 123 match = i; 124 } 125 } 126 return match; 127 } 128 129 /** 130 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters 131 * @q: the Rx queue 132 * @us: the hold-off time in us, or 0 to disable timer 133 * @cnt: the hold-off packet count, or 0 to disable counter 134 * 135 * Sets an Rx queue's interrupt hold-off time and packet count. At least 136 * one of the two needs to be enabled for the queue to generate interrupts. 137 */ 138 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, 139 unsigned int cnt) 140 { 141 struct adapter *adap = q->adapter; 142 unsigned int timer_val; 143 144 if (cnt) { 145 int err; 146 u32 v, new_idx; 147 148 new_idx = closest_thres(&adap->sge, cnt); 149 if (q->desc && q->pktcnt_idx != new_idx) { 150 /* the queue has already been created, update it */ 151 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 152 V_FW_PARAMS_PARAM_X( 153 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | 154 V_FW_PARAMS_PARAM_YZ(q->cntxt_id); 155 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 156 &v, &new_idx); 157 if (err) 158 return err; 159 } 160 q->pktcnt_idx = new_idx; 161 } 162 163 timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER : 164 closest_timer(&adap->sge, us); 165 166 if ((us | cnt) == 0) 167 q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX); 168 else 169 q->intr_params = V_QINTR_TIMER_IDX(timer_val) | 170 V_QINTR_CNT_EN(cnt > 0); 171 return 0; 172 } 173 174 static inline bool is_x_1g_port(const struct link_config *lc) 175 { 176 return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0; 177 } 178 179 static inline bool is_x_10g_port(const struct link_config *lc) 180 { 181 unsigned int speeds, high_speeds; 182 183 speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps)); 184 high_speeds = speeds & 185 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); 186 187 return high_speeds != 0; 188 } 189 190 inline void init_rspq(struct adapter *adap, struct sge_rspq *q, 191 unsigned int us, unsigned int cnt, 192 unsigned int size, unsigned int iqe_size) 193 { 194 q->adapter = adap; 195 cxgb4_set_rspq_intr_params(q, us, cnt); 196 q->iqe_len = iqe_size; 197 q->size = size; 198 } 199 200 int cfg_queue_count(struct rte_eth_dev *eth_dev) 201 { 202 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); 203 struct adapter *adap = pi->adapter; 204 struct sge *s = &adap->sge; 205 unsigned int max_queues = s->max_ethqsets / adap->params.nports; 206 207 if ((eth_dev->data->nb_rx_queues < 1) || 208 (eth_dev->data->nb_tx_queues < 1)) 209 return -EINVAL; 210 211 if ((eth_dev->data->nb_rx_queues > max_queues) || 212 (eth_dev->data->nb_tx_queues > max_queues)) 213 return -EINVAL; 214 215 if (eth_dev->data->nb_rx_queues > pi->rss_size) 216 return -EINVAL; 217 218 /* We must configure RSS, since config has changed*/ 219 pi->flags &= ~PORT_RSS_DONE; 220 221 pi->n_rx_qsets = eth_dev->data->nb_rx_queues; 222 pi->n_tx_qsets = eth_dev->data->nb_tx_queues; 223 224 return 0; 225 } 226 227 void cfg_queues(struct rte_eth_dev *eth_dev) 228 { 229 struct rte_config *config = rte_eal_get_configuration(); 230 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); 231 struct adapter *adap = pi->adapter; 232 struct sge *s = &adap->sge; 233 unsigned int i, nb_ports = 0, qidx = 0; 234 unsigned int q_per_port = 0; 235 236 if (!(adap->flags & CFG_QUEUES)) { 237 for_each_port(adap, i) { 238 struct port_info *tpi = adap2pinfo(adap, i); 239 240 nb_ports += (is_x_10g_port(&tpi->link_cfg)) || 241 is_x_1g_port(&tpi->link_cfg) ? 1 : 0; 242 } 243 244 /* 245 * We default up to # of cores queues per 1G/10G port. 246 */ 247 if (nb_ports) 248 q_per_port = (MAX_ETH_QSETS - 249 (adap->params.nports - nb_ports)) / 250 nb_ports; 251 252 if (q_per_port > config->lcore_count) 253 q_per_port = config->lcore_count; 254 255 for_each_port(adap, i) { 256 struct port_info *pi = adap2pinfo(adap, i); 257 258 pi->first_qset = qidx; 259 260 /* Initially n_rx_qsets == n_tx_qsets */ 261 pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) || 262 is_x_1g_port(&pi->link_cfg)) ? 263 q_per_port : 1; 264 pi->n_tx_qsets = pi->n_rx_qsets; 265 266 if (pi->n_rx_qsets > pi->rss_size) 267 pi->n_rx_qsets = pi->rss_size; 268 269 qidx += pi->n_rx_qsets; 270 } 271 272 s->max_ethqsets = qidx; 273 274 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 275 struct sge_eth_rxq *r = &s->ethrxq[i]; 276 277 init_rspq(adap, &r->rspq, 5, 32, 1024, 64); 278 r->usembufs = 1; 279 r->fl.size = (r->usembufs ? 1024 : 72); 280 } 281 282 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) 283 s->ethtxq[i].q.size = 1024; 284 285 init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64); 286 adap->flags |= CFG_QUEUES; 287 } 288 } 289 290 void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats) 291 { 292 t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats, 293 &pi->stats_base); 294 } 295 296 void cxgbe_stats_reset(struct port_info *pi) 297 { 298 t4_clr_port_stats(pi->adapter, pi->tx_chan); 299 } 300 301 static void setup_memwin(struct adapter *adap) 302 { 303 u32 mem_win0_base; 304 305 /* For T5, only relative offset inside the PCIe BAR is passed */ 306 mem_win0_base = MEMWIN0_BASE; 307 308 /* 309 * Set up memory window for accessing adapter memory ranges. (Read 310 * back MA register to ensure that changes propagate before we attempt 311 * to use the new values.) 312 */ 313 t4_write_reg(adap, 314 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 315 MEMWIN_NIC), 316 mem_win0_base | V_BIR(0) | 317 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT)); 318 t4_read_reg(adap, 319 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 320 MEMWIN_NIC)); 321 } 322 323 int init_rss(struct adapter *adap) 324 { 325 unsigned int i; 326 327 if (is_pf4(adap)) { 328 int err; 329 330 err = t4_init_rss_mode(adap, adap->mbox); 331 if (err) 332 return err; 333 } 334 335 for_each_port(adap, i) { 336 struct port_info *pi = adap2pinfo(adap, i); 337 338 pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); 339 if (!pi->rss) 340 return -ENOMEM; 341 342 pi->rss_hf = CXGBE_RSS_HF_ALL; 343 } 344 return 0; 345 } 346 347 /** 348 * Dump basic information about the adapter. 349 */ 350 void print_adapter_info(struct adapter *adap) 351 { 352 /** 353 * Hardware/Firmware/etc. Version/Revision IDs. 354 */ 355 t4_dump_version_info(adap); 356 } 357 358 void print_port_info(struct adapter *adap) 359 { 360 int i; 361 char buf[80]; 362 struct rte_pci_addr *loc = &adap->pdev->addr; 363 364 for_each_port(adap, i) { 365 const struct port_info *pi = adap2pinfo(adap, i); 366 char *bufp = buf; 367 368 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) 369 bufp += sprintf(bufp, "100M/"); 370 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) 371 bufp += sprintf(bufp, "1G/"); 372 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) 373 bufp += sprintf(bufp, "10G/"); 374 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) 375 bufp += sprintf(bufp, "25G/"); 376 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) 377 bufp += sprintf(bufp, "40G/"); 378 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) 379 bufp += sprintf(bufp, "50G/"); 380 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) 381 bufp += sprintf(bufp, "100G/"); 382 if (bufp != buf) 383 --bufp; 384 sprintf(bufp, "BASE-%s", 385 t4_get_port_type_description( 386 (enum fw_port_type)pi->port_type)); 387 388 dev_info(adap, 389 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n", 390 loc->domain, loc->bus, loc->devid, loc->function, 391 CHELSIO_CHIP_RELEASE(adap->params.chip), buf, 392 (adap->flags & USING_MSIX) ? " MSI-X" : 393 (adap->flags & USING_MSI) ? " MSI" : ""); 394 } 395 } 396 397 static int 398 check_devargs_handler(__rte_unused const char *key, const char *value, 399 __rte_unused void *opaque) 400 { 401 if (strcmp(value, "1")) 402 return -1; 403 404 return 0; 405 } 406 407 static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key) 408 { 409 struct rte_kvargs *kvlist; 410 411 if (!devargs) 412 return 0; 413 414 kvlist = rte_kvargs_parse(devargs->args, NULL); 415 if (!kvlist) 416 return 0; 417 418 if (!rte_kvargs_count(kvlist, key)) { 419 rte_kvargs_free(kvlist); 420 return 0; 421 } 422 423 if (rte_kvargs_process(kvlist, key, 424 check_devargs_handler, NULL) < 0) { 425 rte_kvargs_free(kvlist); 426 return 0; 427 } 428 rte_kvargs_free(kvlist); 429 430 return 1; 431 } 432 433 static void configure_vlan_types(struct adapter *adapter) 434 { 435 struct rte_pci_device *pdev = adapter->pdev; 436 int i; 437 438 for_each_port(adapter, i) { 439 /* OVLAN Type 0x88a8 */ 440 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0), 441 V_OVLAN_MASK(M_OVLAN_MASK) | 442 V_OVLAN_ETYPE(M_OVLAN_ETYPE), 443 V_OVLAN_MASK(M_OVLAN_MASK) | 444 V_OVLAN_ETYPE(0x88a8)); 445 /* OVLAN Type 0x9100 */ 446 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1), 447 V_OVLAN_MASK(M_OVLAN_MASK) | 448 V_OVLAN_ETYPE(M_OVLAN_ETYPE), 449 V_OVLAN_MASK(M_OVLAN_MASK) | 450 V_OVLAN_ETYPE(0x9100)); 451 /* OVLAN Type 0x8100 */ 452 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2), 453 V_OVLAN_MASK(M_OVLAN_MASK) | 454 V_OVLAN_ETYPE(M_OVLAN_ETYPE), 455 V_OVLAN_MASK(M_OVLAN_MASK) | 456 V_OVLAN_ETYPE(0x8100)); 457 458 /* IVLAN 0X8100 */ 459 t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i), 460 V_IVLAN_ETYPE(M_IVLAN_ETYPE), 461 V_IVLAN_ETYPE(0x8100)); 462 463 t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i), 464 F_OVLAN_EN0 | F_OVLAN_EN1 | 465 F_OVLAN_EN2 | F_IVLAN_EN, 466 F_OVLAN_EN0 | F_OVLAN_EN1 | 467 F_OVLAN_EN2 | F_IVLAN_EN); 468 } 469 470 if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN)) 471 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, 472 V_RM_OVLAN(1), V_RM_OVLAN(0)); 473 } 474 475 static void configure_pcie_ext_tag(struct adapter *adapter) 476 { 477 u16 v; 478 int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 479 480 if (!pos) 481 return; 482 483 if (pos > 0) { 484 t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v); 485 v |= PCI_EXP_DEVCTL_EXT_TAG; 486 t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v); 487 if (is_t6(adapter->params.chip)) { 488 t4_set_reg_field(adapter, A_PCIE_CFG2, 489 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG), 490 V_T6_TOTMAXTAG(7)); 491 t4_set_reg_field(adapter, A_PCIE_CMD_CFG, 492 V_T6_MINTAG(M_T6_MINTAG), 493 V_T6_MINTAG(8)); 494 } else { 495 t4_set_reg_field(adapter, A_PCIE_CFG2, 496 V_TOTMAXTAG(M_TOTMAXTAG), 497 V_TOTMAXTAG(3)); 498 t4_set_reg_field(adapter, A_PCIE_CMD_CFG, 499 V_MINTAG(M_MINTAG), 500 V_MINTAG(8)); 501 } 502 } 503 } 504 505 /* 506 * Tweak configuration based on system architecture, etc. Most of these have 507 * defaults assigned to them by Firmware Configuration Files (if we're using 508 * them) but need to be explicitly set if we're using hard-coded 509 * initialization. So these are essentially common tweaks/settings for 510 * Configuration Files and hard-coded initialization ... 511 */ 512 static int adap_init0_tweaks(struct adapter *adapter) 513 { 514 u8 rx_dma_offset; 515 516 /* 517 * Fix up various Host-Dependent Parameters like Page Size, Cache 518 * Line Size, etc. The firmware default is for a 4KB Page Size and 519 * 64B Cache Line Size ... 520 */ 521 t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES, 522 T5_LAST_REV); 523 524 /* 525 * Keep the chip default offset to deliver Ingress packets into our 526 * DMA buffers to zero 527 */ 528 rx_dma_offset = 0; 529 t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT), 530 V_PKTSHIFT(rx_dma_offset)); 531 532 t4_set_reg_field(adapter, A_SGE_FLM_CFG, 533 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING, 534 V_CREDITCNT(3) | V_CREDITCNTPACKING(1)); 535 536 t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD, 537 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U)); 538 539 t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U), 540 V_IDMAARBROUNDROBIN(1U)); 541 542 /* 543 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux 544 * adds the pseudo header itself. 545 */ 546 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, 547 F_CSUM_HAS_PSEUDO_HDR, 0); 548 549 return 0; 550 } 551 552 /* 553 * Attempt to initialize the adapter via a Firmware Configuration File. 554 */ 555 static int adap_init0_config(struct adapter *adapter, int reset) 556 { 557 struct fw_caps_config_cmd caps_cmd; 558 unsigned long mtype = 0, maddr = 0; 559 u32 finiver, finicsum, cfcsum; 560 int ret; 561 int config_issued = 0; 562 int cfg_addr; 563 char config_name[20]; 564 565 /* 566 * Reset device if necessary. 567 */ 568 if (reset) { 569 ret = t4_fw_reset(adapter, adapter->mbox, 570 F_PIORSTMODE | F_PIORST); 571 if (ret < 0) { 572 dev_warn(adapter, "Firmware reset failed, error %d\n", 573 -ret); 574 goto bye; 575 } 576 } 577 578 cfg_addr = t4_flash_cfg_addr(adapter); 579 if (cfg_addr < 0) { 580 ret = cfg_addr; 581 dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n", 582 -ret); 583 goto bye; 584 } 585 586 strcpy(config_name, "On Flash"); 587 mtype = FW_MEMTYPE_CF_FLASH; 588 maddr = cfg_addr; 589 590 /* 591 * Issue a Capability Configuration command to the firmware to get it 592 * to parse the Configuration File. We don't use t4_fw_config_file() 593 * because we want the ability to modify various features after we've 594 * processed the configuration file ... 595 */ 596 memset(&caps_cmd, 0, sizeof(caps_cmd)); 597 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 598 F_FW_CMD_REQUEST | F_FW_CMD_READ); 599 caps_cmd.cfvalid_to_len16 = 600 cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID | 601 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 602 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 603 FW_LEN16(caps_cmd)); 604 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 605 &caps_cmd); 606 /* 607 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware 608 * Configuration File in FLASH), our last gasp effort is to use the 609 * Firmware Configuration File which is embedded in the firmware. A 610 * very few early versions of the firmware didn't have one embedded 611 * but we can ignore those. 612 */ 613 if (ret == -ENOENT) { 614 dev_info(adapter, "%s: Going for embedded config in firmware..\n", 615 __func__); 616 617 memset(&caps_cmd, 0, sizeof(caps_cmd)); 618 caps_cmd.op_to_write = 619 cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 620 F_FW_CMD_REQUEST | F_FW_CMD_READ); 621 caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd)); 622 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, 623 sizeof(caps_cmd), &caps_cmd); 624 strcpy(config_name, "Firmware Default"); 625 } 626 627 config_issued = 1; 628 if (ret < 0) 629 goto bye; 630 631 finiver = be32_to_cpu(caps_cmd.finiver); 632 finicsum = be32_to_cpu(caps_cmd.finicsum); 633 cfcsum = be32_to_cpu(caps_cmd.cfcsum); 634 if (finicsum != cfcsum) 635 dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n", 636 finicsum, cfcsum); 637 638 /* 639 * If we're a pure NIC driver then disable all offloading facilities. 640 * This will allow the firmware to optimize aspects of the hardware 641 * configuration which will result in improved performance. 642 */ 643 caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER | 644 FW_CAPS_CONFIG_NIC_ETHOFLD)); 645 caps_cmd.toecaps = 0; 646 caps_cmd.iscsicaps = 0; 647 caps_cmd.rdmacaps = 0; 648 caps_cmd.fcoecaps = 0; 649 650 /* 651 * And now tell the firmware to use the configuration we just loaded. 652 */ 653 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 654 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 655 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 656 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 657 NULL); 658 if (ret < 0) { 659 dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n", 660 -ret); 661 goto bye; 662 } 663 664 /* 665 * Tweak configuration based on system architecture, etc. 666 */ 667 ret = adap_init0_tweaks(adapter); 668 if (ret < 0) { 669 dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret); 670 goto bye; 671 } 672 673 /* 674 * And finally tell the firmware to initialize itself using the 675 * parameters from the Configuration File. 676 */ 677 ret = t4_fw_initialize(adapter, adapter->mbox); 678 if (ret < 0) { 679 dev_warn(adapter, "Initializing Firmware failed, error %d\n", 680 -ret); 681 goto bye; 682 } 683 684 /* 685 * Return successfully and note that we're operating with parameters 686 * not supplied by the driver, rather than from hard-wired 687 * initialization constants buried in the driver. 688 */ 689 dev_info(adapter, 690 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n", 691 config_name, finiver, cfcsum); 692 693 return 0; 694 695 /* 696 * Something bad happened. Return the error ... (If the "error" 697 * is that there's no Configuration File on the adapter we don't 698 * want to issue a warning since this is fairly common.) 699 */ 700 bye: 701 if (config_issued && ret != -ENOENT) 702 dev_warn(adapter, "\"%s\" configuration file error %d\n", 703 config_name, -ret); 704 705 dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret); 706 return ret; 707 } 708 709 static int adap_init0(struct adapter *adap) 710 { 711 int ret = 0; 712 u32 v, port_vec; 713 enum dev_state state; 714 u32 params[7], val[7]; 715 int reset = 1; 716 int mbox = adap->mbox; 717 718 /* 719 * Contact FW, advertising Master capability. 720 */ 721 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); 722 if (ret < 0) { 723 dev_err(adap, "%s: could not connect to FW, error %d\n", 724 __func__, -ret); 725 goto bye; 726 } 727 728 CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__, 729 adap->mbox, ret); 730 731 if (ret == mbox) 732 adap->flags |= MASTER_PF; 733 734 if (state == DEV_STATE_INIT) { 735 /* 736 * Force halt and reset FW because a previous instance may have 737 * exited abnormally without properly shutting down 738 */ 739 ret = t4_fw_halt(adap, adap->mbox, reset); 740 if (ret < 0) { 741 dev_err(adap, "Failed to halt. Exit.\n"); 742 goto bye; 743 } 744 745 ret = t4_fw_restart(adap, adap->mbox, reset); 746 if (ret < 0) { 747 dev_err(adap, "Failed to restart. Exit.\n"); 748 goto bye; 749 } 750 state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT); 751 } 752 753 t4_get_version_info(adap); 754 755 ret = t4_get_core_clock(adap, &adap->params.vpd); 756 if (ret < 0) { 757 dev_err(adap, "%s: could not get core clock, error %d\n", 758 __func__, -ret); 759 goto bye; 760 } 761 762 /* 763 * If the firmware is initialized already (and we're not forcing a 764 * master initialization), note that we're living with existing 765 * adapter parameters. Otherwise, it's time to try initializing the 766 * adapter ... 767 */ 768 if (state == DEV_STATE_INIT) { 769 dev_info(adap, "Coming up as %s: Adapter already initialized\n", 770 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 771 } else { 772 dev_info(adap, "Coming up as MASTER: Initializing adapter\n"); 773 774 ret = adap_init0_config(adap, reset); 775 if (ret == -ENOENT) { 776 dev_err(adap, 777 "No Configuration File present on adapter. Using hard-wired configuration parameters.\n"); 778 goto bye; 779 } 780 } 781 if (ret < 0) { 782 dev_err(adap, "could not initialize adapter, error %d\n", -ret); 783 goto bye; 784 } 785 786 /* Find out what ports are available to us. */ 787 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 788 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); 789 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); 790 if (ret < 0) { 791 dev_err(adap, "%s: failure in t4_query_params; error = %d\n", 792 __func__, ret); 793 goto bye; 794 } 795 796 adap->params.nports = hweight32(port_vec); 797 adap->params.portvec = port_vec; 798 799 dev_debug(adap, "%s: adap->params.nports = %u\n", __func__, 800 adap->params.nports); 801 802 /* 803 * Give the SGE code a chance to pull in anything that it needs ... 804 * Note that this must be called after we retrieve our VPD parameters 805 * in order to know how to convert core ticks to seconds, etc. 806 */ 807 ret = t4_sge_init(adap); 808 if (ret < 0) { 809 dev_err(adap, "t4_sge_init failed with error %d\n", 810 -ret); 811 goto bye; 812 } 813 814 /* 815 * Grab some of our basic fundamental operating parameters. 816 */ 817 #define FW_PARAM_DEV(param) \ 818 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 819 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 820 821 #define FW_PARAM_PFVF(param) \ 822 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 823 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ 824 V_FW_PARAMS_PARAM_Y(0) | \ 825 V_FW_PARAMS_PARAM_Z(0)) 826 827 /* If we're running on newer firmware, let it know that we're 828 * prepared to deal with encapsulated CPL messages. Older 829 * firmware won't understand this and we'll just get 830 * unencapsulated messages ... 831 */ 832 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 833 val[0] = 1; 834 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); 835 836 /* 837 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL 838 * capability. Earlier versions of the firmware didn't have the 839 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no 840 * permission to use ULPTX MEMWRITE DSGL. 841 */ 842 if (is_t4(adap->params.chip)) { 843 adap->params.ulptx_memwrite_dsgl = false; 844 } else { 845 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); 846 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 847 1, params, val); 848 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); 849 } 850 851 /* 852 * The MTU/MSS Table is initialized by now, so load their values. If 853 * we're initializing the adapter, then we'll make any modifications 854 * we want to the MTU/MSS Table and also initialize the congestion 855 * parameters. 856 */ 857 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 858 if (state != DEV_STATE_INIT) { 859 int i; 860 861 /* 862 * The default MTU Table contains values 1492 and 1500. 863 * However, for TCP, it's better to have two values which are 864 * a multiple of 8 +/- 4 bytes apart near this popular MTU. 865 * This allows us to have a TCP Data Payload which is a 866 * multiple of 8 regardless of what combination of TCP Options 867 * are in use (always a multiple of 4 bytes) which is 868 * important for performance reasons. For instance, if no 869 * options are in use, then we have a 20-byte IP header and a 870 * 20-byte TCP header. In this case, a 1500-byte MSS would 871 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes 872 * which is not a multiple of 8. So using an MSS of 1488 in 873 * this case results in a TCP Data Payload of 1448 bytes which 874 * is a multiple of 8. On the other hand, if 12-byte TCP Time 875 * Stamps have been negotiated, then an MTU of 1500 bytes 876 * results in a TCP Data Payload of 1448 bytes which, as 877 * above, is a multiple of 8 bytes ... 878 */ 879 for (i = 0; i < NMTUS; i++) 880 if (adap->params.mtus[i] == 1492) { 881 adap->params.mtus[i] = 1488; 882 break; 883 } 884 885 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 886 adap->params.b_wnd); 887 } 888 t4_init_sge_params(adap); 889 t4_init_tp_params(adap); 890 configure_pcie_ext_tag(adap); 891 configure_vlan_types(adap); 892 893 adap->params.drv_memwin = MEMWIN_NIC; 894 adap->flags |= FW_OK; 895 dev_debug(adap, "%s: returning zero..\n", __func__); 896 return 0; 897 898 /* 899 * Something bad happened. If a command timed out or failed with EIO 900 * FW does not operate within its spec or something catastrophic 901 * happened to HW/FW, stop issuing commands. 902 */ 903 bye: 904 if (ret != -ETIMEDOUT && ret != -EIO) 905 t4_fw_bye(adap, adap->mbox); 906 return ret; 907 } 908 909 /** 910 * t4_os_portmod_changed - handle port module changes 911 * @adap: the adapter associated with the module change 912 * @port_id: the port index whose module status has changed 913 * 914 * This is the OS-dependent handler for port module changes. It is 915 * invoked when a port module is removed or inserted for any OS-specific 916 * processing. 917 */ 918 void t4_os_portmod_changed(const struct adapter *adap, int port_id) 919 { 920 static const char * const mod_str[] = { 921 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" 922 }; 923 924 const struct port_info *pi = adap2pinfo(adap, port_id); 925 926 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 927 dev_info(adap, "Port%d: port module unplugged\n", pi->port_id); 928 else if (pi->mod_type < ARRAY_SIZE(mod_str)) 929 dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id, 930 mod_str[pi->mod_type]); 931 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 932 dev_info(adap, "Port%d: unsupported port module inserted\n", 933 pi->port_id); 934 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 935 dev_info(adap, "Port%d: unknown port module inserted\n", 936 pi->port_id); 937 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) 938 dev_info(adap, "Port%d: transceiver module error\n", 939 pi->port_id); 940 else 941 dev_info(adap, "Port%d: unknown module type %d inserted\n", 942 pi->port_id, pi->mod_type); 943 } 944 945 /** 946 * link_start - enable a port 947 * @dev: the port to enable 948 * 949 * Performs the MAC and PHY actions needed to enable a port. 950 */ 951 int link_start(struct port_info *pi) 952 { 953 struct adapter *adapter = pi->adapter; 954 int ret; 955 unsigned int mtu; 956 957 mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 958 (ETHER_HDR_LEN + ETHER_CRC_LEN); 959 960 /* 961 * We do not set address filters and promiscuity here, the stack does 962 * that step explicitly. 963 */ 964 ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, 965 -1, 1, true); 966 if (ret == 0) { 967 ret = t4_change_mac(adapter, adapter->mbox, pi->viid, 968 pi->xact_addr_filt, 969 (u8 *)&pi->eth_dev->data->mac_addrs[0], 970 true, true); 971 if (ret >= 0) { 972 pi->xact_addr_filt = ret; 973 ret = 0; 974 } 975 } 976 if (ret == 0 && is_pf4(adapter)) 977 ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, 978 &pi->link_cfg); 979 if (ret == 0) { 980 /* 981 * Enabling a Virtual Interface can result in an interrupt 982 * during the processing of the VI Enable command and, in some 983 * paths, result in an attempt to issue another command in the 984 * interrupt context. Thus, we disable interrupts during the 985 * course of the VI Enable command ... 986 */ 987 ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, 988 true, true, false); 989 } 990 return ret; 991 } 992 993 /** 994 * cxgbe_write_rss_conf - flash the RSS configuration for a given port 995 * @pi: the port 996 * @rss_hf: Hash configuration to apply 997 */ 998 int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) 999 { 1000 struct adapter *adapter = pi->adapter; 1001 const struct sge_eth_rxq *rxq; 1002 u64 flags = 0; 1003 u16 rss; 1004 int err; 1005 1006 /* Should never be called before setting up sge eth rx queues */ 1007 if (!(adapter->flags & FULL_INIT_DONE)) { 1008 dev_err(adap, "%s No RXQs available on port %d\n", 1009 __func__, pi->port_id); 1010 return -EINVAL; 1011 } 1012 1013 /* Don't allow unsupported hash functions */ 1014 if (rss_hf & ~CXGBE_RSS_HF_ALL) 1015 return -EINVAL; 1016 1017 if (rss_hf & ETH_RSS_IPV4) 1018 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 1019 1020 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) 1021 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 1022 1023 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) 1024 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 1025 F_FW_RSS_VI_CONFIG_CMD_UDPEN; 1026 1027 if (rss_hf & ETH_RSS_IPV6) 1028 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 1029 1030 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) 1031 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 1032 1033 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) 1034 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 1035 F_FW_RSS_VI_CONFIG_CMD_UDPEN; 1036 1037 rxq = &adapter->sge.ethrxq[pi->first_qset]; 1038 rss = rxq[0].rspq.abs_id; 1039 1040 /* If Tunnel All Lookup isn't specified in the global RSS 1041 * Configuration, then we need to specify a default Ingress 1042 * Queue for any ingress packets which aren't hashed. We'll 1043 * use our first ingress queue ... 1044 */ 1045 err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, 1046 flags, rss); 1047 return err; 1048 } 1049 1050 /** 1051 * cxgbe_write_rss - write the RSS table for a given port 1052 * @pi: the port 1053 * @queues: array of queue indices for RSS 1054 * 1055 * Sets up the portion of the HW RSS table for the port's VI to distribute 1056 * packets to the Rx queues in @queues. 1057 */ 1058 int cxgbe_write_rss(const struct port_info *pi, const u16 *queues) 1059 { 1060 u16 *rss; 1061 int i, err; 1062 struct adapter *adapter = pi->adapter; 1063 const struct sge_eth_rxq *rxq; 1064 1065 /* Should never be called before setting up sge eth rx queues */ 1066 BUG_ON(!(adapter->flags & FULL_INIT_DONE)); 1067 1068 rxq = &adapter->sge.ethrxq[pi->first_qset]; 1069 rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); 1070 if (!rss) 1071 return -ENOMEM; 1072 1073 /* map the queue indices to queue ids */ 1074 for (i = 0; i < pi->rss_size; i++, queues++) 1075 rss[i] = rxq[*queues].rspq.abs_id; 1076 1077 err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, 1078 pi->rss_size, rss, pi->rss_size); 1079 rte_free(rss); 1080 return err; 1081 } 1082 1083 /** 1084 * setup_rss - configure RSS 1085 * @adapter: the adapter 1086 * 1087 * Sets up RSS to distribute packets to multiple receive queues. We 1088 * configure the RSS CPU lookup table to distribute to the number of HW 1089 * receive queues, and the response queue lookup table to narrow that 1090 * down to the response queues actually configured for each port. 1091 * We always configure the RSS mapping for all ports since the mapping 1092 * table has plenty of entries. 1093 */ 1094 int setup_rss(struct port_info *pi) 1095 { 1096 int j, err; 1097 struct adapter *adapter = pi->adapter; 1098 1099 dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n", 1100 __func__, pi->rss_size, pi->n_rx_qsets); 1101 1102 if (!(pi->flags & PORT_RSS_DONE)) { 1103 if (adapter->flags & FULL_INIT_DONE) { 1104 /* Fill default values with equal distribution */ 1105 for (j = 0; j < pi->rss_size; j++) 1106 pi->rss[j] = j % pi->n_rx_qsets; 1107 1108 err = cxgbe_write_rss(pi, pi->rss); 1109 if (err) 1110 return err; 1111 1112 err = cxgbe_write_rss_conf(pi, pi->rss_hf); 1113 if (err) 1114 return err; 1115 pi->flags |= PORT_RSS_DONE; 1116 } 1117 } 1118 return 0; 1119 } 1120 1121 /* 1122 * Enable NAPI scheduling and interrupt generation for all Rx queues. 1123 */ 1124 static void enable_rx(struct adapter *adap, struct sge_rspq *q) 1125 { 1126 /* 0-increment GTS to start the timer and enable interrupts */ 1127 t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) : 1128 T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS, 1129 V_SEINTARM(q->intr_params) | 1130 V_INGRESSQID(q->cntxt_id)); 1131 } 1132 1133 void cxgbe_enable_rx_queues(struct port_info *pi) 1134 { 1135 struct adapter *adap = pi->adapter; 1136 struct sge *s = &adap->sge; 1137 unsigned int i; 1138 1139 for (i = 0; i < pi->n_rx_qsets; i++) 1140 enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq); 1141 } 1142 1143 /** 1144 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps. 1145 * @port_type: Firmware Port Type 1146 * @fw_caps: Firmware Port Capabilities 1147 * @speed_caps: Device Info Speed Capabilities 1148 * 1149 * Translate a Firmware Port Capabilities specification to Device Info 1150 * Speed Capabilities. 1151 */ 1152 static void fw_caps_to_speed_caps(enum fw_port_type port_type, 1153 unsigned int fw_caps, 1154 u32 *speed_caps) 1155 { 1156 #define SET_SPEED(__speed_name) \ 1157 do { \ 1158 *speed_caps |= ETH_LINK_ ## __speed_name; \ 1159 } while (0) 1160 1161 #define FW_CAPS_TO_SPEED(__fw_name) \ 1162 do { \ 1163 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ 1164 SET_SPEED(__fw_name); \ 1165 } while (0) 1166 1167 switch (port_type) { 1168 case FW_PORT_TYPE_BT_SGMII: 1169 case FW_PORT_TYPE_BT_XFI: 1170 case FW_PORT_TYPE_BT_XAUI: 1171 FW_CAPS_TO_SPEED(SPEED_100M); 1172 FW_CAPS_TO_SPEED(SPEED_1G); 1173 FW_CAPS_TO_SPEED(SPEED_10G); 1174 break; 1175 1176 case FW_PORT_TYPE_KX4: 1177 case FW_PORT_TYPE_KX: 1178 case FW_PORT_TYPE_FIBER_XFI: 1179 case FW_PORT_TYPE_FIBER_XAUI: 1180 case FW_PORT_TYPE_SFP: 1181 case FW_PORT_TYPE_QSFP_10G: 1182 case FW_PORT_TYPE_QSA: 1183 FW_CAPS_TO_SPEED(SPEED_1G); 1184 FW_CAPS_TO_SPEED(SPEED_10G); 1185 break; 1186 1187 case FW_PORT_TYPE_KR: 1188 SET_SPEED(SPEED_10G); 1189 break; 1190 1191 case FW_PORT_TYPE_BP_AP: 1192 case FW_PORT_TYPE_BP4_AP: 1193 SET_SPEED(SPEED_1G); 1194 SET_SPEED(SPEED_10G); 1195 break; 1196 1197 case FW_PORT_TYPE_BP40_BA: 1198 case FW_PORT_TYPE_QSFP: 1199 SET_SPEED(SPEED_40G); 1200 break; 1201 1202 case FW_PORT_TYPE_CR_QSFP: 1203 case FW_PORT_TYPE_SFP28: 1204 case FW_PORT_TYPE_KR_SFP28: 1205 FW_CAPS_TO_SPEED(SPEED_1G); 1206 FW_CAPS_TO_SPEED(SPEED_10G); 1207 FW_CAPS_TO_SPEED(SPEED_25G); 1208 break; 1209 1210 case FW_PORT_TYPE_CR2_QSFP: 1211 SET_SPEED(SPEED_50G); 1212 break; 1213 1214 case FW_PORT_TYPE_KR4_100G: 1215 case FW_PORT_TYPE_CR4_QSFP: 1216 FW_CAPS_TO_SPEED(SPEED_25G); 1217 FW_CAPS_TO_SPEED(SPEED_40G); 1218 FW_CAPS_TO_SPEED(SPEED_50G); 1219 FW_CAPS_TO_SPEED(SPEED_100G); 1220 break; 1221 1222 default: 1223 break; 1224 } 1225 1226 #undef FW_CAPS_TO_SPEED 1227 #undef SET_SPEED 1228 } 1229 1230 /** 1231 * cxgbe_get_speed_caps - Fetch supported speed capabilities 1232 * @pi: Underlying port's info 1233 * @speed_caps: Device Info speed capabilities 1234 * 1235 * Fetch supported speed capabilities of the underlying port. 1236 */ 1237 void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps) 1238 { 1239 *speed_caps = 0; 1240 1241 fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps, 1242 speed_caps); 1243 1244 if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)) 1245 *speed_caps |= ETH_LINK_SPEED_FIXED; 1246 } 1247 1248 /** 1249 * cxgb_up - enable the adapter 1250 * @adap: adapter being enabled 1251 * 1252 * Called when the first port is enabled, this function performs the 1253 * actions necessary to make an adapter operational, such as completing 1254 * the initialization of HW modules, and enabling interrupts. 1255 */ 1256 int cxgbe_up(struct adapter *adap) 1257 { 1258 enable_rx(adap, &adap->sge.fw_evtq); 1259 t4_sge_tx_monitor_start(adap); 1260 if (is_pf4(adap)) 1261 t4_intr_enable(adap); 1262 adap->flags |= FULL_INIT_DONE; 1263 1264 /* TODO: deadman watchdog ?? */ 1265 return 0; 1266 } 1267 1268 /* 1269 * Close the port 1270 */ 1271 int cxgbe_down(struct port_info *pi) 1272 { 1273 struct adapter *adapter = pi->adapter; 1274 int err = 0; 1275 1276 err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false); 1277 if (err) { 1278 dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err); 1279 return err; 1280 } 1281 1282 t4_reset_link_config(adapter, pi->pidx); 1283 return 0; 1284 } 1285 1286 /* 1287 * Release resources when all the ports have been stopped. 1288 */ 1289 void cxgbe_close(struct adapter *adapter) 1290 { 1291 struct port_info *pi; 1292 int i; 1293 1294 if (adapter->flags & FULL_INIT_DONE) { 1295 if (is_pf4(adapter)) 1296 t4_intr_disable(adapter); 1297 t4_sge_tx_monitor_stop(adapter); 1298 t4_free_sge_resources(adapter); 1299 for_each_port(adapter, i) { 1300 pi = adap2pinfo(adapter, i); 1301 if (pi->viid != 0) 1302 t4_free_vi(adapter, adapter->mbox, 1303 adapter->pf, 0, pi->viid); 1304 rte_free(pi->eth_dev->data->mac_addrs); 1305 /* Skip first port since it'll be freed by DPDK stack */ 1306 if (i) { 1307 rte_free(pi->eth_dev->data->dev_private); 1308 rte_eth_dev_release_port(pi->eth_dev); 1309 } 1310 } 1311 adapter->flags &= ~FULL_INIT_DONE; 1312 } 1313 1314 if (is_pf4(adapter) && (adapter->flags & FW_OK)) 1315 t4_fw_bye(adapter, adapter->mbox); 1316 } 1317 1318 int cxgbe_probe(struct adapter *adapter) 1319 { 1320 struct port_info *pi; 1321 int chip; 1322 int func, i; 1323 int err = 0; 1324 u32 whoami; 1325 1326 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 1327 chip = t4_get_chip_type(adapter, 1328 CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id)); 1329 if (chip < 0) 1330 return chip; 1331 1332 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 1333 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami); 1334 1335 adapter->mbox = func; 1336 adapter->pf = func; 1337 1338 t4_os_lock_init(&adapter->mbox_lock); 1339 TAILQ_INIT(&adapter->mbox_list); 1340 1341 err = t4_prep_adapter(adapter); 1342 if (err) 1343 return err; 1344 1345 setup_memwin(adapter); 1346 err = adap_init0(adapter); 1347 if (err) { 1348 dev_err(adapter, "%s: Adapter initialization failed, error %d\n", 1349 __func__, err); 1350 goto out_free; 1351 } 1352 1353 if (!is_t4(adapter->params.chip)) { 1354 /* 1355 * The userspace doorbell BAR is split evenly into doorbell 1356 * regions, each associated with an egress queue. If this 1357 * per-queue region is large enough (at least UDBS_SEG_SIZE) 1358 * then it can be used to submit a tx work request with an 1359 * implied doorbell. Enable write combining on the BAR if 1360 * there is room for such work requests. 1361 */ 1362 int s_qpp, qpp, num_seg; 1363 1364 s_qpp = (S_QUEUESPERPAGEPF0 + 1365 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * 1366 adapter->pf); 1367 qpp = 1 << ((t4_read_reg(adapter, 1368 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp) 1369 & M_QUEUESPERPAGEPF0); 1370 num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE; 1371 if (qpp > num_seg) 1372 dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n"); 1373 1374 adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; 1375 if (!adapter->bar2) { 1376 dev_err(adapter, "cannot map device bar2 region\n"); 1377 err = -ENOMEM; 1378 goto out_free; 1379 } 1380 t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | 1381 V_STATMODE(0)); 1382 } 1383 1384 for_each_port(adapter, i) { 1385 const unsigned int numa_node = rte_socket_id(); 1386 char name[RTE_ETH_NAME_MAX_LEN]; 1387 struct rte_eth_dev *eth_dev; 1388 1389 snprintf(name, sizeof(name), "%s_%d", 1390 adapter->pdev->device.name, i); 1391 1392 if (i == 0) { 1393 /* First port is already allocated by DPDK */ 1394 eth_dev = adapter->eth_dev; 1395 goto allocate_mac; 1396 } 1397 1398 /* 1399 * now do all data allocation - for eth_dev structure, 1400 * and internal (private) data for the remaining ports 1401 */ 1402 1403 /* reserve an ethdev entry */ 1404 eth_dev = rte_eth_dev_allocate(name); 1405 if (!eth_dev) 1406 goto out_free; 1407 1408 eth_dev->data->dev_private = 1409 rte_zmalloc_socket(name, sizeof(struct port_info), 1410 RTE_CACHE_LINE_SIZE, numa_node); 1411 if (!eth_dev->data->dev_private) 1412 goto out_free; 1413 1414 allocate_mac: 1415 pi = (struct port_info *)eth_dev->data->dev_private; 1416 adapter->port[i] = pi; 1417 pi->eth_dev = eth_dev; 1418 pi->adapter = adapter; 1419 pi->xact_addr_filt = -1; 1420 pi->port_id = i; 1421 pi->pidx = i; 1422 1423 pi->eth_dev->device = &adapter->pdev->device; 1424 pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; 1425 pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; 1426 pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; 1427 1428 rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev); 1429 1430 pi->eth_dev->data->mac_addrs = rte_zmalloc(name, 1431 ETHER_ADDR_LEN, 0); 1432 if (!pi->eth_dev->data->mac_addrs) { 1433 dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", 1434 __func__); 1435 err = -1; 1436 goto out_free; 1437 } 1438 1439 if (i > 0) { 1440 /* First port will be notified by upper layer */ 1441 rte_eth_dev_probing_finish(eth_dev); 1442 } 1443 } 1444 1445 if (adapter->flags & FW_OK) { 1446 err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0); 1447 if (err) { 1448 dev_err(adapter, "%s: t4_port_init failed with err %d\n", 1449 __func__, err); 1450 goto out_free; 1451 } 1452 } 1453 1454 cfg_queues(adapter->eth_dev); 1455 1456 print_adapter_info(adapter); 1457 print_port_info(adapter); 1458 1459 err = init_rss(adapter); 1460 if (err) 1461 goto out_free; 1462 1463 return 0; 1464 1465 out_free: 1466 for_each_port(adapter, i) { 1467 pi = adap2pinfo(adapter, i); 1468 if (pi->viid != 0) 1469 t4_free_vi(adapter, adapter->mbox, adapter->pf, 1470 0, pi->viid); 1471 /* Skip first port since it'll be de-allocated by DPDK */ 1472 if (i == 0) 1473 continue; 1474 if (pi->eth_dev) { 1475 if (pi->eth_dev->data->dev_private) 1476 rte_free(pi->eth_dev->data->dev_private); 1477 rte_eth_dev_release_port(pi->eth_dev); 1478 } 1479 } 1480 1481 if (adapter->flags & FW_OK) 1482 t4_fw_bye(adapter, adapter->mbox); 1483 return -err; 1484 } 1485