/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							       volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	return work_done;
}
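/* Note: cxgbe_xmit_pkts() and cxgbe_recv_pkts() above are the burst
 * handlers wired into eth_dev->tx_pkt_burst/rx_pkt_burst by
 * eth_cxgbe_dev_init() below, so applications reach them through
 * rte_eth_rx_burst()/rte_eth_tx_burst(). A minimal polling loop on the
 * application side might look like this (sketch; port and queue ids
 * are examples only):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, n);
 *	while (sent < n)
 *		rte_pktmbuf_free(pkts[sent++]);
 */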
int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = adapter->sge.max_ethqsets;
	device_info->max_tx_queues = adapter->sge.max_ethqsets;
	device_info->max_mac_addrs = 1; /* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);

	return 0;
}

int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 0, 1, -1, false);
}

int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link new_link = { 0 };
	unsigned int i, work_done, budget = 32;
	u8 old_link = pi->link_cfg.link_ok;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		if (!s->fw_evtq.desc)
			break;

		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = pi->link_cfg.autoneg;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = pi->link_cfg.speed;

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}
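/* The two callbacks below back rte_eth_dev_set_link_up() and
 * rte_eth_dev_set_link_down(). Both drain pending firmware link events
 * from fw_evtq first, so that pi->link_cfg.link_ok reflects the
 * current state before it is tested.
 */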
/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0)
		return err;

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > RTE_ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

/*
 * Close device: release the port's VI and, once the last port under
 * this PF has been closed, the adapter-wide resources.
 */
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	CXGBE_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}
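/* Sketch of the ethdev lifecycle as this PMD sees it (the ethdev layer
 * dispatches through the ops table at the bottom of this file; queue
 * counts are illustrative only):
 *
 *	rte_eth_dev_configure(port, 1, 1, &conf) -> cxgbe_dev_configure()
 *	rte_eth_rx_queue_setup(port, 0, ...)     -> cxgbe_dev_rx_queue_setup()
 *	rte_eth_tx_queue_setup(port, 0, ...)     -> cxgbe_dev_tx_queue_setup()
 *	rte_eth_dev_start(port)                  -> cxgbe_dev_start()
 *	...
 *	rte_eth_dev_stop(port)                   -> cxgbe_dev_stop()
 *	rte_eth_dev_close(port)                  -> cxgbe_dev_close()
 */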
/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = cxgbe_link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
	eth_dev->data->scattered_rx = 0;

	return 0;
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = cxgbe_setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		if (is_pf4(adapter)) {
			err = cxgbe_setup_sge_ctrl_txq(adapter);
			if (err)
				return err;
		}
	}

	err = cxgbe_cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
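/* Note on the descriptor-count checks in the queue-setup routines
 * below: a request below CXGBE_MIN_RING_DESC_SIZE is not rejected but
 * silently replaced with the driver default, while a request above
 * CXGBE_MAX_RING_DESC_SIZE fails with -EINVAL. Callers that need an
 * exact ring size should consult the desc limits reported by
 * cxgbe_dev_info_get() first.
 */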
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_txqset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_start(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
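/* Unlike the tx path, rx queue setup below additionally validates
 * max_rx_pkt_len against the limits reported by cxgbe_dev_info_get()
 * and toggles DEV_RX_OFFLOAD_JUMBO_FRAME based on the configured
 * packet length before allocating the response queue and free list.
 */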
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0) {
		dev_err(adapter, "%s: error during getting ethernet device info\n",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > RTE_ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}
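/* In the stats callbacks below, imissed sums the rx_ovflow0..3 and
 * rx_trunc0..3 hardware drop counters reported by cxgbe_stats_get(),
 * while the per-queue packet/byte counters come from the software
 * counters maintained in sge_eth_rxq/sge_eth_txq by the SGE code.
 */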
/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
			     ps.rx_ovflow2 + ps.rx_ovflow3 +
			     ps.rx_trunc0 + ps.rx_trunc1 +
			     ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
			     ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			     ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes = ps.tx_octets;
	eth_stats->oerrors = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_rxqset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_txqset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
	}
	return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_rxqset + i];

		rxq->stats.pkts = 0;
		rxq->stats.rx_bytes = 0;
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_txqset + i];

		txq->stats.pkts = 0;
		txq->stats.tx_bytes = 0;
		txq->stats.mapping_err = 0;
	}

	return 0;
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	int rx_pause, tx_pause;

	fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
	rx_pause = lc->fc & PAUSE_RX;
	tx_pause = lc->fc & PAUSE_TX;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		if (fc_conf->autoneg)
			lc->requested_fc |= PAUSE_AUTONEG;
		else
			lc->requested_fc &= ~PAUSE_AUTONEG;
	}

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		lc->requested_fc |= PAUSE_RX;
	else
		lc->requested_fc &= ~PAUSE_RX;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		lc->requested_fc |= PAUSE_TX;
	else
		lc->requested_fc &= ~PAUSE_TX;

	return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
			     &pi->link_cfg);
}
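/* cxgbe_dev_supported_ptypes_get() below returns its table only when
 * the active rx handler is cxgbe_recv_pkts(), as the ethdev layer
 * expects; RTE_PTYPE_UNKNOWN terminates the list.
 */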
const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}

/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

		/* The 40-byte key is handed to the hardware in reversed,
		 * big-endian 32-bit word order.
		 */
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);
	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		/* Undo the reversed, big-endian word order used when
		 * the key was written.
		 */
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}

static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift, *rss;
	int ret;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		rss[i] = reta_conf[idx].reta[shift];
	}

	ret = cxgbe_write_rss(pi, rss);
	if (!ret)
		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));

	rte_free(rss);
	return ret;
}
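/* RETA entries are packed 64 per rte_eth_rss_reta_entry64 group, hence
 * the idx/shift arithmetic above and below. For example, with
 * RTE_RETA_GROUP_SIZE == 64, table entry 70 lives in reta_conf[1] at
 * slot 6 and is only touched when bit 6 of reta_conf[1].mask is set.
 */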
static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u16 i, idx, shift;

	if (!(adapter->flags & FULL_INIT_DONE))
		return -ENOMEM;

	if (!reta_size || reta_size > pi->rss_size)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;

		reta_conf[idx].reta[shift] = pi->rss[i];
	}

	return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return (31 << 10) - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
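/* Worked example of the mapping above (values assume EEPROMPFSIZE is
 * the usual 1 KB per-PF area; see t4_hw.h for the actual sizes), for
 * fn = 2, i.e. A = 2048:
 *
 *	eeprom_ptov(0x0000, 2, 1024) == 0x7C00	phys 0    -> virt 31K
 *	eeprom_ptov(0x0400, 2, 1024) == 0x7400	phys 1K   -> virt 31K-A
 *	eeprom_ptov(0x0C00, 2, 1024) == 0x0000	phys 1K+A -> virt 0
 */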
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}
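/* cxgbe_mac_addr_set() below programs the new default MAC through the
 * MPS TCAM; pi->xact_addr_filt caches the TCAM index returned by
 * cxgbe_mpstcam_modify() so the same entry is reused on subsequent
 * updates.
 */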
int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct port_info *pi = dev->data->dev_private;
	int ret;

	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return ret;
	}
	pi->xact_addr_filt = ret;
	return 0;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start = cxgbe_dev_start,
	.dev_stop = cxgbe_dev_stop,
	.dev_close = cxgbe_dev_close,
	.promiscuous_enable = cxgbe_dev_promiscuous_enable,
	.promiscuous_disable = cxgbe_dev_promiscuous_disable,
	.allmulticast_enable = cxgbe_dev_allmulticast_enable,
	.allmulticast_disable = cxgbe_dev_allmulticast_disable,
	.dev_configure = cxgbe_dev_configure,
	.dev_infos_get = cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update = cxgbe_dev_link_update,
	.dev_set_link_up = cxgbe_dev_set_link_up,
	.dev_set_link_down = cxgbe_dev_set_link_down,
	.mtu_set = cxgbe_dev_mtu_set,
	.tx_queue_setup = cxgbe_dev_tx_queue_setup,
	.tx_queue_start = cxgbe_dev_tx_queue_start,
	.tx_queue_stop = cxgbe_dev_tx_queue_stop,
	.tx_queue_release = cxgbe_dev_tx_queue_release,
	.rx_queue_setup = cxgbe_dev_rx_queue_setup,
	.rx_queue_start = cxgbe_dev_rx_queue_start,
	.rx_queue_stop = cxgbe_dev_rx_queue_stop,
	.rx_queue_release = cxgbe_dev_rx_queue_release,
	.filter_ctrl = cxgbe_dev_filter_ctrl,
	.stats_get = cxgbe_dev_stats_get,
	.stats_reset = cxgbe_dev_stats_reset,
	.flow_ctrl_get = cxgbe_flow_ctrl_get,
	.flow_ctrl_set = cxgbe_flow_ctrl_set,
	.get_eeprom_length = cxgbe_get_eeprom_length,
	.get_eeprom = cxgbe_get_eeprom,
	.set_eeprom = cxgbe_set_eeprom,
	.get_reg = cxgbe_get_regs,
	.rss_hash_update = cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set = cxgbe_mac_addr_set,
	.reta_update = cxgbe_dev_rss_reta_update,
	.reta_query = cxgbe_dev_rss_reta_query,
};
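/* Everything above is glued together here: eth_cxgbe_dev_init() points
 * each ethdev at cxgbe_eth_dev_ops and the burst handlers, and (in the
 * primary process) allocates and probes the shared adapter structure.
 */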
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	cxgbe_process_devargs(adapter);

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	uint16_t port_id;
	int err = 0;

	/* Free up other ports and all resources */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		err |= rte_eth_dev_close(port_id);

	return err == 0 ? 0 : -EIO;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER(cxgbe_logtype, pmd.net.cxgbe, NOTICE);
RTE_LOG_REGISTER(cxgbe_mbox_logtype, pmd.net.cxgbe.mbox, NOTICE);