/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "t4_pci_id_tbl.h"

uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	int ret = 0;

	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
		       __func__, txq, tx_pkts, nb_pkts);

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
					  nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
		       __func__, rxq->rspq.cntxt_id, nb_pkts);

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
	return work_done;
}
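
/*
 * Note: the two burst handlers above are not called directly by
 * applications. eth_cxgbe_dev_init() below installs them as
 * eth_dev->rx_pkt_burst and eth_dev->tx_pkt_burst, so they are reached
 * through the generic ethdev burst API. A minimal, purely illustrative
 * application-side sketch (port_id, queue_id, pkts and MAX_BURST are the
 * application's own names, not part of this driver):
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_BURST);
 *	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
 */
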
void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}

void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}
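
/*
 * The four Rx-mode handlers above follow the usual Chelsio t4_set_rxmode()
 * convention (this is our reading of the call sites, not a contract
 * documented in this file): after the VI id come mtu, promisc, all_multi,
 * bcast and vlanex, and a value of -1 means "leave this setting
 * unchanged". Each handler therefore toggles only the bit it owns, e.g.
 * promiscuous_enable() passes promisc = 1 and -1 everywhere else.
 */
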
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  __rte_unused int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link *old_link = &eth_dev->data->dev_link;
	unsigned int work_done, budget = 4;

	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
	if (old_link->link_status == pi->link_cfg.link_ok)
		return -1;  /* link not changed */

	eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;

	/* link has changed */
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}
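
/*
 * Worked example for the frame-length math above (values from the DPDK
 * ether definitions): an MTU of 1500 gives new_mtu = 1500 + 14
 * (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518 bytes, i.e. exactly
 * ETHER_MAX_LEN, so jumbo mode stays off; any MTU above 1500 pushes the
 * frame length past ETHER_MAX_LEN and turns jumbo_frame on.
 */
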
/*
 * Stop device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int i, dev_down = 0;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);

	/* See if all ports are down */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		/*
		 * Skip first port of the adapter since it will be closed
		 * by DPDK
		 */
		if (i == 0)
			continue;
		dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
	}

	/* If rest of the ports are stopped, then free up resources */
	if (dev_down == (adapter->params.nports - 1))
		cxgbe_close(adapter);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 * We clear queues only if both tx and rx path of the port
	 * have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/* Free up the existing queue */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];

	ret = t4_sge_eth_rxq_start(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/* Free up the existing queue */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	if ((&rxq->fl) != NULL)
		rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
	struct sge_rspq *rq = &rxq->rspq;

	if (rq) {
		struct port_info *pi = (struct port_info *)
				       (rq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
			     ps.rx_ovflow2 + ps.rx_ovflow3 +
			     ps.rx_trunc0 + ps.rx_trunc1 +
			     ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
			     ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			     ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes = ps.tx_octets;
	eth_stats->oerrors = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		eth_stats->q_ipackets[i] = rxq->stats.pkts;
		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
		eth_stats->ipackets += eth_stats->q_ipackets[i];
		eth_stats->ibytes += eth_stats->q_ibytes[i];
	}

	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		eth_stats->q_opackets[i] = txq->stats.pkts;
		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
		eth_stats->q_errors[i] = txq->stats.mapping_err;
	}
	return 0;
}

/*
 * Reset port statistics.
 */
static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq =
			&s->ethrxq[pi->first_qset + i];

		rxq->stats.pkts = 0;
		rxq->stats.rx_bytes = 0;
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq =
			&s->ethtxq[pi->first_qset + i];

		txq->stats.pkts = 0;
		txq->stats.tx_bytes = 0;
		txq->stats.mapping_err = 0;
	}
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct link_config *lc = &pi->link_cfg;
	int rx_pause, tx_pause;

	fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
	rx_pause = lc->fc & PAUSE_RX;
	tx_pause = lc->fc & PAUSE_TX;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			       struct rte_eth_fc_conf *fc_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		if (fc_conf->autoneg)
			lc->requested_fc |= PAUSE_AUTONEG;
		else
			lc->requested_fc &= ~PAUSE_AUTONEG;
	}

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		lc->requested_fc |= PAUSE_RX;
	else
		lc->requested_fc &= ~PAUSE_RX;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		lc->requested_fc |= PAUSE_TX;
	else
		lc->requested_fc &= ~PAUSE_TX;

	return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
			     &pi->link_cfg);
}
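
/*
 * A short note on the two handlers above: flow_ctrl_get() reports the
 * pause settings currently recorded in lc->fc, while flow_ctrl_set()
 * rewrites lc->requested_fc and pushes it to the firmware through
 * t4_link_l1cfg(). RTE_FC_FULL is treated as PAUSE_RX | PAUSE_TX, and
 * PAUSE_AUTONEG is only honoured when the port advertises
 * FW_PORT_CAP32_ANEG.
 */
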
const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
		return ptypes;
	return NULL;
}

/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err;

	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
	if (err)
		return err;

	pi->rss_hf = rss_conf->rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = cpu_to_be32(key[i]);

		t4_write_rss_key(adapter, mod_key, -1);
	}

	return 0;
}

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);

	if (err)
		return err;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= ETH_RSS_IPV6;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= ETH_RSS_IPV4;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}
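
/*
 * Note on the key handling in the two RSS routines above: the 40-byte
 * (CXGBE_DEFAULT_RSS_KEY_LEN) key is handed to the hardware with its ten
 * 32-bit words in reverse order and converted to big-endian, which is why
 * rss_hash_update() builds mod_key from key[9]..key[0] with cpu_to_be32()
 * before t4_write_rss_key(), and rss_hash_conf_get() applies the inverse
 * transform after t4_read_rss_key().
 */
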
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual. The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	if (phys_addr < EEPROMVSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
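
/*
 * Worked example for eeprom_ptov() above, using only the first branch so
 * it holds for any function number: a physical offset of 0x40 lies in the
 * first 1K and therefore maps to 0x40 + (31 << 10) = 0x7c40, i.e. into the
 * 31K..32K virtual window; offsets beyond the first 1K are shifted down
 * around the function-specific area as described in the comment block.
 */
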
/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_read(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = t4_seeprom_write(adap, vaddr, v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
			(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
			(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}

void cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
			    pi->xact_addr_filt, (u8 *)addr, true, true);
	if (ret < 0) {
		dev_err(adapter, "failed to set mac addr; err = %d\n",
			ret);
		return;
	}
	pi->xact_addr_filt = ret;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set		= cxgbe_mac_addr_set,
};
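
/*
 * The ops table above is what connects this PMD to the generic ethdev
 * API: for example rte_eth_dev_configure() ends up in
 * cxgbe_dev_configure(), rte_eth_dev_start() in cxgbe_dev_start(), and
 * rte_eth_dev_rss_hash_update() in cxgbe_dev_rss_hash_update(). The
 * table is attached to the port in eth_cxgbe_dev_init() below.
 */
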
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* for secondary processes, we attach to ethdevs allocated by primary
	 * and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops =
					eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct port_info),
					     eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
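
/*
 * How a device typically reaches this driver (a sketch of the usual flow,
 * not a requirement imposed by this file): the Chelsio PCI function is
 * first bound to vfio-pci, igb_uio or uio_pci_generic (for example with
 * DPDK's usertools/dpdk-devbind.py), then at EAL start-up the PCI bus
 * matches the device against cxgb4_pci_tbl and calls
 * eth_cxgbe_pci_probe(), which in turn runs eth_cxgbe_dev_init() for the
 * port.
 */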