/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_ether.h>
#include <dpaa_of.h>

#include "pfe_logs.h"
#include "pfe_mod.h"

/* The HW supports up to 4 MACs per interface; the PMD exposes only one. */
#define PFE_MAX_MACS 1
#define PFE_VDEV_GEM_ID_ARG	"intf"

struct pfe_vdev_init_params {
	int8_t	gem_id;
};

static struct pfe *g_pfe;

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

/* TODO: make pfe_svr a runtime option.
 * The driver should be able to get the SVR
 * information from the HW.
 */
unsigned int pfe_svr = SVR_LS1012A_REV1;
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];

/* Apply the default GEMAC configuration: 1 Gbps full duplex, broadcast and
 * 1536-byte frames accepted, stacked VLAN, Rx pause frames, Rx checksum
 * offload and a 64-bit bus width.
 */
static int
pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
	struct gemac_cfg cfg;

	cfg.speed = SPEED_1000M;
	cfg.duplex = DUPLEX_FULL;

	gemac_set_config(priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast(priv->EMAC_baseaddr);
	gemac_enable_1536_rx(priv->EMAC_baseaddr);
	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
	gemac_enable_pause_rx(priv->EMAC_baseaddr);
	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
	gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);

	return 0;
}

static void
pfe_soc_version_get(void)
{
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(PFE_SOC_ID_FILE, "r");
	if (!svr_file) {
		PFE_PMD_ERR("Unable to open SoC device");
		return; /* Not supported on this infra */
	}

	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		pfe_svr = svr_ver;
	else
		PFE_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);
}

static int
pfe_eth_start(struct pfe_eth_priv_s *priv)
{
	gpi_enable(priv->GPI_baseaddr);
	gemac_enable(priv->EMAC_baseaddr);

	return 0;
}

static void
pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
		  __rte_unused int from_tx, __rte_unused int n_desc)
{
	struct rte_mbuf *mbuf;
	unsigned int flags;

	/* Clean HIF and client queue */
	while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
						    tx_q_num, &flags,
						    HIF_TX_DESC_NT))) {
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		rte_pktmbuf_free(mbuf);
	}
}

static void
pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
{
	unsigned int ii;

	for (ii = 0; ii < emac_txq_cnt; ii++)
		pfe_eth_flush_txQ(priv, ii, 0, 0);
}

static int
pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
{
	struct pfe_eth_priv_s *priv = data;

	switch (event) {
	case EVENT_TXDONE_IND:
		pfe_eth_flush_tx(priv);
		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
		break;
	case EVENT_HIGH_RX_WM:
	default:
		break;
	}

	return 0;
}
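/* Interrupt-backed Rx burst: when a poll finds no completed Rx work, reset
 * the HIF interrupt status, re-arm the Rx-packet interrupt and sleep in
 * epoll_wait() (1 ms timeout) until the HIF signals new packets. This avoids
 * spinning on an idle queue at the cost of a small wakeup latency; it is
 * selected at dev_start time through the PFE_INTR_SUPPORT environment
 * variable (see pfe_eth_open() below).
 */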
static uint16_t
pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct epoll_event epoll_ev;
	uint64_t ticks = 1; /* 1 msec */
	int ret;
	int have_something, work_done;

#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
					rx_pkts, nb_pkts);

	if (!have_something || !work_done) {
		writel(RESET_STATUS, HIF_INT_SRC);
		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
		if (ret < 0 && errno != EINTR)
			PFE_PMD_ERR("epoll_wait fails with %d", errno);
	}

	return work_done;
}

static uint16_t
pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_mempool *pool;

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	pfe_hif_rx_process(priv->pfe, nb_pkts);
	pool = priv->pfe->hif.shm->pool;

	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
}

static uint16_t
pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hif_client_tx_queue *queue = tx_queue;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_eth_stats *stats = &priv->stats;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->nb_segs > 1) {
			struct rte_mbuf *mbuf;
			int j;

			/* First segment opens the frame */
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
				tx_pkts[i]);

			/* Middle segments carry no flags */
			mbuf = tx_pkts[i]->next;
			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
				hif_lib_xmit_pkt(&priv->client,
					queue->queue_id,
					(void *)(size_t)rte_pktmbuf_iova(mbuf),
					mbuf->buf_addr + mbuf->data_off,
					mbuf->data_len,
					0x0, 0x0, mbuf);
				mbuf = mbuf->next;
			}

			/* Last segment closes the frame */
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(mbuf),
				mbuf->buf_addr + mbuf->data_off,
				mbuf->data_len,
				0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
				mbuf);
		} else {
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->pkt_len, 0 /*ctrl*/,
				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
				HIF_DATA_VALID,
				tx_pkts[i]);
		}
		stats->obytes += tx_pkts[i]->pkt_len;
		hif_tx_dma_start();
	}
	stats->opackets += nb_pkts;
	pfe_tx_do_cleanup(priv->pfe);

	return nb_pkts;
}

static uint16_t
pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
pfe_dummy_recv_pkts(__rte_unused void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}
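/* dev_start callback. All PFE ports share a single HIF, and each port
 * registers with the HIF library as a client (one client ID per GEMAC).
 * If the client is still registered from an earlier start/stop cycle, the
 * stale packets sitting in its Rx queue are drained and freed instead of
 * re-registering; the TODO below notes that this path should go away once
 * eth_close performs a full cleanup.
 */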
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct hif_client_s *client;
	struct hif_shm *hif_shm;
	int rc;

	/* Register client driver with HIF */
	client = &priv->client;

	if (client->pfe) {
		hif_shm = client->pfe->hif.shm;
		/* TODO: remove the code in this if block once proper cleanup
		 * is added to eth_close.
		 */
		if (!test_bit(PFE_CL_GEM0 + priv->id,
			      &hif_shm->g_client_status[0])) {
			/* Register client driver with HIF */
			memset(client, 0, sizeof(*client));
			client->id = PFE_CL_GEM0 + priv->id;
			client->tx_qn = emac_txq_cnt;
			client->rx_qn = EMAC_RXQ_CNT;
			client->priv = priv;
			client->pfe = priv->pfe;
			client->port_id = dev->data->port_id;
			client->event_handler = pfe_eth_event_handler;

			client->tx_qsize = EMAC_TXQ_DEPTH;
			client->rx_qsize = EMAC_RXQ_DEPTH;

			rc = hif_lib_client_register(client);
			if (rc) {
				PFE_PMD_ERR("hif_lib_client_register(%d) failed",
					    client->id);
				goto err0;
			}
		} else {
			/* Client already registered; free any packets left
			 * over in its Rx queue.
			 */
			int ret = 0;
			struct rte_mbuf *rx_pkts[32];

			/* TODO: multiqueue support */
			ret = hif_lib_receive_pkt(&client->rx_q[0],
						  hif_shm->pool, rx_pkts, 32);
			while (ret) {
				int i;

				for (i = 0; i < ret; i++)
					rte_pktmbuf_free(rx_pkts[i]);
				ret = hif_lib_receive_pkt(&client->rx_q[0],
							  hif_shm->pool,
							  rx_pkts, 32);
			}
		}
	} else {
		/* Register client driver with HIF */
		memset(client, 0, sizeof(*client));
		client->id = PFE_CL_GEM0 + priv->id;
		client->tx_qn = emac_txq_cnt;
		client->rx_qn = EMAC_RXQ_CNT;
		client->priv = priv;
		client->pfe = priv->pfe;
		client->port_id = dev->data->port_id;
		client->event_handler = pfe_eth_event_handler;

		client->tx_qsize = EMAC_TXQ_DEPTH;
		client->rx_qsize = EMAC_RXQ_DEPTH;

		rc = hif_lib_client_register(client);
		if (rc) {
			PFE_PMD_ERR("hif_lib_client_register(%d) failed",
				    client->id);
			goto err0;
		}
	}
	rc = pfe_eth_start(priv);
	dev->rx_pkt_burst = &pfe_recv_pkts;
	dev->tx_pkt_burst = &pfe_xmit_pkts;
	/* Switch the Rx path to interrupt mode if requested via the
	 * environment.
	 */
	if (getenv("PFE_INTR_SUPPORT")) {
		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
		PFE_PMD_INFO("PFE interrupt mode enabled");
	}

err0:
	return rc;
}

static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
	int pfe_cdev_fd;

	if (priv == NULL)
		return -1;

	pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
	if (pfe_cdev_fd < 0) {
		PFE_PMD_WARN("Unable to open PFE device file (%s).",
			     PFE_CDEV_PATH);
		PFE_PMD_WARN("Link status update will not be available.");
		priv->link_fd = PFE_CDEV_INVALID_FD;
		return -1;
	}

	priv->link_fd = pfe_cdev_fd;

	return 0;
}

static void
pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
{
	if (priv == NULL)
		return;

	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		close(priv->link_fd);
		priv->link_fd = PFE_CDEV_INVALID_FD;
	}
}

static void
pfe_eth_stop(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	gemac_disable(priv->EMAC_baseaddr);
	gpi_disable(priv->GPI_baseaddr);

	/* Park the burst functions so a stopped port drops all traffic */
	dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
	dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
}

static void
pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();

	pfe_eth_stop(dev);
	/* Close the device file for link status */
	pfe_eth_close_cdev(dev->data->dev_private);

	rte_eth_dev_release_port(dev);
	pfe->nb_devs--;
}
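/* The HIF and its library state are shared by every PFE port, so they are
 * torn down, together with the global g_pfe context, only when the last
 * port is closed (nb_devs reaches zero).
 */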
static void
pfe_eth_close(struct rte_eth_dev *dev)
{
	if (!dev)
		return;

	if (!g_pfe)
		return;

	pfe_eth_exit(dev, g_pfe);

	if (g_pfe->nb_devs == 0) {
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}
}

static int
pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
pfe_eth_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pfe_eth_priv_s *internals = dev->data->dev_private;

	dev_info->if_index = internals->id;
	dev_info->max_mac_addrs = PFE_MAX_MACS;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	dev_info->tx_offload_capa = dev_tx_offloads_sup;
	/* LS1012A rev 1 silicon limits the maximum frame size */
	if (pfe_svr == SVR_LS1012A_REV1) {
		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
		dev_info->max_mtu = MAX_MTU_ON_REV1;
	} else {
		dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
		dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
	}

	return 0;
}
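/* The Rx path below has an unusual constraint: because the shared HIF buffer
 * descriptors are initialized only once (hif.setuped), only the mempool
 * passed on the first Rx queue-setup call anywhere in the system is used.
 * A sketch of conforming application code, where port0/port1 stand for the
 * two PFE port IDs and all names and sizes are illustrative only:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("pfe_pool", 1024, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	rte_eth_rx_queue_setup(port0, 0, 128, rte_socket_id(), NULL, mp);
 *	rte_eth_rx_queue_setup(port1, 0, 128, rte_socket_id(), NULL, mp);
 *
 * A different pool passed on a later call is silently ignored.
 */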
/* Only the mb_pool given on the first call of this API is used by the whole
 * system; nb_rx_desc and rx_conf are unused parameters.
 */
static int
pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		   __rte_unused uint16_t nb_rx_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	int rc = 0;
	struct pfe *pfe;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe = priv->pfe;

	if (queue_idx >= EMAC_RXQ_CNT) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, EMAC_RXQ_CNT);
		return -1;
	}

	if (!pfe->hif.setuped) {
		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
		if (rc) {
			PFE_PMD_ERR("Could not allocate buffer descriptors");
			return -1;
		}

		pfe->hif.shm->pool = mb_pool;
		if (pfe_hif_init_buffers(&pfe->hif)) {
			PFE_PMD_ERR("Could not initialize buffer descriptors");
			return -1;
		}
		hif_init();
		hif_rx_enable();
		hif_tx_enable();
		pfe->hif.setuped = 1;
	}
	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
	priv->client.rx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static void
pfe_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
pfe_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t queue_idx,
		   __rte_unused uint16_t nb_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	if (queue_idx >= emac_txq_cnt) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, emac_txq_cnt);
		return -1;
	}
	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
	priv->client.tx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static const uint32_t *
pfe_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		/* The ethdev API requires this terminator */
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == pfe_recv_pkts ||
	    dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
		return ptypes;
	return NULL;
}

/* struct rte_eth_link fits in a single 64-bit word, so the link status can
 * be read and written atomically with one compare-and-set over the whole
 * structure.
 */
static inline int
pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/* Link state is maintained by the PFE kernel driver and queried through the
 * character device opened in pfe_eth_open_cdev(), one ioctl command per
 * interface. Without the cdev, the link is always reported as up.
 */
static int
pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	int ret, ioctl_cmd = 0;
	struct pfe_eth_priv_s *priv;
	struct rte_eth_link link, old;
	unsigned int lstatus = 1;

	if (dev == NULL) {
		PFE_PMD_ERR("Invalid device in link_update.");
		return 0;
	}

	priv = dev->data->dev_private;
	memset(&old, 0, sizeof(old));
	memset(&link, 0, sizeof(struct rte_eth_link));

	pfe_eth_atomic_read_link_status(dev, &old);

	/* Read the link status from the PFE CDEV, if the file was
	 * successfully opened.
	 */
	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		if (priv->id == 0)
			ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
		if (priv->id == 1)
			ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;

		ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
		if (ret != 0) {
			PFE_PMD_ERR("Unable to fetch link status (ioctl)");
			/* use dummy link value */
			link.link_status = 1;
		}
		PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.",
			      lstatus, priv->id);
	}

	if (old.link_status == lstatus) {
		/* no change in status */
		PFE_PMD_DEBUG("No change in link status; not updating.");
		return -1;
	}

	link.link_status = lstatus;
	link.link_speed = ETH_SPEED_NUM_1000;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	pfe_eth_atomic_write_link_status(dev, &link);

	PFE_PMD_INFO("Port (%d) link is %s", dev->data->port_id,
		     link.link_status ? "up" : "down");

	return 0;
}

static int
pfe_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 1;
	dev->data->promiscuous = 1;
	gemac_enable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 0;
	dev->data->promiscuous = 0;
	gemac_disable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr hash_addr; /* hash register structure */

	/* Set the hash to Rx all multicast frames: with every bit of the
	 * 64-bit hash filter set, any multicast address matches.
	 */
	hash_addr.bottom = 0xFFFFFFFF;
	hash_addr.top = 0xFFFFFFFF;
	gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
	dev->data->all_multicast = 1;

	return 0;
}

static int
pfe_link_down(struct rte_eth_dev *dev)
{
	pfe_eth_stop(dev);
	return 0;
}

static int
pfe_link_up(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe_eth_start(priv);
	return 0;
}

static int
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* TODO: support VLAN */
	ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}
/* Pack the six MAC address bytes into the two GEMAC "specific address"
 * register words, little-endian within each word. For example (hypothetical
 * address), 00:11:22:33:44:55 packs as bottom = 0x33221100, top = 0x5544.
 */
static int
pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
			   struct pfe_mac_addr *enet_addr)
{
	if (!enet_byte_addr || !enet_addr)
		return -1;

	enet_addr->bottom = enet_byte_addr[0] |
			(enet_byte_addr[1] << 8) |
			(enet_byte_addr[2] << 16) |
			(enet_byte_addr[3] << 24);
	enet_addr->top = enet_byte_addr[4] |
			(enet_byte_addr[5] << 8);

	return 0;
}

static int
pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
		     struct rte_ether_addr *addr)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr spec_addr;
	int ret;

	ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
	if (ret)
		return ret;

	gemac_set_laddrN(priv->EMAC_baseaddr,
			 (struct pfe_mac_addr *)&spec_addr, 1);
	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

static int
pfe_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &priv->stats;

	if (stats == NULL)
		return -1;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = pfe_eth_open,
	.dev_stop = pfe_eth_stop,
	.dev_close = pfe_eth_close,
	.dev_configure = pfe_eth_configure,
	.dev_infos_get = pfe_eth_info,
	.rx_queue_setup = pfe_rx_queue_setup,
	.rx_queue_release = pfe_rx_queue_release,
	.tx_queue_setup = pfe_tx_queue_setup,
	.tx_queue_release = pfe_tx_queue_release,
	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
	.link_update = pfe_eth_link_update,
	.promiscuous_enable = pfe_promiscuous_enable,
	.promiscuous_disable = pfe_promiscuous_disable,
	.allmulticast_enable = pfe_allmulticast_enable,
	.dev_set_link_down = pfe_link_down,
	.dev_set_link_up = pfe_link_up,
	.mtu_set = pfe_mtu_set,
	.mac_addr_set = pfe_dev_set_mac_addr,
	.stats_get = pfe_stats_get,
};
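/* Per-port initialization: allocate an ethdev for one GEMAC, wire it to the
 * EMAC/GPI register blocks discovered at probe time, and program the MAC
 * address taken from the device tree. Each GEMAC owns a (low, high) priority
 * pair of TMU queues, carved out at two queues per port starting from
 * HIF_GEMAC_TMUQ_BASE.
 */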
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct ls1012a_eth_platform_data *einfo;
	struct ls1012a_pfe_platform_data *pfe_info;
	struct rte_ether_addr addr;
	int err;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Extract platform data */
	pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
	if (!pfe_info) {
		PFE_PMD_ERR("pfe missing additional platform data");
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;

	/* einfo should never be NULL, but there is no harm in checking */
	if (!einfo) {
		PFE_PMD_ERR("pfe missing additional gemacs platform data");
		err = -ENODEV;
		goto err0;
	}

	priv = eth_dev->data->dev_private;
	priv->ndev = eth_dev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

	pfe->eth.eth_priv[id] = priv;

	/* Set the info in the priv to the current info */
	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	priv->PHY_baseaddr = cbus_emac_base[id];
	priv->GPI_baseaddr = cbus_gpi_base[id];

#define HIF_GEMAC_TMUQ_BASE	6
	priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmu_q = priv->low_tmu_q + 1;

	rte_spinlock_init(&priv->lock);

	/* Copy the station address into the dev structure */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			    ETHER_ADDR_LEN * PFE_MAX_MACS);
		err = -ENOMEM;
		goto err0;
	}

	memcpy(addr.addr_bytes, priv->einfo->mac_addr, ETH_ALEN);

	pfe_dev_set_mac_addr(eth_dev, &addr);
	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);

	eth_dev->data->mtu = 1500;
	eth_dev->dev_ops = &ops;
	pfe_eth_stop(eth_dev);
	pfe_gemac_init(priv);

	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;

	/* For link status, open the PFE CDEV; any error from this call is
	 * silently ignored and simply means the link status will not be
	 * available.
	 */
	pfe_eth_open_cdev(priv);
	rte_eth_dev_probing_finish(eth_dev);

	return 0;

err0:
	rte_eth_dev_release_port(eth_dev);
	return err;
}

static int
pfe_get_gemac_if_properties(struct pfe *pfe,
		__rte_unused const struct device_node *parent,
		unsigned int port, unsigned int if_cnt,
		struct ls1012a_pfe_platform_data *pdata)
{
	const struct device_node *gem = NULL;
	size_t size;
	unsigned int ii = 0, phy_id = 0;
	const u32 *addr;
	const void *mac_addr;

	for (ii = 0; ii < if_cnt; ii++) {
		gem = of_get_next_child(parent, gem);
		if (!gem)
			goto err;
		addr = of_get_property(gem, "reg", &size);
		if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
			break;
	}

	if (ii >= if_cnt) {
		PFE_PMD_ERR("Failed to find interface = %d", port);
		goto err;
	}

	pdata->ls1012a_eth_pdata[port].gem_id = port;

	mac_addr = of_get_mac_address(gem);
	if (mac_addr) {
		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
		       ETH_ALEN);
	}

	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
	if (!addr) {
		PFE_PMD_ERR("Invalid mdio-mux-val");
	} else {
		phy_id = rte_be_to_cpu_32((unsigned int)*addr);
		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
	}
	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
			pdata->ls1012a_eth_pdata[port].mdio_muxval;

	return 0;

err:
	return -1;
}

/* Parse an integer from the "intf" device argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	long i;
	char *end;

	errno = 0;
	i = strtol(value, &end, 10);
	if (*end != 0 || errno != 0 || i < 0 || i > 1) {
		PFE_PMD_ERR("Supported Port IDs are 0 and 1");
		return -EINVAL;
	}

	/* extra_args points at the int8_t gem_id field; store one byte */
	*((int8_t *)extra_args) = (int8_t)i;

	return 0;
}

static int
pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
			   struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	static const char * const pfe_vdev_valid_params[] = {
		PFE_VDEV_GEM_ID_ARG,
		NULL
	};

	const char *input_args = rte_vdev_device_args(dev);

	if (!input_args)
		return -1;

	kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
	if (kvlist == NULL)
		return -1;

	ret = rte_kvargs_process(kvlist,
				 PFE_VDEV_GEM_ID_ARG,
				 &parse_integer_arg,
				 &params->gem_id);
	rte_kvargs_free(kvlist);

	return ret;
}
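/* Probe entry point for the vdev bus. A usage sketch, assuming PFE_NAME_PMD
 * expands to "net_pfe" (the vdev name and testpmd arguments below are
 * illustrative only):
 *
 *	testpmd -c 0x3 -n 1 --vdev="net_pfe0,intf=0" -- -i
 *
 * The first probe maps the PFE CBUS/DDR regions from the device tree and
 * brings up the shared HIF; subsequent probes only add another ethdev for
 * the requested GEMAC.
 */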
static int
pmd_pfe_probe(struct rte_vdev_device *vdev)
{
	const u32 *prop;
	const struct device_node *np;
	const char *name;
	const uint32_t *addr;
	uint64_t cbus_addr, ddr_size, cbus_size;
	int rc = -1, fd = -1, gem_id;
	unsigned int ii, interface_count = 0;
	size_t size = 0;
	struct pfe_vdev_init_params init_params = {
		.gem_id = -1
	};

	name = rte_vdev_device_name(vdev);
	rc = pfe_parse_vdev_init_params(&init_params, vdev);
	if (rc < 0)
		return -EINVAL;

	PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d",
		    name, init_params.gem_id);

	if (g_pfe) {
		if (g_pfe->nb_devs >= g_pfe->max_intf) {
			PFE_PMD_ERR("PFE %d dev already created Max is %d",
				    g_pfe->nb_devs, g_pfe->max_intf);
			return -EINVAL;
		}
		goto eth_init;
	}

	g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
	if (g_pfe == NULL)
		return -ENOMEM;

	/* Load the device-tree driver */
	rc = of_init();
	if (rc) {
		PFE_PMD_ERR("of_init failed with ret: %d", rc);
		goto err;
	}

	np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
	if (!np) {
		PFE_PMD_ERR("Invalid device node");
		rc = -EINVAL;
		goto err;
	}

	addr = of_get_address(np, 0, &cbus_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return cbus address");
		rc = -EINVAL;
		goto err;
	}
	cbus_addr = of_translate_address(np, addr);
	if (!cbus_addr) {
		PFE_PMD_ERR("of_translate_address failed");
		rc = -EINVAL;
		goto err;
	}

	addr = of_get_address(np, 1, &ddr_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return ddr address");
		rc = -EINVAL;
		goto err;
	}

	g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
	if (!g_pfe->ddr_phys_baseaddr) {
		PFE_PMD_ERR("of_translate_address failed");
		rc = -EINVAL;
		goto err;
	}

	g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
	g_pfe->ddr_size = ddr_size;
	g_pfe->cbus_size = cbus_size;

	fd = open("/dev/mem", O_RDWR);
	if (fd < 0) {
		PFE_PMD_ERR("Can not open /dev/mem");
		rc = -EINVAL;
		goto err;
	}
	g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, cbus_addr);
	close(fd);
	if (g_pfe->cbus_baseaddr == MAP_FAILED) {
		PFE_PMD_ERR("Can not map cbus base");
		rc = -EINVAL;
		goto err;
	}

	/* Read interface count */
	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
	if (!prop) {
		PFE_PMD_ERR("Failed to read number of interfaces");
		rc = -ENXIO;
		goto err_prop;
	}

	interface_count = rte_be_to_cpu_32((unsigned int)*prop);
	if (!interface_count) {
		PFE_PMD_ERR("No ethernet interface count : %d",
			    interface_count);
		rc = -ENXIO;
		goto err_prop;
	}
	PFE_PMD_INFO("num interfaces = %d", interface_count);

	g_pfe->max_intf = interface_count;
	g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;

	for (ii = 0; ii < interface_count; ii++) {
		pfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,
					    &g_pfe->platform_data);
	}

	pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
		     g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);

	PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
	PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));

	PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
	PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));

	PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));

	PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
	PFE_PMD_INFO("HIF NOCPY version: %x", readl(HIF_NOCPY_VERSION));

	cbus_emac_base[0] = EMAC1_BASE_ADDR;
	cbus_emac_base[1] = EMAC2_BASE_ADDR;

	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
	cbus_gpi_base[1] = EGPI2_BASE_ADDR;

	rc = pfe_hif_lib_init(g_pfe);
	if (rc < 0)
		goto err_hif_lib;

	rc = pfe_hif_init(g_pfe);
	if (rc < 0)
		goto err_hif;
	pfe_soc_version_get();
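	/* Shared-HIF setup is done (or was skipped on a repeat probe); now
	 * create the ethdev for the requested GEMAC. When no "intf" devarg
	 * was given, gem IDs are assigned in probe order.
	 */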
eth_init:
	if (init_params.gem_id < 0)
		gem_id = g_pfe->nb_devs;
	else
		gem_id = init_params.gem_id;

	PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given = %d)",
		    name, gem_id, init_params.gem_id);

	rc = pfe_eth_init(vdev, g_pfe, gem_id);
	if (rc < 0)
		goto err_eth;
	else
		g_pfe->nb_devs++;

	return 0;

err_eth:
	pfe_hif_exit(g_pfe);

err_hif:
	pfe_hif_lib_exit(g_pfe);

err_hif_lib:
err_prop:
	munmap(g_pfe->cbus_baseaddr, cbus_size);
err:
	rte_free(g_pfe);
	g_pfe = NULL;
	return rc;
}

static int
pmd_pfe_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PFE_PMD_INFO("Closing PFE vdev device %s", name);

	if (!g_pfe)
		return 0;

	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	pfe_eth_exit(eth_dev, g_pfe);

	if (g_pfe->nb_devs == 0) {
		/* The CBUS mapping is shared by all ports; unmap it only
		 * when the last one is gone.
		 */
		munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}

	return 0;
}

static struct rte_vdev_driver pmd_pfe_drv = {
	.probe = pmd_pfe_probe,
	.remove = pmd_pfe_remove,
};

RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
RTE_LOG_REGISTER(pfe_logtype_pmd, pmd.net.pfe, NOTICE);