1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2019 NXP 3 */ 4 5 #include <sys/ioctl.h> 6 #include <sys/epoll.h> 7 #include <rte_kvargs.h> 8 #include <rte_ethdev_vdev.h> 9 #include <rte_bus_vdev.h> 10 #include <rte_ether.h> 11 #include <dpaa_of.h> 12 13 #include "pfe_logs.h" 14 #include "pfe_mod.h" 15 16 #define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/ 17 #define PFE_VDEV_GEM_ID_ARG "intf" 18 19 struct pfe_vdev_init_params { 20 int8_t gem_id; 21 }; 22 static struct pfe *g_pfe; 23 /* Supported Rx offloads */ 24 static uint64_t dev_rx_offloads_sup = 25 DEV_RX_OFFLOAD_IPV4_CKSUM | 26 DEV_RX_OFFLOAD_UDP_CKSUM | 27 DEV_RX_OFFLOAD_TCP_CKSUM; 28 29 /* Supported Tx offloads */ 30 static uint64_t dev_tx_offloads_sup = 31 DEV_TX_OFFLOAD_IPV4_CKSUM | 32 DEV_TX_OFFLOAD_UDP_CKSUM | 33 DEV_TX_OFFLOAD_TCP_CKSUM; 34 35 /* TODO: make pfe_svr a runtime option. 36 * Driver should be able to get the SVR 37 * information from HW. 38 */ 39 unsigned int pfe_svr = SVR_LS1012A_REV1; 40 static void *cbus_emac_base[3]; 41 static void *cbus_gpi_base[3]; 42 43 int pfe_logtype_pmd; 44 45 /* pfe_gemac_init 46 */ 47 static int 48 pfe_gemac_init(struct pfe_eth_priv_s *priv) 49 { 50 struct gemac_cfg cfg; 51 52 cfg.speed = SPEED_1000M; 53 cfg.duplex = DUPLEX_FULL; 54 55 gemac_set_config(priv->EMAC_baseaddr, &cfg); 56 gemac_allow_broadcast(priv->EMAC_baseaddr); 57 gemac_enable_1536_rx(priv->EMAC_baseaddr); 58 gemac_enable_stacked_vlan(priv->EMAC_baseaddr); 59 gemac_enable_pause_rx(priv->EMAC_baseaddr); 60 gemac_set_bus_width(priv->EMAC_baseaddr, 64); 61 gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); 62 63 return 0; 64 } 65 66 static void 67 pfe_soc_version_get(void) 68 { 69 FILE *svr_file = NULL; 70 unsigned int svr_ver = 0; 71 72 PMD_INIT_FUNC_TRACE(); 73 74 svr_file = fopen(PFE_SOC_ID_FILE, "r"); 75 if (!svr_file) { 76 PFE_PMD_ERR("Unable to open SoC device"); 77 return; /* Not supported on this infra */ 78 } 79 80 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 81 pfe_svr = svr_ver; 82 else 83 PFE_PMD_ERR("Unable to read SoC device"); 84 85 fclose(svr_file); 86 } 87 88 static int pfe_eth_start(struct pfe_eth_priv_s *priv) 89 { 90 gpi_enable(priv->GPI_baseaddr); 91 gemac_enable(priv->EMAC_baseaddr); 92 93 return 0; 94 } 95 96 static void 97 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int 98 __rte_unused from_tx, __rte_unused int n_desc) 99 { 100 struct rte_mbuf *mbuf; 101 unsigned int flags; 102 103 /* Clean HIF and client queue */ 104 while ((mbuf = hif_lib_tx_get_next_complete(&priv->client, 105 tx_q_num, &flags, 106 HIF_TX_DESC_NT))) { 107 if (mbuf) { 108 mbuf->next = NULL; 109 mbuf->nb_segs = 1; 110 rte_pktmbuf_free(mbuf); 111 } 112 } 113 } 114 115 116 static void 117 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv) 118 { 119 unsigned int ii; 120 121 for (ii = 0; ii < emac_txq_cnt; ii++) 122 pfe_eth_flush_txQ(priv, ii, 0, 0); 123 } 124 125 static int 126 pfe_eth_event_handler(void *data, int event, __rte_unused int qno) 127 { 128 struct pfe_eth_priv_s *priv = data; 129 130 switch (event) { 131 case EVENT_TXDONE_IND: 132 pfe_eth_flush_tx(priv); 133 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0); 134 break; 135 case EVENT_HIGH_RX_WM: 136 default: 137 break; 138 } 139 140 return 0; 141 } 142 143 static uint16_t 144 pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 145 { 146 struct hif_client_rx_queue *queue = rxq; 147 struct pfe_eth_priv_s *priv = queue->priv; 148 struct epoll_event epoll_ev; 149 uint64_t ticks = 1; /* 1 msec */ 150 int ret; 151 int have_something, work_done; 152 153 #define RESET_STATUS (HIF_INT | HIF_RXPKT_INT) 154 155 /*TODO can we remove this cleanup from here?*/ 156 pfe_tx_do_cleanup(priv->pfe); 157 have_something = pfe_hif_rx_process(priv->pfe, nb_pkts); 158 work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool, 159 rx_pkts, nb_pkts); 160 161 if (!have_something || !work_done) { 162 writel(RESET_STATUS, HIF_INT_SRC); 163 writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE); 164 ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks); 165 if (ret < 0 && errno != EINTR) 166 PFE_PMD_ERR("epoll_wait fails with %d\n", errno); 167 } 168 169 return work_done; 170 } 171 172 static uint16_t 173 pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 174 { 175 struct hif_client_rx_queue *queue = rxq; 176 struct pfe_eth_priv_s *priv = queue->priv; 177 struct rte_mempool *pool; 178 179 /*TODO can we remove this cleanup from here?*/ 180 pfe_tx_do_cleanup(priv->pfe); 181 pfe_hif_rx_process(priv->pfe, nb_pkts); 182 pool = priv->pfe->hif.shm->pool; 183 184 return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts); 185 } 186 187 static uint16_t 188 pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 189 { 190 struct hif_client_tx_queue *queue = tx_queue; 191 struct pfe_eth_priv_s *priv = queue->priv; 192 struct rte_eth_stats *stats = &priv->stats; 193 int i; 194 195 for (i = 0; i < nb_pkts; i++) { 196 if (tx_pkts[i]->nb_segs > 1) { 197 struct rte_mbuf *mbuf; 198 int j; 199 200 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 201 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 202 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 203 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER, 204 tx_pkts[i]); 205 206 mbuf = tx_pkts[i]->next; 207 for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) { 208 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 209 (void *)(size_t)rte_pktmbuf_iova(mbuf), 210 mbuf->buf_addr + mbuf->data_off, 211 mbuf->data_len, 212 0x0, 0x0, mbuf); 213 mbuf = mbuf->next; 214 } 215 216 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 217 (void *)(size_t)rte_pktmbuf_iova(mbuf), 218 mbuf->buf_addr + mbuf->data_off, 219 mbuf->data_len, 220 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID, 221 mbuf); 222 } else { 223 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 224 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 225 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 226 tx_pkts[i]->pkt_len, 0 /*ctrl*/, 227 HIF_FIRST_BUFFER | HIF_LAST_BUFFER | 228 HIF_DATA_VALID, 229 tx_pkts[i]); 230 } 231 stats->obytes += tx_pkts[i]->pkt_len; 232 hif_tx_dma_start(); 233 } 234 stats->opackets += nb_pkts; 235 pfe_tx_do_cleanup(priv->pfe); 236 237 return nb_pkts; 238 } 239 240 static uint16_t 241 pfe_dummy_xmit_pkts(__rte_unused void *tx_queue, 242 __rte_unused struct rte_mbuf **tx_pkts, 243 __rte_unused uint16_t nb_pkts) 244 { 245 return 0; 246 } 247 248 static uint16_t 249 pfe_dummy_recv_pkts(__rte_unused void *rxq, 250 __rte_unused struct rte_mbuf **rx_pkts, 251 __rte_unused uint16_t nb_pkts) 252 { 253 return 0; 254 } 255 256 static int 257 pfe_eth_open(struct rte_eth_dev *dev) 258 { 259 struct pfe_eth_priv_s *priv = dev->data->dev_private; 260 struct hif_client_s *client; 261 struct hif_shm *hif_shm; 262 int rc; 263 264 /* Register client driver with HIF */ 265 client = &priv->client; 266 267 if (client->pfe) { 268 hif_shm = client->pfe->hif.shm; 269 /* TODO please remove the below code of if block, once we add 270 * the proper cleanup in eth_close 271 */ 272 if (!test_bit(PFE_CL_GEM0 + priv->id, 273 &hif_shm->g_client_status[0])) { 274 /* Register client driver with HIF */ 275 memset(client, 0, sizeof(*client)); 276 client->id = PFE_CL_GEM0 + priv->id; 277 client->tx_qn = emac_txq_cnt; 278 client->rx_qn = EMAC_RXQ_CNT; 279 client->priv = priv; 280 client->pfe = priv->pfe; 281 client->port_id = dev->data->port_id; 282 client->event_handler = pfe_eth_event_handler; 283 284 client->tx_qsize = EMAC_TXQ_DEPTH; 285 client->rx_qsize = EMAC_RXQ_DEPTH; 286 287 rc = hif_lib_client_register(client); 288 if (rc) { 289 PFE_PMD_ERR("hif_lib_client_register(%d)" 290 " failed", client->id); 291 goto err0; 292 } 293 } else { 294 /* Freeing the packets if already exists */ 295 int ret = 0; 296 struct rte_mbuf *rx_pkts[32]; 297 /* TODO multiqueue support */ 298 ret = hif_lib_receive_pkt(&client->rx_q[0], 299 hif_shm->pool, rx_pkts, 32); 300 while (ret) { 301 int i; 302 for (i = 0; i < ret; i++) 303 rte_pktmbuf_free(rx_pkts[i]); 304 ret = hif_lib_receive_pkt(&client->rx_q[0], 305 hif_shm->pool, 306 rx_pkts, 32); 307 } 308 } 309 } else { 310 /* Register client driver with HIF */ 311 memset(client, 0, sizeof(*client)); 312 client->id = PFE_CL_GEM0 + priv->id; 313 client->tx_qn = emac_txq_cnt; 314 client->rx_qn = EMAC_RXQ_CNT; 315 client->priv = priv; 316 client->pfe = priv->pfe; 317 client->port_id = dev->data->port_id; 318 client->event_handler = pfe_eth_event_handler; 319 320 client->tx_qsize = EMAC_TXQ_DEPTH; 321 client->rx_qsize = EMAC_RXQ_DEPTH; 322 323 rc = hif_lib_client_register(client); 324 if (rc) { 325 PFE_PMD_ERR("hif_lib_client_register(%d) failed", 326 client->id); 327 goto err0; 328 } 329 } 330 rc = pfe_eth_start(priv); 331 dev->rx_pkt_burst = &pfe_recv_pkts; 332 dev->tx_pkt_burst = &pfe_xmit_pkts; 333 /* If no prefetch is configured. */ 334 if (getenv("PFE_INTR_SUPPORT")) { 335 dev->rx_pkt_burst = &pfe_recv_pkts_on_intr; 336 PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); 337 } 338 339 340 err0: 341 return rc; 342 } 343 344 static int 345 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv) 346 { 347 int pfe_cdev_fd; 348 349 if (priv == NULL) 350 return -1; 351 352 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY); 353 if (pfe_cdev_fd < 0) { 354 PFE_PMD_WARN("Unable to open PFE device file (%s).\n", 355 PFE_CDEV_PATH); 356 PFE_PMD_WARN("Link status update will not be available.\n"); 357 priv->link_fd = PFE_CDEV_INVALID_FD; 358 return -1; 359 } 360 361 priv->link_fd = pfe_cdev_fd; 362 363 return 0; 364 } 365 366 static void 367 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv) 368 { 369 if (priv == NULL) 370 return; 371 372 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 373 close(priv->link_fd); 374 priv->link_fd = PFE_CDEV_INVALID_FD; 375 } 376 } 377 378 static void 379 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) 380 { 381 struct pfe_eth_priv_s *priv = dev->data->dev_private; 382 383 gemac_disable(priv->EMAC_baseaddr); 384 gpi_disable(priv->GPI_baseaddr); 385 386 dev->rx_pkt_burst = &pfe_dummy_recv_pkts; 387 dev->tx_pkt_burst = &pfe_dummy_xmit_pkts; 388 } 389 390 static void 391 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe) 392 { 393 PMD_INIT_FUNC_TRACE(); 394 395 pfe_eth_stop(dev); 396 /* Close the device file for link status */ 397 pfe_eth_close_cdev(dev->data->dev_private); 398 399 rte_free(dev->data->mac_addrs); 400 rte_eth_dev_release_port(dev); 401 pfe->nb_devs--; 402 } 403 404 static void 405 pfe_eth_close(struct rte_eth_dev *dev) 406 { 407 if (!dev) 408 return; 409 410 if (!g_pfe) 411 return; 412 413 pfe_eth_exit(dev, g_pfe); 414 415 if (g_pfe->nb_devs == 0) { 416 pfe_hif_exit(g_pfe); 417 pfe_hif_lib_exit(g_pfe); 418 rte_free(g_pfe); 419 g_pfe = NULL; 420 } 421 } 422 423 static int 424 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused) 425 { 426 return 0; 427 } 428 429 static int 430 pfe_eth_info(struct rte_eth_dev *dev, 431 struct rte_eth_dev_info *dev_info) 432 { 433 struct pfe_eth_priv_s *internals = dev->data->dev_private; 434 435 dev_info->if_index = internals->id; 436 dev_info->max_mac_addrs = PFE_MAX_MACS; 437 dev_info->max_rx_queues = dev->data->nb_rx_queues; 438 dev_info->max_tx_queues = dev->data->nb_tx_queues; 439 dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE; 440 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 441 dev_info->rx_offload_capa = dev_rx_offloads_sup; 442 dev_info->tx_offload_capa = dev_tx_offloads_sup; 443 if (pfe_svr == SVR_LS1012A_REV1) { 444 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD; 445 dev_info->max_mtu = MAX_MTU_ON_REV1; 446 } else { 447 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE; 448 dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD; 449 } 450 451 return 0; 452 } 453 454 /* Only first mb_pool given on first call of this API will be used 455 * in whole system, also nb_rx_desc and rx_conf are unused params 456 */ 457 static int 458 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 459 __rte_unused uint16_t nb_rx_desc, 460 __rte_unused unsigned int socket_id, 461 __rte_unused const struct rte_eth_rxconf *rx_conf, 462 struct rte_mempool *mb_pool) 463 { 464 int rc = 0; 465 struct pfe *pfe; 466 struct pfe_eth_priv_s *priv = dev->data->dev_private; 467 468 pfe = priv->pfe; 469 470 if (queue_idx >= EMAC_RXQ_CNT) { 471 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 472 queue_idx, EMAC_RXQ_CNT); 473 return -1; 474 } 475 476 if (!pfe->hif.setuped) { 477 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool); 478 if (rc) { 479 PFE_PMD_ERR("Could not allocate buffer descriptors"); 480 return -1; 481 } 482 483 pfe->hif.shm->pool = mb_pool; 484 if (pfe_hif_init_buffers(&pfe->hif)) { 485 PFE_PMD_ERR("Could not initialize buffer descriptors"); 486 return -1; 487 } 488 hif_init(); 489 hif_rx_enable(); 490 hif_tx_enable(); 491 pfe->hif.setuped = 1; 492 } 493 dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx]; 494 priv->client.rx_q[queue_idx].queue_id = queue_idx; 495 496 return 0; 497 } 498 499 static void 500 pfe_rx_queue_release(void *q __rte_unused) 501 { 502 PMD_INIT_FUNC_TRACE(); 503 } 504 505 static void 506 pfe_tx_queue_release(void *q __rte_unused) 507 { 508 PMD_INIT_FUNC_TRACE(); 509 } 510 511 static int 512 pfe_tx_queue_setup(struct rte_eth_dev *dev, 513 uint16_t queue_idx, 514 __rte_unused uint16_t nb_desc, 515 __rte_unused unsigned int socket_id, 516 __rte_unused const struct rte_eth_txconf *tx_conf) 517 { 518 struct pfe_eth_priv_s *priv = dev->data->dev_private; 519 520 if (queue_idx >= emac_txq_cnt) { 521 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 522 queue_idx, emac_txq_cnt); 523 return -1; 524 } 525 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx]; 526 priv->client.tx_q[queue_idx].queue_id = queue_idx; 527 return 0; 528 } 529 530 static const uint32_t * 531 pfe_supported_ptypes_get(struct rte_eth_dev *dev) 532 { 533 static const uint32_t ptypes[] = { 534 /*todo -= add more types */ 535 RTE_PTYPE_L2_ETHER, 536 RTE_PTYPE_L3_IPV4, 537 RTE_PTYPE_L3_IPV4_EXT, 538 RTE_PTYPE_L3_IPV6, 539 RTE_PTYPE_L3_IPV6_EXT, 540 RTE_PTYPE_L4_TCP, 541 RTE_PTYPE_L4_UDP, 542 RTE_PTYPE_L4_SCTP 543 }; 544 545 if (dev->rx_pkt_burst == pfe_recv_pkts || 546 dev->rx_pkt_burst == pfe_recv_pkts_on_intr) 547 return ptypes; 548 return NULL; 549 } 550 551 static inline int 552 pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev, 553 struct rte_eth_link *link) 554 { 555 struct rte_eth_link *dst = link; 556 struct rte_eth_link *src = &dev->data->dev_link; 557 558 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 559 *(uint64_t *)src) == 0) 560 return -1; 561 562 return 0; 563 } 564 565 static inline int 566 pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev, 567 struct rte_eth_link *link) 568 { 569 struct rte_eth_link *dst = &dev->data->dev_link; 570 struct rte_eth_link *src = link; 571 572 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 573 *(uint64_t *)src) == 0) 574 return -1; 575 576 return 0; 577 } 578 579 static int 580 pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 581 { 582 int ret, ioctl_cmd = 0; 583 struct pfe_eth_priv_s *priv = dev->data->dev_private; 584 struct rte_eth_link link, old; 585 unsigned int lstatus = 1; 586 587 if (dev == NULL) { 588 PFE_PMD_ERR("Invalid device in link_update.\n"); 589 return 0; 590 } 591 592 memset(&old, 0, sizeof(old)); 593 memset(&link, 0, sizeof(struct rte_eth_link)); 594 595 pfe_eth_atomic_read_link_status(dev, &old); 596 597 /* Read from PFE CDEV, status of link, if file was successfully 598 * opened. 599 */ 600 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 601 if (priv->id == 0) 602 ioctl_cmd = PFE_CDEV_ETH0_STATE_GET; 603 if (priv->id == 1) 604 ioctl_cmd = PFE_CDEV_ETH1_STATE_GET; 605 606 ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus); 607 if (ret != 0) { 608 PFE_PMD_ERR("Unable to fetch link status (ioctl)\n"); 609 /* use dummy link value */ 610 link.link_status = 1; 611 } 612 PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n", 613 lstatus, priv->id); 614 } 615 616 if (old.link_status == lstatus) { 617 /* no change in status */ 618 PFE_PMD_DEBUG("No change in link status; Not updating.\n"); 619 return -1; 620 } 621 622 link.link_status = lstatus; 623 link.link_speed = ETH_LINK_SPEED_1G; 624 link.link_duplex = ETH_LINK_FULL_DUPLEX; 625 link.link_autoneg = ETH_LINK_AUTONEG; 626 627 pfe_eth_atomic_write_link_status(dev, &link); 628 629 PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, 630 link.link_status ? "up" : "down"); 631 632 return 0; 633 } 634 635 static int 636 pfe_promiscuous_enable(struct rte_eth_dev *dev) 637 { 638 struct pfe_eth_priv_s *priv = dev->data->dev_private; 639 640 priv->promisc = 1; 641 dev->data->promiscuous = 1; 642 gemac_enable_copy_all(priv->EMAC_baseaddr); 643 644 return 0; 645 } 646 647 static int 648 pfe_promiscuous_disable(struct rte_eth_dev *dev) 649 { 650 struct pfe_eth_priv_s *priv = dev->data->dev_private; 651 652 priv->promisc = 0; 653 dev->data->promiscuous = 0; 654 gemac_disable_copy_all(priv->EMAC_baseaddr); 655 656 return 0; 657 } 658 659 static int 660 pfe_allmulticast_enable(struct rte_eth_dev *dev) 661 { 662 struct pfe_eth_priv_s *priv = dev->data->dev_private; 663 struct pfe_mac_addr hash_addr; /* hash register structure */ 664 665 /* Set the hash to rx all multicast frames */ 666 hash_addr.bottom = 0xFFFFFFFF; 667 hash_addr.top = 0xFFFFFFFF; 668 gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); 669 dev->data->all_multicast = 1; 670 671 return 0; 672 } 673 674 static int 675 pfe_link_down(struct rte_eth_dev *dev) 676 { 677 pfe_eth_stop(dev); 678 return 0; 679 } 680 681 static int 682 pfe_link_up(struct rte_eth_dev *dev) 683 { 684 struct pfe_eth_priv_s *priv = dev->data->dev_private; 685 686 pfe_eth_start(priv); 687 return 0; 688 } 689 690 static int 691 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 692 { 693 int ret; 694 struct pfe_eth_priv_s *priv = dev->data->dev_private; 695 uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 696 697 /*TODO Support VLAN*/ 698 ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size); 699 if (!ret) 700 dev->data->mtu = mtu; 701 702 return ret; 703 } 704 705 /* pfe_eth_enet_addr_byte_mac 706 */ 707 static int 708 pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr, 709 struct pfe_mac_addr *enet_addr) 710 { 711 if (!enet_byte_addr || !enet_addr) { 712 return -1; 713 714 } else { 715 enet_addr->bottom = enet_byte_addr[0] | 716 (enet_byte_addr[1] << 8) | 717 (enet_byte_addr[2] << 16) | 718 (enet_byte_addr[3] << 24); 719 enet_addr->top = enet_byte_addr[4] | 720 (enet_byte_addr[5] << 8); 721 return 0; 722 } 723 } 724 725 static int 726 pfe_dev_set_mac_addr(struct rte_eth_dev *dev, 727 struct rte_ether_addr *addr) 728 { 729 struct pfe_eth_priv_s *priv = dev->data->dev_private; 730 struct pfe_mac_addr spec_addr; 731 int ret; 732 733 ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr); 734 if (ret) 735 return ret; 736 737 gemac_set_laddrN(priv->EMAC_baseaddr, 738 (struct pfe_mac_addr *)&spec_addr, 1); 739 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 740 return 0; 741 } 742 743 static int 744 pfe_stats_get(struct rte_eth_dev *dev, 745 struct rte_eth_stats *stats) 746 { 747 struct pfe_eth_priv_s *priv = dev->data->dev_private; 748 struct rte_eth_stats *eth_stats = &priv->stats; 749 750 if (stats == NULL) 751 return -1; 752 753 memset(stats, 0, sizeof(struct rte_eth_stats)); 754 755 stats->ipackets = eth_stats->ipackets; 756 stats->ibytes = eth_stats->ibytes; 757 stats->opackets = eth_stats->opackets; 758 stats->obytes = eth_stats->obytes; 759 760 return 0; 761 } 762 763 static const struct eth_dev_ops ops = { 764 .dev_start = pfe_eth_open, 765 .dev_stop = pfe_eth_stop, 766 .dev_close = pfe_eth_close, 767 .dev_configure = pfe_eth_configure, 768 .dev_infos_get = pfe_eth_info, 769 .rx_queue_setup = pfe_rx_queue_setup, 770 .rx_queue_release = pfe_rx_queue_release, 771 .tx_queue_setup = pfe_tx_queue_setup, 772 .tx_queue_release = pfe_tx_queue_release, 773 .dev_supported_ptypes_get = pfe_supported_ptypes_get, 774 .link_update = pfe_eth_link_update, 775 .promiscuous_enable = pfe_promiscuous_enable, 776 .promiscuous_disable = pfe_promiscuous_disable, 777 .allmulticast_enable = pfe_allmulticast_enable, 778 .dev_set_link_down = pfe_link_down, 779 .dev_set_link_up = pfe_link_up, 780 .mtu_set = pfe_mtu_set, 781 .mac_addr_set = pfe_dev_set_mac_addr, 782 .stats_get = pfe_stats_get, 783 }; 784 785 static int 786 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) 787 { 788 struct rte_eth_dev *eth_dev = NULL; 789 struct pfe_eth_priv_s *priv = NULL; 790 struct ls1012a_eth_platform_data *einfo; 791 struct ls1012a_pfe_platform_data *pfe_info; 792 struct rte_ether_addr addr; 793 int err; 794 795 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); 796 if (eth_dev == NULL) 797 return -ENOMEM; 798 799 /* Extract pltform data */ 800 pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data; 801 if (!pfe_info) { 802 PFE_PMD_ERR("pfe missing additional platform data"); 803 err = -ENODEV; 804 goto err0; 805 } 806 807 einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata; 808 809 /* einfo never be NULL, but no harm in having this check */ 810 if (!einfo) { 811 PFE_PMD_ERR("pfe missing additional gemacs platform data"); 812 err = -ENODEV; 813 goto err0; 814 } 815 816 priv = eth_dev->data->dev_private; 817 priv->ndev = eth_dev; 818 priv->id = einfo[id].gem_id; 819 priv->pfe = pfe; 820 821 pfe->eth.eth_priv[id] = priv; 822 823 /* Set the info in the priv to the current info */ 824 priv->einfo = &einfo[id]; 825 priv->EMAC_baseaddr = cbus_emac_base[id]; 826 priv->PHY_baseaddr = cbus_emac_base[id]; 827 priv->GPI_baseaddr = cbus_gpi_base[id]; 828 829 #define HIF_GEMAC_TMUQ_BASE 6 830 priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2); 831 priv->high_tmu_q = priv->low_tmu_q + 1; 832 833 rte_spinlock_init(&priv->lock); 834 835 /* Copy the station address into the dev structure, */ 836 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 837 ETHER_ADDR_LEN * PFE_MAX_MACS, 0); 838 if (eth_dev->data->mac_addrs == NULL) { 839 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses", 840 ETHER_ADDR_LEN * PFE_MAX_MACS); 841 err = -ENOMEM; 842 goto err0; 843 } 844 845 memcpy(addr.addr_bytes, priv->einfo->mac_addr, 846 ETH_ALEN); 847 848 pfe_dev_set_mac_addr(eth_dev, &addr); 849 rte_ether_addr_copy(&addr, ð_dev->data->mac_addrs[0]); 850 851 eth_dev->data->mtu = 1500; 852 eth_dev->dev_ops = &ops; 853 pfe_eth_stop(eth_dev); 854 pfe_gemac_init(priv); 855 856 eth_dev->data->nb_rx_queues = 1; 857 eth_dev->data->nb_tx_queues = 1; 858 859 /* For link status, open the PFE CDEV; Error from this function 860 * is silently ignored; In case of error, the link status will not 861 * be available. 862 */ 863 pfe_eth_open_cdev(priv); 864 rte_eth_dev_probing_finish(eth_dev); 865 866 return 0; 867 err0: 868 rte_eth_dev_release_port(eth_dev); 869 return err; 870 } 871 872 static int 873 pfe_get_gemac_if_proprties(struct pfe *pfe, 874 __rte_unused const struct device_node *parent, 875 unsigned int port, unsigned int if_cnt, 876 struct ls1012a_pfe_platform_data *pdata) 877 { 878 const struct device_node *gem = NULL; 879 size_t size; 880 unsigned int ii = 0, phy_id = 0; 881 const u32 *addr; 882 const void *mac_addr; 883 884 for (ii = 0; ii < if_cnt; ii++) { 885 gem = of_get_next_child(parent, gem); 886 if (!gem) 887 goto err; 888 addr = of_get_property(gem, "reg", &size); 889 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port)) 890 break; 891 } 892 893 if (ii >= if_cnt) { 894 PFE_PMD_ERR("Failed to find interface = %d", if_cnt); 895 goto err; 896 } 897 898 pdata->ls1012a_eth_pdata[port].gem_id = port; 899 900 mac_addr = of_get_mac_address(gem); 901 902 if (mac_addr) { 903 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr, 904 ETH_ALEN); 905 } 906 907 addr = of_get_property(gem, "fsl,mdio-mux-val", &size); 908 if (!addr) { 909 PFE_PMD_ERR("Invalid mdio-mux-val...."); 910 } else { 911 phy_id = rte_be_to_cpu_32((unsigned int)*addr); 912 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id; 913 } 914 if (pdata->ls1012a_eth_pdata[port].phy_id < 32) 915 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] = 916 pdata->ls1012a_eth_pdata[port].mdio_muxval; 917 918 return 0; 919 920 err: 921 return -1; 922 } 923 924 /* Parse integer from integer argument */ 925 static int 926 parse_integer_arg(const char *key __rte_unused, 927 const char *value, void *extra_args) 928 { 929 int i; 930 char *end; 931 errno = 0; 932 933 i = strtol(value, &end, 10); 934 if (*end != 0 || errno != 0 || i < 0 || i > 1) { 935 PFE_PMD_ERR("Supported Port IDS are 0 and 1"); 936 return -EINVAL; 937 } 938 939 *((uint32_t *)extra_args) = i; 940 941 return 0; 942 } 943 944 static int 945 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params, 946 struct rte_vdev_device *dev) 947 { 948 struct rte_kvargs *kvlist = NULL; 949 int ret = 0; 950 951 static const char * const pfe_vdev_valid_params[] = { 952 PFE_VDEV_GEM_ID_ARG, 953 NULL 954 }; 955 956 const char *input_args = rte_vdev_device_args(dev); 957 958 if (!input_args) 959 return -1; 960 961 kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params); 962 if (kvlist == NULL) 963 return -1; 964 965 ret = rte_kvargs_process(kvlist, 966 PFE_VDEV_GEM_ID_ARG, 967 &parse_integer_arg, 968 ¶ms->gem_id); 969 rte_kvargs_free(kvlist); 970 return ret; 971 } 972 973 static int 974 pmd_pfe_probe(struct rte_vdev_device *vdev) 975 { 976 const u32 *prop; 977 const struct device_node *np; 978 const char *name; 979 const uint32_t *addr; 980 uint64_t cbus_addr, ddr_size, cbus_size; 981 int rc = -1, fd = -1, gem_id; 982 unsigned int ii, interface_count = 0; 983 size_t size = 0; 984 struct pfe_vdev_init_params init_params = { 985 .gem_id = -1 986 }; 987 988 name = rte_vdev_device_name(vdev); 989 rc = pfe_parse_vdev_init_params(&init_params, vdev); 990 if (rc < 0) 991 return -EINVAL; 992 993 RTE_LOG(INFO, PMD, "Initializing pmd_pfe for %s Given gem-id %d\n", 994 name, init_params.gem_id); 995 996 if (g_pfe) { 997 if (g_pfe->nb_devs >= g_pfe->max_intf) { 998 PFE_PMD_ERR("PFE %d dev already created Max is %d", 999 g_pfe->nb_devs, g_pfe->max_intf); 1000 return -EINVAL; 1001 } 1002 goto eth_init; 1003 } 1004 1005 g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE); 1006 if (g_pfe == NULL) 1007 return -EINVAL; 1008 1009 /* Load the device-tree driver */ 1010 rc = of_init(); 1011 if (rc) { 1012 PFE_PMD_ERR("of_init failed with ret: %d", rc); 1013 goto err; 1014 } 1015 1016 np = of_find_compatible_node(NULL, NULL, "fsl,pfe"); 1017 if (!np) { 1018 PFE_PMD_ERR("Invalid device node"); 1019 rc = -EINVAL; 1020 goto err; 1021 } 1022 1023 addr = of_get_address(np, 0, &cbus_size, NULL); 1024 if (!addr) { 1025 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 1026 goto err; 1027 } 1028 cbus_addr = of_translate_address(np, addr); 1029 if (!cbus_addr) { 1030 PFE_PMD_ERR("of_translate_address failed\n"); 1031 goto err; 1032 } 1033 1034 addr = of_get_address(np, 1, &ddr_size, NULL); 1035 if (!addr) { 1036 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 1037 goto err; 1038 } 1039 1040 g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr); 1041 if (!g_pfe->ddr_phys_baseaddr) { 1042 PFE_PMD_ERR("of_translate_address failed\n"); 1043 goto err; 1044 } 1045 1046 g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr); 1047 g_pfe->ddr_size = ddr_size; 1048 g_pfe->cbus_size = cbus_size; 1049 1050 fd = open("/dev/mem", O_RDWR); 1051 g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE, 1052 MAP_SHARED, fd, cbus_addr); 1053 close(fd); 1054 if (g_pfe->cbus_baseaddr == MAP_FAILED) { 1055 PFE_PMD_ERR("Can not map cbus base"); 1056 rc = -EINVAL; 1057 goto err; 1058 } 1059 1060 /* Read interface count */ 1061 prop = of_get_property(np, "fsl,pfe-num-interfaces", &size); 1062 if (!prop) { 1063 PFE_PMD_ERR("Failed to read number of interfaces"); 1064 rc = -ENXIO; 1065 goto err_prop; 1066 } 1067 1068 interface_count = rte_be_to_cpu_32((unsigned int)*prop); 1069 if (interface_count <= 0) { 1070 PFE_PMD_ERR("No ethernet interface count : %d", 1071 interface_count); 1072 rc = -ENXIO; 1073 goto err_prop; 1074 } 1075 PFE_PMD_INFO("num interfaces = %d ", interface_count); 1076 1077 g_pfe->max_intf = interface_count; 1078 g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff; 1079 1080 for (ii = 0; ii < interface_count; ii++) { 1081 pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count, 1082 &g_pfe->platform_data); 1083 } 1084 1085 pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr, 1086 g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size); 1087 1088 PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION)); 1089 PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION)); 1090 1091 PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION)); 1092 PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION)); 1093 1094 PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION)); 1095 PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION)); 1096 PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION)); 1097 1098 PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION)); 1099 PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION)); 1100 1101 cbus_emac_base[0] = EMAC1_BASE_ADDR; 1102 cbus_emac_base[1] = EMAC2_BASE_ADDR; 1103 1104 cbus_gpi_base[0] = EGPI1_BASE_ADDR; 1105 cbus_gpi_base[1] = EGPI2_BASE_ADDR; 1106 1107 rc = pfe_hif_lib_init(g_pfe); 1108 if (rc < 0) 1109 goto err_hif_lib; 1110 1111 rc = pfe_hif_init(g_pfe); 1112 if (rc < 0) 1113 goto err_hif; 1114 pfe_soc_version_get(); 1115 eth_init: 1116 if (init_params.gem_id < 0) 1117 gem_id = g_pfe->nb_devs; 1118 else 1119 gem_id = init_params.gem_id; 1120 1121 RTE_LOG(INFO, PMD, "Init pmd_pfe for %s gem-id %d(given =%d)\n", 1122 name, gem_id, init_params.gem_id); 1123 1124 rc = pfe_eth_init(vdev, g_pfe, gem_id); 1125 if (rc < 0) 1126 goto err_eth; 1127 else 1128 g_pfe->nb_devs++; 1129 1130 return 0; 1131 1132 err_eth: 1133 pfe_hif_exit(g_pfe); 1134 1135 err_hif: 1136 pfe_hif_lib_exit(g_pfe); 1137 1138 err_hif_lib: 1139 err_prop: 1140 munmap(g_pfe->cbus_baseaddr, cbus_size); 1141 err: 1142 rte_free(g_pfe); 1143 return rc; 1144 } 1145 1146 static int 1147 pmd_pfe_remove(struct rte_vdev_device *vdev) 1148 { 1149 const char *name; 1150 struct rte_eth_dev *eth_dev = NULL; 1151 1152 name = rte_vdev_device_name(vdev); 1153 if (name == NULL) 1154 return -EINVAL; 1155 1156 PFE_PMD_INFO("Closing eventdev sw device %s", name); 1157 1158 if (!g_pfe) 1159 return 0; 1160 1161 eth_dev = rte_eth_dev_allocated(name); 1162 if (eth_dev == NULL) 1163 return -ENODEV; 1164 1165 pfe_eth_exit(eth_dev, g_pfe); 1166 munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size); 1167 1168 if (g_pfe->nb_devs == 0) { 1169 pfe_hif_exit(g_pfe); 1170 pfe_hif_lib_exit(g_pfe); 1171 rte_free(g_pfe); 1172 g_pfe = NULL; 1173 } 1174 return 0; 1175 } 1176 1177 static 1178 struct rte_vdev_driver pmd_pfe_drv = { 1179 .probe = pmd_pfe_probe, 1180 .remove = pmd_pfe_remove, 1181 }; 1182 1183 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv); 1184 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> "); 1185 1186 RTE_INIT(pfe_pmd_init_log) 1187 { 1188 pfe_logtype_pmd = rte_log_register("pmd.net.pfe"); 1189 if (pfe_logtype_pmd >= 0) 1190 rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE); 1191 } 1192