1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018-2019 NXP 3 */ 4 5 #include <sys/ioctl.h> 6 #include <sys/epoll.h> 7 #include <rte_kvargs.h> 8 #include <ethdev_vdev.h> 9 #include <bus_vdev_driver.h> 10 #include <rte_ether.h> 11 #include <dpaa_of.h> 12 13 #include "pfe_logs.h" 14 #include "pfe_mod.h" 15 16 #define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */ 17 #define PFE_VDEV_GEM_ID_ARG "intf" 18 19 struct pfe_vdev_init_params { 20 int8_t gem_id; 21 }; 22 static struct pfe *g_pfe; 23 /* Supported Rx offloads */ 24 static uint64_t dev_rx_offloads_sup = 25 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 26 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 27 RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 28 29 /* Supported Tx offloads */ 30 static uint64_t dev_tx_offloads_sup = 31 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 32 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 33 RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 34 35 /* TODO: make pfe_svr a runtime option. 36 * Driver should be able to get the SVR 37 * information from HW. 38 */ 39 unsigned int pfe_svr = SVR_LS1012A_REV1; 40 static void *cbus_emac_base[3]; 41 static void *cbus_gpi_base[3]; 42 43 /* pfe_gemac_init 44 */ 45 static int 46 pfe_gemac_init(struct pfe_eth_priv_s *priv) 47 { 48 struct gemac_cfg cfg; 49 50 cfg.speed = SPEED_1000M; 51 cfg.duplex = DUPLEX_FULL; 52 53 gemac_set_config(priv->EMAC_baseaddr, &cfg); 54 gemac_allow_broadcast(priv->EMAC_baseaddr); 55 gemac_enable_1536_rx(priv->EMAC_baseaddr); 56 gemac_enable_stacked_vlan(priv->EMAC_baseaddr); 57 gemac_enable_pause_rx(priv->EMAC_baseaddr); 58 gemac_set_bus_width(priv->EMAC_baseaddr, 64); 59 gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); 60 61 return 0; 62 } 63 64 static void 65 pfe_soc_version_get(void) 66 { 67 FILE *svr_file = NULL; 68 unsigned int svr_ver = 0; 69 70 PMD_INIT_FUNC_TRACE(); 71 72 svr_file = fopen(PFE_SOC_ID_FILE, "r"); 73 if (!svr_file) { 74 PFE_PMD_ERR("Unable to open SoC device"); 75 return; /* Not supported on this infra */ 76 } 77 78 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 79 pfe_svr = svr_ver; 80 else 81 PFE_PMD_ERR("Unable to read SoC device"); 82 83 fclose(svr_file); 84 } 85 86 static int pfe_eth_start(struct pfe_eth_priv_s *priv) 87 { 88 gpi_enable(priv->GPI_baseaddr); 89 gemac_enable(priv->EMAC_baseaddr); 90 91 return 0; 92 } 93 94 static void 95 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int 96 __rte_unused from_tx, __rte_unused int n_desc) 97 { 98 struct rte_mbuf *mbuf; 99 unsigned int flags; 100 101 /* Clean HIF and client queue */ 102 while ((mbuf = hif_lib_tx_get_next_complete(&priv->client, 103 tx_q_num, &flags, 104 HIF_TX_DESC_NT))) { 105 if (mbuf) { 106 mbuf->next = NULL; 107 mbuf->nb_segs = 1; 108 rte_pktmbuf_free(mbuf); 109 } 110 } 111 } 112 113 114 static void 115 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv) 116 { 117 unsigned int ii; 118 119 for (ii = 0; ii < emac_txq_cnt; ii++) 120 pfe_eth_flush_txQ(priv, ii, 0, 0); 121 } 122 123 static int 124 pfe_eth_event_handler(void *data, int event, __rte_unused int qno) 125 { 126 struct pfe_eth_priv_s *priv = data; 127 128 switch (event) { 129 case EVENT_TXDONE_IND: 130 pfe_eth_flush_tx(priv); 131 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0); 132 break; 133 case EVENT_HIGH_RX_WM: 134 default: 135 break; 136 } 137 138 return 0; 139 } 140 141 static uint16_t 142 pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 143 { 144 struct hif_client_rx_queue *queue = rxq; 145 struct pfe_eth_priv_s *priv = queue->priv; 146 struct epoll_event epoll_ev; 147 uint64_t ticks = 1; /* 1 msec */ 148 int ret; 149 int have_something, work_done; 150 151 #define RESET_STATUS (HIF_INT | HIF_RXPKT_INT) 152 153 /*TODO can we remove this cleanup from here?*/ 154 pfe_tx_do_cleanup(priv->pfe); 155 have_something = pfe_hif_rx_process(priv->pfe, nb_pkts); 156 work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool, 157 rx_pkts, nb_pkts); 158 159 if (!have_something || !work_done) { 160 writel(RESET_STATUS, HIF_INT_SRC); 161 writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE); 162 ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks); 163 if (ret < 0 && errno != EINTR) 164 PFE_PMD_ERR("epoll_wait fails with %d\n", errno); 165 } 166 167 return work_done; 168 } 169 170 static uint16_t 171 pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 172 { 173 struct hif_client_rx_queue *queue = rxq; 174 struct pfe_eth_priv_s *priv = queue->priv; 175 struct rte_mempool *pool; 176 177 /*TODO can we remove this cleanup from here?*/ 178 pfe_tx_do_cleanup(priv->pfe); 179 pfe_hif_rx_process(priv->pfe, nb_pkts); 180 pool = priv->pfe->hif.shm->pool; 181 182 return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts); 183 } 184 185 static uint16_t 186 pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 187 { 188 struct hif_client_tx_queue *queue = tx_queue; 189 struct pfe_eth_priv_s *priv = queue->priv; 190 struct rte_eth_stats *stats = &priv->stats; 191 int i; 192 193 for (i = 0; i < nb_pkts; i++) { 194 if (tx_pkts[i]->nb_segs > 1) { 195 struct rte_mbuf *mbuf; 196 int j; 197 198 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 199 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 200 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 201 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER, 202 tx_pkts[i]); 203 204 mbuf = tx_pkts[i]->next; 205 for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) { 206 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 207 (void *)(size_t)rte_pktmbuf_iova(mbuf), 208 mbuf->buf_addr + mbuf->data_off, 209 mbuf->data_len, 210 0x0, 0x0, mbuf); 211 mbuf = mbuf->next; 212 } 213 214 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 215 (void *)(size_t)rte_pktmbuf_iova(mbuf), 216 mbuf->buf_addr + mbuf->data_off, 217 mbuf->data_len, 218 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID, 219 mbuf); 220 } else { 221 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 222 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 223 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 224 tx_pkts[i]->pkt_len, 0 /*ctrl*/, 225 HIF_FIRST_BUFFER | HIF_LAST_BUFFER | 226 HIF_DATA_VALID, 227 tx_pkts[i]); 228 } 229 stats->obytes += tx_pkts[i]->pkt_len; 230 hif_tx_dma_start(); 231 } 232 stats->opackets += nb_pkts; 233 pfe_tx_do_cleanup(priv->pfe); 234 235 return nb_pkts; 236 } 237 238 static int 239 pfe_eth_open(struct rte_eth_dev *dev) 240 { 241 struct pfe_eth_priv_s *priv = dev->data->dev_private; 242 struct hif_client_s *client; 243 struct hif_shm *hif_shm; 244 uint16_t i; 245 int rc; 246 247 /* Register client driver with HIF */ 248 client = &priv->client; 249 250 if (client->pfe) { 251 hif_shm = client->pfe->hif.shm; 252 /* TODO please remove the below code of if block, once we add 253 * the proper cleanup in eth_close 254 */ 255 if (!test_bit(PFE_CL_GEM0 + priv->id, 256 &hif_shm->g_client_status[0])) { 257 /* Register client driver with HIF */ 258 memset(client, 0, sizeof(*client)); 259 client->id = PFE_CL_GEM0 + priv->id; 260 client->tx_qn = emac_txq_cnt; 261 client->rx_qn = EMAC_RXQ_CNT; 262 client->priv = priv; 263 client->pfe = priv->pfe; 264 client->port_id = dev->data->port_id; 265 client->event_handler = pfe_eth_event_handler; 266 267 client->tx_qsize = EMAC_TXQ_DEPTH; 268 client->rx_qsize = EMAC_RXQ_DEPTH; 269 270 rc = hif_lib_client_register(client); 271 if (rc) { 272 PFE_PMD_ERR("hif_lib_client_register(%d)" 273 " failed", client->id); 274 goto err0; 275 } 276 } else { 277 /* Freeing the packets if already exists */ 278 int ret = 0; 279 struct rte_mbuf *rx_pkts[32]; 280 /* TODO multiqueue support */ 281 ret = hif_lib_receive_pkt(&client->rx_q[0], 282 hif_shm->pool, rx_pkts, 32); 283 while (ret) { 284 int i; 285 for (i = 0; i < ret; i++) 286 rte_pktmbuf_free(rx_pkts[i]); 287 ret = hif_lib_receive_pkt(&client->rx_q[0], 288 hif_shm->pool, 289 rx_pkts, 32); 290 } 291 } 292 } else { 293 /* Register client driver with HIF */ 294 memset(client, 0, sizeof(*client)); 295 client->id = PFE_CL_GEM0 + priv->id; 296 client->tx_qn = emac_txq_cnt; 297 client->rx_qn = EMAC_RXQ_CNT; 298 client->priv = priv; 299 client->pfe = priv->pfe; 300 client->port_id = dev->data->port_id; 301 client->event_handler = pfe_eth_event_handler; 302 303 client->tx_qsize = EMAC_TXQ_DEPTH; 304 client->rx_qsize = EMAC_RXQ_DEPTH; 305 306 rc = hif_lib_client_register(client); 307 if (rc) { 308 PFE_PMD_ERR("hif_lib_client_register(%d) failed", 309 client->id); 310 goto err0; 311 } 312 } 313 rc = pfe_eth_start(priv); 314 dev->rx_pkt_burst = &pfe_recv_pkts; 315 dev->tx_pkt_burst = &pfe_xmit_pkts; 316 /* If no prefetch is configured. */ 317 if (getenv("PFE_INTR_SUPPORT")) { 318 dev->rx_pkt_burst = &pfe_recv_pkts_on_intr; 319 PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); 320 } 321 322 for (i = 0; i < dev->data->nb_rx_queues; i++) 323 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 324 for (i = 0; i < dev->data->nb_tx_queues; i++) 325 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 326 327 err0: 328 return rc; 329 } 330 331 static int 332 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv) 333 { 334 int pfe_cdev_fd; 335 336 if (priv == NULL) 337 return -1; 338 339 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY); 340 if (pfe_cdev_fd < 0) { 341 PFE_PMD_WARN("Unable to open PFE device file (%s).\n", 342 PFE_CDEV_PATH); 343 PFE_PMD_WARN("Link status update will not be available.\n"); 344 priv->link_fd = PFE_CDEV_INVALID_FD; 345 return -1; 346 } 347 348 priv->link_fd = pfe_cdev_fd; 349 350 return 0; 351 } 352 353 static void 354 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv) 355 { 356 if (priv == NULL) 357 return; 358 359 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 360 close(priv->link_fd); 361 priv->link_fd = PFE_CDEV_INVALID_FD; 362 } 363 } 364 365 static int 366 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) 367 { 368 struct pfe_eth_priv_s *priv = dev->data->dev_private; 369 uint16_t i; 370 371 dev->data->dev_started = 0; 372 373 gemac_disable(priv->EMAC_baseaddr); 374 gpi_disable(priv->GPI_baseaddr); 375 376 dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 377 dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 378 379 for (i = 0; i < dev->data->nb_rx_queues; i++) 380 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 381 for (i = 0; i < dev->data->nb_tx_queues; i++) 382 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 383 384 return 0; 385 } 386 387 static int 388 pfe_eth_close(struct rte_eth_dev *dev) 389 { 390 int ret; 391 PMD_INIT_FUNC_TRACE(); 392 393 if (!dev) 394 return -1; 395 396 if (!g_pfe) 397 return -1; 398 399 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 400 return 0; 401 402 ret = pfe_eth_stop(dev); 403 /* Close the device file for link status */ 404 pfe_eth_close_cdev(dev->data->dev_private); 405 406 munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size); 407 g_pfe->nb_devs--; 408 409 if (g_pfe->nb_devs == 0) { 410 pfe_hif_exit(g_pfe); 411 pfe_hif_lib_exit(g_pfe); 412 rte_free(g_pfe); 413 g_pfe = NULL; 414 } 415 416 return ret; 417 } 418 419 static int 420 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused) 421 { 422 return 0; 423 } 424 425 static int 426 pfe_eth_info(struct rte_eth_dev *dev, 427 struct rte_eth_dev_info *dev_info) 428 { 429 dev_info->max_mac_addrs = PFE_MAX_MACS; 430 dev_info->max_rx_queues = dev->data->nb_rx_queues; 431 dev_info->max_tx_queues = dev->data->nb_tx_queues; 432 dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE; 433 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 434 dev_info->rx_offload_capa = dev_rx_offloads_sup; 435 dev_info->tx_offload_capa = dev_tx_offloads_sup; 436 if (pfe_svr == SVR_LS1012A_REV1) { 437 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD; 438 dev_info->max_mtu = MAX_MTU_ON_REV1; 439 } else { 440 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE; 441 dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD; 442 } 443 444 return 0; 445 } 446 447 /* Only first mb_pool given on first call of this API will be used 448 * in whole system, also nb_rx_desc and rx_conf are unused params 449 */ 450 static int 451 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 452 __rte_unused uint16_t nb_rx_desc, 453 __rte_unused unsigned int socket_id, 454 __rte_unused const struct rte_eth_rxconf *rx_conf, 455 struct rte_mempool *mb_pool) 456 { 457 int rc = 0; 458 struct pfe *pfe; 459 struct pfe_eth_priv_s *priv = dev->data->dev_private; 460 461 pfe = priv->pfe; 462 463 if (queue_idx >= EMAC_RXQ_CNT) { 464 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 465 queue_idx, EMAC_RXQ_CNT); 466 return -1; 467 } 468 469 if (!pfe->hif.setuped) { 470 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool); 471 if (rc) { 472 PFE_PMD_ERR("Could not allocate buffer descriptors"); 473 return -1; 474 } 475 476 pfe->hif.shm->pool = mb_pool; 477 if (pfe_hif_init_buffers(&pfe->hif)) { 478 PFE_PMD_ERR("Could not initialize buffer descriptors"); 479 return -1; 480 } 481 hif_init(); 482 hif_rx_enable(); 483 hif_tx_enable(); 484 pfe->hif.setuped = 1; 485 } 486 dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx]; 487 priv->client.rx_q[queue_idx].queue_id = queue_idx; 488 489 return 0; 490 } 491 492 static int 493 pfe_tx_queue_setup(struct rte_eth_dev *dev, 494 uint16_t queue_idx, 495 __rte_unused uint16_t nb_desc, 496 __rte_unused unsigned int socket_id, 497 __rte_unused const struct rte_eth_txconf *tx_conf) 498 { 499 struct pfe_eth_priv_s *priv = dev->data->dev_private; 500 501 if (queue_idx >= emac_txq_cnt) { 502 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 503 queue_idx, emac_txq_cnt); 504 return -1; 505 } 506 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx]; 507 priv->client.tx_q[queue_idx].queue_id = queue_idx; 508 return 0; 509 } 510 511 static const uint32_t * 512 pfe_supported_ptypes_get(struct rte_eth_dev *dev) 513 { 514 static const uint32_t ptypes[] = { 515 /*todo -= add more types */ 516 RTE_PTYPE_L2_ETHER, 517 RTE_PTYPE_L3_IPV4, 518 RTE_PTYPE_L3_IPV4_EXT, 519 RTE_PTYPE_L3_IPV6, 520 RTE_PTYPE_L3_IPV6_EXT, 521 RTE_PTYPE_L4_TCP, 522 RTE_PTYPE_L4_UDP, 523 RTE_PTYPE_L4_SCTP 524 }; 525 526 if (dev->rx_pkt_burst == pfe_recv_pkts || 527 dev->rx_pkt_burst == pfe_recv_pkts_on_intr) 528 return ptypes; 529 return NULL; 530 } 531 532 static inline int 533 pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev, 534 struct rte_eth_link *link) 535 { 536 struct rte_eth_link *dst = link; 537 struct rte_eth_link *src = &dev->data->dev_link; 538 539 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 540 *(uint64_t *)src) == 0) 541 return -1; 542 543 return 0; 544 } 545 546 static inline int 547 pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev, 548 struct rte_eth_link *link) 549 { 550 struct rte_eth_link *dst = &dev->data->dev_link; 551 struct rte_eth_link *src = link; 552 553 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 554 *(uint64_t *)src) == 0) 555 return -1; 556 557 return 0; 558 } 559 560 static int 561 pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 562 { 563 int ret, ioctl_cmd = 0; 564 struct pfe_eth_priv_s *priv = dev->data->dev_private; 565 struct rte_eth_link link, old; 566 unsigned int lstatus = 1; 567 568 memset(&old, 0, sizeof(old)); 569 memset(&link, 0, sizeof(struct rte_eth_link)); 570 571 pfe_eth_atomic_read_link_status(dev, &old); 572 573 /* Read from PFE CDEV, status of link, if file was successfully 574 * opened. 575 */ 576 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 577 if (priv->id == 0) 578 ioctl_cmd = PFE_CDEV_ETH0_STATE_GET; 579 if (priv->id == 1) 580 ioctl_cmd = PFE_CDEV_ETH1_STATE_GET; 581 582 ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus); 583 if (ret != 0) { 584 PFE_PMD_ERR("Unable to fetch link status (ioctl)\n"); 585 return -1; 586 } 587 PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n", 588 lstatus, priv->id); 589 } 590 591 if (old.link_status == lstatus) { 592 /* no change in status */ 593 PFE_PMD_DEBUG("No change in link status; Not updating.\n"); 594 return -1; 595 } 596 597 link.link_status = lstatus; 598 link.link_speed = RTE_ETH_LINK_SPEED_1G; 599 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 600 link.link_autoneg = RTE_ETH_LINK_AUTONEG; 601 602 pfe_eth_atomic_write_link_status(dev, &link); 603 604 PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, 605 link.link_status ? "up" : "down"); 606 607 return 0; 608 } 609 610 static int 611 pfe_promiscuous_enable(struct rte_eth_dev *dev) 612 { 613 struct pfe_eth_priv_s *priv = dev->data->dev_private; 614 615 priv->promisc = 1; 616 dev->data->promiscuous = 1; 617 gemac_enable_copy_all(priv->EMAC_baseaddr); 618 619 return 0; 620 } 621 622 static int 623 pfe_promiscuous_disable(struct rte_eth_dev *dev) 624 { 625 struct pfe_eth_priv_s *priv = dev->data->dev_private; 626 627 priv->promisc = 0; 628 dev->data->promiscuous = 0; 629 gemac_disable_copy_all(priv->EMAC_baseaddr); 630 631 return 0; 632 } 633 634 static int 635 pfe_allmulticast_enable(struct rte_eth_dev *dev) 636 { 637 struct pfe_eth_priv_s *priv = dev->data->dev_private; 638 struct pfe_mac_addr hash_addr; /* hash register structure */ 639 640 /* Set the hash to rx all multicast frames */ 641 hash_addr.bottom = 0xFFFFFFFF; 642 hash_addr.top = 0xFFFFFFFF; 643 gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); 644 dev->data->all_multicast = 1; 645 646 return 0; 647 } 648 649 static int 650 pfe_link_down(struct rte_eth_dev *dev) 651 { 652 return pfe_eth_stop(dev); 653 } 654 655 static int 656 pfe_link_up(struct rte_eth_dev *dev) 657 { 658 struct pfe_eth_priv_s *priv = dev->data->dev_private; 659 660 pfe_eth_start(priv); 661 return 0; 662 } 663 664 static int 665 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 666 { 667 struct pfe_eth_priv_s *priv = dev->data->dev_private; 668 uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 669 670 /*TODO Support VLAN*/ 671 return gemac_set_rx(priv->EMAC_baseaddr, frame_size); 672 } 673 674 /* pfe_eth_enet_addr_byte_mac 675 */ 676 static int 677 pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr, 678 struct pfe_mac_addr *enet_addr) 679 { 680 if (!enet_byte_addr || !enet_addr) { 681 return -1; 682 683 } else { 684 enet_addr->bottom = enet_byte_addr[0] | 685 (enet_byte_addr[1] << 8) | 686 (enet_byte_addr[2] << 16) | 687 (enet_byte_addr[3] << 24); 688 enet_addr->top = enet_byte_addr[4] | 689 (enet_byte_addr[5] << 8); 690 return 0; 691 } 692 } 693 694 static int 695 pfe_dev_set_mac_addr(struct rte_eth_dev *dev, 696 struct rte_ether_addr *addr) 697 { 698 struct pfe_eth_priv_s *priv = dev->data->dev_private; 699 struct pfe_mac_addr spec_addr; 700 int ret; 701 702 ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr); 703 if (ret) 704 return ret; 705 706 gemac_set_laddrN(priv->EMAC_baseaddr, 707 (struct pfe_mac_addr *)&spec_addr, 1); 708 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 709 return 0; 710 } 711 712 static int 713 pfe_stats_get(struct rte_eth_dev *dev, 714 struct rte_eth_stats *stats) 715 { 716 struct pfe_eth_priv_s *priv = dev->data->dev_private; 717 struct rte_eth_stats *eth_stats = &priv->stats; 718 719 if (stats == NULL) 720 return -1; 721 722 memset(stats, 0, sizeof(struct rte_eth_stats)); 723 724 stats->ipackets = eth_stats->ipackets; 725 stats->ibytes = eth_stats->ibytes; 726 stats->opackets = eth_stats->opackets; 727 stats->obytes = eth_stats->obytes; 728 729 return 0; 730 } 731 732 static const struct eth_dev_ops ops = { 733 .dev_start = pfe_eth_open, 734 .dev_stop = pfe_eth_stop, 735 .dev_close = pfe_eth_close, 736 .dev_configure = pfe_eth_configure, 737 .dev_infos_get = pfe_eth_info, 738 .rx_queue_setup = pfe_rx_queue_setup, 739 .tx_queue_setup = pfe_tx_queue_setup, 740 .dev_supported_ptypes_get = pfe_supported_ptypes_get, 741 .link_update = pfe_eth_link_update, 742 .promiscuous_enable = pfe_promiscuous_enable, 743 .promiscuous_disable = pfe_promiscuous_disable, 744 .allmulticast_enable = pfe_allmulticast_enable, 745 .dev_set_link_down = pfe_link_down, 746 .dev_set_link_up = pfe_link_up, 747 .mtu_set = pfe_mtu_set, 748 .mac_addr_set = pfe_dev_set_mac_addr, 749 .stats_get = pfe_stats_get, 750 }; 751 752 static int 753 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) 754 { 755 struct rte_eth_dev *eth_dev = NULL; 756 struct pfe_eth_priv_s *priv = NULL; 757 struct ls1012a_eth_platform_data *einfo; 758 struct ls1012a_pfe_platform_data *pfe_info; 759 struct rte_ether_addr addr; 760 int err; 761 762 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); 763 if (eth_dev == NULL) 764 return -ENOMEM; 765 766 /* Extract platform data */ 767 pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data; 768 if (!pfe_info) { 769 PFE_PMD_ERR("pfe missing additional platform data"); 770 err = -ENODEV; 771 goto err0; 772 } 773 774 einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata; 775 776 /* einfo never be NULL, but no harm in having this check */ 777 if (!einfo) { 778 PFE_PMD_ERR("pfe missing additional gemacs platform data"); 779 err = -ENODEV; 780 goto err0; 781 } 782 783 priv = eth_dev->data->dev_private; 784 priv->ndev = eth_dev; 785 priv->id = einfo[id].gem_id; 786 priv->pfe = pfe; 787 788 pfe->eth.eth_priv[id] = priv; 789 790 /* Set the info in the priv to the current info */ 791 priv->einfo = &einfo[id]; 792 priv->EMAC_baseaddr = cbus_emac_base[id]; 793 priv->PHY_baseaddr = cbus_emac_base[id]; 794 priv->GPI_baseaddr = cbus_gpi_base[id]; 795 796 #define HIF_GEMAC_TMUQ_BASE 6 797 priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2); 798 priv->high_tmu_q = priv->low_tmu_q + 1; 799 800 rte_spinlock_init(&priv->lock); 801 802 /* Copy the station address into the dev structure, */ 803 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 804 ETHER_ADDR_LEN * PFE_MAX_MACS, 0); 805 if (eth_dev->data->mac_addrs == NULL) { 806 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses", 807 ETHER_ADDR_LEN * PFE_MAX_MACS); 808 err = -ENOMEM; 809 goto err0; 810 } 811 812 memcpy(addr.addr_bytes, priv->einfo->mac_addr, 813 ETH_ALEN); 814 815 pfe_dev_set_mac_addr(eth_dev, &addr); 816 rte_ether_addr_copy(&addr, ð_dev->data->mac_addrs[0]); 817 818 eth_dev->data->mtu = 1500; 819 eth_dev->dev_ops = &ops; 820 err = pfe_eth_stop(eth_dev); 821 if (err != 0) 822 goto err0; 823 pfe_gemac_init(priv); 824 825 eth_dev->data->nb_rx_queues = 1; 826 eth_dev->data->nb_tx_queues = 1; 827 828 /* For link status, open the PFE CDEV; Error from this function 829 * is silently ignored; In case of error, the link status will not 830 * be available. 831 */ 832 pfe_eth_open_cdev(priv); 833 rte_eth_dev_probing_finish(eth_dev); 834 835 return 0; 836 err0: 837 rte_eth_dev_release_port(eth_dev); 838 return err; 839 } 840 841 static int 842 pfe_get_gemac_if_proprties(struct pfe *pfe, 843 __rte_unused const struct device_node *parent, 844 unsigned int port, unsigned int if_cnt, 845 struct ls1012a_pfe_platform_data *pdata) 846 { 847 const struct device_node *gem = NULL; 848 size_t size; 849 unsigned int ii = 0, phy_id = 0; 850 const u32 *addr; 851 const void *mac_addr; 852 853 for (ii = 0; ii < if_cnt; ii++) { 854 gem = of_get_next_child(parent, gem); 855 if (!gem) 856 goto err; 857 addr = of_get_property(gem, "reg", &size); 858 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port)) 859 break; 860 } 861 862 if (ii >= if_cnt) { 863 PFE_PMD_ERR("Failed to find interface = %d", if_cnt); 864 goto err; 865 } 866 867 pdata->ls1012a_eth_pdata[port].gem_id = port; 868 869 mac_addr = of_get_mac_address(gem); 870 871 if (mac_addr) { 872 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr, 873 ETH_ALEN); 874 } 875 876 addr = of_get_property(gem, "fsl,mdio-mux-val", &size); 877 if (!addr) { 878 PFE_PMD_ERR("Invalid mdio-mux-val...."); 879 } else { 880 phy_id = rte_be_to_cpu_32((unsigned int)*addr); 881 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id; 882 } 883 if (pdata->ls1012a_eth_pdata[port].phy_id < 32) 884 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] = 885 pdata->ls1012a_eth_pdata[port].mdio_muxval; 886 887 return 0; 888 889 err: 890 return -1; 891 } 892 893 /* Parse integer from integer argument */ 894 static int 895 parse_integer_arg(const char *key __rte_unused, 896 const char *value, void *extra_args) 897 { 898 int i; 899 char *end; 900 errno = 0; 901 902 i = strtol(value, &end, 10); 903 if (*end != 0 || errno != 0 || i < 0 || i > 1) { 904 PFE_PMD_ERR("Supported Port IDS are 0 and 1"); 905 return -EINVAL; 906 } 907 908 *((uint32_t *)extra_args) = i; 909 910 return 0; 911 } 912 913 static int 914 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params, 915 struct rte_vdev_device *dev) 916 { 917 struct rte_kvargs *kvlist = NULL; 918 int ret = 0; 919 920 static const char * const pfe_vdev_valid_params[] = { 921 PFE_VDEV_GEM_ID_ARG, 922 NULL 923 }; 924 925 const char *input_args = rte_vdev_device_args(dev); 926 927 if (!input_args) 928 return -1; 929 930 kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params); 931 if (kvlist == NULL) 932 return -1; 933 934 ret = rte_kvargs_process(kvlist, 935 PFE_VDEV_GEM_ID_ARG, 936 &parse_integer_arg, 937 ¶ms->gem_id); 938 rte_kvargs_free(kvlist); 939 return ret; 940 } 941 942 static int 943 pmd_pfe_probe(struct rte_vdev_device *vdev) 944 { 945 const u32 *prop; 946 const struct device_node *np; 947 const char *name; 948 const uint32_t *addr; 949 uint64_t cbus_addr, ddr_size, cbus_size; 950 int rc = -1, fd = -1, gem_id; 951 unsigned int ii, interface_count = 0; 952 size_t size = 0; 953 struct pfe_vdev_init_params init_params = { 954 .gem_id = -1 955 }; 956 957 name = rte_vdev_device_name(vdev); 958 rc = pfe_parse_vdev_init_params(&init_params, vdev); 959 if (rc < 0) 960 return -EINVAL; 961 962 PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d", 963 name, init_params.gem_id); 964 965 if (g_pfe) { 966 if (g_pfe->nb_devs >= g_pfe->max_intf) { 967 PFE_PMD_ERR("PFE %d dev already created Max is %d", 968 g_pfe->nb_devs, g_pfe->max_intf); 969 return -EINVAL; 970 } 971 goto eth_init; 972 } 973 974 g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE); 975 if (g_pfe == NULL) 976 return -EINVAL; 977 978 /* Load the device-tree driver */ 979 rc = of_init(); 980 if (rc) { 981 PFE_PMD_ERR("of_init failed with ret: %d", rc); 982 goto err; 983 } 984 985 np = of_find_compatible_node(NULL, NULL, "fsl,pfe"); 986 if (!np) { 987 PFE_PMD_ERR("Invalid device node"); 988 rc = -EINVAL; 989 goto err; 990 } 991 992 addr = of_get_address(np, 0, &cbus_size, NULL); 993 if (!addr) { 994 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 995 goto err; 996 } 997 cbus_addr = of_translate_address(np, addr); 998 if (!cbus_addr) { 999 PFE_PMD_ERR("of_translate_address failed\n"); 1000 goto err; 1001 } 1002 1003 addr = of_get_address(np, 1, &ddr_size, NULL); 1004 if (!addr) { 1005 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 1006 goto err; 1007 } 1008 1009 g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr); 1010 if (!g_pfe->ddr_phys_baseaddr) { 1011 PFE_PMD_ERR("of_translate_address failed\n"); 1012 goto err; 1013 } 1014 1015 g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr); 1016 g_pfe->ddr_size = ddr_size; 1017 g_pfe->cbus_size = cbus_size; 1018 1019 fd = open("/dev/mem", O_RDWR); 1020 g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE, 1021 MAP_SHARED, fd, cbus_addr); 1022 close(fd); 1023 if (g_pfe->cbus_baseaddr == MAP_FAILED) { 1024 PFE_PMD_ERR("Can not map cbus base"); 1025 rc = -EINVAL; 1026 goto err; 1027 } 1028 1029 /* Read interface count */ 1030 prop = of_get_property(np, "fsl,pfe-num-interfaces", &size); 1031 if (!prop) { 1032 PFE_PMD_ERR("Failed to read number of interfaces"); 1033 rc = -ENXIO; 1034 goto err_prop; 1035 } 1036 1037 interface_count = rte_be_to_cpu_32((unsigned int)*prop); 1038 if (interface_count <= 0) { 1039 PFE_PMD_ERR("No ethernet interface count : %d", 1040 interface_count); 1041 rc = -ENXIO; 1042 goto err_prop; 1043 } 1044 PFE_PMD_INFO("num interfaces = %d ", interface_count); 1045 1046 g_pfe->max_intf = interface_count; 1047 g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff; 1048 1049 for (ii = 0; ii < interface_count; ii++) { 1050 pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count, 1051 &g_pfe->platform_data); 1052 } 1053 1054 pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr, 1055 g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size); 1056 1057 PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION)); 1058 PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION)); 1059 1060 PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION)); 1061 PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION)); 1062 1063 PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION)); 1064 PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION)); 1065 PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION)); 1066 1067 PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION)); 1068 PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION)); 1069 1070 cbus_emac_base[0] = EMAC1_BASE_ADDR; 1071 cbus_emac_base[1] = EMAC2_BASE_ADDR; 1072 1073 cbus_gpi_base[0] = EGPI1_BASE_ADDR; 1074 cbus_gpi_base[1] = EGPI2_BASE_ADDR; 1075 1076 rc = pfe_hif_lib_init(g_pfe); 1077 if (rc < 0) 1078 goto err_hif_lib; 1079 1080 rc = pfe_hif_init(g_pfe); 1081 if (rc < 0) 1082 goto err_hif; 1083 pfe_soc_version_get(); 1084 eth_init: 1085 if (init_params.gem_id < 0) 1086 gem_id = g_pfe->nb_devs; 1087 else 1088 gem_id = init_params.gem_id; 1089 1090 PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d(given =%d)", 1091 name, gem_id, init_params.gem_id); 1092 1093 rc = pfe_eth_init(vdev, g_pfe, gem_id); 1094 if (rc < 0) 1095 goto err_eth; 1096 else 1097 g_pfe->nb_devs++; 1098 1099 return 0; 1100 1101 err_eth: 1102 pfe_hif_exit(g_pfe); 1103 1104 err_hif: 1105 pfe_hif_lib_exit(g_pfe); 1106 1107 err_hif_lib: 1108 err_prop: 1109 munmap(g_pfe->cbus_baseaddr, cbus_size); 1110 err: 1111 rte_free(g_pfe); 1112 return rc; 1113 } 1114 1115 static int 1116 pmd_pfe_remove(struct rte_vdev_device *vdev) 1117 { 1118 const char *name; 1119 struct rte_eth_dev *eth_dev = NULL; 1120 int ret = 0; 1121 1122 name = rte_vdev_device_name(vdev); 1123 if (name == NULL) 1124 return -EINVAL; 1125 1126 PFE_PMD_INFO("Closing eventdev sw device %s", name); 1127 1128 if (!g_pfe) 1129 return 0; 1130 1131 eth_dev = rte_eth_dev_allocated(name); 1132 if (eth_dev) { 1133 pfe_eth_close(eth_dev); 1134 ret = rte_eth_dev_release_port(eth_dev); 1135 } 1136 1137 return ret; 1138 } 1139 1140 static 1141 struct rte_vdev_driver pmd_pfe_drv = { 1142 .probe = pmd_pfe_probe, 1143 .remove = pmd_pfe_remove, 1144 }; 1145 1146 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv); 1147 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> "); 1148 RTE_LOG_REGISTER_DEFAULT(pfe_logtype_pmd, NOTICE); 1149