/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Marvell International Ltd.
 * Copyright(c) 2017 Semihalf.
 * All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * we'll declare only one version.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "mrvl_ethdev.h"
#include "mrvl_qos.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x01
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

/* TCAM has 25 entries reserved for uc/mc filter entries */
#define MRVL_MAC_ADDRS_MAX 25
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT	(sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK	(~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)

/* Memory size (in bytes) for MUSDK dma buffers */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040

/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
			  DEV_RX_OFFLOAD_JUMBO_FRAME | \
			  DEV_RX_OFFLOAD_CRC_STRIP | \
			  DEV_RX_OFFLOAD_CHECKSUM)

/** Port Tx offload capabilities */
#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			  DEV_TX_OFFLOAD_UDP_CKSUM | \
			  DEV_TX_OFFLOAD_TCP_CKSUM)

static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	MRVL_MUSDK_BPOOLS_RESERVED,
	MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;

struct mrvl_ifnames {
	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int idx;
};

/*
 * To support buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 *
 * Before a packet is sent, its buffer information (pp2_buff_inf) is
 * stored in the shadow queue. Once the packet has been transmitted, the
 * no longer used packet buffer is released back to its original hardware
 * pool, provided it originated from an interface.
 * If the packet was generated by the application itself, i.e. the mbuf->port
 * field is 0xff, it is released to the software mempool.
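 *
 * Entries are written at 'head' when packets are sent and read from 'tail'
 * when buffers are released; the indices wrap around using
 * MRVL_PP2_TX_SHADOWQ_MASK. 'size' tracks the current occupancy while
 * 'num_to_release' counts buffers already confirmed sent by hardware that
 * are still waiting to be returned in a batch.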
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of buffers sent, that can be released */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};

struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
	int cksum_enabled;
	uint64_t bytes_recv;
	uint64_t drop_mac;
};

struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
	uint64_t bytes_sent;
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
	int tx_deferred_start;
};

static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;

static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
			struct pp2_hif *hif, unsigned int core_id,
			struct mrvl_shadow_txq *sq, int qid, int force);

#define MRVL_XSTATS_TBL_ENTRY(name) { \
	#name, offsetof(struct pp2_ppio_statistics, name), \
	sizeof(((struct pp2_ppio_statistics *)0)->name) \
}

/* Table with xstats data */
static struct {
	const char *name;
	unsigned int offset;
	unsigned int size;
} mrvl_xstats_tbl[] = {
	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
	MRVL_XSTATS_TBL_ENTRY(rx_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_errors),
	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
	MRVL_XSTATS_TBL_ENTRY(tx_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_errors)
};

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}

static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}

static int
mrvl_init_hif(int core_id)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int ret;

	ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
		return ret;
	}

	snprintf(match, sizeof(match), "hif-%d", ret);
	memset(&params, 0, sizeof(params));
	params.match = match;
	params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
	ret = pp2_hif_init(&params, &hifs[core_id]);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
		return ret;
	}

	return 0;
}

static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
{
	int ret;

	if (likely(hifs[core_id] != NULL))
		return hifs[core_id];

	rte_spinlock_lock(&priv->lock);

	ret = mrvl_init_hif(core_id);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
		goto out;
	}

	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
mrvl_lcore_last = core_id; 247 out: 248 rte_spinlock_unlock(&priv->lock); 249 250 return hifs[core_id]; 251 } 252 253 /** 254 * Configure rss based on dpdk rss configuration. 255 * 256 * @param priv 257 * Pointer to private structure. 258 * @param rss_conf 259 * Pointer to RSS configuration. 260 * 261 * @return 262 * 0 on success, negative error value otherwise. 263 */ 264 static int 265 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) 266 { 267 if (rss_conf->rss_key) 268 RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n"); 269 270 if (rss_conf->rss_hf == 0) { 271 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 272 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { 273 priv->ppio_params.inqs_params.hash_type = 274 PP2_PPIO_HASH_T_2_TUPLE; 275 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 276 priv->ppio_params.inqs_params.hash_type = 277 PP2_PPIO_HASH_T_5_TUPLE; 278 priv->rss_hf_tcp = 1; 279 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 280 priv->ppio_params.inqs_params.hash_type = 281 PP2_PPIO_HASH_T_5_TUPLE; 282 priv->rss_hf_tcp = 0; 283 } else { 284 return -EINVAL; 285 } 286 287 return 0; 288 } 289 290 /** 291 * Ethernet device configuration. 292 * 293 * Prepare the driver for a given number of TX and RX queues and 294 * configure RSS. 295 * 296 * @param dev 297 * Pointer to Ethernet device structure. 298 * 299 * @return 300 * 0 on success, negative error value otherwise. 301 */ 302 static int 303 mrvl_dev_configure(struct rte_eth_dev *dev) 304 { 305 struct mrvl_priv *priv = dev->data->dev_private; 306 int ret; 307 308 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && 309 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { 310 RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n", 311 dev->data->dev_conf.rxmode.mq_mode); 312 return -EINVAL; 313 } 314 315 if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { 316 RTE_LOG(INFO, PMD, 317 "L2 CRC stripping is always enabled in hw\n"); 318 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; 319 } 320 321 if (dev->data->dev_conf.rxmode.split_hdr_size) { 322 RTE_LOG(INFO, PMD, "Split headers not supported\n"); 323 return -EINVAL; 324 } 325 326 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 327 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - 328 ETHER_HDR_LEN - ETHER_CRC_LEN; 329 330 ret = mrvl_configure_rxqs(priv, dev->data->port_id, 331 dev->data->nb_rx_queues); 332 if (ret < 0) 333 return ret; 334 335 ret = mrvl_configure_txqs(priv, dev->data->port_id, 336 dev->data->nb_tx_queues); 337 if (ret < 0) 338 return ret; 339 340 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; 341 priv->ppio_params.maintain_stats = 1; 342 priv->nb_rx_queues = dev->data->nb_rx_queues; 343 344 if (dev->data->nb_rx_queues == 1 && 345 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 346 RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n"); 347 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 348 349 return 0; 350 } 351 352 return mrvl_configure_rss(priv, 353 &dev->data->dev_conf.rx_adv_conf.rss_conf); 354 } 355 356 /** 357 * DPDK callback to change the MTU. 358 * 359 * Setting the MTU affects hardware MRU (packets larger than the MRU 360 * will be dropped). 361 * 362 * @param dev 363 * Pointer to Ethernet device structure. 364 * @param mtu 365 * New MTU. 366 * 367 * @return 368 * 0 on success, negative error value otherwise. 
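 *
 * The MRU programmed into hardware is derived from the MTU as
 * mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN and must not exceed
 * MRVL_PKT_SIZE_MAX.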
369 */ 370 static int 371 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 372 { 373 struct mrvl_priv *priv = dev->data->dev_private; 374 /* extra MV_MH_SIZE bytes are required for Marvell tag */ 375 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; 376 int ret; 377 378 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) 379 return -EINVAL; 380 381 if (!priv->ppio) 382 return 0; 383 384 ret = pp2_ppio_set_mru(priv->ppio, mru); 385 if (ret) 386 return ret; 387 388 return pp2_ppio_set_mtu(priv->ppio, mtu); 389 } 390 391 /** 392 * DPDK callback to bring the link up. 393 * 394 * @param dev 395 * Pointer to Ethernet device structure. 396 * 397 * @return 398 * 0 on success, negative error value otherwise. 399 */ 400 static int 401 mrvl_dev_set_link_up(struct rte_eth_dev *dev) 402 { 403 struct mrvl_priv *priv = dev->data->dev_private; 404 int ret; 405 406 if (!priv->ppio) 407 return -EPERM; 408 409 ret = pp2_ppio_enable(priv->ppio); 410 if (ret) 411 return ret; 412 413 /* 414 * mtu/mru can be updated if pp2_ppio_enable() was called at least once 415 * as pp2_ppio_enable() changes port->t_mode from default 0 to 416 * PP2_TRAFFIC_INGRESS_EGRESS. 417 * 418 * Set mtu to default DPDK value here. 419 */ 420 ret = mrvl_mtu_set(dev, dev->data->mtu); 421 if (ret) 422 pp2_ppio_disable(priv->ppio); 423 424 return ret; 425 } 426 427 /** 428 * DPDK callback to bring the link down. 429 * 430 * @param dev 431 * Pointer to Ethernet device structure. 432 * 433 * @return 434 * 0 on success, negative error value otherwise. 435 */ 436 static int 437 mrvl_dev_set_link_down(struct rte_eth_dev *dev) 438 { 439 struct mrvl_priv *priv = dev->data->dev_private; 440 441 if (!priv->ppio) 442 return -EPERM; 443 444 return pp2_ppio_disable(priv->ppio); 445 } 446 447 /** 448 * DPDK callback to start tx queue. 449 * 450 * @param dev 451 * Pointer to Ethernet device structure. 452 * @param queue_id 453 * Transmit queue index. 454 * 455 * @return 456 * 0 on success, negative error value otherwise. 457 */ 458 static int 459 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) 460 { 461 struct mrvl_priv *priv = dev->data->dev_private; 462 int ret; 463 464 if (!priv) 465 return -EPERM; 466 467 /* passing 1 enables given tx queue */ 468 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); 469 if (ret) { 470 RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id); 471 return ret; 472 } 473 474 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 475 476 return 0; 477 } 478 479 /** 480 * DPDK callback to stop tx queue. 481 * 482 * @param dev 483 * Pointer to Ethernet device structure. 484 * @param queue_id 485 * Transmit queue index. 486 * 487 * @return 488 * 0 on success, negative error value otherwise. 489 */ 490 static int 491 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) 492 { 493 struct mrvl_priv *priv = dev->data->dev_private; 494 int ret; 495 496 if (!priv->ppio) 497 return -EPERM; 498 499 /* passing 0 disables given tx queue */ 500 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); 501 if (ret) { 502 RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id); 503 return ret; 504 } 505 506 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 507 508 return 0; 509 } 510 511 /** 512 * DPDK callback to start the device. 513 * 514 * @param dev 515 * Pointer to Ethernet device structure. 516 * 517 * @return 518 * 0 on success, negative errno value on failure. 
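 *
 * Besides initializing the ppio, this sizes the bpool refill thresholds:
 * bpool_min_size = nb_rx_queues * MRVL_BURST_SIZE * 2 and
 * bpool_max_size = nb_rx_queues * MRVL_PP2_RXD_MAX + bpool_min_size,
 * which the rx burst routine later uses to decide when to add or drop
 * buffers.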
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret = 0, i, def_init_size;

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;

	/*
	 * Calculate the minimum bpool size for the refill feature as follows:
	 * 2 default burst sizes multiplied by the number of rx queues.
	 * If the bpool size drops below this value, new buffers will
	 * be added to the pool.
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;

	/* In case the initial bpool size configured during queue setup is
	 * smaller than the minimum size, add more buffers.
	 */
	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
	if (priv->bpool_init_size < def_init_size) {
		int buffs_to_add = def_init_size - priv->bpool_init_size;

		priv->bpool_init_size += buffs_to_add;
		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
		if (ret)
			RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
	}

	/*
	 * Calculate the maximum bpool size for the refill feature as follows:
	 * the maximum number of descriptors in an rx queue multiplied by the
	 * number of rx queues, plus the minimum bpool size.
	 * In case the bpool size exceeds this value, superfluous buffers
	 * will be removed.
	 */
	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
			       priv->bpool_min_size;

	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to init ppio\n");
		return ret;
	}

	/*
	 * In case there are some stale uc/mc mac addresses flush them
	 * here. It cannot be done during mrvl_dev_close() as port information
	 * is already gone at that point (due to pp2_ppio_deinit() in
	 * mrvl_dev_stop()).
	 */
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
		if (ret) {
			RTE_LOG(ERR, PMD,
				"Failed to flush uc/mc filter list\n");
			goto out;
		}
		priv->uc_mc_flushed = 1;
	}

	if (!priv->vlan_flushed) {
		ret = pp2_ppio_flush_vlan(priv->ppio);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
			/*
			 * TODO
			 * once pp2_ppio_flush_vlan() is supported jump to out
			 * goto out;
			 */
		}
		priv->vlan_flushed = 1;
	}

	/* For default QoS config, don't start classifier. */
	if (mrvl_qos_cfg) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
			goto out;
		}
	}

	ret = mrvl_dev_set_link_up(dev);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to set link up\n");
		goto out;
	}

	/* start tx queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

		if (!txq->tx_deferred_start)
			continue;

		/*
		 * All txqs are started by default. Stop them
		 * so that tx_deferred_start works as expected.
		 */
		ret = mrvl_tx_queue_stop(dev, i);
		if (ret)
			goto out;
	}

	return 0;
out:
	RTE_LOG(ERR, PMD, "Failed to start device\n");
	pp2_ppio_deinit(priv->ppio);
	return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
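 *
 * Each inq is drained by calling pp2_ppio_recv() in a loop until it
 * returns no more descriptors.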
642 */ 643 static void 644 mrvl_flush_rx_queues(struct rte_eth_dev *dev) 645 { 646 int i; 647 648 RTE_LOG(INFO, PMD, "Flushing rx queues\n"); 649 for (i = 0; i < dev->data->nb_rx_queues; i++) { 650 int ret, num; 651 652 do { 653 struct mrvl_rxq *q = dev->data->rx_queues[i]; 654 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; 655 656 num = MRVL_PP2_RXD_MAX; 657 ret = pp2_ppio_recv(q->priv->ppio, 658 q->priv->rxq_map[q->queue_id].tc, 659 q->priv->rxq_map[q->queue_id].inq, 660 descs, (uint16_t *)&num); 661 } while (ret == 0 && num); 662 } 663 } 664 665 /** 666 * Flush transmit shadow queues. 667 * 668 * @param dev 669 * Pointer to Ethernet device structure. 670 */ 671 static void 672 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) 673 { 674 int i, j; 675 struct mrvl_txq *txq; 676 677 RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n"); 678 for (i = 0; i < dev->data->nb_tx_queues; i++) { 679 txq = (struct mrvl_txq *)dev->data->tx_queues[i]; 680 681 for (j = 0; j < RTE_MAX_LCORE; j++) { 682 struct mrvl_shadow_txq *sq; 683 684 if (!hifs[j]) 685 continue; 686 687 sq = &txq->shadow_txqs[j]; 688 mrvl_free_sent_buffers(txq->priv->ppio, 689 hifs[j], j, sq, txq->queue_id, 1); 690 while (sq->tail != sq->head) { 691 uint64_t addr = cookie_addr_high | 692 sq->ent[sq->tail].buff.cookie; 693 rte_pktmbuf_free( 694 (struct rte_mbuf *)addr); 695 sq->tail = (sq->tail + 1) & 696 MRVL_PP2_TX_SHADOWQ_MASK; 697 } 698 memset(sq, 0, sizeof(*sq)); 699 } 700 } 701 } 702 703 /** 704 * Flush hardware bpool (buffer-pool). 705 * 706 * @param dev 707 * Pointer to Ethernet device structure. 708 */ 709 static void 710 mrvl_flush_bpool(struct rte_eth_dev *dev) 711 { 712 struct mrvl_priv *priv = dev->data->dev_private; 713 struct pp2_hif *hif; 714 uint32_t num; 715 int ret; 716 unsigned int core_id = rte_lcore_id(); 717 718 if (core_id == LCORE_ID_ANY) 719 core_id = 0; 720 721 hif = mrvl_get_hif(priv, core_id); 722 723 ret = pp2_bpool_get_num_buffs(priv->bpool, &num); 724 if (ret) { 725 RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n"); 726 return; 727 } 728 729 while (num--) { 730 struct pp2_buff_inf inf; 731 uint64_t addr; 732 733 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); 734 if (ret) 735 break; 736 737 addr = cookie_addr_high | inf.cookie; 738 rte_pktmbuf_free((struct rte_mbuf *)addr); 739 } 740 } 741 742 /** 743 * DPDK callback to stop the device. 744 * 745 * @param dev 746 * Pointer to Ethernet device structure. 747 */ 748 static void 749 mrvl_dev_stop(struct rte_eth_dev *dev) 750 { 751 struct mrvl_priv *priv = dev->data->dev_private; 752 753 mrvl_dev_set_link_down(dev); 754 mrvl_flush_rx_queues(dev); 755 mrvl_flush_tx_shadow_queues(dev); 756 if (priv->cls_tbl) { 757 pp2_cls_tbl_deinit(priv->cls_tbl); 758 priv->cls_tbl = NULL; 759 } 760 if (priv->qos_tbl) { 761 pp2_cls_qos_tbl_deinit(priv->qos_tbl); 762 priv->qos_tbl = NULL; 763 } 764 if (priv->ppio) 765 pp2_ppio_deinit(priv->ppio); 766 priv->ppio = NULL; 767 768 /* policer must be released after ppio deinitialization */ 769 if (priv->policer) { 770 pp2_cls_plcr_deinit(priv->policer); 771 priv->policer = NULL; 772 } 773 } 774 775 /** 776 * DPDK callback to close the device. 777 * 778 * @param dev 779 * Pointer to Ethernet device structure. 
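 *
 * Frees the per-TC inq parameter arrays and returns any buffers still
 * sitting in the hardware bpool back to the mempool.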
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	mrvl_flush_bpool(dev);
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO
	 * once MUSDK provides necessary API use it here
	 */
	struct mrvl_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd, link_up;

	if (!priv->ppio)
		return -EPERM;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
							   ETH_LINK_FIXED;
	pp2_ppio_get_link_state(priv->ppio, &link_up);
	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio)
		return;

	if (priv->isolated)
		return;

	ret = pp2_ppio_set_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}

/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio)
		return;

	if (priv->isolated)
		return;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
921 */ 922 static void 923 mrvl_promiscuous_disable(struct rte_eth_dev *dev) 924 { 925 struct mrvl_priv *priv = dev->data->dev_private; 926 int ret; 927 928 if (!priv->ppio) 929 return; 930 931 ret = pp2_ppio_set_promisc(priv->ppio, 0); 932 if (ret) 933 RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n"); 934 } 935 936 /** 937 * DPDK callback to disable allmulticast mode. 938 * 939 * @param dev 940 * Pointer to Ethernet device structure. 941 */ 942 static void 943 mrvl_allmulticast_disable(struct rte_eth_dev *dev) 944 { 945 struct mrvl_priv *priv = dev->data->dev_private; 946 int ret; 947 948 if (!priv->ppio) 949 return; 950 951 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); 952 if (ret) 953 RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n"); 954 } 955 956 /** 957 * DPDK callback to remove a MAC address. 958 * 959 * @param dev 960 * Pointer to Ethernet device structure. 961 * @param index 962 * MAC address index. 963 */ 964 static void 965 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 966 { 967 struct mrvl_priv *priv = dev->data->dev_private; 968 char buf[ETHER_ADDR_FMT_SIZE]; 969 int ret; 970 971 if (!priv->ppio) 972 return; 973 974 if (priv->isolated) 975 return; 976 977 ret = pp2_ppio_remove_mac_addr(priv->ppio, 978 dev->data->mac_addrs[index].addr_bytes); 979 if (ret) { 980 ether_format_addr(buf, sizeof(buf), 981 &dev->data->mac_addrs[index]); 982 RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf); 983 } 984 } 985 986 /** 987 * DPDK callback to add a MAC address. 988 * 989 * @param dev 990 * Pointer to Ethernet device structure. 991 * @param mac_addr 992 * MAC address to register. 993 * @param index 994 * MAC address index. 995 * @param vmdq 996 * VMDq pool index to associate address with (unused). 997 * 998 * @return 999 * 0 on success, negative error value otherwise. 1000 */ 1001 static int 1002 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 1003 uint32_t index, uint32_t vmdq __rte_unused) 1004 { 1005 struct mrvl_priv *priv = dev->data->dev_private; 1006 char buf[ETHER_ADDR_FMT_SIZE]; 1007 int ret; 1008 1009 if (priv->isolated) 1010 return -ENOTSUP; 1011 1012 if (index == 0) 1013 /* For setting index 0, mrvl_mac_addr_set() should be used.*/ 1014 return -1; 1015 1016 if (!priv->ppio) 1017 return 0; 1018 1019 /* 1020 * Maximum number of uc addresses can be tuned via kernel module mvpp2x 1021 * parameter uc_filter_max. Maximum number of mc addresses is then 1022 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and 1023 * 21 respectively. 1024 * 1025 * If more than uc_filter_max uc addresses were added to filter list 1026 * then NIC will switch to promiscuous mode automatically. 1027 * 1028 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses 1029 * were added to filter list then NIC will switch to all-multicast mode 1030 * automatically. 1031 */ 1032 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); 1033 if (ret) { 1034 ether_format_addr(buf, sizeof(buf), mac_addr); 1035 RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf); 1036 return -1; 1037 } 1038 1039 return 0; 1040 } 1041 1042 /** 1043 * DPDK callback to set the primary MAC address. 1044 * 1045 * @param dev 1046 * Pointer to Ethernet device structure. 1047 * @param mac_addr 1048 * MAC address to register. 1049 * 1050 * @return 1051 * 0 on success, negative error value otherwise. 
1052 */ 1053 static int 1054 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 1055 { 1056 struct mrvl_priv *priv = dev->data->dev_private; 1057 int ret; 1058 1059 if (!priv->ppio) 1060 return 0; 1061 1062 if (priv->isolated) 1063 return -ENOTSUP; 1064 1065 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); 1066 if (ret) { 1067 char buf[ETHER_ADDR_FMT_SIZE]; 1068 ether_format_addr(buf, sizeof(buf), mac_addr); 1069 RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf); 1070 } 1071 1072 return ret; 1073 } 1074 1075 /** 1076 * DPDK callback to get device statistics. 1077 * 1078 * @param dev 1079 * Pointer to Ethernet device structure. 1080 * @param stats 1081 * Stats structure output buffer. 1082 * 1083 * @return 1084 * 0 on success, negative error value otherwise. 1085 */ 1086 static int 1087 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 1088 { 1089 struct mrvl_priv *priv = dev->data->dev_private; 1090 struct pp2_ppio_statistics ppio_stats; 1091 uint64_t drop_mac = 0; 1092 unsigned int i, idx, ret; 1093 1094 if (!priv->ppio) 1095 return -EPERM; 1096 1097 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1098 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1099 struct pp2_ppio_inq_statistics rx_stats; 1100 1101 if (!rxq) 1102 continue; 1103 1104 idx = rxq->queue_id; 1105 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1106 RTE_LOG(ERR, PMD, 1107 "rx queue %d stats out of range (0 - %d)\n", 1108 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1109 continue; 1110 } 1111 1112 ret = pp2_ppio_inq_get_statistics(priv->ppio, 1113 priv->rxq_map[idx].tc, 1114 priv->rxq_map[idx].inq, 1115 &rx_stats, 0); 1116 if (unlikely(ret)) { 1117 RTE_LOG(ERR, PMD, 1118 "Failed to update rx queue %d stats\n", idx); 1119 break; 1120 } 1121 1122 stats->q_ibytes[idx] = rxq->bytes_recv; 1123 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; 1124 stats->q_errors[idx] = rx_stats.drop_early + 1125 rx_stats.drop_fullq + 1126 rx_stats.drop_bm + 1127 rxq->drop_mac; 1128 stats->ibytes += rxq->bytes_recv; 1129 drop_mac += rxq->drop_mac; 1130 } 1131 1132 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1133 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1134 struct pp2_ppio_outq_statistics tx_stats; 1135 1136 if (!txq) 1137 continue; 1138 1139 idx = txq->queue_id; 1140 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1141 RTE_LOG(ERR, PMD, 1142 "tx queue %d stats out of range (0 - %d)\n", 1143 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1144 } 1145 1146 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, 1147 &tx_stats, 0); 1148 if (unlikely(ret)) { 1149 RTE_LOG(ERR, PMD, 1150 "Failed to update tx queue %d stats\n", idx); 1151 break; 1152 } 1153 1154 stats->q_opackets[idx] = tx_stats.deq_desc; 1155 stats->q_obytes[idx] = txq->bytes_sent; 1156 stats->obytes += txq->bytes_sent; 1157 } 1158 1159 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1160 if (unlikely(ret)) { 1161 RTE_LOG(ERR, PMD, "Failed to update port statistics\n"); 1162 return ret; 1163 } 1164 1165 stats->ipackets += ppio_stats.rx_packets - drop_mac; 1166 stats->opackets += ppio_stats.tx_packets; 1167 stats->imissed += ppio_stats.rx_fullq_dropped + 1168 ppio_stats.rx_bm_dropped + 1169 ppio_stats.rx_early_dropped + 1170 ppio_stats.rx_fifo_dropped + 1171 ppio_stats.rx_cls_dropped; 1172 stats->ierrors = drop_mac; 1173 1174 return 0; 1175 } 1176 1177 /** 1178 * DPDK callback to clear device statistics. 1179 * 1180 * @param dev 1181 * Pointer to Ethernet device structure. 
1182 */ 1183 static void 1184 mrvl_stats_reset(struct rte_eth_dev *dev) 1185 { 1186 struct mrvl_priv *priv = dev->data->dev_private; 1187 int i; 1188 1189 if (!priv->ppio) 1190 return; 1191 1192 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1193 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1194 1195 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, 1196 priv->rxq_map[i].inq, NULL, 1); 1197 rxq->bytes_recv = 0; 1198 rxq->drop_mac = 0; 1199 } 1200 1201 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1202 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1203 1204 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); 1205 txq->bytes_sent = 0; 1206 } 1207 1208 pp2_ppio_get_statistics(priv->ppio, NULL, 1); 1209 } 1210 1211 /** 1212 * DPDK callback to get extended statistics. 1213 * 1214 * @param dev 1215 * Pointer to Ethernet device structure. 1216 * @param stats 1217 * Pointer to xstats table. 1218 * @param n 1219 * Number of entries in xstats table. 1220 * @return 1221 * Negative value on error, number of read xstats otherwise. 1222 */ 1223 static int 1224 mrvl_xstats_get(struct rte_eth_dev *dev, 1225 struct rte_eth_xstat *stats, unsigned int n) 1226 { 1227 struct mrvl_priv *priv = dev->data->dev_private; 1228 struct pp2_ppio_statistics ppio_stats; 1229 unsigned int i; 1230 1231 if (!stats) 1232 return 0; 1233 1234 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1235 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { 1236 uint64_t val; 1237 1238 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) 1239 val = *(uint32_t *)((uint8_t *)&ppio_stats + 1240 mrvl_xstats_tbl[i].offset); 1241 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) 1242 val = *(uint64_t *)((uint8_t *)&ppio_stats + 1243 mrvl_xstats_tbl[i].offset); 1244 else 1245 return -EINVAL; 1246 1247 stats[i].id = i; 1248 stats[i].value = val; 1249 } 1250 1251 return n; 1252 } 1253 1254 /** 1255 * DPDK callback to reset extended statistics. 1256 * 1257 * @param dev 1258 * Pointer to Ethernet device structure. 1259 */ 1260 static void 1261 mrvl_xstats_reset(struct rte_eth_dev *dev) 1262 { 1263 mrvl_stats_reset(dev); 1264 } 1265 1266 /** 1267 * DPDK callback to get extended statistics names. 1268 * 1269 * @param dev (unused) 1270 * Pointer to Ethernet device structure. 1271 * @param xstats_names 1272 * Pointer to xstats names table. 1273 * @param size 1274 * Size of the xstats names table. 1275 * @return 1276 * Number of read names. 1277 */ 1278 static int 1279 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, 1280 struct rte_eth_xstat_name *xstats_names, 1281 unsigned int size) 1282 { 1283 unsigned int i; 1284 1285 if (!xstats_names) 1286 return RTE_DIM(mrvl_xstats_tbl); 1287 1288 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) 1289 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s", 1290 mrvl_xstats_tbl[i].name); 1291 1292 return size; 1293 } 1294 1295 /** 1296 * DPDK callback to get information about the device. 1297 * 1298 * @param dev 1299 * Pointer to Ethernet device structure (unused). 1300 * @param info 1301 * Info structure output buffer. 
1302 */ 1303 static void 1304 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, 1305 struct rte_eth_dev_info *info) 1306 { 1307 info->speed_capa = ETH_LINK_SPEED_10M | 1308 ETH_LINK_SPEED_100M | 1309 ETH_LINK_SPEED_1G | 1310 ETH_LINK_SPEED_10G; 1311 1312 info->max_rx_queues = MRVL_PP2_RXQ_MAX; 1313 info->max_tx_queues = MRVL_PP2_TXQ_MAX; 1314 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; 1315 1316 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; 1317 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; 1318 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; 1319 1320 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; 1321 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; 1322 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; 1323 1324 info->rx_offload_capa = MRVL_RX_OFFLOADS; 1325 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; 1326 1327 info->tx_offload_capa = MRVL_TX_OFFLOADS; 1328 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; 1329 1330 info->flow_type_rss_offloads = ETH_RSS_IPV4 | 1331 ETH_RSS_NONFRAG_IPV4_TCP | 1332 ETH_RSS_NONFRAG_IPV4_UDP; 1333 1334 /* By default packets are dropped if no descriptors are available */ 1335 info->default_rxconf.rx_drop_en = 1; 1336 info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; 1337 1338 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; 1339 } 1340 1341 /** 1342 * Return supported packet types. 1343 * 1344 * @param dev 1345 * Pointer to Ethernet device structure (unused). 1346 * 1347 * @return 1348 * Const pointer to the table with supported packet types. 1349 */ 1350 static const uint32_t * 1351 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) 1352 { 1353 static const uint32_t ptypes[] = { 1354 RTE_PTYPE_L2_ETHER, 1355 RTE_PTYPE_L3_IPV4, 1356 RTE_PTYPE_L3_IPV4_EXT, 1357 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 1358 RTE_PTYPE_L3_IPV6, 1359 RTE_PTYPE_L3_IPV6_EXT, 1360 RTE_PTYPE_L2_ETHER_ARP, 1361 RTE_PTYPE_L4_TCP, 1362 RTE_PTYPE_L4_UDP 1363 }; 1364 1365 return ptypes; 1366 } 1367 1368 /** 1369 * DPDK callback to get information about specific receive queue. 1370 * 1371 * @param dev 1372 * Pointer to Ethernet device structure. 1373 * @param rx_queue_id 1374 * Receive queue index. 1375 * @param qinfo 1376 * Receive queue information structure. 1377 */ 1378 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1379 struct rte_eth_rxq_info *qinfo) 1380 { 1381 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; 1382 struct mrvl_priv *priv = dev->data->dev_private; 1383 int inq = priv->rxq_map[rx_queue_id].inq; 1384 int tc = priv->rxq_map[rx_queue_id].tc; 1385 struct pp2_ppio_tc_params *tc_params = 1386 &priv->ppio_params.inqs_params.tcs_params[tc]; 1387 1388 qinfo->mp = q->mp; 1389 qinfo->nb_desc = tc_params->inqs_params[inq].size; 1390 } 1391 1392 /** 1393 * DPDK callback to get information about specific transmit queue. 1394 * 1395 * @param dev 1396 * Pointer to Ethernet device structure. 1397 * @param tx_queue_id 1398 * Transmit queue index. 1399 * @param qinfo 1400 * Transmit queue information structure. 1401 */ 1402 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1403 struct rte_eth_txq_info *qinfo) 1404 { 1405 struct mrvl_priv *priv = dev->data->dev_private; 1406 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; 1407 1408 qinfo->nb_desc = 1409 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; 1410 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1411 } 1412 1413 /** 1414 * DPDK callback to Configure a VLAN filter. 
1415 * 1416 * @param dev 1417 * Pointer to Ethernet device structure. 1418 * @param vlan_id 1419 * VLAN ID to filter. 1420 * @param on 1421 * Toggle filter. 1422 * 1423 * @return 1424 * 0 on success, negative error value otherwise. 1425 */ 1426 static int 1427 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1428 { 1429 struct mrvl_priv *priv = dev->data->dev_private; 1430 1431 if (!priv->ppio) 1432 return -EPERM; 1433 1434 if (priv->isolated) 1435 return -ENOTSUP; 1436 1437 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : 1438 pp2_ppio_remove_vlan(priv->ppio, vlan_id); 1439 } 1440 1441 /** 1442 * Release buffers to hardware bpool (buffer-pool) 1443 * 1444 * @param rxq 1445 * Receive queue pointer. 1446 * @param num 1447 * Number of buffers to release to bpool. 1448 * 1449 * @return 1450 * 0 on success, negative error value otherwise. 1451 */ 1452 static int 1453 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) 1454 { 1455 struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; 1456 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; 1457 int i, ret; 1458 unsigned int core_id; 1459 struct pp2_hif *hif; 1460 struct pp2_bpool *bpool; 1461 1462 core_id = rte_lcore_id(); 1463 if (core_id == LCORE_ID_ANY) 1464 core_id = 0; 1465 1466 hif = mrvl_get_hif(rxq->priv, core_id); 1467 if (!hif) 1468 return -1; 1469 1470 bpool = rxq->priv->bpool; 1471 1472 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); 1473 if (ret) 1474 return ret; 1475 1476 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) 1477 cookie_addr_high = 1478 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; 1479 1480 for (i = 0; i < num; i++) { 1481 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) 1482 != cookie_addr_high) { 1483 RTE_LOG(ERR, PMD, 1484 "mbuf virtual addr high 0x%lx out of range\n", 1485 (uint64_t)mbufs[i] >> 32); 1486 goto out; 1487 } 1488 1489 entries[i].buff.addr = 1490 rte_mbuf_data_iova_default(mbufs[i]); 1491 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i]; 1492 entries[i].bpool = bpool; 1493 } 1494 1495 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); 1496 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; 1497 1498 if (i != num) 1499 goto out; 1500 1501 return 0; 1502 out: 1503 for (; i < num; i++) 1504 rte_pktmbuf_free(mbufs[i]); 1505 1506 return -1; 1507 } 1508 1509 /** 1510 * DPDK callback to configure the receive queue. 1511 * 1512 * @param dev 1513 * Pointer to Ethernet device structure. 1514 * @param idx 1515 * RX queue index. 1516 * @param desc 1517 * Number of descriptors to configure in queue. 1518 * @param socket 1519 * NUMA socket on which memory must be allocated. 1520 * @param conf 1521 * Thresholds parameters. 1522 * @param mp 1523 * Memory pool for buffer allocations. 1524 * 1525 * @return 1526 * 0 on success, negative error value otherwise. 1527 */ 1528 static int 1529 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1530 unsigned int socket, 1531 const struct rte_eth_rxconf *conf, 1532 struct rte_mempool *mp) 1533 { 1534 struct mrvl_priv *priv = dev->data->dev_private; 1535 struct mrvl_rxq *rxq; 1536 uint32_t min_size, 1537 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 1538 int ret, tc, inq; 1539 uint64_t offloads; 1540 1541 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; 1542 1543 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { 1544 /* 1545 * Unknown TC mapping, mapping will not have a correct queue. 
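		 * The rxq_map is set up when the device is configured
		 * (mrvl_configure_rxqs()), so hitting this likely means the
		 * QoS configuration does not cover this queue.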
		 */
		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
			idx, priv->ppio_id);
		return -EFAULT;
	}

	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
		   MRVL_PKT_EFFEC_OFFS;
	if (min_size < max_rx_pkt_len) {
		RTE_LOG(ERR, PMD,
			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
			MRVL_PKT_EFFEC_OFFS,
			max_rx_pkt_len);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

	tc = priv->rxq_map[rxq->queue_id].tc;
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
		desc;

	ret = mrvl_fill_bpool(rxq, desc);
	if (ret) {
		rte_free(rxq);
		return ret;
	}

	priv->bpool_init_size += desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();

	if (core_id == LCORE_ID_ANY)
		core_id = 0;

	/* do not dereference the queue before it has been checked */
	if (!q)
		return;

	hif = mrvl_get_hif(q->priv, core_id);
	if (!hif)
		return;

	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}

	rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Tx queue configuration parameters.
 *
 * @return
 *   0 on success, negative error value otherwise.
1652 */ 1653 static int 1654 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1655 unsigned int socket, 1656 const struct rte_eth_txconf *conf) 1657 { 1658 struct mrvl_priv *priv = dev->data->dev_private; 1659 struct mrvl_txq *txq; 1660 1661 if (dev->data->tx_queues[idx]) { 1662 rte_free(dev->data->tx_queues[idx]); 1663 dev->data->tx_queues[idx] = NULL; 1664 } 1665 1666 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); 1667 if (!txq) 1668 return -ENOMEM; 1669 1670 txq->priv = priv; 1671 txq->queue_id = idx; 1672 txq->port_id = dev->data->port_id; 1673 txq->tx_deferred_start = conf->tx_deferred_start; 1674 dev->data->tx_queues[idx] = txq; 1675 1676 priv->ppio_params.outqs_params.outqs_params[idx].size = desc; 1677 1678 return 0; 1679 } 1680 1681 /** 1682 * DPDK callback to release the transmit queue. 1683 * 1684 * @param txq 1685 * Generic transmit queue pointer. 1686 */ 1687 static void 1688 mrvl_tx_queue_release(void *txq) 1689 { 1690 struct mrvl_txq *q = txq; 1691 1692 if (!q) 1693 return; 1694 1695 rte_free(q); 1696 } 1697 1698 /** 1699 * DPDK callback to get flow control configuration. 1700 * 1701 * @param dev 1702 * Pointer to Ethernet device structure. 1703 * @param fc_conf 1704 * Pointer to the flow control configuration. 1705 * 1706 * @return 1707 * 0 on success, negative error value otherwise. 1708 */ 1709 static int 1710 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1711 { 1712 struct mrvl_priv *priv = dev->data->dev_private; 1713 int ret, en; 1714 1715 if (!priv) 1716 return -EPERM; 1717 1718 ret = pp2_ppio_get_rx_pause(priv->ppio, &en); 1719 if (ret) { 1720 RTE_LOG(ERR, PMD, "Failed to read rx pause state\n"); 1721 return ret; 1722 } 1723 1724 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; 1725 1726 return 0; 1727 } 1728 1729 /** 1730 * DPDK callback to set flow control configuration. 1731 * 1732 * @param dev 1733 * Pointer to Ethernet device structure. 1734 * @param fc_conf 1735 * Pointer to the flow control configuration. 1736 * 1737 * @return 1738 * 0 on success, negative error value otherwise. 1739 */ 1740 static int 1741 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1742 { 1743 struct mrvl_priv *priv = dev->data->dev_private; 1744 1745 if (!priv) 1746 return -EPERM; 1747 1748 if (fc_conf->high_water || 1749 fc_conf->low_water || 1750 fc_conf->pause_time || 1751 fc_conf->mac_ctrl_frame_fwd || 1752 fc_conf->autoneg) { 1753 RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n"); 1754 1755 return -EINVAL; 1756 } 1757 1758 if (fc_conf->mode == RTE_FC_NONE || 1759 fc_conf->mode == RTE_FC_RX_PAUSE) { 1760 int ret, en; 1761 1762 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1; 1763 ret = pp2_ppio_set_rx_pause(priv->ppio, en); 1764 if (ret) 1765 RTE_LOG(ERR, PMD, 1766 "Failed to change flowctrl on RX side\n"); 1767 1768 return ret; 1769 } 1770 1771 return 0; 1772 } 1773 1774 /** 1775 * Update RSS hash configuration 1776 * 1777 * @param dev 1778 * Pointer to Ethernet device structure. 1779 * @param rss_conf 1780 * Pointer to RSS configuration. 1781 * 1782 * @return 1783 * 0 on success, negative error value otherwise. 1784 */ 1785 static int 1786 mrvl_rss_hash_update(struct rte_eth_dev *dev, 1787 struct rte_eth_rss_conf *rss_conf) 1788 { 1789 struct mrvl_priv *priv = dev->data->dev_private; 1790 1791 if (priv->isolated) 1792 return -ENOTSUP; 1793 1794 return mrvl_configure_rss(priv, rss_conf); 1795 } 1796 1797 /** 1798 * DPDK callback to get RSS hash configuration. 
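 *
 * The reported rss_hf is reconstructed from the ppio hash type:
 * HASH_T_NONE maps to 0, HASH_T_2_TUPLE to ETH_RSS_IPV4 and HASH_T_5_TUPLE
 * to either ETH_RSS_NONFRAG_IPV4_TCP or ETH_RSS_NONFRAG_IPV4_UDP depending
 * on the rss_hf_tcp flag.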
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
		       struct rte_eth_rss_conf *rss_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	enum pp2_ppio_hash_type hash_type =
		priv->ppio_params.inqs_params.hash_type;

	rss_conf->rss_key = NULL;

	if (hash_type == PP2_PPIO_HASH_T_NONE)
		rss_conf->rss_hf = 0;
	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
		rss_conf->rss_hf = ETH_RSS_IPV4;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;

	return 0;
}

/**
 * DPDK callback to get rte_flow callbacks.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param filter_type
 *   Flow filter type.
 * @param filter_op
 *   Flow filter operation.
 * @param arg
 *   Pointer to pass the flow ops.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op, void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &mrvl_flow_ops;
		return 0;
	default:
		RTE_LOG(WARNING, PMD, "Filter type (%d) not supported\n",
			filter_type);
		return -EINVAL;
	}
}

static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.stats_get = mrvl_stats_get,
	.stats_reset = mrvl_stats_reset,
	.xstats_get = mrvl_xstats_get,
	.xstats_reset = mrvl_xstats_reset,
	.xstats_get_names = mrvl_xstats_get_names,
	.dev_infos_get = mrvl_dev_infos_get,
	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.vlan_filter_set = mrvl_vlan_filter_set,
	.tx_queue_start = mrvl_tx_queue_start,
	.tx_queue_stop = mrvl_tx_queue_stop,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
	.flow_ctrl_get = mrvl_flow_ctrl_get,
	.flow_ctrl_set = mrvl_flow_ctrl_set,
	.rss_hash_update = mrvl_rss_hash_update,
	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
	.filter_ctrl = mrvl_eth_filter_ctrl,
};

/**
 * Return packet type information and l3/l4 offsets.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
1907 * @param l3_offset 1908 * l3 packet offset. 1909 * @param l4_offset 1910 * l4 packet offset. 1911 * 1912 * @return 1913 * Packet type information. 1914 */ 1915 static inline uint64_t 1916 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, 1917 uint8_t *l3_offset, uint8_t *l4_offset) 1918 { 1919 enum pp2_inq_l3_type l3_type; 1920 enum pp2_inq_l4_type l4_type; 1921 uint64_t packet_type; 1922 1923 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); 1924 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); 1925 1926 packet_type = RTE_PTYPE_L2_ETHER; 1927 1928 switch (l3_type) { 1929 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: 1930 packet_type |= RTE_PTYPE_L3_IPV4; 1931 break; 1932 case PP2_INQ_L3_TYPE_IPV4_OK: 1933 packet_type |= RTE_PTYPE_L3_IPV4_EXT; 1934 break; 1935 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: 1936 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 1937 break; 1938 case PP2_INQ_L3_TYPE_IPV6_NO_EXT: 1939 packet_type |= RTE_PTYPE_L3_IPV6; 1940 break; 1941 case PP2_INQ_L3_TYPE_IPV6_EXT: 1942 packet_type |= RTE_PTYPE_L3_IPV6_EXT; 1943 break; 1944 case PP2_INQ_L3_TYPE_ARP: 1945 packet_type |= RTE_PTYPE_L2_ETHER_ARP; 1946 /* 1947 * In case of ARP l4_offset is set to wrong value. 1948 * Set it to proper one so that later on mbuf->l3_len can be 1949 * calculated subtracting l4_offset and l3_offset. 1950 */ 1951 *l4_offset = *l3_offset + MRVL_ARP_LENGTH; 1952 break; 1953 default: 1954 RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n"); 1955 break; 1956 } 1957 1958 switch (l4_type) { 1959 case PP2_INQ_L4_TYPE_TCP: 1960 packet_type |= RTE_PTYPE_L4_TCP; 1961 break; 1962 case PP2_INQ_L4_TYPE_UDP: 1963 packet_type |= RTE_PTYPE_L4_UDP; 1964 break; 1965 default: 1966 RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n"); 1967 break; 1968 } 1969 1970 return packet_type; 1971 } 1972 1973 /** 1974 * Get offload information from the received packet descriptor. 1975 * 1976 * @param desc 1977 * Pointer to the received packet descriptor. 1978 * 1979 * @return 1980 * Mbuf offload flags. 1981 */ 1982 static inline uint64_t 1983 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) 1984 { 1985 uint64_t flags; 1986 enum pp2_inq_desc_status status; 1987 1988 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); 1989 if (unlikely(status != PP2_DESC_ERR_OK)) 1990 flags = PKT_RX_IP_CKSUM_BAD; 1991 else 1992 flags = PKT_RX_IP_CKSUM_GOOD; 1993 1994 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); 1995 if (unlikely(status != PP2_DESC_ERR_OK)) 1996 flags |= PKT_RX_L4_CKSUM_BAD; 1997 else 1998 flags |= PKT_RX_L4_CKSUM_GOOD; 1999 2000 return flags; 2001 } 2002 2003 /** 2004 * DPDK callback for receive. 2005 * 2006 * @param rxq 2007 * Generic pointer to the receive queue. 2008 * @param rx_pkts 2009 * Array to store received packets. 2010 * @param nb_pkts 2011 * Maximum number of packets in array. 2012 * 2013 * @return 2014 * Number of packets successfully received. 
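 *
 * Besides receiving, this routine keeps the hardware bpool balanced: if the
 * per-port buffer count falls to bpool_min_size (or nothing was received
 * while the count is below bpool_init_size) the pool is refilled with
 * MRVL_BURST_SIZE buffers, and if the count exceeds bpool_max_size the
 * surplus above bpool_init_size is freed.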
2015 */ 2016 static uint16_t 2017 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 2018 { 2019 struct mrvl_rxq *q = rxq; 2020 struct pp2_ppio_desc descs[nb_pkts]; 2021 struct pp2_bpool *bpool; 2022 int i, ret, rx_done = 0; 2023 int num; 2024 struct pp2_hif *hif; 2025 unsigned int core_id = rte_lcore_id(); 2026 2027 hif = mrvl_get_hif(q->priv, core_id); 2028 2029 if (unlikely(!q->priv->ppio || !hif)) 2030 return 0; 2031 2032 bpool = q->priv->bpool; 2033 2034 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, 2035 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); 2036 if (unlikely(ret < 0)) { 2037 RTE_LOG(ERR, PMD, "Failed to receive packets\n"); 2038 return 0; 2039 } 2040 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; 2041 2042 for (i = 0; i < nb_pkts; i++) { 2043 struct rte_mbuf *mbuf; 2044 uint8_t l3_offset, l4_offset; 2045 enum pp2_inq_desc_status status; 2046 uint64_t addr; 2047 2048 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2049 struct pp2_ppio_desc *pref_desc; 2050 u64 pref_addr; 2051 2052 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2053 pref_addr = cookie_addr_high | 2054 pp2_ppio_inq_desc_get_cookie(pref_desc); 2055 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); 2056 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); 2057 } 2058 2059 addr = cookie_addr_high | 2060 pp2_ppio_inq_desc_get_cookie(&descs[i]); 2061 mbuf = (struct rte_mbuf *)addr; 2062 rte_pktmbuf_reset(mbuf); 2063 2064 /* drop packet in case of mac, overrun or resource error */ 2065 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); 2066 if (unlikely(status != PP2_DESC_ERR_OK)) { 2067 struct pp2_buff_inf binf = { 2068 .addr = rte_mbuf_data_iova_default(mbuf), 2069 .cookie = (pp2_cookie_t)(uint64_t)mbuf, 2070 }; 2071 2072 pp2_bpool_put_buff(hif, bpool, &binf); 2073 mrvl_port_bpool_size 2074 [bpool->pp2_id][bpool->id][core_id]++; 2075 q->drop_mac++; 2076 continue; 2077 } 2078 2079 mbuf->data_off += MRVL_PKT_EFFEC_OFFS; 2080 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); 2081 mbuf->data_len = mbuf->pkt_len; 2082 mbuf->port = q->port_id; 2083 mbuf->packet_type = 2084 mrvl_desc_to_packet_type_and_offset(&descs[i], 2085 &l3_offset, 2086 &l4_offset); 2087 mbuf->l2_len = l3_offset; 2088 mbuf->l3_len = l4_offset - l3_offset; 2089 2090 if (likely(q->cksum_enabled)) 2091 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); 2092 2093 rx_pkts[rx_done++] = mbuf; 2094 q->bytes_recv += mbuf->pkt_len; 2095 } 2096 2097 if (rte_spinlock_trylock(&q->priv->lock) == 1) { 2098 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); 2099 2100 if (unlikely(num <= q->priv->bpool_min_size || 2101 (!rx_done && num < q->priv->bpool_init_size))) { 2102 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); 2103 if (ret) 2104 RTE_LOG(ERR, PMD, "Failed to fill bpool\n"); 2105 } else if (unlikely(num > q->priv->bpool_max_size)) { 2106 int i; 2107 int pkt_to_remove = num - q->priv->bpool_init_size; 2108 struct rte_mbuf *mbuf; 2109 struct pp2_buff_inf buff; 2110 2111 RTE_LOG(DEBUG, PMD, 2112 "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n", 2113 bpool->pp2_id, q->priv->ppio->port_id, 2114 bpool->id, pkt_to_remove, num, 2115 q->priv->bpool_init_size); 2116 2117 for (i = 0; i < pkt_to_remove; i++) { 2118 ret = pp2_bpool_get_buff(hif, bpool, &buff); 2119 if (ret) 2120 break; 2121 mbuf = (struct rte_mbuf *) 2122 (cookie_addr_high | buff.cookie); 2123 rte_pktmbuf_free(mbuf); 2124 } 2125 mrvl_port_bpool_size 2126 
2126 [bpool->pp2_id][bpool->id][core_id] -= i;
2127 }
2128 rte_spinlock_unlock(&q->priv->lock);
2129 }
2130
2131 return rx_done;
2132 }
2133
2134 /**
2135  * Prepare offload information.
2136  *
2137  * @param ol_flags
2138  *   Offload flags.
2139  * @param packet_type
2140  *   Packet type bitfield.
2141  * @param l3_type
2142  *   Pointer to the pp2_outq_l3_type structure.
2143  * @param l4_type
2144  *   Pointer to the pp2_outq_l4_type structure.
2145  * @param gen_l3_cksum
2146  *   Will be set to 1 in case l3 checksum is computed.
2147  * @param gen_l4_cksum
2148  *   Will be set to 1 in case l4 checksum is computed.
2149  *
2150  * @return
2151  *   0 on success, negative error value otherwise.
2152  */
2153 static inline int
2154 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2155 enum pp2_outq_l3_type *l3_type,
2156 enum pp2_outq_l4_type *l4_type,
2157 int *gen_l3_cksum,
2158 int *gen_l4_cksum)
2159 {
2160 /*
2161  * Based on ol_flags prepare information
2162  * for pp2_ppio_outq_desc_set_proto_info() which sets up the descriptor
2163  * for offloading.
2164  */
2165 if (ol_flags & PKT_TX_IPV4) {
2166 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2167 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2168 } else if (ol_flags & PKT_TX_IPV6) {
2169 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2170 /* no checksum for ipv6 header */
2171 *gen_l3_cksum = 0;
2172 } else {
2173 /* anything else is unsupported - stop processing */
2174 return -1;
2175 }
2176
2177 ol_flags &= PKT_TX_L4_MASK;
2178 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2179 ol_flags == PKT_TX_TCP_CKSUM) {
2180 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2181 *gen_l4_cksum = 1;
2182 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2183 ol_flags == PKT_TX_UDP_CKSUM) {
2184 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2185 *gen_l4_cksum = 1;
2186 } else {
2187 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2188 /* no checksum for other type */
2189 *gen_l4_cksum = 0;
2190 }
2191
2192 return 0;
2193 }
2194
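/*
 * Illustrative sketch (not part of the driver): the mbuf fields an
 * application would prepare so that mrvl_prepare_proto_info() above requests
 * TCP-over-IPv4 checksum offload on transmit. The port is additionally
 * assumed to have been configured with DEV_TX_OFFLOAD_IPV4_CKSUM and
 * DEV_TX_OFFLOAD_TCP_CKSUM, and the mbuf "m" to already carry a valid
 * Ethernet/IPv4/TCP frame.
 *
 *	m->l2_len = sizeof(struct ether_hdr);
 *	m->l3_len = sizeof(struct ipv4_hdr);
 *	m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 *			 RTE_PTYPE_L4_TCP;
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 */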
2195 /**
2196  * Release already sent buffers to bpool (buffer-pool).
2197  *
2198  * @param ppio
2199  *   Pointer to the port structure.
2200  * @param hif
2201  *   Pointer to the MUSDK hardware interface.
2202  * @param sq
2203  *   Pointer to the shadow queue.
2204  * @param qid
2205  *   Queue id number.
2206  * @param force
2207  *   Force releasing packets.
2208  */
2209 static inline void
2210 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2211 unsigned int core_id, struct mrvl_shadow_txq *sq,
2212 int qid, int force)
2213 {
2214 struct buff_release_entry *entry;
2215 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2216 int i;
2217
2218 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2219
2220 sq->num_to_release += nb_done;
2221
2222 if (likely(!force &&
2223 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2224 return;
2225
2226 nb_done = sq->num_to_release;
2227 sq->num_to_release = 0;
2228
2229 for (i = 0; i < nb_done; i++) {
2230 entry = &sq->ent[sq->tail + num];
2231 if (unlikely(!entry->buff.addr)) {
2232 RTE_LOG(ERR, PMD,
2233 "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
2234 sq->tail, (u64)entry->buff.cookie,
2235 (u64)entry->buff.addr);
2236 skip_bufs = 1;
2237 goto skip;
2238 }
2239
2240 if (unlikely(!entry->bpool)) {
2241 struct rte_mbuf *mbuf;
2242
2243 mbuf = (struct rte_mbuf *)
2244 (cookie_addr_high | entry->buff.cookie);
2245 rte_pktmbuf_free(mbuf);
2246 skip_bufs = 1;
2247 goto skip;
2248 }
2249
2250 mrvl_port_bpool_size
2251 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2252 num++;
2253 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2254 goto skip;
2255 continue;
2256 skip:
2257 if (likely(num))
2258 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2259 num += skip_bufs;
2260 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2261 sq->size -= num;
2262 num = 0;
2263 skip_bufs = 0;
2264 }
2265
2266 if (likely(num)) {
2267 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2268 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2269 sq->size -= num;
2270 }
2271 }
2272
2273 /**
2274  * DPDK callback for transmit.
2275  *
2276  * @param txq
2277  *   Generic pointer to the transmit queue.
2278  * @param tx_pkts
2279  *   Packets to transmit.
2280  * @param nb_pkts
2281  *   Number of packets in array.
2282  *
2283  * @return
2284  *   Number of packets successfully transmitted.
2285  */
2286 static uint16_t
2287 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2288 {
2289 struct mrvl_txq *q = txq;
2290 struct mrvl_shadow_txq *sq;
2291 struct pp2_hif *hif;
2292 struct pp2_ppio_desc descs[nb_pkts];
2293 unsigned int core_id = rte_lcore_id();
2294 int i, ret, bytes_sent = 0;
2295 uint16_t num, sq_free_size;
2296 uint64_t addr;
2297
2298 hif = mrvl_get_hif(q->priv, core_id);
2299 sq = &q->shadow_txqs[core_id];
2300
2301 if (unlikely(!q->priv->ppio || !hif))
2302 return 0;
2303
2304 if (sq->size)
2305 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2306 sq, q->queue_id, 0);
2307
2308 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2309 if (unlikely(nb_pkts > sq_free_size)) {
2310 RTE_LOG(DEBUG, PMD,
2311 "No room in shadow queue for %d packets! %d packets will be sent.\n",
2312 nb_pkts, sq_free_size);
2313 nb_pkts = sq_free_size;
2314 }
2315
2316 for (i = 0; i < nb_pkts; i++) {
2317 struct rte_mbuf *mbuf = tx_pkts[i];
2318 int gen_l3_cksum, gen_l4_cksum;
2319 enum pp2_outq_l3_type l3_type;
2320 enum pp2_outq_l4_type l4_type;
2321
2322 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2323 struct rte_mbuf *pref_pkt_hdr;
2324
2325 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2326 rte_mbuf_prefetch_part1(pref_pkt_hdr);
2327 rte_mbuf_prefetch_part2(pref_pkt_hdr);
2328 }
2329
2330 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2331 sq->ent[sq->head].buff.addr =
2332 rte_mbuf_data_iova_default(mbuf);
2333 sq->ent[sq->head].bpool =
2334 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
2335 mbuf->refcnt > 1)) ? NULL :
2336 mrvl_port_to_bpool_lookup[mbuf->port];
2337 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2338 sq->size++;
2339
2340 pp2_ppio_outq_desc_reset(&descs[i]);
2341 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2342 rte_pktmbuf_iova(mbuf));
2343 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2344 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2345 rte_pktmbuf_pkt_len(mbuf));
2346
2347 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2348 /*
2349  * In case unsupported ol_flags were passed,
2350  * do not update descriptor offload information.
2351  */
2352 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2353 &l3_type, &l4_type, &gen_l3_cksum,
2354 &gen_l4_cksum);
2355 if (unlikely(ret))
2356 continue;
2357
2358 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2359 mbuf->l2_len,
2360 mbuf->l2_len + mbuf->l3_len,
2361 gen_l3_cksum, gen_l4_cksum);
2362 }
2363
2364 num = nb_pkts;
2365 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2366 /* number of packets that were not sent */
2367 if (unlikely(num > nb_pkts)) {
2368 for (i = nb_pkts; i < num; i++) {
2369 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2370 MRVL_PP2_TX_SHADOWQ_MASK;
2371 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2372 bytes_sent -=
2373 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2374 }
2375 sq->size -= num - nb_pkts;
2376 }
2377
2378 q->bytes_sent += bytes_sent;
2379
2380 return nb_pkts;
2381 }
2382
2383 /**
2384  * Initialize packet processor.
2385  *
2386  * @return
2387  *   0 on success, negative error value otherwise.
2388  */
2389 static int
2390 mrvl_init_pp2(void)
2391 {
2392 struct pp2_init_params init_params;
2393
2394 memset(&init_params, 0, sizeof(init_params));
2395 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2396 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2397 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2398
2399 return pp2_init(&init_params);
2400 }
2401
2402 /**
2403  * Deinitialize packet processor.
2404  *
2405  * Wraps pp2_deinit(), which does not report errors,
2406  * hence there is no return value.
2407  */
2408 static void
2409 mrvl_deinit_pp2(void)
2410 {
2411 pp2_deinit();
2412 }
2413
2414 /**
2415  * Create private device structure.
2416  *
2417  * @param dev_name
2418  *   Pointer to the port name passed in the initialization parameters.
2419  *
2420  * @return
2421  *   Pointer to the newly allocated private device structure.
2422  */
2423 static struct mrvl_priv *
2424 mrvl_priv_create(const char *dev_name)
2425 {
2426 struct pp2_bpool_params bpool_params;
2427 char match[MRVL_MATCH_LEN];
2428 struct mrvl_priv *priv;
2429 int ret, bpool_bit;
2430
2431 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2432 if (!priv)
2433 return NULL;
2434
2435 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2436 &priv->pp_id, &priv->ppio_id);
2437 if (ret)
2438 goto out_free_priv;
2439
2440 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2441 PP2_BPOOL_NUM_POOLS);
2442 if (bpool_bit < 0)
2443 goto out_free_priv;
2444 priv->bpool_bit = bpool_bit;
2445
2446 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2447 priv->bpool_bit);
2448 memset(&bpool_params, 0, sizeof(bpool_params));
2449 bpool_params.match = match;
2450 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2451 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2452 if (ret)
2453 goto out_clear_bpool_bit;
2454
2455 priv->ppio_params.type = PP2_PPIO_T_NIC;
2456 rte_spinlock_init(&priv->lock);
2457
2458 return priv;
2459 out_clear_bpool_bit:
2460 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2461 out_free_priv:
2462 rte_free(priv);
2463 return NULL;
2464 }
2465
2466 /**
2467  * Create device representing Ethernet port.
2468  *
2469  * @param name
2470  *   Pointer to the port's name.
2471  *
2472  * @return
2473  *   0 on success, negative error value otherwise.
2474  */
2475 static int
2476 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2477 {
2478 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2479 struct rte_eth_dev *eth_dev;
2480 struct mrvl_priv *priv;
2481 struct ifreq req;
2482
2483 eth_dev = rte_eth_dev_allocate(name);
2484 if (!eth_dev)
2485 return -ENOMEM;
2486
2487 priv = mrvl_priv_create(name);
2488 if (!priv) {
2489 ret = -ENOMEM;
2490 goto out_free_dev;
2491 }
2492
2493 eth_dev->data->mac_addrs =
2494 rte_zmalloc("mac_addrs",
2495 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2496 if (!eth_dev->data->mac_addrs) {
2497 RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
2498 ret = -ENOMEM;
2499 goto out_free_dev;
2500 }
2501
2502 memset(&req, 0, sizeof(req));
2503 strcpy(req.ifr_name, name);
2504 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2505 if (ret)
2506 goto out_free_mac;
2507
2508 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2509 req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2510
2511 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2512 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2513 eth_dev->data->kdrv = RTE_KDRV_NONE;
2514 eth_dev->data->dev_private = priv;
2515 eth_dev->device = &vdev->device;
2516 eth_dev->dev_ops = &mrvl_ops;
2517
2518 rte_eth_dev_probing_finish(eth_dev);
2519 return 0;
2520 out_free_mac:
2521 rte_free(eth_dev->data->mac_addrs);
2522 out_free_dev:
2523 rte_eth_dev_release_port(eth_dev);
2524 out_free_priv:
2525 rte_free(priv);
2526
2527 return ret;
2528 }
2529
2530 /**
2531  * Cleanup previously created device representing Ethernet port.
2532  *
2533  * @param name
2534  *   Pointer to the port name.
2535  */
2536 static void
2537 mrvl_eth_dev_destroy(const char *name)
2538 {
2539 struct rte_eth_dev *eth_dev;
2540 struct mrvl_priv *priv;
2541
2542 eth_dev = rte_eth_dev_allocated(name);
2543 if (!eth_dev)
2544 return;
2545
2546 priv = eth_dev->data->dev_private;
2547 pp2_bpool_deinit(priv->bpool);
2548 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2549 rte_free(priv);
2550 rte_free(eth_dev->data->mac_addrs);
2551 rte_eth_dev_release_port(eth_dev);
2552 }
2553
2554 /**
2555  * Callback used by rte_kvargs_process() during argument parsing.
2556  *
2557  * @param key
2558  *   Pointer to the parsed key (unused).
2559  * @param value
2560  *   Pointer to the parsed value.
2561  * @param extra_args
2562  *   Pointer to the extra arguments which contains address of the
2563  *   table of pointers to parsed interface names.
2564  *
2565  * @return
2566  *   Always 0.
2567  */
2568 static int
2569 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2570 void *extra_args)
2571 {
2572 struct mrvl_ifnames *ifnames = extra_args;
2573
2574 ifnames->names[ifnames->idx++] = value;
2575
2576 return 0;
2577 }
2578
2579 /**
2580  * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2581  */
2582 static void
2583 mrvl_deinit_hifs(void)
2584 {
2585 int i;
2586
2587 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2588 if (hifs[i])
2589 pp2_hif_deinit(hifs[i]);
2590 }
2591 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2592 memset(hifs, 0, sizeof(hifs));
2593 }
2594
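/*
 * Illustrative sketch (not part of the driver): how the probe routine below
 * is typically reached from an application. "iface" and "cfg" are the kvargs
 * this PMD actually parses; the interface names and the QoS configuration
 * path are assumptions made purely for the example.
 *
 *	#include <rte_bus_vdev.h>
 *
 *	ret = rte_vdev_init("net_mvpp2", "iface=eth0,iface=eth2");
 *
 * or, equivalently, on the EAL command line:
 *
 *	--vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos.cfg
 */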
2595 /**
2596  * DPDK callback to register the virtual device.
2597  *
2598  * @param vdev
2599  *   Pointer to the virtual device.
2600  *
2601  * @return
2602  *   0 on success, negative error value otherwise.
2603  */
2604 static int
2605 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2606 {
2607 struct rte_kvargs *kvlist;
2608 struct mrvl_ifnames ifnames;
2609 int ret = -EINVAL;
2610 uint32_t i, ifnum, cfgnum;
2611 const char *params;
2612
2613 params = rte_vdev_device_args(vdev);
2614 if (!params)
2615 return -EINVAL;
2616
2617 kvlist = rte_kvargs_parse(params, valid_args);
2618 if (!kvlist)
2619 return -EINVAL;
2620
2621 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2622 if (ifnum > RTE_DIM(ifnames.names))
2623 goto out_free_kvlist;
2624
2625 ifnames.idx = 0;
2626 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2627 mrvl_get_ifnames, &ifnames);
2628
2629
2630 /*
2631  * The below system initialization should be done only once,
2632  * on the first provided configuration file
2633  */
2634 if (!mrvl_qos_cfg) {
2635 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2636 RTE_LOG(INFO, PMD, "Parsing config file!\n");
2637 if (cfgnum > 1) {
2638 RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
2639 goto out_free_kvlist;
2640 } else if (cfgnum == 1) {
2641 rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2642 mrvl_get_qoscfg, &mrvl_qos_cfg);
2643 }
2644 }
2645
2646 if (mrvl_dev_num)
2647 goto init_devices;
2648
2649 RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
2650 /*
2651  * ret == -EEXIST is correct, it means DMA
2652  * has been already initialized (by another PMD).
2653  */
2654 ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
2655 if (ret < 0) {
2656 if (ret != -EEXIST)
2657 goto out_free_kvlist;
2658 else
2659 RTE_LOG(INFO, PMD,
2660 "DMA memory has been already initialized by a different driver.\n");
2661 }
2662
2663 ret = mrvl_init_pp2();
2664 if (ret) {
2665 RTE_LOG(ERR, PMD, "Failed to init PP!\n");
2666 goto out_deinit_dma;
2667 }
2668
2669 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2670 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
2671
2672 mrvl_lcore_first = RTE_MAX_LCORE;
2673 mrvl_lcore_last = 0;
2674
2675 init_devices:
2676 for (i = 0; i < ifnum; i++) {
2677 RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
2678 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2679 if (ret)
2680 goto out_cleanup;
2681 }
2682 mrvl_dev_num += ifnum;
2683
2684 rte_kvargs_free(kvlist);
2685
2686 return 0;
2687 out_cleanup:
2688 for (; i > 0; i--)
2689 mrvl_eth_dev_destroy(ifnames.names[i - 1]);
2690
2691 if (mrvl_dev_num == 0)
2692 mrvl_deinit_pp2();
2693 out_deinit_dma:
2694 if (mrvl_dev_num == 0)
2695 mv_sys_dma_mem_destroy();
2696 out_free_kvlist:
2697 rte_kvargs_free(kvlist);
2698
2699 return ret;
2700 }
2701
2702 /**
2703  * DPDK callback to remove virtual device.
2704  *
2705  * @param vdev
2706  *   Pointer to the removed virtual device.
2707  *
2708  * @return
2709  *   0 on success, negative error value otherwise.
2710  */
2711 static int
2712 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2713 {
2714 int i;
2715 const char *name;
2716
2717 name = rte_vdev_device_name(vdev);
2718 if (!name)
2719 return -EINVAL;
2720
2721 RTE_LOG(INFO, PMD, "Removing %s\n", name);
2722
2723 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
2724 char ifname[RTE_ETH_NAME_MAX_LEN];
2725
2726 rte_eth_dev_get_name_by_port(i, ifname);
2727 mrvl_eth_dev_destroy(ifname);
2728 mrvl_dev_num--;
2729 }
2730
2731 if (mrvl_dev_num == 0) {
2732 RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
2733 mrvl_deinit_hifs();
2734 mrvl_deinit_pp2();
2735 mv_sys_dma_mem_destroy();
2736 }
2737
2738 return 0;
2739 }
2740
2741 static struct rte_vdev_driver pmd_mrvl_drv = {
2742 .probe = rte_pmd_mrvl_probe,
2743 .remove = rte_pmd_mrvl_remove,
2744 };
2745
2746 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
2747 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
2748