1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Marvell International Ltd. 3 * Copyright(c) 2017 Semihalf. 4 * All rights reserved. 5 */ 6 7 #include <rte_ethdev_driver.h> 8 #include <rte_kvargs.h> 9 #include <rte_log.h> 10 #include <rte_malloc.h> 11 #include <rte_bus_vdev.h> 12 13 /* Unluckily, container_of is defined by both DPDK and MUSDK, 14 * we'll declare only one version. 15 * 16 * Note that it is not used in this PMD anyway. 17 */ 18 #ifdef container_of 19 #undef container_of 20 #endif 21 22 #include <fcntl.h> 23 #include <linux/ethtool.h> 24 #include <linux/sockios.h> 25 #include <net/if.h> 26 #include <net/if_arp.h> 27 #include <sys/ioctl.h> 28 #include <sys/socket.h> 29 #include <sys/stat.h> 30 #include <sys/types.h> 31 32 #include "mrvl_ethdev.h" 33 #include "mrvl_qos.h" 34 35 /* bitmask with reserved hifs */ 36 #define MRVL_MUSDK_HIFS_RESERVED 0x0F 37 /* bitmask with reserved bpools */ 38 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07 39 /* bitmask with reserved kernel RSS tables */ 40 #define MRVL_MUSDK_RSS_RESERVED 0x01 41 /* maximum number of available hifs */ 42 #define MRVL_MUSDK_HIFS_MAX 9 43 44 /* prefetch shift */ 45 #define MRVL_MUSDK_PREFETCH_SHIFT 2 46 47 /* TCAM has 25 entries reserved for uc/mc filter entries */ 48 #define MRVL_MAC_ADDRS_MAX 25 49 #define MRVL_MATCH_LEN 16 50 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) 51 /* Maximum allowable packet size */ 52 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) 53 54 #define MRVL_IFACE_NAME_ARG "iface" 55 #define MRVL_CFG_ARG "cfg" 56 57 #define MRVL_BURST_SIZE 64 58 59 #define MRVL_ARP_LENGTH 28 60 61 #define MRVL_COOKIE_ADDR_INVALID ~0ULL 62 63 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8) 64 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT) 65 66 /* Memory size (in bytes) for MUSDK dma buffers */ 67 #define MRVL_MUSDK_DMA_MEMSIZE 41943040 68 69 /** Port Rx offload capabilities */ 70 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ 71 DEV_RX_OFFLOAD_JUMBO_FRAME | \ 72 DEV_RX_OFFLOAD_CRC_STRIP | \ 73 DEV_RX_OFFLOAD_CHECKSUM) 74 75 /** Port Tx offloads capabilities */ 76 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ 77 DEV_TX_OFFLOAD_UDP_CKSUM | \ 78 DEV_TX_OFFLOAD_TCP_CKSUM) 79 80 static const char * const valid_args[] = { 81 MRVL_IFACE_NAME_ARG, 82 MRVL_CFG_ARG, 83 NULL 84 }; 85 86 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; 87 static struct pp2_hif *hifs[RTE_MAX_LCORE]; 88 static int used_bpools[PP2_NUM_PKT_PROC] = { 89 MRVL_MUSDK_BPOOLS_RESERVED, 90 MRVL_MUSDK_BPOOLS_RESERVED 91 }; 92 93 struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; 94 int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; 95 uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; 96 97 struct mrvl_ifnames { 98 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; 99 int idx; 100 }; 101 102 /* 103 * To use buffer harvesting based on loopback port shadow queue structure 104 * was introduced for buffers information bookkeeping. 105 * 106 * Before sending the packet, related buffer information (pp2_buff_inf) is 107 * stored in shadow queue. After packet is transmitted no longer used 108 * packet buffer is released back to it's original hardware pool, 109 * on condition it originated from interface. 110 * In case it was generated by application itself i.e: mbuf->port field is 111 * 0xff then its released to software mempool. 
112 */ 113 struct mrvl_shadow_txq { 114 int head; /* write index - used when sending buffers */ 115 int tail; /* read index - used when releasing buffers */ 116 u16 size; /* queue occupied size */ 117 u16 num_to_release; /* number of buffers sent, that can be released */ 118 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ 119 }; 120 121 struct mrvl_rxq { 122 struct mrvl_priv *priv; 123 struct rte_mempool *mp; 124 int queue_id; 125 int port_id; 126 int cksum_enabled; 127 uint64_t bytes_recv; 128 uint64_t drop_mac; 129 }; 130 131 struct mrvl_txq { 132 struct mrvl_priv *priv; 133 int queue_id; 134 int port_id; 135 uint64_t bytes_sent; 136 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; 137 int tx_deferred_start; 138 }; 139 140 static int mrvl_lcore_first; 141 static int mrvl_lcore_last; 142 static int mrvl_dev_num; 143 144 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); 145 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, 146 struct pp2_hif *hif, unsigned int core_id, 147 struct mrvl_shadow_txq *sq, int qid, int force); 148 149 #define MRVL_XSTATS_TBL_ENTRY(name) { \ 150 #name, offsetof(struct pp2_ppio_statistics, name), \ 151 sizeof(((struct pp2_ppio_statistics *)0)->name) \ 152 } 153 154 /* Table with xstats data */ 155 static struct { 156 const char *name; 157 unsigned int offset; 158 unsigned int size; 159 } mrvl_xstats_tbl[] = { 160 MRVL_XSTATS_TBL_ENTRY(rx_bytes), 161 MRVL_XSTATS_TBL_ENTRY(rx_packets), 162 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), 163 MRVL_XSTATS_TBL_ENTRY(rx_errors), 164 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), 165 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), 166 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), 167 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), 168 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), 169 MRVL_XSTATS_TBL_ENTRY(tx_bytes), 170 MRVL_XSTATS_TBL_ENTRY(tx_packets), 171 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), 172 MRVL_XSTATS_TBL_ENTRY(tx_errors) 173 }; 174 175 static inline int 176 mrvl_get_bpool_size(int pp2_id, int pool_id) 177 { 178 int i; 179 int size = 0; 180 181 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) 182 size += mrvl_port_bpool_size[pp2_id][pool_id][i]; 183 184 return size; 185 } 186 187 static inline int 188 mrvl_reserve_bit(int *bitmap, int max) 189 { 190 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); 191 192 if (n >= max) 193 return -1; 194 195 *bitmap |= 1 << n; 196 197 return n; 198 } 199 200 static int 201 mrvl_init_hif(int core_id) 202 { 203 struct pp2_hif_params params; 204 char match[MRVL_MATCH_LEN]; 205 int ret; 206 207 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); 208 if (ret < 0) { 209 RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); 210 return ret; 211 } 212 213 snprintf(match, sizeof(match), "hif-%d", ret); 214 memset(¶ms, 0, sizeof(params)); 215 params.match = match; 216 params.out_size = MRVL_PP2_AGGR_TXQD_MAX; 217 ret = pp2_hif_init(¶ms, &hifs[core_id]); 218 if (ret) { 219 RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id); 220 return ret; 221 } 222 223 return 0; 224 } 225 226 static inline struct pp2_hif* 227 mrvl_get_hif(struct mrvl_priv *priv, int core_id) 228 { 229 int ret; 230 231 if (likely(hifs[core_id] != NULL)) 232 return hifs[core_id]; 233 234 rte_spinlock_lock(&priv->lock); 235 236 ret = mrvl_init_hif(core_id); 237 if (ret < 0) { 238 RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); 239 goto out; 240 } 241 242 if (core_id < mrvl_lcore_first) 243 mrvl_lcore_first = core_id; 244 245 if (core_id > mrvl_lcore_last) 246 
mrvl_lcore_last = core_id; 247 out: 248 rte_spinlock_unlock(&priv->lock); 249 250 return hifs[core_id]; 251 } 252 253 /** 254 * Configure rss based on dpdk rss configuration. 255 * 256 * @param priv 257 * Pointer to private structure. 258 * @param rss_conf 259 * Pointer to RSS configuration. 260 * 261 * @return 262 * 0 on success, negative error value otherwise. 263 */ 264 static int 265 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) 266 { 267 if (rss_conf->rss_key) 268 RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n"); 269 270 if (rss_conf->rss_hf == 0) { 271 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 272 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { 273 priv->ppio_params.inqs_params.hash_type = 274 PP2_PPIO_HASH_T_2_TUPLE; 275 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 276 priv->ppio_params.inqs_params.hash_type = 277 PP2_PPIO_HASH_T_5_TUPLE; 278 priv->rss_hf_tcp = 1; 279 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 280 priv->ppio_params.inqs_params.hash_type = 281 PP2_PPIO_HASH_T_5_TUPLE; 282 priv->rss_hf_tcp = 0; 283 } else { 284 return -EINVAL; 285 } 286 287 return 0; 288 } 289 290 /** 291 * Ethernet device configuration. 292 * 293 * Prepare the driver for a given number of TX and RX queues and 294 * configure RSS. 295 * 296 * @param dev 297 * Pointer to Ethernet device structure. 298 * 299 * @return 300 * 0 on success, negative error value otherwise. 301 */ 302 static int 303 mrvl_dev_configure(struct rte_eth_dev *dev) 304 { 305 struct mrvl_priv *priv = dev->data->dev_private; 306 int ret; 307 308 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && 309 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { 310 RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n", 311 dev->data->dev_conf.rxmode.mq_mode); 312 return -EINVAL; 313 } 314 315 if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { 316 RTE_LOG(INFO, PMD, 317 "L2 CRC stripping is always enabled in hw\n"); 318 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; 319 } 320 321 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 322 RTE_LOG(INFO, PMD, "VLAN stripping not supported\n"); 323 return -EINVAL; 324 } 325 326 if (dev->data->dev_conf.rxmode.split_hdr_size) { 327 RTE_LOG(INFO, PMD, "Split headers not supported\n"); 328 return -EINVAL; 329 } 330 331 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { 332 RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n"); 333 return -EINVAL; 334 } 335 336 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { 337 RTE_LOG(INFO, PMD, "LRO not supported\n"); 338 return -EINVAL; 339 } 340 341 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 342 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - 343 ETHER_HDR_LEN - ETHER_CRC_LEN; 344 345 ret = mrvl_configure_rxqs(priv, dev->data->port_id, 346 dev->data->nb_rx_queues); 347 if (ret < 0) 348 return ret; 349 350 ret = mrvl_configure_txqs(priv, dev->data->port_id, 351 dev->data->nb_tx_queues); 352 if (ret < 0) 353 return ret; 354 355 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; 356 priv->ppio_params.maintain_stats = 1; 357 priv->nb_rx_queues = dev->data->nb_rx_queues; 358 359 if (dev->data->nb_rx_queues == 1 && 360 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 361 RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n"); 362 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 363 364 return 
0; 365 } 366 367 return mrvl_configure_rss(priv, 368 &dev->data->dev_conf.rx_adv_conf.rss_conf); 369 } 370 371 /** 372 * DPDK callback to change the MTU. 373 * 374 * Setting the MTU affects hardware MRU (packets larger than the MRU 375 * will be dropped). 376 * 377 * @param dev 378 * Pointer to Ethernet device structure. 379 * @param mtu 380 * New MTU. 381 * 382 * @return 383 * 0 on success, negative error value otherwise. 384 */ 385 static int 386 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 387 { 388 struct mrvl_priv *priv = dev->data->dev_private; 389 /* extra MV_MH_SIZE bytes are required for Marvell tag */ 390 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; 391 int ret; 392 393 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) 394 return -EINVAL; 395 396 if (!priv->ppio) 397 return 0; 398 399 ret = pp2_ppio_set_mru(priv->ppio, mru); 400 if (ret) 401 return ret; 402 403 return pp2_ppio_set_mtu(priv->ppio, mtu); 404 } 405 406 /** 407 * DPDK callback to bring the link up. 408 * 409 * @param dev 410 * Pointer to Ethernet device structure. 411 * 412 * @return 413 * 0 on success, negative error value otherwise. 414 */ 415 static int 416 mrvl_dev_set_link_up(struct rte_eth_dev *dev) 417 { 418 struct mrvl_priv *priv = dev->data->dev_private; 419 int ret; 420 421 if (!priv->ppio) 422 return -EPERM; 423 424 ret = pp2_ppio_enable(priv->ppio); 425 if (ret) 426 return ret; 427 428 /* 429 * mtu/mru can be updated if pp2_ppio_enable() was called at least once 430 * as pp2_ppio_enable() changes port->t_mode from default 0 to 431 * PP2_TRAFFIC_INGRESS_EGRESS. 432 * 433 * Set mtu to default DPDK value here. 434 */ 435 ret = mrvl_mtu_set(dev, dev->data->mtu); 436 if (ret) 437 pp2_ppio_disable(priv->ppio); 438 439 return ret; 440 } 441 442 /** 443 * DPDK callback to bring the link down. 444 * 445 * @param dev 446 * Pointer to Ethernet device structure. 447 * 448 * @return 449 * 0 on success, negative error value otherwise. 450 */ 451 static int 452 mrvl_dev_set_link_down(struct rte_eth_dev *dev) 453 { 454 struct mrvl_priv *priv = dev->data->dev_private; 455 456 if (!priv->ppio) 457 return -EPERM; 458 459 return pp2_ppio_disable(priv->ppio); 460 } 461 462 /** 463 * DPDK callback to start tx queue. 464 * 465 * @param dev 466 * Pointer to Ethernet device structure. 467 * @param queue_id 468 * Transmit queue index. 469 * 470 * @return 471 * 0 on success, negative error value otherwise. 472 */ 473 static int 474 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) 475 { 476 struct mrvl_priv *priv = dev->data->dev_private; 477 int ret; 478 479 if (!priv) 480 return -EPERM; 481 482 /* passing 1 enables given tx queue */ 483 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); 484 if (ret) { 485 RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id); 486 return ret; 487 } 488 489 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 490 491 return 0; 492 } 493 494 /** 495 * DPDK callback to stop tx queue. 496 * 497 * @param dev 498 * Pointer to Ethernet device structure. 499 * @param queue_id 500 * Transmit queue index. 501 * 502 * @return 503 * 0 on success, negative error value otherwise. 
504 */ 505 static int 506 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) 507 { 508 struct mrvl_priv *priv = dev->data->dev_private; 509 int ret; 510 511 if (!priv->ppio) 512 return -EPERM; 513 514 /* passing 0 disables given tx queue */ 515 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); 516 if (ret) { 517 RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id); 518 return ret; 519 } 520 521 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 522 523 return 0; 524 } 525 526 /** 527 * DPDK callback to start the device. 528 * 529 * @param dev 530 * Pointer to Ethernet device structure. 531 * 532 * @return 533 * 0 on success, negative errno value on failure. 534 */ 535 static int 536 mrvl_dev_start(struct rte_eth_dev *dev) 537 { 538 struct mrvl_priv *priv = dev->data->dev_private; 539 char match[MRVL_MATCH_LEN]; 540 int ret = 0, i, def_init_size; 541 542 snprintf(match, sizeof(match), "ppio-%d:%d", 543 priv->pp_id, priv->ppio_id); 544 priv->ppio_params.match = match; 545 546 /* 547 * Calculate the minimum bpool size for refill feature as follows: 548 * 2 default burst sizes multiply by number of rx queues. 549 * If the bpool size will be below this value, new buffers will 550 * be added to the pool. 551 */ 552 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; 553 554 /* In case initial bpool size configured in queues setup is 555 * smaller than minimum size add more buffers 556 */ 557 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; 558 if (priv->bpool_init_size < def_init_size) { 559 int buffs_to_add = def_init_size - priv->bpool_init_size; 560 561 priv->bpool_init_size += buffs_to_add; 562 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); 563 if (ret) 564 RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n"); 565 } 566 567 /* 568 * Calculate the maximum bpool size for refill feature as follows: 569 * maximum number of descriptors in rx queue multiply by number 570 * of rx queues plus minimum bpool size. 571 * In case the bpool size will exceed this value, superfluous buffers 572 * will be removed 573 */ 574 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + 575 priv->bpool_min_size; 576 577 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); 578 if (ret) { 579 RTE_LOG(ERR, PMD, "Failed to init ppio\n"); 580 return ret; 581 } 582 583 /* 584 * In case there are some some stale uc/mc mac addresses flush them 585 * here. It cannot be done during mrvl_dev_close() as port information 586 * is already gone at that point (due to pp2_ppio_deinit() in 587 * mrvl_dev_stop()). 588 */ 589 if (!priv->uc_mc_flushed) { 590 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); 591 if (ret) { 592 RTE_LOG(ERR, PMD, 593 "Failed to flush uc/mc filter list\n"); 594 goto out; 595 } 596 priv->uc_mc_flushed = 1; 597 } 598 599 if (!priv->vlan_flushed) { 600 ret = pp2_ppio_flush_vlan(priv->ppio); 601 if (ret) { 602 RTE_LOG(ERR, PMD, "Failed to flush vlan list\n"); 603 /* 604 * TODO 605 * once pp2_ppio_flush_vlan() is supported jump to out 606 * goto out; 607 */ 608 } 609 priv->vlan_flushed = 1; 610 } 611 612 /* For default QoS config, don't start classifier. 
*/ 613 if (mrvl_qos_cfg) { 614 ret = mrvl_start_qos_mapping(priv); 615 if (ret) { 616 RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n"); 617 goto out; 618 } 619 } 620 621 ret = mrvl_dev_set_link_up(dev); 622 if (ret) { 623 RTE_LOG(ERR, PMD, "Failed to set link up\n"); 624 goto out; 625 } 626 627 /* start tx queues */ 628 for (i = 0; i < dev->data->nb_tx_queues; i++) { 629 struct mrvl_txq *txq = dev->data->tx_queues[i]; 630 631 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 632 633 if (!txq->tx_deferred_start) 634 continue; 635 636 /* 637 * All txqs are started by default. Stop them 638 * so that tx_deferred_start works as expected. 639 */ 640 ret = mrvl_tx_queue_stop(dev, i); 641 if (ret) 642 goto out; 643 } 644 645 return 0; 646 out: 647 RTE_LOG(ERR, PMD, "Failed to start device\n"); 648 pp2_ppio_deinit(priv->ppio); 649 return ret; 650 } 651 652 /** 653 * Flush receive queues. 654 * 655 * @param dev 656 * Pointer to Ethernet device structure. 657 */ 658 static void 659 mrvl_flush_rx_queues(struct rte_eth_dev *dev) 660 { 661 int i; 662 663 RTE_LOG(INFO, PMD, "Flushing rx queues\n"); 664 for (i = 0; i < dev->data->nb_rx_queues; i++) { 665 int ret, num; 666 667 do { 668 struct mrvl_rxq *q = dev->data->rx_queues[i]; 669 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; 670 671 num = MRVL_PP2_RXD_MAX; 672 ret = pp2_ppio_recv(q->priv->ppio, 673 q->priv->rxq_map[q->queue_id].tc, 674 q->priv->rxq_map[q->queue_id].inq, 675 descs, (uint16_t *)&num); 676 } while (ret == 0 && num); 677 } 678 } 679 680 /** 681 * Flush transmit shadow queues. 682 * 683 * @param dev 684 * Pointer to Ethernet device structure. 685 */ 686 static void 687 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) 688 { 689 int i, j; 690 struct mrvl_txq *txq; 691 692 RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n"); 693 for (i = 0; i < dev->data->nb_tx_queues; i++) { 694 txq = (struct mrvl_txq *)dev->data->tx_queues[i]; 695 696 for (j = 0; j < RTE_MAX_LCORE; j++) { 697 struct mrvl_shadow_txq *sq; 698 699 if (!hifs[j]) 700 continue; 701 702 sq = &txq->shadow_txqs[j]; 703 mrvl_free_sent_buffers(txq->priv->ppio, 704 hifs[j], j, sq, txq->queue_id, 1); 705 while (sq->tail != sq->head) { 706 uint64_t addr = cookie_addr_high | 707 sq->ent[sq->tail].buff.cookie; 708 rte_pktmbuf_free( 709 (struct rte_mbuf *)addr); 710 sq->tail = (sq->tail + 1) & 711 MRVL_PP2_TX_SHADOWQ_MASK; 712 } 713 memset(sq, 0, sizeof(*sq)); 714 } 715 } 716 } 717 718 /** 719 * Flush hardware bpool (buffer-pool). 720 * 721 * @param dev 722 * Pointer to Ethernet device structure. 723 */ 724 static void 725 mrvl_flush_bpool(struct rte_eth_dev *dev) 726 { 727 struct mrvl_priv *priv = dev->data->dev_private; 728 struct pp2_hif *hif; 729 uint32_t num; 730 int ret; 731 unsigned int core_id = rte_lcore_id(); 732 733 if (core_id == LCORE_ID_ANY) 734 core_id = 0; 735 736 hif = mrvl_get_hif(priv, core_id); 737 738 ret = pp2_bpool_get_num_buffs(priv->bpool, &num); 739 if (ret) { 740 RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n"); 741 return; 742 } 743 744 while (num--) { 745 struct pp2_buff_inf inf; 746 uint64_t addr; 747 748 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); 749 if (ret) 750 break; 751 752 addr = cookie_addr_high | inf.cookie; 753 rte_pktmbuf_free((struct rte_mbuf *)addr); 754 } 755 } 756 757 /** 758 * DPDK callback to stop the device. 759 * 760 * @param dev 761 * Pointer to Ethernet device structure. 
762 */ 763 static void 764 mrvl_dev_stop(struct rte_eth_dev *dev) 765 { 766 struct mrvl_priv *priv = dev->data->dev_private; 767 768 mrvl_dev_set_link_down(dev); 769 mrvl_flush_rx_queues(dev); 770 mrvl_flush_tx_shadow_queues(dev); 771 if (priv->cls_tbl) { 772 pp2_cls_tbl_deinit(priv->cls_tbl); 773 priv->cls_tbl = NULL; 774 } 775 if (priv->qos_tbl) { 776 pp2_cls_qos_tbl_deinit(priv->qos_tbl); 777 priv->qos_tbl = NULL; 778 } 779 if (priv->ppio) 780 pp2_ppio_deinit(priv->ppio); 781 priv->ppio = NULL; 782 783 /* policer must be released after ppio deinitialization */ 784 if (priv->policer) { 785 pp2_cls_plcr_deinit(priv->policer); 786 priv->policer = NULL; 787 } 788 } 789 790 /** 791 * DPDK callback to close the device. 792 * 793 * @param dev 794 * Pointer to Ethernet device structure. 795 */ 796 static void 797 mrvl_dev_close(struct rte_eth_dev *dev) 798 { 799 struct mrvl_priv *priv = dev->data->dev_private; 800 size_t i; 801 802 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { 803 struct pp2_ppio_tc_params *tc_params = 804 &priv->ppio_params.inqs_params.tcs_params[i]; 805 806 if (tc_params->inqs_params) { 807 rte_free(tc_params->inqs_params); 808 tc_params->inqs_params = NULL; 809 } 810 } 811 812 mrvl_flush_bpool(dev); 813 } 814 815 /** 816 * DPDK callback to retrieve physical link information. 817 * 818 * @param dev 819 * Pointer to Ethernet device structure. 820 * @param wait_to_complete 821 * Wait for request completion (ignored). 822 * 823 * @return 824 * 0 on success, negative error value otherwise. 825 */ 826 static int 827 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 828 { 829 /* 830 * TODO 831 * once MUSDK provides necessary API use it here 832 */ 833 struct mrvl_priv *priv = dev->data->dev_private; 834 struct ethtool_cmd edata; 835 struct ifreq req; 836 int ret, fd, link_up; 837 838 if (!priv->ppio) 839 return -EPERM; 840 841 edata.cmd = ETHTOOL_GSET; 842 843 strcpy(req.ifr_name, dev->data->name); 844 req.ifr_data = (void *)&edata; 845 846 fd = socket(AF_INET, SOCK_DGRAM, 0); 847 if (fd == -1) 848 return -EFAULT; 849 850 ret = ioctl(fd, SIOCETHTOOL, &req); 851 if (ret == -1) { 852 close(fd); 853 return -EFAULT; 854 } 855 856 close(fd); 857 858 switch (ethtool_cmd_speed(&edata)) { 859 case SPEED_10: 860 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; 861 break; 862 case SPEED_100: 863 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; 864 break; 865 case SPEED_1000: 866 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; 867 break; 868 case SPEED_10000: 869 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; 870 break; 871 default: 872 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; 873 } 874 875 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : 876 ETH_LINK_HALF_DUPLEX; 877 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : 878 ETH_LINK_FIXED; 879 pp2_ppio_get_link_state(priv->ppio, &link_up); 880 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 881 882 return 0; 883 } 884 885 /** 886 * DPDK callback to enable promiscuous mode. 887 * 888 * @param dev 889 * Pointer to Ethernet device structure. 
890 */ 891 static void 892 mrvl_promiscuous_enable(struct rte_eth_dev *dev) 893 { 894 struct mrvl_priv *priv = dev->data->dev_private; 895 int ret; 896 897 if (!priv->ppio) 898 return; 899 900 if (priv->isolated) 901 return; 902 903 ret = pp2_ppio_set_promisc(priv->ppio, 1); 904 if (ret) 905 RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n"); 906 } 907 908 /** 909 * DPDK callback to enable allmulti mode. 910 * 911 * @param dev 912 * Pointer to Ethernet device structure. 913 */ 914 static void 915 mrvl_allmulticast_enable(struct rte_eth_dev *dev) 916 { 917 struct mrvl_priv *priv = dev->data->dev_private; 918 int ret; 919 920 if (!priv->ppio) 921 return; 922 923 if (priv->isolated) 924 return; 925 926 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); 927 if (ret) 928 RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n"); 929 } 930 931 /** 932 * DPDK callback to disable promiscuous mode. 933 * 934 * @param dev 935 * Pointer to Ethernet device structure. 936 */ 937 static void 938 mrvl_promiscuous_disable(struct rte_eth_dev *dev) 939 { 940 struct mrvl_priv *priv = dev->data->dev_private; 941 int ret; 942 943 if (!priv->ppio) 944 return; 945 946 ret = pp2_ppio_set_promisc(priv->ppio, 0); 947 if (ret) 948 RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n"); 949 } 950 951 /** 952 * DPDK callback to disable allmulticast mode. 953 * 954 * @param dev 955 * Pointer to Ethernet device structure. 956 */ 957 static void 958 mrvl_allmulticast_disable(struct rte_eth_dev *dev) 959 { 960 struct mrvl_priv *priv = dev->data->dev_private; 961 int ret; 962 963 if (!priv->ppio) 964 return; 965 966 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); 967 if (ret) 968 RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n"); 969 } 970 971 /** 972 * DPDK callback to remove a MAC address. 973 * 974 * @param dev 975 * Pointer to Ethernet device structure. 976 * @param index 977 * MAC address index. 978 */ 979 static void 980 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 981 { 982 struct mrvl_priv *priv = dev->data->dev_private; 983 char buf[ETHER_ADDR_FMT_SIZE]; 984 int ret; 985 986 if (!priv->ppio) 987 return; 988 989 if (priv->isolated) 990 return; 991 992 ret = pp2_ppio_remove_mac_addr(priv->ppio, 993 dev->data->mac_addrs[index].addr_bytes); 994 if (ret) { 995 ether_format_addr(buf, sizeof(buf), 996 &dev->data->mac_addrs[index]); 997 RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf); 998 } 999 } 1000 1001 /** 1002 * DPDK callback to add a MAC address. 1003 * 1004 * @param dev 1005 * Pointer to Ethernet device structure. 1006 * @param mac_addr 1007 * MAC address to register. 1008 * @param index 1009 * MAC address index. 1010 * @param vmdq 1011 * VMDq pool index to associate address with (unused). 1012 * 1013 * @return 1014 * 0 on success, negative error value otherwise. 1015 */ 1016 static int 1017 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 1018 uint32_t index, uint32_t vmdq __rte_unused) 1019 { 1020 struct mrvl_priv *priv = dev->data->dev_private; 1021 char buf[ETHER_ADDR_FMT_SIZE]; 1022 int ret; 1023 1024 if (priv->isolated) 1025 return -ENOTSUP; 1026 1027 if (index == 0) 1028 /* For setting index 0, mrvl_mac_addr_set() should be used.*/ 1029 return -1; 1030 1031 if (!priv->ppio) 1032 return 0; 1033 1034 /* 1035 * Maximum number of uc addresses can be tuned via kernel module mvpp2x 1036 * parameter uc_filter_max. Maximum number of mc addresses is then 1037 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and 1038 * 21 respectively. 
1039 * 1040 * If more than uc_filter_max uc addresses were added to filter list 1041 * then NIC will switch to promiscuous mode automatically. 1042 * 1043 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses 1044 * were added to filter list then NIC will switch to all-multicast mode 1045 * automatically. 1046 */ 1047 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); 1048 if (ret) { 1049 ether_format_addr(buf, sizeof(buf), mac_addr); 1050 RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf); 1051 return -1; 1052 } 1053 1054 return 0; 1055 } 1056 1057 /** 1058 * DPDK callback to set the primary MAC address. 1059 * 1060 * @param dev 1061 * Pointer to Ethernet device structure. 1062 * @param mac_addr 1063 * MAC address to register. 1064 * 1065 * @return 1066 * 0 on success, negative error value otherwise. 1067 */ 1068 static int 1069 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 1070 { 1071 struct mrvl_priv *priv = dev->data->dev_private; 1072 int ret; 1073 1074 if (!priv->ppio) 1075 return 0; 1076 1077 if (priv->isolated) 1078 return -ENOTSUP; 1079 1080 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); 1081 if (ret) { 1082 char buf[ETHER_ADDR_FMT_SIZE]; 1083 ether_format_addr(buf, sizeof(buf), mac_addr); 1084 RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf); 1085 } 1086 1087 return ret; 1088 } 1089 1090 /** 1091 * DPDK callback to get device statistics. 1092 * 1093 * @param dev 1094 * Pointer to Ethernet device structure. 1095 * @param stats 1096 * Stats structure output buffer. 1097 * 1098 * @return 1099 * 0 on success, negative error value otherwise. 1100 */ 1101 static int 1102 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 1103 { 1104 struct mrvl_priv *priv = dev->data->dev_private; 1105 struct pp2_ppio_statistics ppio_stats; 1106 uint64_t drop_mac = 0; 1107 unsigned int i, idx, ret; 1108 1109 if (!priv->ppio) 1110 return -EPERM; 1111 1112 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1113 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1114 struct pp2_ppio_inq_statistics rx_stats; 1115 1116 if (!rxq) 1117 continue; 1118 1119 idx = rxq->queue_id; 1120 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1121 RTE_LOG(ERR, PMD, 1122 "rx queue %d stats out of range (0 - %d)\n", 1123 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1124 continue; 1125 } 1126 1127 ret = pp2_ppio_inq_get_statistics(priv->ppio, 1128 priv->rxq_map[idx].tc, 1129 priv->rxq_map[idx].inq, 1130 &rx_stats, 0); 1131 if (unlikely(ret)) { 1132 RTE_LOG(ERR, PMD, 1133 "Failed to update rx queue %d stats\n", idx); 1134 break; 1135 } 1136 1137 stats->q_ibytes[idx] = rxq->bytes_recv; 1138 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; 1139 stats->q_errors[idx] = rx_stats.drop_early + 1140 rx_stats.drop_fullq + 1141 rx_stats.drop_bm + 1142 rxq->drop_mac; 1143 stats->ibytes += rxq->bytes_recv; 1144 drop_mac += rxq->drop_mac; 1145 } 1146 1147 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1148 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1149 struct pp2_ppio_outq_statistics tx_stats; 1150 1151 if (!txq) 1152 continue; 1153 1154 idx = txq->queue_id; 1155 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1156 RTE_LOG(ERR, PMD, 1157 "tx queue %d stats out of range (0 - %d)\n", 1158 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1159 } 1160 1161 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, 1162 &tx_stats, 0); 1163 if (unlikely(ret)) { 1164 RTE_LOG(ERR, PMD, 1165 "Failed to update tx queue %d stats\n", idx); 1166 break; 1167 } 1168 1169 
stats->q_opackets[idx] = tx_stats.deq_desc; 1170 stats->q_obytes[idx] = txq->bytes_sent; 1171 stats->obytes += txq->bytes_sent; 1172 } 1173 1174 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1175 if (unlikely(ret)) { 1176 RTE_LOG(ERR, PMD, "Failed to update port statistics\n"); 1177 return ret; 1178 } 1179 1180 stats->ipackets += ppio_stats.rx_packets - drop_mac; 1181 stats->opackets += ppio_stats.tx_packets; 1182 stats->imissed += ppio_stats.rx_fullq_dropped + 1183 ppio_stats.rx_bm_dropped + 1184 ppio_stats.rx_early_dropped + 1185 ppio_stats.rx_fifo_dropped + 1186 ppio_stats.rx_cls_dropped; 1187 stats->ierrors = drop_mac; 1188 1189 return 0; 1190 } 1191 1192 /** 1193 * DPDK callback to clear device statistics. 1194 * 1195 * @param dev 1196 * Pointer to Ethernet device structure. 1197 */ 1198 static void 1199 mrvl_stats_reset(struct rte_eth_dev *dev) 1200 { 1201 struct mrvl_priv *priv = dev->data->dev_private; 1202 int i; 1203 1204 if (!priv->ppio) 1205 return; 1206 1207 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1208 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1209 1210 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, 1211 priv->rxq_map[i].inq, NULL, 1); 1212 rxq->bytes_recv = 0; 1213 rxq->drop_mac = 0; 1214 } 1215 1216 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1217 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1218 1219 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); 1220 txq->bytes_sent = 0; 1221 } 1222 1223 pp2_ppio_get_statistics(priv->ppio, NULL, 1); 1224 } 1225 1226 /** 1227 * DPDK callback to get extended statistics. 1228 * 1229 * @param dev 1230 * Pointer to Ethernet device structure. 1231 * @param stats 1232 * Pointer to xstats table. 1233 * @param n 1234 * Number of entries in xstats table. 1235 * @return 1236 * Negative value on error, number of read xstats otherwise. 1237 */ 1238 static int 1239 mrvl_xstats_get(struct rte_eth_dev *dev, 1240 struct rte_eth_xstat *stats, unsigned int n) 1241 { 1242 struct mrvl_priv *priv = dev->data->dev_private; 1243 struct pp2_ppio_statistics ppio_stats; 1244 unsigned int i; 1245 1246 if (!stats) 1247 return 0; 1248 1249 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1250 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { 1251 uint64_t val; 1252 1253 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) 1254 val = *(uint32_t *)((uint8_t *)&ppio_stats + 1255 mrvl_xstats_tbl[i].offset); 1256 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) 1257 val = *(uint64_t *)((uint8_t *)&ppio_stats + 1258 mrvl_xstats_tbl[i].offset); 1259 else 1260 return -EINVAL; 1261 1262 stats[i].id = i; 1263 stats[i].value = val; 1264 } 1265 1266 return n; 1267 } 1268 1269 /** 1270 * DPDK callback to reset extended statistics. 1271 * 1272 * @param dev 1273 * Pointer to Ethernet device structure. 1274 */ 1275 static void 1276 mrvl_xstats_reset(struct rte_eth_dev *dev) 1277 { 1278 mrvl_stats_reset(dev); 1279 } 1280 1281 /** 1282 * DPDK callback to get extended statistics names. 1283 * 1284 * @param dev (unused) 1285 * Pointer to Ethernet device structure. 1286 * @param xstats_names 1287 * Pointer to xstats names table. 1288 * @param size 1289 * Size of the xstats names table. 1290 * @return 1291 * Number of read names. 
1292 */ 1293 static int 1294 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, 1295 struct rte_eth_xstat_name *xstats_names, 1296 unsigned int size) 1297 { 1298 unsigned int i; 1299 1300 if (!xstats_names) 1301 return RTE_DIM(mrvl_xstats_tbl); 1302 1303 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) 1304 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s", 1305 mrvl_xstats_tbl[i].name); 1306 1307 return size; 1308 } 1309 1310 /** 1311 * DPDK callback to get information about the device. 1312 * 1313 * @param dev 1314 * Pointer to Ethernet device structure (unused). 1315 * @param info 1316 * Info structure output buffer. 1317 */ 1318 static void 1319 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, 1320 struct rte_eth_dev_info *info) 1321 { 1322 info->speed_capa = ETH_LINK_SPEED_10M | 1323 ETH_LINK_SPEED_100M | 1324 ETH_LINK_SPEED_1G | 1325 ETH_LINK_SPEED_10G; 1326 1327 info->max_rx_queues = MRVL_PP2_RXQ_MAX; 1328 info->max_tx_queues = MRVL_PP2_TXQ_MAX; 1329 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; 1330 1331 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; 1332 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; 1333 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; 1334 1335 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; 1336 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; 1337 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; 1338 1339 info->rx_offload_capa = MRVL_RX_OFFLOADS; 1340 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; 1341 1342 info->tx_offload_capa = MRVL_TX_OFFLOADS; 1343 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; 1344 1345 info->flow_type_rss_offloads = ETH_RSS_IPV4 | 1346 ETH_RSS_NONFRAG_IPV4_TCP | 1347 ETH_RSS_NONFRAG_IPV4_UDP; 1348 1349 /* By default packets are dropped if no descriptors are available */ 1350 info->default_rxconf.rx_drop_en = 1; 1351 info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; 1352 1353 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; 1354 } 1355 1356 /** 1357 * Return supported packet types. 1358 * 1359 * @param dev 1360 * Pointer to Ethernet device structure (unused). 1361 * 1362 * @return 1363 * Const pointer to the table with supported packet types. 1364 */ 1365 static const uint32_t * 1366 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) 1367 { 1368 static const uint32_t ptypes[] = { 1369 RTE_PTYPE_L2_ETHER, 1370 RTE_PTYPE_L3_IPV4, 1371 RTE_PTYPE_L3_IPV4_EXT, 1372 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 1373 RTE_PTYPE_L3_IPV6, 1374 RTE_PTYPE_L3_IPV6_EXT, 1375 RTE_PTYPE_L2_ETHER_ARP, 1376 RTE_PTYPE_L4_TCP, 1377 RTE_PTYPE_L4_UDP 1378 }; 1379 1380 return ptypes; 1381 } 1382 1383 /** 1384 * DPDK callback to get information about specific receive queue. 1385 * 1386 * @param dev 1387 * Pointer to Ethernet device structure. 1388 * @param rx_queue_id 1389 * Receive queue index. 1390 * @param qinfo 1391 * Receive queue information structure. 1392 */ 1393 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1394 struct rte_eth_rxq_info *qinfo) 1395 { 1396 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; 1397 struct mrvl_priv *priv = dev->data->dev_private; 1398 int inq = priv->rxq_map[rx_queue_id].inq; 1399 int tc = priv->rxq_map[rx_queue_id].tc; 1400 struct pp2_ppio_tc_params *tc_params = 1401 &priv->ppio_params.inqs_params.tcs_params[tc]; 1402 1403 qinfo->mp = q->mp; 1404 qinfo->nb_desc = tc_params->inqs_params[inq].size; 1405 } 1406 1407 /** 1408 * DPDK callback to get information about specific transmit queue. 1409 * 1410 * @param dev 1411 * Pointer to Ethernet device structure. 
1412 * @param tx_queue_id 1413 * Transmit queue index. 1414 * @param qinfo 1415 * Transmit queue information structure. 1416 */ 1417 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1418 struct rte_eth_txq_info *qinfo) 1419 { 1420 struct mrvl_priv *priv = dev->data->dev_private; 1421 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; 1422 1423 qinfo->nb_desc = 1424 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; 1425 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1426 } 1427 1428 /** 1429 * DPDK callback to Configure a VLAN filter. 1430 * 1431 * @param dev 1432 * Pointer to Ethernet device structure. 1433 * @param vlan_id 1434 * VLAN ID to filter. 1435 * @param on 1436 * Toggle filter. 1437 * 1438 * @return 1439 * 0 on success, negative error value otherwise. 1440 */ 1441 static int 1442 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1443 { 1444 struct mrvl_priv *priv = dev->data->dev_private; 1445 1446 if (!priv->ppio) 1447 return -EPERM; 1448 1449 if (priv->isolated) 1450 return -ENOTSUP; 1451 1452 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : 1453 pp2_ppio_remove_vlan(priv->ppio, vlan_id); 1454 } 1455 1456 /** 1457 * Release buffers to hardware bpool (buffer-pool) 1458 * 1459 * @param rxq 1460 * Receive queue pointer. 1461 * @param num 1462 * Number of buffers to release to bpool. 1463 * 1464 * @return 1465 * 0 on success, negative error value otherwise. 1466 */ 1467 static int 1468 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) 1469 { 1470 struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; 1471 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; 1472 int i, ret; 1473 unsigned int core_id; 1474 struct pp2_hif *hif; 1475 struct pp2_bpool *bpool; 1476 1477 core_id = rte_lcore_id(); 1478 if (core_id == LCORE_ID_ANY) 1479 core_id = 0; 1480 1481 hif = mrvl_get_hif(rxq->priv, core_id); 1482 if (!hif) 1483 return -1; 1484 1485 bpool = rxq->priv->bpool; 1486 1487 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); 1488 if (ret) 1489 return ret; 1490 1491 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) 1492 cookie_addr_high = 1493 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; 1494 1495 for (i = 0; i < num; i++) { 1496 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) 1497 != cookie_addr_high) { 1498 RTE_LOG(ERR, PMD, 1499 "mbuf virtual addr high 0x%lx out of range\n", 1500 (uint64_t)mbufs[i] >> 32); 1501 goto out; 1502 } 1503 1504 entries[i].buff.addr = 1505 rte_mbuf_data_iova_default(mbufs[i]); 1506 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i]; 1507 entries[i].bpool = bpool; 1508 } 1509 1510 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); 1511 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; 1512 1513 if (i != num) 1514 goto out; 1515 1516 return 0; 1517 out: 1518 for (; i < num; i++) 1519 rte_pktmbuf_free(mbufs[i]); 1520 1521 return -1; 1522 } 1523 1524 /** 1525 * Check whether requested rx queue offloads match port offloads. 1526 * 1527 * @param 1528 * dev Pointer to the device. 1529 * @param 1530 * requested Bitmap of the requested offloads. 1531 * 1532 * @return 1533 * 1 if requested offloads are okay, 0 otherwise. 
1534 */ 1535 static int 1536 mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) 1537 { 1538 uint64_t mandatory = dev->data->dev_conf.rxmode.offloads; 1539 uint64_t supported = MRVL_RX_OFFLOADS; 1540 uint64_t unsupported = requested & ~supported; 1541 uint64_t missing = mandatory & ~requested; 1542 1543 if (unsupported) { 1544 RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. " 1545 "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", 1546 requested, supported); 1547 return 0; 1548 } 1549 1550 if (missing) { 1551 RTE_LOG(ERR, PMD, "Some Rx offloads are missing. " 1552 "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", 1553 requested, missing); 1554 return 0; 1555 } 1556 1557 return 1; 1558 } 1559 1560 /** 1561 * DPDK callback to configure the receive queue. 1562 * 1563 * @param dev 1564 * Pointer to Ethernet device structure. 1565 * @param idx 1566 * RX queue index. 1567 * @param desc 1568 * Number of descriptors to configure in queue. 1569 * @param socket 1570 * NUMA socket on which memory must be allocated. 1571 * @param conf 1572 * Thresholds parameters. 1573 * @param mp 1574 * Memory pool for buffer allocations. 1575 * 1576 * @return 1577 * 0 on success, negative error value otherwise. 1578 */ 1579 static int 1580 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1581 unsigned int socket, 1582 const struct rte_eth_rxconf *conf, 1583 struct rte_mempool *mp) 1584 { 1585 struct mrvl_priv *priv = dev->data->dev_private; 1586 struct mrvl_rxq *rxq; 1587 uint32_t min_size, 1588 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 1589 int ret, tc, inq; 1590 1591 if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads)) 1592 return -ENOTSUP; 1593 1594 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { 1595 /* 1596 * Unknown TC mapping, mapping will not have a correct queue. 1597 */ 1598 RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n", 1599 idx, priv->ppio_id); 1600 return -EFAULT; 1601 } 1602 1603 min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM - 1604 MRVL_PKT_EFFEC_OFFS; 1605 if (min_size < max_rx_pkt_len) { 1606 RTE_LOG(ERR, PMD, 1607 "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n", 1608 max_rx_pkt_len + RTE_PKTMBUF_HEADROOM + 1609 MRVL_PKT_EFFEC_OFFS, 1610 max_rx_pkt_len); 1611 return -EINVAL; 1612 } 1613 1614 if (dev->data->rx_queues[idx]) { 1615 rte_free(dev->data->rx_queues[idx]); 1616 dev->data->rx_queues[idx] = NULL; 1617 } 1618 1619 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); 1620 if (!rxq) 1621 return -ENOMEM; 1622 1623 rxq->priv = priv; 1624 rxq->mp = mp; 1625 rxq->cksum_enabled = 1626 dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; 1627 rxq->queue_id = idx; 1628 rxq->port_id = dev->data->port_id; 1629 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; 1630 1631 tc = priv->rxq_map[rxq->queue_id].tc, 1632 inq = priv->rxq_map[rxq->queue_id].inq; 1633 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = 1634 desc; 1635 1636 ret = mrvl_fill_bpool(rxq, desc); 1637 if (ret) { 1638 rte_free(rxq); 1639 return ret; 1640 } 1641 1642 priv->bpool_init_size += desc; 1643 1644 dev->data->rx_queues[idx] = rxq; 1645 1646 return 0; 1647 } 1648 1649 /** 1650 * DPDK callback to release the receive queue. 1651 * 1652 * @param rxq 1653 * Generic receive queue pointer. 
1654 */ 1655 static void 1656 mrvl_rx_queue_release(void *rxq) 1657 { 1658 struct mrvl_rxq *q = rxq; 1659 struct pp2_ppio_tc_params *tc_params; 1660 int i, num, tc, inq; 1661 struct pp2_hif *hif; 1662 unsigned int core_id = rte_lcore_id(); 1663 1664 if (core_id == LCORE_ID_ANY) 1665 core_id = 0; 1666 1667 hif = mrvl_get_hif(q->priv, core_id); 1668 1669 if (!q || !hif) 1670 return; 1671 1672 tc = q->priv->rxq_map[q->queue_id].tc; 1673 inq = q->priv->rxq_map[q->queue_id].inq; 1674 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; 1675 num = tc_params->inqs_params[inq].size; 1676 for (i = 0; i < num; i++) { 1677 struct pp2_buff_inf inf; 1678 uint64_t addr; 1679 1680 pp2_bpool_get_buff(hif, q->priv->bpool, &inf); 1681 addr = cookie_addr_high | inf.cookie; 1682 rte_pktmbuf_free((struct rte_mbuf *)addr); 1683 } 1684 1685 rte_free(q); 1686 } 1687 1688 /** 1689 * Check whether requested tx queue offloads match port offloads. 1690 * 1691 * @param 1692 * dev Pointer to the device. 1693 * @param 1694 * requested Bitmap of the requested offloads. 1695 * 1696 * @return 1697 * 1 if requested offloads are okay, 0 otherwise. 1698 */ 1699 static int 1700 mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) 1701 { 1702 uint64_t mandatory = dev->data->dev_conf.txmode.offloads; 1703 uint64_t supported = MRVL_TX_OFFLOADS; 1704 uint64_t unsupported = requested & ~supported; 1705 uint64_t missing = mandatory & ~requested; 1706 1707 if (unsupported) { 1708 RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. " 1709 "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", 1710 requested, supported); 1711 return 0; 1712 } 1713 1714 if (missing) { 1715 RTE_LOG(ERR, PMD, "Some Tx offloads are missing. " 1716 "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", 1717 requested, missing); 1718 return 0; 1719 } 1720 1721 return 1; 1722 } 1723 1724 /** 1725 * DPDK callback to configure the transmit queue. 1726 * 1727 * @param dev 1728 * Pointer to Ethernet device structure. 1729 * @param idx 1730 * Transmit queue index. 1731 * @param desc 1732 * Number of descriptors to configure in the queue. 1733 * @param socket 1734 * NUMA socket on which memory must be allocated. 1735 * @param conf 1736 * Tx queue configuration parameters. 1737 * 1738 * @return 1739 * 0 on success, negative error value otherwise. 1740 */ 1741 static int 1742 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1743 unsigned int socket, 1744 const struct rte_eth_txconf *conf) 1745 { 1746 struct mrvl_priv *priv = dev->data->dev_private; 1747 struct mrvl_txq *txq; 1748 1749 if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads)) 1750 return -ENOTSUP; 1751 1752 if (dev->data->tx_queues[idx]) { 1753 rte_free(dev->data->tx_queues[idx]); 1754 dev->data->tx_queues[idx] = NULL; 1755 } 1756 1757 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); 1758 if (!txq) 1759 return -ENOMEM; 1760 1761 txq->priv = priv; 1762 txq->queue_id = idx; 1763 txq->port_id = dev->data->port_id; 1764 txq->tx_deferred_start = conf->tx_deferred_start; 1765 dev->data->tx_queues[idx] = txq; 1766 1767 priv->ppio_params.outqs_params.outqs_params[idx].size = desc; 1768 1769 return 0; 1770 } 1771 1772 /** 1773 * DPDK callback to release the transmit queue. 1774 * 1775 * @param txq 1776 * Generic transmit queue pointer. 
1777 */ 1778 static void 1779 mrvl_tx_queue_release(void *txq) 1780 { 1781 struct mrvl_txq *q = txq; 1782 1783 if (!q) 1784 return; 1785 1786 rte_free(q); 1787 } 1788 1789 /** 1790 * DPDK callback to get flow control configuration. 1791 * 1792 * @param dev 1793 * Pointer to Ethernet device structure. 1794 * @param fc_conf 1795 * Pointer to the flow control configuration. 1796 * 1797 * @return 1798 * 0 on success, negative error value otherwise. 1799 */ 1800 static int 1801 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1802 { 1803 struct mrvl_priv *priv = dev->data->dev_private; 1804 int ret, en; 1805 1806 if (!priv) 1807 return -EPERM; 1808 1809 ret = pp2_ppio_get_rx_pause(priv->ppio, &en); 1810 if (ret) { 1811 RTE_LOG(ERR, PMD, "Failed to read rx pause state\n"); 1812 return ret; 1813 } 1814 1815 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; 1816 1817 return 0; 1818 } 1819 1820 /** 1821 * DPDK callback to set flow control configuration. 1822 * 1823 * @param dev 1824 * Pointer to Ethernet device structure. 1825 * @param fc_conf 1826 * Pointer to the flow control configuration. 1827 * 1828 * @return 1829 * 0 on success, negative error value otherwise. 1830 */ 1831 static int 1832 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1833 { 1834 struct mrvl_priv *priv = dev->data->dev_private; 1835 1836 if (!priv) 1837 return -EPERM; 1838 1839 if (fc_conf->high_water || 1840 fc_conf->low_water || 1841 fc_conf->pause_time || 1842 fc_conf->mac_ctrl_frame_fwd || 1843 fc_conf->autoneg) { 1844 RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n"); 1845 1846 return -EINVAL; 1847 } 1848 1849 if (fc_conf->mode == RTE_FC_NONE || 1850 fc_conf->mode == RTE_FC_RX_PAUSE) { 1851 int ret, en; 1852 1853 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1; 1854 ret = pp2_ppio_set_rx_pause(priv->ppio, en); 1855 if (ret) 1856 RTE_LOG(ERR, PMD, 1857 "Failed to change flowctrl on RX side\n"); 1858 1859 return ret; 1860 } 1861 1862 return 0; 1863 } 1864 1865 /** 1866 * Update RSS hash configuration 1867 * 1868 * @param dev 1869 * Pointer to Ethernet device structure. 1870 * @param rss_conf 1871 * Pointer to RSS configuration. 1872 * 1873 * @return 1874 * 0 on success, negative error value otherwise. 1875 */ 1876 static int 1877 mrvl_rss_hash_update(struct rte_eth_dev *dev, 1878 struct rte_eth_rss_conf *rss_conf) 1879 { 1880 struct mrvl_priv *priv = dev->data->dev_private; 1881 1882 if (priv->isolated) 1883 return -ENOTSUP; 1884 1885 return mrvl_configure_rss(priv, rss_conf); 1886 } 1887 1888 /** 1889 * DPDK callback to get RSS hash configuration. 1890 * 1891 * @param dev 1892 * Pointer to Ethernet device structure. 1893 * @rss_conf 1894 * Pointer to RSS configuration. 1895 * 1896 * @return 1897 * Always 0. 
1898 */ 1899 static int 1900 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, 1901 struct rte_eth_rss_conf *rss_conf) 1902 { 1903 struct mrvl_priv *priv = dev->data->dev_private; 1904 enum pp2_ppio_hash_type hash_type = 1905 priv->ppio_params.inqs_params.hash_type; 1906 1907 rss_conf->rss_key = NULL; 1908 1909 if (hash_type == PP2_PPIO_HASH_T_NONE) 1910 rss_conf->rss_hf = 0; 1911 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) 1912 rss_conf->rss_hf = ETH_RSS_IPV4; 1913 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) 1914 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; 1915 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) 1916 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; 1917 1918 return 0; 1919 } 1920 1921 /** 1922 * DPDK callback to get rte_flow callbacks. 1923 * 1924 * @param dev 1925 * Pointer to the device structure. 1926 * @param filer_type 1927 * Flow filter type. 1928 * @param filter_op 1929 * Flow filter operation. 1930 * @param arg 1931 * Pointer to pass the flow ops. 1932 * 1933 * @return 1934 * 0 on success, negative error value otherwise. 1935 */ 1936 static int 1937 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused, 1938 enum rte_filter_type filter_type, 1939 enum rte_filter_op filter_op, void *arg) 1940 { 1941 switch (filter_type) { 1942 case RTE_ETH_FILTER_GENERIC: 1943 if (filter_op != RTE_ETH_FILTER_GET) 1944 return -EINVAL; 1945 *(const void **)arg = &mrvl_flow_ops; 1946 return 0; 1947 default: 1948 RTE_LOG(WARNING, PMD, "Filter type (%d) not supported", 1949 filter_type); 1950 return -EINVAL; 1951 } 1952 } 1953 1954 static const struct eth_dev_ops mrvl_ops = { 1955 .dev_configure = mrvl_dev_configure, 1956 .dev_start = mrvl_dev_start, 1957 .dev_stop = mrvl_dev_stop, 1958 .dev_set_link_up = mrvl_dev_set_link_up, 1959 .dev_set_link_down = mrvl_dev_set_link_down, 1960 .dev_close = mrvl_dev_close, 1961 .link_update = mrvl_link_update, 1962 .promiscuous_enable = mrvl_promiscuous_enable, 1963 .allmulticast_enable = mrvl_allmulticast_enable, 1964 .promiscuous_disable = mrvl_promiscuous_disable, 1965 .allmulticast_disable = mrvl_allmulticast_disable, 1966 .mac_addr_remove = mrvl_mac_addr_remove, 1967 .mac_addr_add = mrvl_mac_addr_add, 1968 .mac_addr_set = mrvl_mac_addr_set, 1969 .mtu_set = mrvl_mtu_set, 1970 .stats_get = mrvl_stats_get, 1971 .stats_reset = mrvl_stats_reset, 1972 .xstats_get = mrvl_xstats_get, 1973 .xstats_reset = mrvl_xstats_reset, 1974 .xstats_get_names = mrvl_xstats_get_names, 1975 .dev_infos_get = mrvl_dev_infos_get, 1976 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, 1977 .rxq_info_get = mrvl_rxq_info_get, 1978 .txq_info_get = mrvl_txq_info_get, 1979 .vlan_filter_set = mrvl_vlan_filter_set, 1980 .tx_queue_start = mrvl_tx_queue_start, 1981 .tx_queue_stop = mrvl_tx_queue_stop, 1982 .rx_queue_setup = mrvl_rx_queue_setup, 1983 .rx_queue_release = mrvl_rx_queue_release, 1984 .tx_queue_setup = mrvl_tx_queue_setup, 1985 .tx_queue_release = mrvl_tx_queue_release, 1986 .flow_ctrl_get = mrvl_flow_ctrl_get, 1987 .flow_ctrl_set = mrvl_flow_ctrl_set, 1988 .rss_hash_update = mrvl_rss_hash_update, 1989 .rss_hash_conf_get = mrvl_rss_hash_conf_get, 1990 .filter_ctrl = mrvl_eth_filter_ctrl, 1991 }; 1992 1993 /** 1994 * Return packet type information and l3/l4 offsets. 1995 * 1996 * @param desc 1997 * Pointer to the received packet descriptor. 1998 * @param l3_offset 1999 * l3 packet offset. 2000 * @param l4_offset 2001 * l4 packet offset. 2002 * 2003 * @return 2004 * Packet type information. 
2005 */ 2006 static inline uint64_t 2007 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, 2008 uint8_t *l3_offset, uint8_t *l4_offset) 2009 { 2010 enum pp2_inq_l3_type l3_type; 2011 enum pp2_inq_l4_type l4_type; 2012 uint64_t packet_type; 2013 2014 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); 2015 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); 2016 2017 packet_type = RTE_PTYPE_L2_ETHER; 2018 2019 switch (l3_type) { 2020 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: 2021 packet_type |= RTE_PTYPE_L3_IPV4; 2022 break; 2023 case PP2_INQ_L3_TYPE_IPV4_OK: 2024 packet_type |= RTE_PTYPE_L3_IPV4_EXT; 2025 break; 2026 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: 2027 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 2028 break; 2029 case PP2_INQ_L3_TYPE_IPV6_NO_EXT: 2030 packet_type |= RTE_PTYPE_L3_IPV6; 2031 break; 2032 case PP2_INQ_L3_TYPE_IPV6_EXT: 2033 packet_type |= RTE_PTYPE_L3_IPV6_EXT; 2034 break; 2035 case PP2_INQ_L3_TYPE_ARP: 2036 packet_type |= RTE_PTYPE_L2_ETHER_ARP; 2037 /* 2038 * In case of ARP l4_offset is set to wrong value. 2039 * Set it to proper one so that later on mbuf->l3_len can be 2040 * calculated subtracting l4_offset and l3_offset. 2041 */ 2042 *l4_offset = *l3_offset + MRVL_ARP_LENGTH; 2043 break; 2044 default: 2045 RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n"); 2046 break; 2047 } 2048 2049 switch (l4_type) { 2050 case PP2_INQ_L4_TYPE_TCP: 2051 packet_type |= RTE_PTYPE_L4_TCP; 2052 break; 2053 case PP2_INQ_L4_TYPE_UDP: 2054 packet_type |= RTE_PTYPE_L4_UDP; 2055 break; 2056 default: 2057 RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n"); 2058 break; 2059 } 2060 2061 return packet_type; 2062 } 2063 2064 /** 2065 * Get offload information from the received packet descriptor. 2066 * 2067 * @param desc 2068 * Pointer to the received packet descriptor. 2069 * 2070 * @return 2071 * Mbuf offload flags. 2072 */ 2073 static inline uint64_t 2074 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) 2075 { 2076 uint64_t flags; 2077 enum pp2_inq_desc_status status; 2078 2079 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); 2080 if (unlikely(status != PP2_DESC_ERR_OK)) 2081 flags = PKT_RX_IP_CKSUM_BAD; 2082 else 2083 flags = PKT_RX_IP_CKSUM_GOOD; 2084 2085 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); 2086 if (unlikely(status != PP2_DESC_ERR_OK)) 2087 flags |= PKT_RX_L4_CKSUM_BAD; 2088 else 2089 flags |= PKT_RX_L4_CKSUM_GOOD; 2090 2091 return flags; 2092 } 2093 2094 /** 2095 * DPDK callback for receive. 2096 * 2097 * @param rxq 2098 * Generic pointer to the receive queue. 2099 * @param rx_pkts 2100 * Array to store received packets. 2101 * @param nb_pkts 2102 * Maximum number of packets in array. 2103 * 2104 * @return 2105 * Number of packets successfully received. 
2106 */ 2107 static uint16_t 2108 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 2109 { 2110 struct mrvl_rxq *q = rxq; 2111 struct pp2_ppio_desc descs[nb_pkts]; 2112 struct pp2_bpool *bpool; 2113 int i, ret, rx_done = 0; 2114 int num; 2115 struct pp2_hif *hif; 2116 unsigned int core_id = rte_lcore_id(); 2117 2118 hif = mrvl_get_hif(q->priv, core_id); 2119 2120 if (unlikely(!q->priv->ppio || !hif)) 2121 return 0; 2122 2123 bpool = q->priv->bpool; 2124 2125 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, 2126 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); 2127 if (unlikely(ret < 0)) { 2128 RTE_LOG(ERR, PMD, "Failed to receive packets\n"); 2129 return 0; 2130 } 2131 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; 2132 2133 for (i = 0; i < nb_pkts; i++) { 2134 struct rte_mbuf *mbuf; 2135 uint8_t l3_offset, l4_offset; 2136 enum pp2_inq_desc_status status; 2137 uint64_t addr; 2138 2139 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2140 struct pp2_ppio_desc *pref_desc; 2141 u64 pref_addr; 2142 2143 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2144 pref_addr = cookie_addr_high | 2145 pp2_ppio_inq_desc_get_cookie(pref_desc); 2146 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); 2147 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); 2148 } 2149 2150 addr = cookie_addr_high | 2151 pp2_ppio_inq_desc_get_cookie(&descs[i]); 2152 mbuf = (struct rte_mbuf *)addr; 2153 rte_pktmbuf_reset(mbuf); 2154 2155 /* drop packet in case of mac, overrun or resource error */ 2156 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); 2157 if (unlikely(status != PP2_DESC_ERR_OK)) { 2158 struct pp2_buff_inf binf = { 2159 .addr = rte_mbuf_data_iova_default(mbuf), 2160 .cookie = (pp2_cookie_t)(uint64_t)mbuf, 2161 }; 2162 2163 pp2_bpool_put_buff(hif, bpool, &binf); 2164 mrvl_port_bpool_size 2165 [bpool->pp2_id][bpool->id][core_id]++; 2166 q->drop_mac++; 2167 continue; 2168 } 2169 2170 mbuf->data_off += MRVL_PKT_EFFEC_OFFS; 2171 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); 2172 mbuf->data_len = mbuf->pkt_len; 2173 mbuf->port = q->port_id; 2174 mbuf->packet_type = 2175 mrvl_desc_to_packet_type_and_offset(&descs[i], 2176 &l3_offset, 2177 &l4_offset); 2178 mbuf->l2_len = l3_offset; 2179 mbuf->l3_len = l4_offset - l3_offset; 2180 2181 if (likely(q->cksum_enabled)) 2182 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); 2183 2184 rx_pkts[rx_done++] = mbuf; 2185 q->bytes_recv += mbuf->pkt_len; 2186 } 2187 2188 if (rte_spinlock_trylock(&q->priv->lock) == 1) { 2189 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); 2190 2191 if (unlikely(num <= q->priv->bpool_min_size || 2192 (!rx_done && num < q->priv->bpool_init_size))) { 2193 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); 2194 if (ret) 2195 RTE_LOG(ERR, PMD, "Failed to fill bpool\n"); 2196 } else if (unlikely(num > q->priv->bpool_max_size)) { 2197 int i; 2198 int pkt_to_remove = num - q->priv->bpool_init_size; 2199 struct rte_mbuf *mbuf; 2200 struct pp2_buff_inf buff; 2201 2202 RTE_LOG(DEBUG, PMD, 2203 "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n", 2204 bpool->pp2_id, q->priv->ppio->port_id, 2205 bpool->id, pkt_to_remove, num, 2206 q->priv->bpool_init_size); 2207 2208 for (i = 0; i < pkt_to_remove; i++) { 2209 ret = pp2_bpool_get_buff(hif, bpool, &buff); 2210 if (ret) 2211 break; 2212 mbuf = (struct rte_mbuf *) 2213 (cookie_addr_high | buff.cookie); 2214 rte_pktmbuf_free(mbuf); 2215 } 2216 mrvl_port_bpool_size 2217 
/**
 * Prepare offload information.
 *
 * @param ol_flags
 *   Offload flags.
 * @param packet_type
 *   Packet type bitfield.
 * @param l3_type
 *   Pointer to the pp2_outq_l3_type enum to be filled in.
 * @param l4_type
 *   Pointer to the pp2_outq_l4_type enum to be filled in.
 * @param gen_l3_cksum
 *   Will be set to 1 in case l3 checksum is computed.
 * @param gen_l4_cksum
 *   Will be set to 1 in case l4 checksum is computed.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static inline int
mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
			enum pp2_outq_l3_type *l3_type,
			enum pp2_outq_l4_type *l4_type,
			int *gen_l3_cksum,
			int *gen_l4_cksum)
{
	/*
	 * Based on ol_flags prepare the information for
	 * pp2_ppio_outq_desc_set_proto_info() which sets up the descriptor
	 * for offloading.
	 */
	if (ol_flags & PKT_TX_IPV4) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
	} else if (ol_flags & PKT_TX_IPV6) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
		/* no checksum for ipv6 header */
		*gen_l3_cksum = 0;
	} else {
		/* neither IPv4 nor IPv6 - stop processing */
		return -1;
	}

	ol_flags &= PKT_TX_L4_MASK;
	if ((packet_type & RTE_PTYPE_L4_TCP) &&
	    ol_flags == PKT_TX_TCP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
		*gen_l4_cksum = 1;
	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
		   ol_flags == PKT_TX_UDP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
		*gen_l4_cksum = 1;
	} else {
		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
		/* no checksum for other types */
		*gen_l4_cksum = 0;
	}

	return 0;
}

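/*
 * Illustrative example (not compiled): for the mapping above to enable
 * hardware checksum generation, the application prepares the mbuf before
 * calling rte_eth_tx_burst(), e.g. for a TCP/IPv4 frame:
 *
 *   mbuf->l2_len = sizeof(struct ether_hdr);
 *   mbuf->l3_len = sizeof(struct ipv4_hdr);
 *   mbuf->packet_type |= RTE_PTYPE_L4_TCP;
 *   mbuf->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * which translates to PP2_OUTQ_L3_TYPE_IPV4/PP2_OUTQ_L4_TYPE_TCP with both
 * gen_l3_cksum and gen_l4_cksum set to 1.
 */
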
/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param core_id
 *   Current lcore id.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       unsigned int core_id, struct mrvl_shadow_txq *sq,
		       int qid, int force)
{
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	int i;

	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

	sq->num_to_release += nb_done;

	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
		return;

	nb_done = sq->num_to_release;
	sq->num_to_release = 0;

	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
			RTE_LOG(ERR, PMD,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
			skip_bufs = 1;
			goto skip;
		}

		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;

			mbuf = (struct rte_mbuf *)
			       (cookie_addr_high | entry->buff.cookie);
			rte_pktmbuf_free(mbuf);
			skip_bufs = 1;
			goto skip;
		}

		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
		num++;
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
			goto skip;
		continue;
skip:
		if (likely(num))
			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		num += skip_bufs;
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
		num = 0;
		skip_bufs = 0;
	}

	if (likely(num)) {
		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
	}
}

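/*
 * Shadow queue bookkeeping (illustrative sketch, not compiled): the shadow
 * queue is a power-of-two ring, so indices wrap with a simple mask. The Tx
 * path enqueues at head, the release path above consumes from tail:
 *
 *   sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;    // on enqueue
 *   sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;  // on release
 *
 * Entries with a NULL bpool pointer denote mbufs that cannot be returned to a
 * hardware pool (foreign port or refcnt > 1) and are freed with
 * rte_pktmbuf_free() instead.
 */
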
/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	struct pp2_ppio_desc descs[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, ret, bytes_sent = 0;
	uint16_t num, sq_free_size;
	uint64_t addr;

	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];

	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
				       sq, q->queue_id, 0);

	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
		RTE_LOG(DEBUG, PMD,
			"No room in shadow queue for %d packets! %d packets will be sent.\n",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
		sq->ent[sq->head].buff.addr =
			rte_mbuf_data_iova_default(mbuf);
		sq->ent[sq->head].bpool =
			(unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
				  mbuf->refcnt > 1)) ? NULL :
			 mrvl_port_to_bpool_lookup[mbuf->port];
		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size++;

		pp2_ppio_outq_desc_reset(&descs[i]);
		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
						 rte_pktmbuf_iova(mbuf));
		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
					       rte_pktmbuf_pkt_len(mbuf));

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/*
		 * In case unsupported ol_flags were passed
		 * do not update descriptor offload information.
		 */
		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
					      &l3_type, &l4_type, &gen_l3_cksum,
					      &gen_l4_cksum);
		if (unlikely(ret))
			continue;

		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						  mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = nb_pkts;
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/* nb_pkts now holds the number of descriptors actually sent */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
			addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
			bytes_sent -=
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		}
		sq->size -= num - nb_pkts;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}

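/*
 * Partial send handling (illustrative sketch, not compiled): pp2_ppio_send()
 * updates nb_pkts to the number of descriptors actually accepted. For every
 * packet that was not sent the shadow queue head is stepped back:
 *
 *   sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
 *              MRVL_PP2_TX_SHADOWQ_MASK;
 *
 * Ownership of the unsent mbufs stays with the caller, as required by the
 * rte_eth_tx_burst() contract.
 */
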
/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;

	return pp2_init(&init_params);
}

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	pp2_deinit();
}

/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	int ret, bpool_bit;

	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	if (!priv)
		return NULL;

	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	if (ret)
		goto out_free_priv;

	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	if (bpool_bit < 0)
		goto out_free_priv;
	priv->bpool_bit = bpool_bit;

	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
		 priv->bpool_bit);
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
	if (ret)
		goto out_clear_bpool_bit;

	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);

	return priv;
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
	rte_free(priv);
	return NULL;
}

/**
 * Create device representing Ethernet port.
 *
 * @param vdev
 *   Pointer to the virtual device the port belongs to.
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	struct ifreq req;

	eth_dev = rte_eth_dev_allocate(name);
	if (!eth_dev)
		return -ENOMEM;

	priv = mrvl_priv_create(name);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_dev;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
		ret = -ENOMEM;
		goto out_free_priv;
	}

	memset(&req, 0, sizeof(req));
	snprintf(req.ifr_name, sizeof(req.ifr_name), "%s", name);
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	if (ret)
		goto out_free_mac;

	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, ETHER_ADDR_LEN);

	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->dev_private = priv;
	eth_dev->device = &vdev->device;
	eth_dev->dev_ops = &mrvl_ops;

	return 0;
	/* error labels fall through so later failures undo earlier steps */
out_free_mac:
	rte_free(eth_dev->data->mac_addrs);
out_free_priv:
	rte_free(priv);
out_free_dev:
	rte_eth_dev_release_port(eth_dev);

	return ret;
}

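/*
 * Illustrative usage (not compiled; queue sizes and the mempool "mp" are
 * placeholders): once created, the port is driven through the generic ethdev
 * API, which dispatches into the mrvl_ops callbacks and the burst functions
 * installed above:
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *   rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *   rte_eth_dev_start(port_id);
 */
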
/**
 * Cleanup previously created device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
static void
mrvl_eth_dev_destroy(const char *name)
{
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;

	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return;

	priv = eth_dev->data->dev_private;
	pp2_bpool_deinit(priv->bpool);
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
	rte_free(priv);
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
}

/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the mrvl_ifnames structure collecting the parsed
 *   interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
		 void *extra_args)
{
	struct mrvl_ifnames *ifnames = extra_args;

	ifnames->names[ifnames->idx++] = value;

	return 0;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
	used_hifs = MRVL_MUSDK_HIFS_RESERVED;
	memset(hifs, 0, sizeof(hifs));
}

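/*
 * Example instantiation (illustrative; interface names and the file path are
 * placeholders): the driver below is probed from EAL arguments such as
 *
 *   --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos_config.txt
 *
 * where each "iface" keyword adds one PPv2-backed kernel netdev and the
 * optional "cfg" keyword points at a QoS configuration file handled by
 * mrvl_get_qoscfg().
 */
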
/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	struct mrvl_ifnames ifnames;
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames.names))
		goto out_free_kvlist;

	ifnames.idx = 0;
	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);

	/*
	 * The QoS configuration below must be parsed only once, when the
	 * first configuration file is provided.
	 */
	if (!mrvl_qos_cfg) {
		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
		RTE_LOG(INFO, PMD, "Parsing config file!\n");
		if (cfgnum > 1) {
			RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
			goto out_free_kvlist;
		} else if (cfgnum == 1) {
			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
					   mrvl_get_qoscfg, &mrvl_qos_cfg);
		}
	}

	if (mrvl_dev_num)
		goto init_devices;

	RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
	/*
	 * ret == -EEXIST is correct, it means DMA
	 * has already been initialized (by another PMD).
	 */
	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
	if (ret < 0) {
		if (ret != -EEXIST)
			goto out_free_kvlist;
		else
			RTE_LOG(INFO, PMD,
				"DMA memory has been already initialized by a different driver.\n");
	}

	ret = mrvl_init_pp2();
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
		goto out_deinit_dma;
	}

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));

	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

init_devices:
	for (i = 0; i < ifnum; i++) {
		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
		if (ret)
			goto out_cleanup;
	}
	mrvl_dev_num += ifnum;

	rte_kvargs_free(kvlist);

	return 0;
out_cleanup:
	/* destroy only the devices that were actually created above */
	for (; i > 0; i--)
		mrvl_eth_dev_destroy(ifnames.names[i - 1]);

	if (mrvl_dev_num == 0)
		mrvl_deinit_pp2();
out_deinit_dma:
	if (mrvl_dev_num == 0)
		mv_sys_dma_mem_destroy();
out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
	int i;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (!name)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing %s\n", name);

	RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
		char ifname[RTE_ETH_NAME_MAX_LEN];

		rte_eth_dev_get_name_by_port(i, ifname);
		mrvl_eth_dev_destroy(ifname);
		mrvl_dev_num--;
	}

	if (mrvl_dev_num == 0) {
		RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
		mrvl_deinit_hifs();
		mrvl_deinit_pp2();
		mv_sys_dma_mem_destroy();
	}

	return 0;
}

static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);