1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Marvell International Ltd. 3 * Copyright(c) 2017 Semihalf. 4 * All rights reserved. 5 */ 6 7 #include <rte_ethdev_driver.h> 8 #include <rte_kvargs.h> 9 #include <rte_log.h> 10 #include <rte_malloc.h> 11 #include <rte_bus_vdev.h> 12 13 #include <fcntl.h> 14 #include <linux/ethtool.h> 15 #include <linux/sockios.h> 16 #include <net/if.h> 17 #include <net/if_arp.h> 18 #include <sys/ioctl.h> 19 #include <sys/socket.h> 20 #include <sys/stat.h> 21 #include <sys/types.h> 22 23 #include <rte_mvep_common.h> 24 #include "mrvl_ethdev.h" 25 #include "mrvl_qos.h" 26 #include "mrvl_mtr.h" 27 28 /* bitmask with reserved hifs */ 29 #define MRVL_MUSDK_HIFS_RESERVED 0x0F 30 /* bitmask with reserved bpools */ 31 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07 32 /* bitmask with reserved kernel RSS tables */ 33 #define MRVL_MUSDK_RSS_RESERVED 0x01 34 /* maximum number of available hifs */ 35 #define MRVL_MUSDK_HIFS_MAX 9 36 37 /* prefetch shift */ 38 #define MRVL_MUSDK_PREFETCH_SHIFT 2 39 40 /* TCAM has 25 entries reserved for uc/mc filter entries */ 41 #define MRVL_MAC_ADDRS_MAX 25 42 #define MRVL_MATCH_LEN 16 43 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) 44 /* Maximum allowable packet size */ 45 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) 46 47 #define MRVL_IFACE_NAME_ARG "iface" 48 #define MRVL_CFG_ARG "cfg" 49 50 #define MRVL_BURST_SIZE 64 51 52 #define MRVL_ARP_LENGTH 28 53 54 #define MRVL_COOKIE_ADDR_INVALID ~0ULL 55 56 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8) 57 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT) 58 59 /** Port Rx offload capabilities */ 60 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ 61 DEV_RX_OFFLOAD_JUMBO_FRAME | \ 62 DEV_RX_OFFLOAD_CHECKSUM) 63 64 /** Port Tx offloads capabilities */ 65 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ 66 DEV_TX_OFFLOAD_UDP_CKSUM | \ 67 DEV_TX_OFFLOAD_TCP_CKSUM) 68 69 static const char * const valid_args[] = { 70 MRVL_IFACE_NAME_ARG, 71 MRVL_CFG_ARG, 72 NULL 73 }; 74 75 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; 76 static struct pp2_hif *hifs[RTE_MAX_LCORE]; 77 static int used_bpools[PP2_NUM_PKT_PROC] = { 78 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED 79 }; 80 81 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; 82 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; 83 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; 84 85 int mrvl_logtype; 86 87 struct mrvl_ifnames { 88 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; 89 int idx; 90 }; 91 92 /* 93 * To use buffer harvesting based on loopback port shadow queue structure 94 * was introduced for buffers information bookkeeping. 95 * 96 * Before sending the packet, related buffer information (pp2_buff_inf) is 97 * stored in shadow queue. After packet is transmitted no longer used 98 * packet buffer is released back to it's original hardware pool, 99 * on condition it originated from interface. 100 * In case it was generated by application itself i.e: mbuf->port field is 101 * 0xff then its released to software mempool. 102 */ 103 struct mrvl_shadow_txq { 104 int head; /* write index - used when sending buffers */ 105 int tail; /* read index - used when releasing buffers */ 106 u16 size; /* queue occupied size */ 107 u16 num_to_release; /* number of buffers sent, that can be released */ 108 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ 109 }; 110 111 struct mrvl_rxq { 112 struct mrvl_priv *priv; 113 struct rte_mempool *mp; 114 int queue_id; 115 int port_id; 116 int cksum_enabled; 117 uint64_t bytes_recv; 118 uint64_t drop_mac; 119 }; 120 121 struct mrvl_txq { 122 struct mrvl_priv *priv; 123 int queue_id; 124 int port_id; 125 uint64_t bytes_sent; 126 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; 127 int tx_deferred_start; 128 }; 129 130 static int mrvl_lcore_first; 131 static int mrvl_lcore_last; 132 static int mrvl_dev_num; 133 134 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); 135 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, 136 struct pp2_hif *hif, unsigned int core_id, 137 struct mrvl_shadow_txq *sq, int qid, int force); 138 139 #define MRVL_XSTATS_TBL_ENTRY(name) { \ 140 #name, offsetof(struct pp2_ppio_statistics, name), \ 141 sizeof(((struct pp2_ppio_statistics *)0)->name) \ 142 } 143 144 /* Table with xstats data */ 145 static struct { 146 const char *name; 147 unsigned int offset; 148 unsigned int size; 149 } mrvl_xstats_tbl[] = { 150 MRVL_XSTATS_TBL_ENTRY(rx_bytes), 151 MRVL_XSTATS_TBL_ENTRY(rx_packets), 152 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), 153 MRVL_XSTATS_TBL_ENTRY(rx_errors), 154 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), 155 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), 156 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), 157 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), 158 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), 159 MRVL_XSTATS_TBL_ENTRY(tx_bytes), 160 MRVL_XSTATS_TBL_ENTRY(tx_packets), 161 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), 162 MRVL_XSTATS_TBL_ENTRY(tx_errors) 163 }; 164 165 static inline int 166 mrvl_get_bpool_size(int pp2_id, int pool_id) 167 { 168 int i; 169 int size = 0; 170 171 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) 172 size += mrvl_port_bpool_size[pp2_id][pool_id][i]; 173 174 return size; 175 } 176 177 static inline int 178 mrvl_reserve_bit(int *bitmap, int max) 179 { 180 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); 181 182 if (n >= max) 183 return -1; 184 185 *bitmap |= 1 << n; 186 187 return n; 188 } 189 190 static int 191 mrvl_init_hif(int core_id) 192 { 193 struct pp2_hif_params params; 194 char match[MRVL_MATCH_LEN]; 195 int ret; 196 197 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); 198 if (ret < 0) { 199 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); 200 return ret; 201 } 202 203 snprintf(match, sizeof(match), "hif-%d", ret); 204 memset(¶ms, 0, sizeof(params)); 205 params.match = match; 206 params.out_size = MRVL_PP2_AGGR_TXQD_MAX; 207 ret = pp2_hif_init(¶ms, &hifs[core_id]); 208 if (ret) { 209 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id); 210 return ret; 211 } 212 213 return 0; 214 } 215 216 static inline struct pp2_hif* 217 mrvl_get_hif(struct mrvl_priv *priv, int core_id) 218 { 219 int ret; 220 221 if (likely(hifs[core_id] != NULL)) 222 return hifs[core_id]; 223 224 rte_spinlock_lock(&priv->lock); 225 226 ret = mrvl_init_hif(core_id); 227 if (ret < 0) { 228 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); 229 goto out; 230 } 231 232 if (core_id < mrvl_lcore_first) 233 mrvl_lcore_first = core_id; 234 235 if (core_id > mrvl_lcore_last) 236 mrvl_lcore_last = core_id; 237 out: 238 rte_spinlock_unlock(&priv->lock); 239 240 return hifs[core_id]; 241 } 242 243 /** 244 * Configure rss based on dpdk rss configuration. 245 * 246 * @param priv 247 * Pointer to private structure. 248 * @param rss_conf 249 * Pointer to RSS configuration. 250 * 251 * @return 252 * 0 on success, negative error value otherwise. 253 */ 254 static int 255 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) 256 { 257 if (rss_conf->rss_key) 258 MRVL_LOG(WARNING, "Changing hash key is not supported"); 259 260 if (rss_conf->rss_hf == 0) { 261 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 262 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { 263 priv->ppio_params.inqs_params.hash_type = 264 PP2_PPIO_HASH_T_2_TUPLE; 265 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 266 priv->ppio_params.inqs_params.hash_type = 267 PP2_PPIO_HASH_T_5_TUPLE; 268 priv->rss_hf_tcp = 1; 269 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 270 priv->ppio_params.inqs_params.hash_type = 271 PP2_PPIO_HASH_T_5_TUPLE; 272 priv->rss_hf_tcp = 0; 273 } else { 274 return -EINVAL; 275 } 276 277 return 0; 278 } 279 280 /** 281 * Ethernet device configuration. 282 * 283 * Prepare the driver for a given number of TX and RX queues and 284 * configure RSS. 285 * 286 * @param dev 287 * Pointer to Ethernet device structure. 288 * 289 * @return 290 * 0 on success, negative error value otherwise. 291 */ 292 static int 293 mrvl_dev_configure(struct rte_eth_dev *dev) 294 { 295 struct mrvl_priv *priv = dev->data->dev_private; 296 int ret; 297 298 if (priv->ppio) { 299 MRVL_LOG(INFO, "Device reconfiguration is not supported"); 300 return -EINVAL; 301 } 302 303 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && 304 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { 305 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d", 306 dev->data->dev_conf.rxmode.mq_mode); 307 return -EINVAL; 308 } 309 310 if (dev->data->dev_conf.rxmode.split_hdr_size) { 311 MRVL_LOG(INFO, "Split headers not supported"); 312 return -EINVAL; 313 } 314 315 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 316 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - 317 ETHER_HDR_LEN - ETHER_CRC_LEN; 318 319 ret = mrvl_configure_rxqs(priv, dev->data->port_id, 320 dev->data->nb_rx_queues); 321 if (ret < 0) 322 return ret; 323 324 ret = mrvl_configure_txqs(priv, dev->data->port_id, 325 dev->data->nb_tx_queues); 326 if (ret < 0) 327 return ret; 328 329 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; 330 priv->ppio_params.maintain_stats = 1; 331 priv->nb_rx_queues = dev->data->nb_rx_queues; 332 333 if (dev->data->nb_rx_queues == 1 && 334 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 335 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue"); 336 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 337 338 return 0; 339 } 340 341 return mrvl_configure_rss(priv, 342 &dev->data->dev_conf.rx_adv_conf.rss_conf); 343 } 344 345 /** 346 * DPDK callback to change the MTU. 347 * 348 * Setting the MTU affects hardware MRU (packets larger than the MRU 349 * will be dropped). 350 * 351 * @param dev 352 * Pointer to Ethernet device structure. 353 * @param mtu 354 * New MTU. 355 * 356 * @return 357 * 0 on success, negative error value otherwise. 358 */ 359 static int 360 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 361 { 362 struct mrvl_priv *priv = dev->data->dev_private; 363 /* extra MV_MH_SIZE bytes are required for Marvell tag */ 364 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; 365 int ret; 366 367 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) 368 return -EINVAL; 369 370 if (!priv->ppio) 371 return 0; 372 373 ret = pp2_ppio_set_mru(priv->ppio, mru); 374 if (ret) 375 return ret; 376 377 return pp2_ppio_set_mtu(priv->ppio, mtu); 378 } 379 380 /** 381 * DPDK callback to bring the link up. 382 * 383 * @param dev 384 * Pointer to Ethernet device structure. 385 * 386 * @return 387 * 0 on success, negative error value otherwise. 388 */ 389 static int 390 mrvl_dev_set_link_up(struct rte_eth_dev *dev) 391 { 392 struct mrvl_priv *priv = dev->data->dev_private; 393 int ret; 394 395 if (!priv->ppio) 396 return -EPERM; 397 398 ret = pp2_ppio_enable(priv->ppio); 399 if (ret) 400 return ret; 401 402 /* 403 * mtu/mru can be updated if pp2_ppio_enable() was called at least once 404 * as pp2_ppio_enable() changes port->t_mode from default 0 to 405 * PP2_TRAFFIC_INGRESS_EGRESS. 406 * 407 * Set mtu to default DPDK value here. 408 */ 409 ret = mrvl_mtu_set(dev, dev->data->mtu); 410 if (ret) 411 pp2_ppio_disable(priv->ppio); 412 413 return ret; 414 } 415 416 /** 417 * DPDK callback to bring the link down. 418 * 419 * @param dev 420 * Pointer to Ethernet device structure. 421 * 422 * @return 423 * 0 on success, negative error value otherwise. 424 */ 425 static int 426 mrvl_dev_set_link_down(struct rte_eth_dev *dev) 427 { 428 struct mrvl_priv *priv = dev->data->dev_private; 429 430 if (!priv->ppio) 431 return -EPERM; 432 433 return pp2_ppio_disable(priv->ppio); 434 } 435 436 /** 437 * DPDK callback to start tx queue. 438 * 439 * @param dev 440 * Pointer to Ethernet device structure. 441 * @param queue_id 442 * Transmit queue index. 443 * 444 * @return 445 * 0 on success, negative error value otherwise. 446 */ 447 static int 448 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) 449 { 450 struct mrvl_priv *priv = dev->data->dev_private; 451 int ret; 452 453 if (!priv) 454 return -EPERM; 455 456 /* passing 1 enables given tx queue */ 457 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); 458 if (ret) { 459 MRVL_LOG(ERR, "Failed to start txq %d", queue_id); 460 return ret; 461 } 462 463 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 464 465 return 0; 466 } 467 468 /** 469 * DPDK callback to stop tx queue. 470 * 471 * @param dev 472 * Pointer to Ethernet device structure. 473 * @param queue_id 474 * Transmit queue index. 475 * 476 * @return 477 * 0 on success, negative error value otherwise. 478 */ 479 static int 480 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) 481 { 482 struct mrvl_priv *priv = dev->data->dev_private; 483 int ret; 484 485 if (!priv->ppio) 486 return -EPERM; 487 488 /* passing 0 disables given tx queue */ 489 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); 490 if (ret) { 491 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id); 492 return ret; 493 } 494 495 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 496 497 return 0; 498 } 499 500 /** 501 * DPDK callback to start the device. 502 * 503 * @param dev 504 * Pointer to Ethernet device structure. 505 * 506 * @return 507 * 0 on success, negative errno value on failure. 508 */ 509 static int 510 mrvl_dev_start(struct rte_eth_dev *dev) 511 { 512 struct mrvl_priv *priv = dev->data->dev_private; 513 char match[MRVL_MATCH_LEN]; 514 int ret = 0, i, def_init_size; 515 516 if (priv->ppio) 517 return mrvl_dev_set_link_up(dev); 518 519 snprintf(match, sizeof(match), "ppio-%d:%d", 520 priv->pp_id, priv->ppio_id); 521 priv->ppio_params.match = match; 522 523 /* 524 * Calculate the minimum bpool size for refill feature as follows: 525 * 2 default burst sizes multiply by number of rx queues. 526 * If the bpool size will be below this value, new buffers will 527 * be added to the pool. 528 */ 529 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; 530 531 /* In case initial bpool size configured in queues setup is 532 * smaller than minimum size add more buffers 533 */ 534 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; 535 if (priv->bpool_init_size < def_init_size) { 536 int buffs_to_add = def_init_size - priv->bpool_init_size; 537 538 priv->bpool_init_size += buffs_to_add; 539 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); 540 if (ret) 541 MRVL_LOG(ERR, "Failed to add buffers to bpool"); 542 } 543 544 /* 545 * Calculate the maximum bpool size for refill feature as follows: 546 * maximum number of descriptors in rx queue multiply by number 547 * of rx queues plus minimum bpool size. 548 * In case the bpool size will exceed this value, superfluous buffers 549 * will be removed 550 */ 551 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + 552 priv->bpool_min_size; 553 554 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); 555 if (ret) { 556 MRVL_LOG(ERR, "Failed to init ppio"); 557 return ret; 558 } 559 560 /* 561 * In case there are some some stale uc/mc mac addresses flush them 562 * here. It cannot be done during mrvl_dev_close() as port information 563 * is already gone at that point (due to pp2_ppio_deinit() in 564 * mrvl_dev_stop()). 565 */ 566 if (!priv->uc_mc_flushed) { 567 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); 568 if (ret) { 569 MRVL_LOG(ERR, 570 "Failed to flush uc/mc filter list"); 571 goto out; 572 } 573 priv->uc_mc_flushed = 1; 574 } 575 576 if (!priv->vlan_flushed) { 577 ret = pp2_ppio_flush_vlan(priv->ppio); 578 if (ret) { 579 MRVL_LOG(ERR, "Failed to flush vlan list"); 580 /* 581 * TODO 582 * once pp2_ppio_flush_vlan() is supported jump to out 583 * goto out; 584 */ 585 } 586 priv->vlan_flushed = 1; 587 } 588 589 /* For default QoS config, don't start classifier. */ 590 if (mrvl_qos_cfg) { 591 ret = mrvl_start_qos_mapping(priv); 592 if (ret) { 593 MRVL_LOG(ERR, "Failed to setup QoS mapping"); 594 goto out; 595 } 596 } 597 598 ret = mrvl_dev_set_link_up(dev); 599 if (ret) { 600 MRVL_LOG(ERR, "Failed to set link up"); 601 goto out; 602 } 603 604 /* start tx queues */ 605 for (i = 0; i < dev->data->nb_tx_queues; i++) { 606 struct mrvl_txq *txq = dev->data->tx_queues[i]; 607 608 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 609 610 if (!txq->tx_deferred_start) 611 continue; 612 613 /* 614 * All txqs are started by default. Stop them 615 * so that tx_deferred_start works as expected. 616 */ 617 ret = mrvl_tx_queue_stop(dev, i); 618 if (ret) 619 goto out; 620 } 621 622 mrvl_mtr_init(dev); 623 624 return 0; 625 out: 626 MRVL_LOG(ERR, "Failed to start device"); 627 pp2_ppio_deinit(priv->ppio); 628 return ret; 629 } 630 631 /** 632 * Flush receive queues. 633 * 634 * @param dev 635 * Pointer to Ethernet device structure. 636 */ 637 static void 638 mrvl_flush_rx_queues(struct rte_eth_dev *dev) 639 { 640 int i; 641 642 MRVL_LOG(INFO, "Flushing rx queues"); 643 for (i = 0; i < dev->data->nb_rx_queues; i++) { 644 int ret, num; 645 646 do { 647 struct mrvl_rxq *q = dev->data->rx_queues[i]; 648 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; 649 650 num = MRVL_PP2_RXD_MAX; 651 ret = pp2_ppio_recv(q->priv->ppio, 652 q->priv->rxq_map[q->queue_id].tc, 653 q->priv->rxq_map[q->queue_id].inq, 654 descs, (uint16_t *)&num); 655 } while (ret == 0 && num); 656 } 657 } 658 659 /** 660 * Flush transmit shadow queues. 661 * 662 * @param dev 663 * Pointer to Ethernet device structure. 664 */ 665 static void 666 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) 667 { 668 int i, j; 669 struct mrvl_txq *txq; 670 671 MRVL_LOG(INFO, "Flushing tx shadow queues"); 672 for (i = 0; i < dev->data->nb_tx_queues; i++) { 673 txq = (struct mrvl_txq *)dev->data->tx_queues[i]; 674 675 for (j = 0; j < RTE_MAX_LCORE; j++) { 676 struct mrvl_shadow_txq *sq; 677 678 if (!hifs[j]) 679 continue; 680 681 sq = &txq->shadow_txqs[j]; 682 mrvl_free_sent_buffers(txq->priv->ppio, 683 hifs[j], j, sq, txq->queue_id, 1); 684 while (sq->tail != sq->head) { 685 uint64_t addr = cookie_addr_high | 686 sq->ent[sq->tail].buff.cookie; 687 rte_pktmbuf_free( 688 (struct rte_mbuf *)addr); 689 sq->tail = (sq->tail + 1) & 690 MRVL_PP2_TX_SHADOWQ_MASK; 691 } 692 memset(sq, 0, sizeof(*sq)); 693 } 694 } 695 } 696 697 /** 698 * Flush hardware bpool (buffer-pool). 699 * 700 * @param dev 701 * Pointer to Ethernet device structure. 702 */ 703 static void 704 mrvl_flush_bpool(struct rte_eth_dev *dev) 705 { 706 struct mrvl_priv *priv = dev->data->dev_private; 707 struct pp2_hif *hif; 708 uint32_t num; 709 int ret; 710 unsigned int core_id = rte_lcore_id(); 711 712 if (core_id == LCORE_ID_ANY) 713 core_id = 0; 714 715 hif = mrvl_get_hif(priv, core_id); 716 717 ret = pp2_bpool_get_num_buffs(priv->bpool, &num); 718 if (ret) { 719 MRVL_LOG(ERR, "Failed to get bpool buffers number"); 720 return; 721 } 722 723 while (num--) { 724 struct pp2_buff_inf inf; 725 uint64_t addr; 726 727 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); 728 if (ret) 729 break; 730 731 addr = cookie_addr_high | inf.cookie; 732 rte_pktmbuf_free((struct rte_mbuf *)addr); 733 } 734 } 735 736 /** 737 * DPDK callback to stop the device. 738 * 739 * @param dev 740 * Pointer to Ethernet device structure. 741 */ 742 static void 743 mrvl_dev_stop(struct rte_eth_dev *dev) 744 { 745 mrvl_dev_set_link_down(dev); 746 } 747 748 /** 749 * DPDK callback to close the device. 750 * 751 * @param dev 752 * Pointer to Ethernet device structure. 753 */ 754 static void 755 mrvl_dev_close(struct rte_eth_dev *dev) 756 { 757 struct mrvl_priv *priv = dev->data->dev_private; 758 size_t i; 759 760 mrvl_flush_rx_queues(dev); 761 mrvl_flush_tx_shadow_queues(dev); 762 mrvl_mtr_deinit(dev); 763 764 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { 765 struct pp2_ppio_tc_params *tc_params = 766 &priv->ppio_params.inqs_params.tcs_params[i]; 767 768 if (tc_params->inqs_params) { 769 rte_free(tc_params->inqs_params); 770 tc_params->inqs_params = NULL; 771 } 772 } 773 774 if (priv->cls_tbl) { 775 pp2_cls_tbl_deinit(priv->cls_tbl); 776 priv->cls_tbl = NULL; 777 } 778 779 if (priv->qos_tbl) { 780 pp2_cls_qos_tbl_deinit(priv->qos_tbl); 781 priv->qos_tbl = NULL; 782 } 783 784 mrvl_flush_bpool(dev); 785 786 if (priv->ppio) { 787 pp2_ppio_deinit(priv->ppio); 788 priv->ppio = NULL; 789 } 790 791 /* policer must be released after ppio deinitialization */ 792 if (priv->default_policer) { 793 pp2_cls_plcr_deinit(priv->default_policer); 794 priv->default_policer = NULL; 795 } 796 } 797 798 /** 799 * DPDK callback to retrieve physical link information. 800 * 801 * @param dev 802 * Pointer to Ethernet device structure. 803 * @param wait_to_complete 804 * Wait for request completion (ignored). 805 * 806 * @return 807 * 0 on success, negative error value otherwise. 808 */ 809 static int 810 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 811 { 812 /* 813 * TODO 814 * once MUSDK provides necessary API use it here 815 */ 816 struct mrvl_priv *priv = dev->data->dev_private; 817 struct ethtool_cmd edata; 818 struct ifreq req; 819 int ret, fd, link_up; 820 821 if (!priv->ppio) 822 return -EPERM; 823 824 edata.cmd = ETHTOOL_GSET; 825 826 strcpy(req.ifr_name, dev->data->name); 827 req.ifr_data = (void *)&edata; 828 829 fd = socket(AF_INET, SOCK_DGRAM, 0); 830 if (fd == -1) 831 return -EFAULT; 832 833 ret = ioctl(fd, SIOCETHTOOL, &req); 834 if (ret == -1) { 835 close(fd); 836 return -EFAULT; 837 } 838 839 close(fd); 840 841 switch (ethtool_cmd_speed(&edata)) { 842 case SPEED_10: 843 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; 844 break; 845 case SPEED_100: 846 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; 847 break; 848 case SPEED_1000: 849 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; 850 break; 851 case SPEED_10000: 852 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; 853 break; 854 default: 855 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; 856 } 857 858 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : 859 ETH_LINK_HALF_DUPLEX; 860 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : 861 ETH_LINK_FIXED; 862 pp2_ppio_get_link_state(priv->ppio, &link_up); 863 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 864 865 return 0; 866 } 867 868 /** 869 * DPDK callback to enable promiscuous mode. 870 * 871 * @param dev 872 * Pointer to Ethernet device structure. 873 */ 874 static void 875 mrvl_promiscuous_enable(struct rte_eth_dev *dev) 876 { 877 struct mrvl_priv *priv = dev->data->dev_private; 878 int ret; 879 880 if (!priv->ppio) 881 return; 882 883 if (priv->isolated) 884 return; 885 886 ret = pp2_ppio_set_promisc(priv->ppio, 1); 887 if (ret) 888 MRVL_LOG(ERR, "Failed to enable promiscuous mode"); 889 } 890 891 /** 892 * DPDK callback to enable allmulti mode. 893 * 894 * @param dev 895 * Pointer to Ethernet device structure. 896 */ 897 static void 898 mrvl_allmulticast_enable(struct rte_eth_dev *dev) 899 { 900 struct mrvl_priv *priv = dev->data->dev_private; 901 int ret; 902 903 if (!priv->ppio) 904 return; 905 906 if (priv->isolated) 907 return; 908 909 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); 910 if (ret) 911 MRVL_LOG(ERR, "Failed enable all-multicast mode"); 912 } 913 914 /** 915 * DPDK callback to disable promiscuous mode. 916 * 917 * @param dev 918 * Pointer to Ethernet device structure. 919 */ 920 static void 921 mrvl_promiscuous_disable(struct rte_eth_dev *dev) 922 { 923 struct mrvl_priv *priv = dev->data->dev_private; 924 int ret; 925 926 if (!priv->ppio) 927 return; 928 929 ret = pp2_ppio_set_promisc(priv->ppio, 0); 930 if (ret) 931 MRVL_LOG(ERR, "Failed to disable promiscuous mode"); 932 } 933 934 /** 935 * DPDK callback to disable allmulticast mode. 936 * 937 * @param dev 938 * Pointer to Ethernet device structure. 939 */ 940 static void 941 mrvl_allmulticast_disable(struct rte_eth_dev *dev) 942 { 943 struct mrvl_priv *priv = dev->data->dev_private; 944 int ret; 945 946 if (!priv->ppio) 947 return; 948 949 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); 950 if (ret) 951 MRVL_LOG(ERR, "Failed to disable all-multicast mode"); 952 } 953 954 /** 955 * DPDK callback to remove a MAC address. 956 * 957 * @param dev 958 * Pointer to Ethernet device structure. 959 * @param index 960 * MAC address index. 961 */ 962 static void 963 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 964 { 965 struct mrvl_priv *priv = dev->data->dev_private; 966 char buf[ETHER_ADDR_FMT_SIZE]; 967 int ret; 968 969 if (!priv->ppio) 970 return; 971 972 if (priv->isolated) 973 return; 974 975 ret = pp2_ppio_remove_mac_addr(priv->ppio, 976 dev->data->mac_addrs[index].addr_bytes); 977 if (ret) { 978 ether_format_addr(buf, sizeof(buf), 979 &dev->data->mac_addrs[index]); 980 MRVL_LOG(ERR, "Failed to remove mac %s", buf); 981 } 982 } 983 984 /** 985 * DPDK callback to add a MAC address. 986 * 987 * @param dev 988 * Pointer to Ethernet device structure. 989 * @param mac_addr 990 * MAC address to register. 991 * @param index 992 * MAC address index. 993 * @param vmdq 994 * VMDq pool index to associate address with (unused). 995 * 996 * @return 997 * 0 on success, negative error value otherwise. 998 */ 999 static int 1000 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 1001 uint32_t index, uint32_t vmdq __rte_unused) 1002 { 1003 struct mrvl_priv *priv = dev->data->dev_private; 1004 char buf[ETHER_ADDR_FMT_SIZE]; 1005 int ret; 1006 1007 if (priv->isolated) 1008 return -ENOTSUP; 1009 1010 if (index == 0) 1011 /* For setting index 0, mrvl_mac_addr_set() should be used.*/ 1012 return -1; 1013 1014 if (!priv->ppio) 1015 return 0; 1016 1017 /* 1018 * Maximum number of uc addresses can be tuned via kernel module mvpp2x 1019 * parameter uc_filter_max. Maximum number of mc addresses is then 1020 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and 1021 * 21 respectively. 1022 * 1023 * If more than uc_filter_max uc addresses were added to filter list 1024 * then NIC will switch to promiscuous mode automatically. 1025 * 1026 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses 1027 * were added to filter list then NIC will switch to all-multicast mode 1028 * automatically. 1029 */ 1030 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); 1031 if (ret) { 1032 ether_format_addr(buf, sizeof(buf), mac_addr); 1033 MRVL_LOG(ERR, "Failed to add mac %s", buf); 1034 return -1; 1035 } 1036 1037 return 0; 1038 } 1039 1040 /** 1041 * DPDK callback to set the primary MAC address. 1042 * 1043 * @param dev 1044 * Pointer to Ethernet device structure. 1045 * @param mac_addr 1046 * MAC address to register. 1047 * 1048 * @return 1049 * 0 on success, negative error value otherwise. 1050 */ 1051 static int 1052 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 1053 { 1054 struct mrvl_priv *priv = dev->data->dev_private; 1055 int ret; 1056 1057 if (!priv->ppio) 1058 return 0; 1059 1060 if (priv->isolated) 1061 return -ENOTSUP; 1062 1063 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); 1064 if (ret) { 1065 char buf[ETHER_ADDR_FMT_SIZE]; 1066 ether_format_addr(buf, sizeof(buf), mac_addr); 1067 MRVL_LOG(ERR, "Failed to set mac to %s", buf); 1068 } 1069 1070 return ret; 1071 } 1072 1073 /** 1074 * DPDK callback to get device statistics. 1075 * 1076 * @param dev 1077 * Pointer to Ethernet device structure. 1078 * @param stats 1079 * Stats structure output buffer. 1080 * 1081 * @return 1082 * 0 on success, negative error value otherwise. 1083 */ 1084 static int 1085 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 1086 { 1087 struct mrvl_priv *priv = dev->data->dev_private; 1088 struct pp2_ppio_statistics ppio_stats; 1089 uint64_t drop_mac = 0; 1090 unsigned int i, idx, ret; 1091 1092 if (!priv->ppio) 1093 return -EPERM; 1094 1095 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1096 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1097 struct pp2_ppio_inq_statistics rx_stats; 1098 1099 if (!rxq) 1100 continue; 1101 1102 idx = rxq->queue_id; 1103 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1104 MRVL_LOG(ERR, 1105 "rx queue %d stats out of range (0 - %d)", 1106 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1107 continue; 1108 } 1109 1110 ret = pp2_ppio_inq_get_statistics(priv->ppio, 1111 priv->rxq_map[idx].tc, 1112 priv->rxq_map[idx].inq, 1113 &rx_stats, 0); 1114 if (unlikely(ret)) { 1115 MRVL_LOG(ERR, 1116 "Failed to update rx queue %d stats", idx); 1117 break; 1118 } 1119 1120 stats->q_ibytes[idx] = rxq->bytes_recv; 1121 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; 1122 stats->q_errors[idx] = rx_stats.drop_early + 1123 rx_stats.drop_fullq + 1124 rx_stats.drop_bm + 1125 rxq->drop_mac; 1126 stats->ibytes += rxq->bytes_recv; 1127 drop_mac += rxq->drop_mac; 1128 } 1129 1130 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1131 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1132 struct pp2_ppio_outq_statistics tx_stats; 1133 1134 if (!txq) 1135 continue; 1136 1137 idx = txq->queue_id; 1138 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1139 MRVL_LOG(ERR, 1140 "tx queue %d stats out of range (0 - %d)", 1141 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1142 } 1143 1144 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, 1145 &tx_stats, 0); 1146 if (unlikely(ret)) { 1147 MRVL_LOG(ERR, 1148 "Failed to update tx queue %d stats", idx); 1149 break; 1150 } 1151 1152 stats->q_opackets[idx] = tx_stats.deq_desc; 1153 stats->q_obytes[idx] = txq->bytes_sent; 1154 stats->obytes += txq->bytes_sent; 1155 } 1156 1157 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1158 if (unlikely(ret)) { 1159 MRVL_LOG(ERR, "Failed to update port statistics"); 1160 return ret; 1161 } 1162 1163 stats->ipackets += ppio_stats.rx_packets - drop_mac; 1164 stats->opackets += ppio_stats.tx_packets; 1165 stats->imissed += ppio_stats.rx_fullq_dropped + 1166 ppio_stats.rx_bm_dropped + 1167 ppio_stats.rx_early_dropped + 1168 ppio_stats.rx_fifo_dropped + 1169 ppio_stats.rx_cls_dropped; 1170 stats->ierrors = drop_mac; 1171 1172 return 0; 1173 } 1174 1175 /** 1176 * DPDK callback to clear device statistics. 1177 * 1178 * @param dev 1179 * Pointer to Ethernet device structure. 1180 */ 1181 static void 1182 mrvl_stats_reset(struct rte_eth_dev *dev) 1183 { 1184 struct mrvl_priv *priv = dev->data->dev_private; 1185 int i; 1186 1187 if (!priv->ppio) 1188 return; 1189 1190 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1191 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1192 1193 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, 1194 priv->rxq_map[i].inq, NULL, 1); 1195 rxq->bytes_recv = 0; 1196 rxq->drop_mac = 0; 1197 } 1198 1199 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1200 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1201 1202 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); 1203 txq->bytes_sent = 0; 1204 } 1205 1206 pp2_ppio_get_statistics(priv->ppio, NULL, 1); 1207 } 1208 1209 /** 1210 * DPDK callback to get extended statistics. 1211 * 1212 * @param dev 1213 * Pointer to Ethernet device structure. 1214 * @param stats 1215 * Pointer to xstats table. 1216 * @param n 1217 * Number of entries in xstats table. 1218 * @return 1219 * Negative value on error, number of read xstats otherwise. 1220 */ 1221 static int 1222 mrvl_xstats_get(struct rte_eth_dev *dev, 1223 struct rte_eth_xstat *stats, unsigned int n) 1224 { 1225 struct mrvl_priv *priv = dev->data->dev_private; 1226 struct pp2_ppio_statistics ppio_stats; 1227 unsigned int i; 1228 1229 if (!stats) 1230 return 0; 1231 1232 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1233 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { 1234 uint64_t val; 1235 1236 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) 1237 val = *(uint32_t *)((uint8_t *)&ppio_stats + 1238 mrvl_xstats_tbl[i].offset); 1239 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) 1240 val = *(uint64_t *)((uint8_t *)&ppio_stats + 1241 mrvl_xstats_tbl[i].offset); 1242 else 1243 return -EINVAL; 1244 1245 stats[i].id = i; 1246 stats[i].value = val; 1247 } 1248 1249 return n; 1250 } 1251 1252 /** 1253 * DPDK callback to reset extended statistics. 1254 * 1255 * @param dev 1256 * Pointer to Ethernet device structure. 1257 */ 1258 static void 1259 mrvl_xstats_reset(struct rte_eth_dev *dev) 1260 { 1261 mrvl_stats_reset(dev); 1262 } 1263 1264 /** 1265 * DPDK callback to get extended statistics names. 1266 * 1267 * @param dev (unused) 1268 * Pointer to Ethernet device structure. 1269 * @param xstats_names 1270 * Pointer to xstats names table. 1271 * @param size 1272 * Size of the xstats names table. 1273 * @return 1274 * Number of read names. 1275 */ 1276 static int 1277 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, 1278 struct rte_eth_xstat_name *xstats_names, 1279 unsigned int size) 1280 { 1281 unsigned int i; 1282 1283 if (!xstats_names) 1284 return RTE_DIM(mrvl_xstats_tbl); 1285 1286 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) 1287 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s", 1288 mrvl_xstats_tbl[i].name); 1289 1290 return size; 1291 } 1292 1293 /** 1294 * DPDK callback to get information about the device. 1295 * 1296 * @param dev 1297 * Pointer to Ethernet device structure (unused). 1298 * @param info 1299 * Info structure output buffer. 1300 */ 1301 static void 1302 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, 1303 struct rte_eth_dev_info *info) 1304 { 1305 info->speed_capa = ETH_LINK_SPEED_10M | 1306 ETH_LINK_SPEED_100M | 1307 ETH_LINK_SPEED_1G | 1308 ETH_LINK_SPEED_10G; 1309 1310 info->max_rx_queues = MRVL_PP2_RXQ_MAX; 1311 info->max_tx_queues = MRVL_PP2_TXQ_MAX; 1312 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; 1313 1314 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; 1315 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; 1316 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; 1317 1318 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; 1319 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; 1320 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; 1321 1322 info->rx_offload_capa = MRVL_RX_OFFLOADS; 1323 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; 1324 1325 info->tx_offload_capa = MRVL_TX_OFFLOADS; 1326 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; 1327 1328 info->flow_type_rss_offloads = ETH_RSS_IPV4 | 1329 ETH_RSS_NONFRAG_IPV4_TCP | 1330 ETH_RSS_NONFRAG_IPV4_UDP; 1331 1332 /* By default packets are dropped if no descriptors are available */ 1333 info->default_rxconf.rx_drop_en = 1; 1334 1335 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; 1336 } 1337 1338 /** 1339 * Return supported packet types. 1340 * 1341 * @param dev 1342 * Pointer to Ethernet device structure (unused). 1343 * 1344 * @return 1345 * Const pointer to the table with supported packet types. 1346 */ 1347 static const uint32_t * 1348 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) 1349 { 1350 static const uint32_t ptypes[] = { 1351 RTE_PTYPE_L2_ETHER, 1352 RTE_PTYPE_L2_ETHER_VLAN, 1353 RTE_PTYPE_L2_ETHER_QINQ, 1354 RTE_PTYPE_L3_IPV4, 1355 RTE_PTYPE_L3_IPV4_EXT, 1356 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 1357 RTE_PTYPE_L3_IPV6, 1358 RTE_PTYPE_L3_IPV6_EXT, 1359 RTE_PTYPE_L2_ETHER_ARP, 1360 RTE_PTYPE_L4_TCP, 1361 RTE_PTYPE_L4_UDP 1362 }; 1363 1364 return ptypes; 1365 } 1366 1367 /** 1368 * DPDK callback to get information about specific receive queue. 1369 * 1370 * @param dev 1371 * Pointer to Ethernet device structure. 1372 * @param rx_queue_id 1373 * Receive queue index. 1374 * @param qinfo 1375 * Receive queue information structure. 1376 */ 1377 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1378 struct rte_eth_rxq_info *qinfo) 1379 { 1380 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; 1381 struct mrvl_priv *priv = dev->data->dev_private; 1382 int inq = priv->rxq_map[rx_queue_id].inq; 1383 int tc = priv->rxq_map[rx_queue_id].tc; 1384 struct pp2_ppio_tc_params *tc_params = 1385 &priv->ppio_params.inqs_params.tcs_params[tc]; 1386 1387 qinfo->mp = q->mp; 1388 qinfo->nb_desc = tc_params->inqs_params[inq].size; 1389 } 1390 1391 /** 1392 * DPDK callback to get information about specific transmit queue. 1393 * 1394 * @param dev 1395 * Pointer to Ethernet device structure. 1396 * @param tx_queue_id 1397 * Transmit queue index. 1398 * @param qinfo 1399 * Transmit queue information structure. 1400 */ 1401 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1402 struct rte_eth_txq_info *qinfo) 1403 { 1404 struct mrvl_priv *priv = dev->data->dev_private; 1405 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; 1406 1407 qinfo->nb_desc = 1408 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; 1409 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1410 } 1411 1412 /** 1413 * DPDK callback to Configure a VLAN filter. 1414 * 1415 * @param dev 1416 * Pointer to Ethernet device structure. 1417 * @param vlan_id 1418 * VLAN ID to filter. 1419 * @param on 1420 * Toggle filter. 1421 * 1422 * @return 1423 * 0 on success, negative error value otherwise. 1424 */ 1425 static int 1426 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1427 { 1428 struct mrvl_priv *priv = dev->data->dev_private; 1429 1430 if (!priv->ppio) 1431 return -EPERM; 1432 1433 if (priv->isolated) 1434 return -ENOTSUP; 1435 1436 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : 1437 pp2_ppio_remove_vlan(priv->ppio, vlan_id); 1438 } 1439 1440 /** 1441 * Release buffers to hardware bpool (buffer-pool) 1442 * 1443 * @param rxq 1444 * Receive queue pointer. 1445 * @param num 1446 * Number of buffers to release to bpool. 1447 * 1448 * @return 1449 * 0 on success, negative error value otherwise. 1450 */ 1451 static int 1452 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) 1453 { 1454 struct buff_release_entry entries[MRVL_PP2_RXD_MAX]; 1455 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX]; 1456 int i, ret; 1457 unsigned int core_id; 1458 struct pp2_hif *hif; 1459 struct pp2_bpool *bpool; 1460 1461 core_id = rte_lcore_id(); 1462 if (core_id == LCORE_ID_ANY) 1463 core_id = 0; 1464 1465 hif = mrvl_get_hif(rxq->priv, core_id); 1466 if (!hif) 1467 return -1; 1468 1469 bpool = rxq->priv->bpool; 1470 1471 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); 1472 if (ret) 1473 return ret; 1474 1475 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) 1476 cookie_addr_high = 1477 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; 1478 1479 for (i = 0; i < num; i++) { 1480 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) 1481 != cookie_addr_high) { 1482 MRVL_LOG(ERR, 1483 "mbuf virtual addr high 0x%lx out of range", 1484 (uint64_t)mbufs[i] >> 32); 1485 goto out; 1486 } 1487 1488 entries[i].buff.addr = 1489 rte_mbuf_data_iova_default(mbufs[i]); 1490 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i]; 1491 entries[i].bpool = bpool; 1492 } 1493 1494 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); 1495 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; 1496 1497 if (i != num) 1498 goto out; 1499 1500 return 0; 1501 out: 1502 for (; i < num; i++) 1503 rte_pktmbuf_free(mbufs[i]); 1504 1505 return -1; 1506 } 1507 1508 /** 1509 * DPDK callback to configure the receive queue. 1510 * 1511 * @param dev 1512 * Pointer to Ethernet device structure. 1513 * @param idx 1514 * RX queue index. 1515 * @param desc 1516 * Number of descriptors to configure in queue. 1517 * @param socket 1518 * NUMA socket on which memory must be allocated. 1519 * @param conf 1520 * Thresholds parameters. 1521 * @param mp 1522 * Memory pool for buffer allocations. 1523 * 1524 * @return 1525 * 0 on success, negative error value otherwise. 1526 */ 1527 static int 1528 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1529 unsigned int socket, 1530 const struct rte_eth_rxconf *conf, 1531 struct rte_mempool *mp) 1532 { 1533 struct mrvl_priv *priv = dev->data->dev_private; 1534 struct mrvl_rxq *rxq; 1535 uint32_t min_size, 1536 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 1537 int ret, tc, inq; 1538 uint64_t offloads; 1539 1540 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; 1541 1542 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { 1543 /* 1544 * Unknown TC mapping, mapping will not have a correct queue. 1545 */ 1546 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu", 1547 idx, priv->ppio_id); 1548 return -EFAULT; 1549 } 1550 1551 min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM - 1552 MRVL_PKT_EFFEC_OFFS; 1553 if (min_size < max_rx_pkt_len) { 1554 MRVL_LOG(ERR, 1555 "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.", 1556 max_rx_pkt_len + RTE_PKTMBUF_HEADROOM + 1557 MRVL_PKT_EFFEC_OFFS, 1558 max_rx_pkt_len); 1559 return -EINVAL; 1560 } 1561 1562 if (dev->data->rx_queues[idx]) { 1563 rte_free(dev->data->rx_queues[idx]); 1564 dev->data->rx_queues[idx] = NULL; 1565 } 1566 1567 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); 1568 if (!rxq) 1569 return -ENOMEM; 1570 1571 rxq->priv = priv; 1572 rxq->mp = mp; 1573 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; 1574 rxq->queue_id = idx; 1575 rxq->port_id = dev->data->port_id; 1576 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; 1577 1578 tc = priv->rxq_map[rxq->queue_id].tc, 1579 inq = priv->rxq_map[rxq->queue_id].inq; 1580 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = 1581 desc; 1582 1583 ret = mrvl_fill_bpool(rxq, desc); 1584 if (ret) { 1585 rte_free(rxq); 1586 return ret; 1587 } 1588 1589 priv->bpool_init_size += desc; 1590 1591 dev->data->rx_queues[idx] = rxq; 1592 1593 return 0; 1594 } 1595 1596 /** 1597 * DPDK callback to release the receive queue. 1598 * 1599 * @param rxq 1600 * Generic receive queue pointer. 1601 */ 1602 static void 1603 mrvl_rx_queue_release(void *rxq) 1604 { 1605 struct mrvl_rxq *q = rxq; 1606 struct pp2_ppio_tc_params *tc_params; 1607 int i, num, tc, inq; 1608 struct pp2_hif *hif; 1609 unsigned int core_id = rte_lcore_id(); 1610 1611 if (core_id == LCORE_ID_ANY) 1612 core_id = 0; 1613 1614 if (!q) 1615 return; 1616 1617 hif = mrvl_get_hif(q->priv, core_id); 1618 1619 if (!hif) 1620 return; 1621 1622 tc = q->priv->rxq_map[q->queue_id].tc; 1623 inq = q->priv->rxq_map[q->queue_id].inq; 1624 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; 1625 num = tc_params->inqs_params[inq].size; 1626 for (i = 0; i < num; i++) { 1627 struct pp2_buff_inf inf; 1628 uint64_t addr; 1629 1630 pp2_bpool_get_buff(hif, q->priv->bpool, &inf); 1631 addr = cookie_addr_high | inf.cookie; 1632 rte_pktmbuf_free((struct rte_mbuf *)addr); 1633 } 1634 1635 rte_free(q); 1636 } 1637 1638 /** 1639 * DPDK callback to configure the transmit queue. 1640 * 1641 * @param dev 1642 * Pointer to Ethernet device structure. 1643 * @param idx 1644 * Transmit queue index. 1645 * @param desc 1646 * Number of descriptors to configure in the queue. 1647 * @param socket 1648 * NUMA socket on which memory must be allocated. 1649 * @param conf 1650 * Tx queue configuration parameters. 1651 * 1652 * @return 1653 * 0 on success, negative error value otherwise. 1654 */ 1655 static int 1656 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1657 unsigned int socket, 1658 const struct rte_eth_txconf *conf) 1659 { 1660 struct mrvl_priv *priv = dev->data->dev_private; 1661 struct mrvl_txq *txq; 1662 1663 if (dev->data->tx_queues[idx]) { 1664 rte_free(dev->data->tx_queues[idx]); 1665 dev->data->tx_queues[idx] = NULL; 1666 } 1667 1668 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); 1669 if (!txq) 1670 return -ENOMEM; 1671 1672 txq->priv = priv; 1673 txq->queue_id = idx; 1674 txq->port_id = dev->data->port_id; 1675 txq->tx_deferred_start = conf->tx_deferred_start; 1676 dev->data->tx_queues[idx] = txq; 1677 1678 priv->ppio_params.outqs_params.outqs_params[idx].size = desc; 1679 1680 return 0; 1681 } 1682 1683 /** 1684 * DPDK callback to release the transmit queue. 1685 * 1686 * @param txq 1687 * Generic transmit queue pointer. 1688 */ 1689 static void 1690 mrvl_tx_queue_release(void *txq) 1691 { 1692 struct mrvl_txq *q = txq; 1693 1694 if (!q) 1695 return; 1696 1697 rte_free(q); 1698 } 1699 1700 /** 1701 * DPDK callback to get flow control configuration. 1702 * 1703 * @param dev 1704 * Pointer to Ethernet device structure. 1705 * @param fc_conf 1706 * Pointer to the flow control configuration. 1707 * 1708 * @return 1709 * 0 on success, negative error value otherwise. 1710 */ 1711 static int 1712 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1713 { 1714 struct mrvl_priv *priv = dev->data->dev_private; 1715 int ret, en; 1716 1717 if (!priv) 1718 return -EPERM; 1719 1720 ret = pp2_ppio_get_rx_pause(priv->ppio, &en); 1721 if (ret) { 1722 MRVL_LOG(ERR, "Failed to read rx pause state"); 1723 return ret; 1724 } 1725 1726 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; 1727 1728 return 0; 1729 } 1730 1731 /** 1732 * DPDK callback to set flow control configuration. 1733 * 1734 * @param dev 1735 * Pointer to Ethernet device structure. 1736 * @param fc_conf 1737 * Pointer to the flow control configuration. 1738 * 1739 * @return 1740 * 0 on success, negative error value otherwise. 1741 */ 1742 static int 1743 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1744 { 1745 struct mrvl_priv *priv = dev->data->dev_private; 1746 1747 if (!priv) 1748 return -EPERM; 1749 1750 if (fc_conf->high_water || 1751 fc_conf->low_water || 1752 fc_conf->pause_time || 1753 fc_conf->mac_ctrl_frame_fwd || 1754 fc_conf->autoneg) { 1755 MRVL_LOG(ERR, "Flowctrl parameter is not supported"); 1756 1757 return -EINVAL; 1758 } 1759 1760 if (fc_conf->mode == RTE_FC_NONE || 1761 fc_conf->mode == RTE_FC_RX_PAUSE) { 1762 int ret, en; 1763 1764 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1; 1765 ret = pp2_ppio_set_rx_pause(priv->ppio, en); 1766 if (ret) 1767 MRVL_LOG(ERR, 1768 "Failed to change flowctrl on RX side"); 1769 1770 return ret; 1771 } 1772 1773 return 0; 1774 } 1775 1776 /** 1777 * Update RSS hash configuration 1778 * 1779 * @param dev 1780 * Pointer to Ethernet device structure. 1781 * @param rss_conf 1782 * Pointer to RSS configuration. 1783 * 1784 * @return 1785 * 0 on success, negative error value otherwise. 1786 */ 1787 static int 1788 mrvl_rss_hash_update(struct rte_eth_dev *dev, 1789 struct rte_eth_rss_conf *rss_conf) 1790 { 1791 struct mrvl_priv *priv = dev->data->dev_private; 1792 1793 if (priv->isolated) 1794 return -ENOTSUP; 1795 1796 return mrvl_configure_rss(priv, rss_conf); 1797 } 1798 1799 /** 1800 * DPDK callback to get RSS hash configuration. 1801 * 1802 * @param dev 1803 * Pointer to Ethernet device structure. 1804 * @rss_conf 1805 * Pointer to RSS configuration. 1806 * 1807 * @return 1808 * Always 0. 1809 */ 1810 static int 1811 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, 1812 struct rte_eth_rss_conf *rss_conf) 1813 { 1814 struct mrvl_priv *priv = dev->data->dev_private; 1815 enum pp2_ppio_hash_type hash_type = 1816 priv->ppio_params.inqs_params.hash_type; 1817 1818 rss_conf->rss_key = NULL; 1819 1820 if (hash_type == PP2_PPIO_HASH_T_NONE) 1821 rss_conf->rss_hf = 0; 1822 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) 1823 rss_conf->rss_hf = ETH_RSS_IPV4; 1824 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) 1825 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; 1826 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) 1827 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; 1828 1829 return 0; 1830 } 1831 1832 /** 1833 * DPDK callback to get rte_flow callbacks. 1834 * 1835 * @param dev 1836 * Pointer to the device structure. 1837 * @param filer_type 1838 * Flow filter type. 1839 * @param filter_op 1840 * Flow filter operation. 1841 * @param arg 1842 * Pointer to pass the flow ops. 1843 * 1844 * @return 1845 * 0 on success, negative error value otherwise. 1846 */ 1847 static int 1848 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused, 1849 enum rte_filter_type filter_type, 1850 enum rte_filter_op filter_op, void *arg) 1851 { 1852 switch (filter_type) { 1853 case RTE_ETH_FILTER_GENERIC: 1854 if (filter_op != RTE_ETH_FILTER_GET) 1855 return -EINVAL; 1856 *(const void **)arg = &mrvl_flow_ops; 1857 return 0; 1858 default: 1859 MRVL_LOG(WARNING, "Filter type (%d) not supported", 1860 filter_type); 1861 return -EINVAL; 1862 } 1863 } 1864 1865 /** 1866 * DPDK callback to get rte_mtr callbacks. 1867 * 1868 * @param dev 1869 * Pointer to the device structure. 1870 * @param ops 1871 * Pointer to pass the mtr ops. 1872 * 1873 * @return 1874 * Always 0. 1875 */ 1876 static int 1877 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 1878 { 1879 *(const void **)ops = &mrvl_mtr_ops; 1880 1881 return 0; 1882 } 1883 1884 static const struct eth_dev_ops mrvl_ops = { 1885 .dev_configure = mrvl_dev_configure, 1886 .dev_start = mrvl_dev_start, 1887 .dev_stop = mrvl_dev_stop, 1888 .dev_set_link_up = mrvl_dev_set_link_up, 1889 .dev_set_link_down = mrvl_dev_set_link_down, 1890 .dev_close = mrvl_dev_close, 1891 .link_update = mrvl_link_update, 1892 .promiscuous_enable = mrvl_promiscuous_enable, 1893 .allmulticast_enable = mrvl_allmulticast_enable, 1894 .promiscuous_disable = mrvl_promiscuous_disable, 1895 .allmulticast_disable = mrvl_allmulticast_disable, 1896 .mac_addr_remove = mrvl_mac_addr_remove, 1897 .mac_addr_add = mrvl_mac_addr_add, 1898 .mac_addr_set = mrvl_mac_addr_set, 1899 .mtu_set = mrvl_mtu_set, 1900 .stats_get = mrvl_stats_get, 1901 .stats_reset = mrvl_stats_reset, 1902 .xstats_get = mrvl_xstats_get, 1903 .xstats_reset = mrvl_xstats_reset, 1904 .xstats_get_names = mrvl_xstats_get_names, 1905 .dev_infos_get = mrvl_dev_infos_get, 1906 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, 1907 .rxq_info_get = mrvl_rxq_info_get, 1908 .txq_info_get = mrvl_txq_info_get, 1909 .vlan_filter_set = mrvl_vlan_filter_set, 1910 .tx_queue_start = mrvl_tx_queue_start, 1911 .tx_queue_stop = mrvl_tx_queue_stop, 1912 .rx_queue_setup = mrvl_rx_queue_setup, 1913 .rx_queue_release = mrvl_rx_queue_release, 1914 .tx_queue_setup = mrvl_tx_queue_setup, 1915 .tx_queue_release = mrvl_tx_queue_release, 1916 .flow_ctrl_get = mrvl_flow_ctrl_get, 1917 .flow_ctrl_set = mrvl_flow_ctrl_set, 1918 .rss_hash_update = mrvl_rss_hash_update, 1919 .rss_hash_conf_get = mrvl_rss_hash_conf_get, 1920 .filter_ctrl = mrvl_eth_filter_ctrl, 1921 .mtr_ops_get = mrvl_mtr_ops_get, 1922 }; 1923 1924 /** 1925 * Return packet type information and l3/l4 offsets. 1926 * 1927 * @param desc 1928 * Pointer to the received packet descriptor. 1929 * @param l3_offset 1930 * l3 packet offset. 1931 * @param l4_offset 1932 * l4 packet offset. 1933 * 1934 * @return 1935 * Packet type information. 1936 */ 1937 static inline uint64_t 1938 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, 1939 uint8_t *l3_offset, uint8_t *l4_offset) 1940 { 1941 enum pp2_inq_l3_type l3_type; 1942 enum pp2_inq_l4_type l4_type; 1943 enum pp2_inq_vlan_tag vlan_tag; 1944 uint64_t packet_type; 1945 1946 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); 1947 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); 1948 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag); 1949 1950 packet_type = RTE_PTYPE_L2_ETHER; 1951 1952 switch (vlan_tag) { 1953 case PP2_INQ_VLAN_TAG_SINGLE: 1954 packet_type |= RTE_PTYPE_L2_ETHER_VLAN; 1955 break; 1956 case PP2_INQ_VLAN_TAG_DOUBLE: 1957 case PP2_INQ_VLAN_TAG_TRIPLE: 1958 packet_type |= RTE_PTYPE_L2_ETHER_QINQ; 1959 break; 1960 default: 1961 break; 1962 } 1963 1964 switch (l3_type) { 1965 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: 1966 packet_type |= RTE_PTYPE_L3_IPV4; 1967 break; 1968 case PP2_INQ_L3_TYPE_IPV4_OK: 1969 packet_type |= RTE_PTYPE_L3_IPV4_EXT; 1970 break; 1971 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: 1972 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 1973 break; 1974 case PP2_INQ_L3_TYPE_IPV6_NO_EXT: 1975 packet_type |= RTE_PTYPE_L3_IPV6; 1976 break; 1977 case PP2_INQ_L3_TYPE_IPV6_EXT: 1978 packet_type |= RTE_PTYPE_L3_IPV6_EXT; 1979 break; 1980 case PP2_INQ_L3_TYPE_ARP: 1981 packet_type |= RTE_PTYPE_L2_ETHER_ARP; 1982 /* 1983 * In case of ARP l4_offset is set to wrong value. 1984 * Set it to proper one so that later on mbuf->l3_len can be 1985 * calculated subtracting l4_offset and l3_offset. 1986 */ 1987 *l4_offset = *l3_offset + MRVL_ARP_LENGTH; 1988 break; 1989 default: 1990 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type"); 1991 break; 1992 } 1993 1994 switch (l4_type) { 1995 case PP2_INQ_L4_TYPE_TCP: 1996 packet_type |= RTE_PTYPE_L4_TCP; 1997 break; 1998 case PP2_INQ_L4_TYPE_UDP: 1999 packet_type |= RTE_PTYPE_L4_UDP; 2000 break; 2001 default: 2002 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type"); 2003 break; 2004 } 2005 2006 return packet_type; 2007 } 2008 2009 /** 2010 * Get offload information from the received packet descriptor. 2011 * 2012 * @param desc 2013 * Pointer to the received packet descriptor. 2014 * 2015 * @return 2016 * Mbuf offload flags. 2017 */ 2018 static inline uint64_t 2019 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) 2020 { 2021 uint64_t flags; 2022 enum pp2_inq_desc_status status; 2023 2024 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); 2025 if (unlikely(status != PP2_DESC_ERR_OK)) 2026 flags = PKT_RX_IP_CKSUM_BAD; 2027 else 2028 flags = PKT_RX_IP_CKSUM_GOOD; 2029 2030 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); 2031 if (unlikely(status != PP2_DESC_ERR_OK)) 2032 flags |= PKT_RX_L4_CKSUM_BAD; 2033 else 2034 flags |= PKT_RX_L4_CKSUM_GOOD; 2035 2036 return flags; 2037 } 2038 2039 /** 2040 * DPDK callback for receive. 2041 * 2042 * @param rxq 2043 * Generic pointer to the receive queue. 2044 * @param rx_pkts 2045 * Array to store received packets. 2046 * @param nb_pkts 2047 * Maximum number of packets in array. 2048 * 2049 * @return 2050 * Number of packets successfully received. 2051 */ 2052 static uint16_t 2053 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 2054 { 2055 struct mrvl_rxq *q = rxq; 2056 struct pp2_ppio_desc descs[nb_pkts]; 2057 struct pp2_bpool *bpool; 2058 int i, ret, rx_done = 0; 2059 int num; 2060 struct pp2_hif *hif; 2061 unsigned int core_id = rte_lcore_id(); 2062 2063 hif = mrvl_get_hif(q->priv, core_id); 2064 2065 if (unlikely(!q->priv->ppio || !hif)) 2066 return 0; 2067 2068 bpool = q->priv->bpool; 2069 2070 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, 2071 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); 2072 if (unlikely(ret < 0)) { 2073 MRVL_LOG(ERR, "Failed to receive packets"); 2074 return 0; 2075 } 2076 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; 2077 2078 for (i = 0; i < nb_pkts; i++) { 2079 struct rte_mbuf *mbuf; 2080 uint8_t l3_offset, l4_offset; 2081 enum pp2_inq_desc_status status; 2082 uint64_t addr; 2083 2084 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2085 struct pp2_ppio_desc *pref_desc; 2086 u64 pref_addr; 2087 2088 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2089 pref_addr = cookie_addr_high | 2090 pp2_ppio_inq_desc_get_cookie(pref_desc); 2091 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); 2092 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); 2093 } 2094 2095 addr = cookie_addr_high | 2096 pp2_ppio_inq_desc_get_cookie(&descs[i]); 2097 mbuf = (struct rte_mbuf *)addr; 2098 rte_pktmbuf_reset(mbuf); 2099 2100 /* drop packet in case of mac, overrun or resource error */ 2101 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); 2102 if (unlikely(status != PP2_DESC_ERR_OK)) { 2103 struct pp2_buff_inf binf = { 2104 .addr = rte_mbuf_data_iova_default(mbuf), 2105 .cookie = (pp2_cookie_t)(uint64_t)mbuf, 2106 }; 2107 2108 pp2_bpool_put_buff(hif, bpool, &binf); 2109 mrvl_port_bpool_size 2110 [bpool->pp2_id][bpool->id][core_id]++; 2111 q->drop_mac++; 2112 continue; 2113 } 2114 2115 mbuf->data_off += MRVL_PKT_EFFEC_OFFS; 2116 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); 2117 mbuf->data_len = mbuf->pkt_len; 2118 mbuf->port = q->port_id; 2119 mbuf->packet_type = 2120 mrvl_desc_to_packet_type_and_offset(&descs[i], 2121 &l3_offset, 2122 &l4_offset); 2123 mbuf->l2_len = l3_offset; 2124 mbuf->l3_len = l4_offset - l3_offset; 2125 2126 if (likely(q->cksum_enabled)) 2127 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); 2128 2129 rx_pkts[rx_done++] = mbuf; 2130 q->bytes_recv += mbuf->pkt_len; 2131 } 2132 2133 if (rte_spinlock_trylock(&q->priv->lock) == 1) { 2134 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); 2135 2136 if (unlikely(num <= q->priv->bpool_min_size || 2137 (!rx_done && num < q->priv->bpool_init_size))) { 2138 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); 2139 if (ret) 2140 MRVL_LOG(ERR, "Failed to fill bpool"); 2141 } else if (unlikely(num > q->priv->bpool_max_size)) { 2142 int i; 2143 int pkt_to_remove = num - q->priv->bpool_init_size; 2144 struct rte_mbuf *mbuf; 2145 struct pp2_buff_inf buff; 2146 2147 MRVL_LOG(DEBUG, 2148 "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)", 2149 bpool->pp2_id, q->priv->ppio->port_id, 2150 bpool->id, pkt_to_remove, num, 2151 q->priv->bpool_init_size); 2152 2153 for (i = 0; i < pkt_to_remove; i++) { 2154 ret = pp2_bpool_get_buff(hif, bpool, &buff); 2155 if (ret) 2156 break; 2157 mbuf = (struct rte_mbuf *) 2158 (cookie_addr_high | buff.cookie); 2159 rte_pktmbuf_free(mbuf); 2160 } 2161 mrvl_port_bpool_size 2162 [bpool->pp2_id][bpool->id][core_id] -= i; 2163 } 2164 rte_spinlock_unlock(&q->priv->lock); 2165 } 2166 2167 return rx_done; 2168 } 2169 2170 /** 2171 * Prepare offload information. 2172 * 2173 * @param ol_flags 2174 * Offload flags. 2175 * @param packet_type 2176 * Packet type bitfield. 2177 * @param l3_type 2178 * Pointer to the pp2_ouq_l3_type structure. 2179 * @param l4_type 2180 * Pointer to the pp2_outq_l4_type structure. 2181 * @param gen_l3_cksum 2182 * Will be set to 1 in case l3 checksum is computed. 2183 * @param l4_cksum 2184 * Will be set to 1 in case l4 checksum is computed. 2185 * 2186 * @return 2187 * 0 on success, negative error value otherwise. 2188 */ 2189 static inline int 2190 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type, 2191 enum pp2_outq_l3_type *l3_type, 2192 enum pp2_outq_l4_type *l4_type, 2193 int *gen_l3_cksum, 2194 int *gen_l4_cksum) 2195 { 2196 /* 2197 * Based on ol_flags prepare information 2198 * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor 2199 * for offloading. 2200 */ 2201 if (ol_flags & PKT_TX_IPV4) { 2202 *l3_type = PP2_OUTQ_L3_TYPE_IPV4; 2203 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; 2204 } else if (ol_flags & PKT_TX_IPV6) { 2205 *l3_type = PP2_OUTQ_L3_TYPE_IPV6; 2206 /* no checksum for ipv6 header */ 2207 *gen_l3_cksum = 0; 2208 } else { 2209 /* if something different then stop processing */ 2210 return -1; 2211 } 2212 2213 ol_flags &= PKT_TX_L4_MASK; 2214 if ((packet_type & RTE_PTYPE_L4_TCP) && 2215 ol_flags == PKT_TX_TCP_CKSUM) { 2216 *l4_type = PP2_OUTQ_L4_TYPE_TCP; 2217 *gen_l4_cksum = 1; 2218 } else if ((packet_type & RTE_PTYPE_L4_UDP) && 2219 ol_flags == PKT_TX_UDP_CKSUM) { 2220 *l4_type = PP2_OUTQ_L4_TYPE_UDP; 2221 *gen_l4_cksum = 1; 2222 } else { 2223 *l4_type = PP2_OUTQ_L4_TYPE_OTHER; 2224 /* no checksum for other type */ 2225 *gen_l4_cksum = 0; 2226 } 2227 2228 return 0; 2229 } 2230 2231 /** 2232 * Release already sent buffers to bpool (buffer-pool). 2233 * 2234 * @param ppio 2235 * Pointer to the port structure. 2236 * @param hif 2237 * Pointer to the MUSDK hardware interface. 2238 * @param sq 2239 * Pointer to the shadow queue. 2240 * @param qid 2241 * Queue id number. 2242 * @param force 2243 * Force releasing packets. 2244 */ 2245 static inline void 2246 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, 2247 unsigned int core_id, struct mrvl_shadow_txq *sq, 2248 int qid, int force) 2249 { 2250 struct buff_release_entry *entry; 2251 uint16_t nb_done = 0, num = 0, skip_bufs = 0; 2252 int i; 2253 2254 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); 2255 2256 sq->num_to_release += nb_done; 2257 2258 if (likely(!force && 2259 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) 2260 return; 2261 2262 nb_done = sq->num_to_release; 2263 sq->num_to_release = 0; 2264 2265 for (i = 0; i < nb_done; i++) { 2266 entry = &sq->ent[sq->tail + num]; 2267 if (unlikely(!entry->buff.addr)) { 2268 MRVL_LOG(ERR, 2269 "Shadow memory @%d: cookie(%lx), pa(%lx)!", 2270 sq->tail, (u64)entry->buff.cookie, 2271 (u64)entry->buff.addr); 2272 skip_bufs = 1; 2273 goto skip; 2274 } 2275 2276 if (unlikely(!entry->bpool)) { 2277 struct rte_mbuf *mbuf; 2278 2279 mbuf = (struct rte_mbuf *) 2280 (cookie_addr_high | entry->buff.cookie); 2281 rte_pktmbuf_free(mbuf); 2282 skip_bufs = 1; 2283 goto skip; 2284 } 2285 2286 mrvl_port_bpool_size 2287 [entry->bpool->pp2_id][entry->bpool->id][core_id]++; 2288 num++; 2289 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) 2290 goto skip; 2291 continue; 2292 skip: 2293 if (likely(num)) 2294 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2295 num += skip_bufs; 2296 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2297 sq->size -= num; 2298 num = 0; 2299 skip_bufs = 0; 2300 } 2301 2302 if (likely(num)) { 2303 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2304 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2305 sq->size -= num; 2306 } 2307 } 2308 2309 /** 2310 * DPDK callback for transmit. 2311 * 2312 * @param txq 2313 * Generic pointer transmit queue. 2314 * @param tx_pkts 2315 * Packets to transmit. 2316 * @param nb_pkts 2317 * Number of packets in array. 2318 * 2319 * @return 2320 * Number of packets successfully transmitted. 2321 */ 2322 static uint16_t 2323 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 2324 { 2325 struct mrvl_txq *q = txq; 2326 struct mrvl_shadow_txq *sq; 2327 struct pp2_hif *hif; 2328 struct pp2_ppio_desc descs[nb_pkts]; 2329 unsigned int core_id = rte_lcore_id(); 2330 int i, ret, bytes_sent = 0; 2331 uint16_t num, sq_free_size; 2332 uint64_t addr; 2333 2334 hif = mrvl_get_hif(q->priv, core_id); 2335 sq = &q->shadow_txqs[core_id]; 2336 2337 if (unlikely(!q->priv->ppio || !hif)) 2338 return 0; 2339 2340 if (sq->size) 2341 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, 2342 sq, q->queue_id, 0); 2343 2344 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; 2345 if (unlikely(nb_pkts > sq_free_size)) { 2346 MRVL_LOG(DEBUG, 2347 "No room in shadow queue for %d packets! %d packets will be sent.", 2348 nb_pkts, sq_free_size); 2349 nb_pkts = sq_free_size; 2350 } 2351 2352 for (i = 0; i < nb_pkts; i++) { 2353 struct rte_mbuf *mbuf = tx_pkts[i]; 2354 int gen_l3_cksum, gen_l4_cksum; 2355 enum pp2_outq_l3_type l3_type; 2356 enum pp2_outq_l4_type l4_type; 2357 2358 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2359 struct rte_mbuf *pref_pkt_hdr; 2360 2361 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2362 rte_mbuf_prefetch_part1(pref_pkt_hdr); 2363 rte_mbuf_prefetch_part2(pref_pkt_hdr); 2364 } 2365 2366 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf; 2367 sq->ent[sq->head].buff.addr = 2368 rte_mbuf_data_iova_default(mbuf); 2369 sq->ent[sq->head].bpool = 2370 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS || 2371 mbuf->refcnt > 1)) ? NULL : 2372 mrvl_port_to_bpool_lookup[mbuf->port]; 2373 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; 2374 sq->size++; 2375 2376 pp2_ppio_outq_desc_reset(&descs[i]); 2377 pp2_ppio_outq_desc_set_phys_addr(&descs[i], 2378 rte_pktmbuf_iova(mbuf)); 2379 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0); 2380 pp2_ppio_outq_desc_set_pkt_len(&descs[i], 2381 rte_pktmbuf_pkt_len(mbuf)); 2382 2383 bytes_sent += rte_pktmbuf_pkt_len(mbuf); 2384 /* 2385 * in case unsupported ol_flags were passed 2386 * do not update descriptor offload information 2387 */ 2388 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, 2389 &l3_type, &l4_type, &gen_l3_cksum, 2390 &gen_l4_cksum); 2391 if (unlikely(ret)) 2392 continue; 2393 2394 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, 2395 mbuf->l2_len, 2396 mbuf->l2_len + mbuf->l3_len, 2397 gen_l3_cksum, gen_l4_cksum); 2398 } 2399 2400 num = nb_pkts; 2401 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); 2402 /* number of packets that were not sent */ 2403 if (unlikely(num > nb_pkts)) { 2404 for (i = nb_pkts; i < num; i++) { 2405 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & 2406 MRVL_PP2_TX_SHADOWQ_MASK; 2407 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie; 2408 bytes_sent -= 2409 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); 2410 } 2411 sq->size -= num - nb_pkts; 2412 } 2413 2414 q->bytes_sent += bytes_sent; 2415 2416 return nb_pkts; 2417 } 2418 2419 /** 2420 * Initialize packet processor. 2421 * 2422 * @return 2423 * 0 on success, negative error value otherwise. 2424 */ 2425 static int 2426 mrvl_init_pp2(void) 2427 { 2428 struct pp2_init_params init_params; 2429 2430 memset(&init_params, 0, sizeof(init_params)); 2431 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; 2432 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; 2433 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; 2434 2435 return pp2_init(&init_params); 2436 } 2437 2438 /** 2439 * Deinitialize packet processor. 2440 * 2441 * @return 2442 * 0 on success, negative error value otherwise. 2443 */ 2444 static void 2445 mrvl_deinit_pp2(void) 2446 { 2447 pp2_deinit(); 2448 } 2449 2450 /** 2451 * Create private device structure. 2452 * 2453 * @param dev_name 2454 * Pointer to the port name passed in the initialization parameters. 2455 * 2456 * @return 2457 * Pointer to the newly allocated private device structure. 2458 */ 2459 static struct mrvl_priv * 2460 mrvl_priv_create(const char *dev_name) 2461 { 2462 struct pp2_bpool_params bpool_params; 2463 char match[MRVL_MATCH_LEN]; 2464 struct mrvl_priv *priv; 2465 int ret, bpool_bit; 2466 2467 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); 2468 if (!priv) 2469 return NULL; 2470 2471 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, 2472 &priv->pp_id, &priv->ppio_id); 2473 if (ret) 2474 goto out_free_priv; 2475 2476 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], 2477 PP2_BPOOL_NUM_POOLS); 2478 if (bpool_bit < 0) 2479 goto out_free_priv; 2480 priv->bpool_bit = bpool_bit; 2481 2482 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, 2483 priv->bpool_bit); 2484 memset(&bpool_params, 0, sizeof(bpool_params)); 2485 bpool_params.match = match; 2486 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; 2487 ret = pp2_bpool_init(&bpool_params, &priv->bpool); 2488 if (ret) 2489 goto out_clear_bpool_bit; 2490 2491 priv->ppio_params.type = PP2_PPIO_T_NIC; 2492 rte_spinlock_init(&priv->lock); 2493 2494 return priv; 2495 out_clear_bpool_bit: 2496 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); 2497 out_free_priv: 2498 rte_free(priv); 2499 return NULL; 2500 } 2501 2502 /** 2503 * Create device representing Ethernet port. 2504 * 2505 * @param name 2506 * Pointer to the port's name. 2507 * 2508 * @return 2509 * 0 on success, negative error value otherwise. 2510 */ 2511 static int 2512 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) 2513 { 2514 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); 2515 struct rte_eth_dev *eth_dev; 2516 struct mrvl_priv *priv; 2517 struct ifreq req; 2518 2519 eth_dev = rte_eth_dev_allocate(name); 2520 if (!eth_dev) 2521 return -ENOMEM; 2522 2523 priv = mrvl_priv_create(name); 2524 if (!priv) { 2525 ret = -ENOMEM; 2526 goto out_free_dev; 2527 } 2528 2529 eth_dev->data->mac_addrs = 2530 rte_zmalloc("mac_addrs", 2531 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); 2532 if (!eth_dev->data->mac_addrs) { 2533 MRVL_LOG(ERR, "Failed to allocate space for eth addrs"); 2534 ret = -ENOMEM; 2535 goto out_free_priv; 2536 } 2537 2538 memset(&req, 0, sizeof(req)); 2539 strcpy(req.ifr_name, name); 2540 ret = ioctl(fd, SIOCGIFHWADDR, &req); 2541 if (ret) 2542 goto out_free_mac; 2543 2544 memcpy(eth_dev->data->mac_addrs[0].addr_bytes, 2545 req.ifr_addr.sa_data, ETHER_ADDR_LEN); 2546 2547 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; 2548 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst; 2549 eth_dev->data->kdrv = RTE_KDRV_NONE; 2550 eth_dev->data->dev_private = priv; 2551 eth_dev->device = &vdev->device; 2552 eth_dev->dev_ops = &mrvl_ops; 2553 2554 rte_eth_dev_probing_finish(eth_dev); 2555 return 0; 2556 out_free_mac: 2557 rte_free(eth_dev->data->mac_addrs); 2558 out_free_dev: 2559 rte_eth_dev_release_port(eth_dev); 2560 out_free_priv: 2561 rte_free(priv); 2562 2563 return ret; 2564 } 2565 2566 /** 2567 * Cleanup previously created device representing Ethernet port. 2568 * 2569 * @param name 2570 * Pointer to the port name. 2571 */ 2572 static void 2573 mrvl_eth_dev_destroy(const char *name) 2574 { 2575 struct rte_eth_dev *eth_dev; 2576 struct mrvl_priv *priv; 2577 2578 eth_dev = rte_eth_dev_allocated(name); 2579 if (!eth_dev) 2580 return; 2581 2582 priv = eth_dev->data->dev_private; 2583 pp2_bpool_deinit(priv->bpool); 2584 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); 2585 rte_free(priv); 2586 rte_free(eth_dev->data->mac_addrs); 2587 rte_eth_dev_release_port(eth_dev); 2588 } 2589 2590 /** 2591 * Callback used by rte_kvargs_process() during argument parsing. 2592 * 2593 * @param key 2594 * Pointer to the parsed key (unused). 2595 * @param value 2596 * Pointer to the parsed value. 2597 * @param extra_args 2598 * Pointer to the extra arguments which contains address of the 2599 * table of pointers to parsed interface names. 2600 * 2601 * @return 2602 * Always 0. 2603 */ 2604 static int 2605 mrvl_get_ifnames(const char *key __rte_unused, const char *value, 2606 void *extra_args) 2607 { 2608 struct mrvl_ifnames *ifnames = extra_args; 2609 2610 ifnames->names[ifnames->idx++] = value; 2611 2612 return 0; 2613 } 2614 2615 /** 2616 * Deinitialize per-lcore MUSDK hardware interfaces (hifs). 2617 */ 2618 static void 2619 mrvl_deinit_hifs(void) 2620 { 2621 int i; 2622 2623 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { 2624 if (hifs[i]) 2625 pp2_hif_deinit(hifs[i]); 2626 } 2627 used_hifs = MRVL_MUSDK_HIFS_RESERVED; 2628 memset(hifs, 0, sizeof(hifs)); 2629 } 2630 2631 /** 2632 * DPDK callback to register the virtual device. 2633 * 2634 * @param vdev 2635 * Pointer to the virtual device. 2636 * 2637 * @return 2638 * 0 on success, negative error value otherwise. 2639 */ 2640 static int 2641 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) 2642 { 2643 struct rte_kvargs *kvlist; 2644 struct mrvl_ifnames ifnames; 2645 int ret = -EINVAL; 2646 uint32_t i, ifnum, cfgnum; 2647 const char *params; 2648 2649 params = rte_vdev_device_args(vdev); 2650 if (!params) 2651 return -EINVAL; 2652 2653 kvlist = rte_kvargs_parse(params, valid_args); 2654 if (!kvlist) 2655 return -EINVAL; 2656 2657 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); 2658 if (ifnum > RTE_DIM(ifnames.names)) 2659 goto out_free_kvlist; 2660 2661 ifnames.idx = 0; 2662 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, 2663 mrvl_get_ifnames, &ifnames); 2664 2665 2666 /* 2667 * The below system initialization should be done only once, 2668 * on the first provided configuration file 2669 */ 2670 if (!mrvl_qos_cfg) { 2671 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); 2672 MRVL_LOG(INFO, "Parsing config file!"); 2673 if (cfgnum > 1) { 2674 MRVL_LOG(ERR, "Cannot handle more than one config file!"); 2675 goto out_free_kvlist; 2676 } else if (cfgnum == 1) { 2677 rte_kvargs_process(kvlist, MRVL_CFG_ARG, 2678 mrvl_get_qoscfg, &mrvl_qos_cfg); 2679 } 2680 } 2681 2682 if (mrvl_dev_num) 2683 goto init_devices; 2684 2685 MRVL_LOG(INFO, "Perform MUSDK initializations"); 2686 2687 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist); 2688 if (ret) 2689 goto out_free_kvlist; 2690 2691 ret = mrvl_init_pp2(); 2692 if (ret) { 2693 MRVL_LOG(ERR, "Failed to init PP!"); 2694 rte_mvep_deinit(MVEP_MOD_T_PP2); 2695 goto out_free_kvlist; 2696 } 2697 2698 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); 2699 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); 2700 2701 mrvl_lcore_first = RTE_MAX_LCORE; 2702 mrvl_lcore_last = 0; 2703 2704 init_devices: 2705 for (i = 0; i < ifnum; i++) { 2706 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]); 2707 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); 2708 if (ret) 2709 goto out_cleanup; 2710 } 2711 mrvl_dev_num += ifnum; 2712 2713 rte_kvargs_free(kvlist); 2714 2715 return 0; 2716 out_cleanup: 2717 for (; i > 0; i--) 2718 mrvl_eth_dev_destroy(ifnames.names[i]); 2719 2720 if (mrvl_dev_num == 0) { 2721 mrvl_deinit_pp2(); 2722 rte_mvep_deinit(MVEP_MOD_T_PP2); 2723 } 2724 out_free_kvlist: 2725 rte_kvargs_free(kvlist); 2726 2727 return ret; 2728 } 2729 2730 /** 2731 * DPDK callback to remove virtual device. 2732 * 2733 * @param vdev 2734 * Pointer to the removed virtual device. 2735 * 2736 * @return 2737 * 0 on success, negative error value otherwise. 2738 */ 2739 static int 2740 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) 2741 { 2742 int i; 2743 const char *name; 2744 2745 name = rte_vdev_device_name(vdev); 2746 if (!name) 2747 return -EINVAL; 2748 2749 MRVL_LOG(INFO, "Removing %s", name); 2750 2751 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */ 2752 char ifname[RTE_ETH_NAME_MAX_LEN]; 2753 2754 rte_eth_dev_get_name_by_port(i, ifname); 2755 mrvl_eth_dev_destroy(ifname); 2756 mrvl_dev_num--; 2757 } 2758 2759 if (mrvl_dev_num == 0) { 2760 MRVL_LOG(INFO, "Perform MUSDK deinit"); 2761 mrvl_deinit_hifs(); 2762 mrvl_deinit_pp2(); 2763 rte_mvep_deinit(MVEP_MOD_T_PP2); 2764 } 2765 2766 return 0; 2767 } 2768 2769 static struct rte_vdev_driver pmd_mrvl_drv = { 2770 .probe = rte_pmd_mrvl_probe, 2771 .remove = rte_pmd_mrvl_remove, 2772 }; 2773 2774 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); 2775 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); 2776 2777 RTE_INIT(mrvl_init_log) 2778 { 2779 mrvl_logtype = rte_log_register("pmd.net.mvpp2"); 2780 if (mrvl_logtype >= 0) 2781 rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE); 2782 } 2783