1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017-2021 Marvell International Ltd. 3 * Copyright(c) 2017-2021 Semihalf. 4 * All rights reserved. 5 */ 6 7 #include <rte_string_fns.h> 8 #include <ethdev_driver.h> 9 #include <rte_kvargs.h> 10 #include <rte_log.h> 11 #include <rte_malloc.h> 12 #include <rte_bus_vdev.h> 13 14 #include <fcntl.h> 15 #include <linux/ethtool.h> 16 #include <linux/sockios.h> 17 #include <net/if.h> 18 #include <net/if_arp.h> 19 #include <sys/ioctl.h> 20 #include <sys/socket.h> 21 #include <sys/stat.h> 22 #include <sys/types.h> 23 24 #include <rte_mvep_common.h> 25 #include "mrvl_ethdev.h" 26 #include "mrvl_qos.h" 27 #include "mrvl_flow.h" 28 #include "mrvl_mtr.h" 29 #include "mrvl_tm.h" 30 31 /* bitmask with reserved hifs */ 32 #define MRVL_MUSDK_HIFS_RESERVED 0x0F 33 /* bitmask with reserved bpools */ 34 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07 35 /* bitmask with reserved kernel RSS tables */ 36 #define MRVL_MUSDK_RSS_RESERVED 0x0F 37 /* maximum number of available hifs */ 38 #define MRVL_MUSDK_HIFS_MAX 9 39 40 /* prefetch shift */ 41 #define MRVL_MUSDK_PREFETCH_SHIFT 2 42 43 /* TCAM has 25 entries reserved for uc/mc filter entries 44 * + 1 for primary mac address 45 */ 46 #define MRVL_MAC_ADDRS_MAX (1 + 25) 47 #define MRVL_MATCH_LEN 16 48 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) 49 /* Maximum allowable packet size */ 50 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) 51 52 #define MRVL_IFACE_NAME_ARG "iface" 53 #define MRVL_CFG_ARG "cfg" 54 55 #define MRVL_ARP_LENGTH 28 56 57 #define MRVL_COOKIE_ADDR_INVALID ~0ULL 58 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000 59 60 /** Port Rx offload capabilities */ 61 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ 62 DEV_RX_OFFLOAD_JUMBO_FRAME | \ 63 DEV_RX_OFFLOAD_CHECKSUM) 64 65 /** Port Tx offloads capabilities */ 66 #define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \ 67 DEV_TX_OFFLOAD_UDP_CKSUM | \ 68 DEV_TX_OFFLOAD_TCP_CKSUM) 69 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \ 70 DEV_TX_OFFLOAD_MULTI_SEGS) 71 72 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \ 73 PKT_TX_TCP_CKSUM | \ 74 PKT_TX_UDP_CKSUM) 75 76 static const char * const valid_args[] = { 77 MRVL_IFACE_NAME_ARG, 78 MRVL_CFG_ARG, 79 NULL 80 }; 81 82 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; 83 static struct pp2_hif *hifs[RTE_MAX_LCORE]; 84 static int used_bpools[PP2_NUM_PKT_PROC] = { 85 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED 86 }; 87 88 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; 89 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; 90 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; 91 static int dummy_pool_id[PP2_NUM_PKT_PROC]; 92 struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0}; 93 94 struct mrvl_ifnames { 95 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; 96 int idx; 97 }; 98 99 /* 100 * To use buffer harvesting based on loopback port shadow queue structure 101 * was introduced for buffers information bookkeeping. 102 * 103 * Before sending the packet, related buffer information (pp2_buff_inf) is 104 * stored in shadow queue. After packet is transmitted no longer used 105 * packet buffer is released back to it's original hardware pool, 106 * on condition it originated from interface. 107 * In case it was generated by application itself i.e: mbuf->port field is 108 * 0xff then its released to software mempool. 
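 *
 * Illustrative sketch of the ring bookkeeping (assuming
 * MRVL_PP2_TX_SHADOWQ_SIZE is a power of two and
 * MRVL_PP2_TX_SHADOWQ_MASK == MRVL_PP2_TX_SHADOWQ_SIZE - 1):
 *
 *   enqueue (tx):  sq->ent[sq->head] = { cookie, addr, bpool };
 *                  sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 *                  sq->size++;
 *   release:       take sq->ent[sq->tail], return the buffer to its bpool
 *                  (or to the rte_mempool when bpool is NULL);
 *                  sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 *
 * mrvl_fill_shadowq() stores a NULL bpool for buffers that did not originate
 * from a port (or whose refcnt > 1), which is how the software-mempool case
 * above is recognized.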
109 */ 110 struct mrvl_shadow_txq { 111 int head; /* write index - used when sending buffers */ 112 int tail; /* read index - used when releasing buffers */ 113 u16 size; /* queue occupied size */ 114 u16 num_to_release; /* number of descriptors sent, that can be 115 * released 116 */ 117 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ 118 }; 119 120 struct mrvl_rxq { 121 struct mrvl_priv *priv; 122 struct rte_mempool *mp; 123 int queue_id; 124 int port_id; 125 int cksum_enabled; 126 uint64_t bytes_recv; 127 uint64_t drop_mac; 128 }; 129 130 struct mrvl_txq { 131 struct mrvl_priv *priv; 132 int queue_id; 133 int port_id; 134 uint64_t bytes_sent; 135 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; 136 int tx_deferred_start; 137 }; 138 139 static int mrvl_lcore_first; 140 static int mrvl_lcore_last; 141 static int mrvl_dev_num; 142 143 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); 144 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, 145 struct pp2_hif *hif, unsigned int core_id, 146 struct mrvl_shadow_txq *sq, int qid, int force); 147 148 static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, 149 uint16_t nb_pkts); 150 static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, 151 uint16_t nb_pkts); 152 static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev); 153 static void mrvl_deinit_pp2(void); 154 static void mrvl_deinit_hifs(void); 155 156 static int 157 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 158 uint32_t index, uint32_t vmdq __rte_unused); 159 static int 160 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr); 161 static int 162 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); 163 static int mrvl_promiscuous_enable(struct rte_eth_dev *dev); 164 static int mrvl_allmulticast_enable(struct rte_eth_dev *dev); 165 static int 166 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 167 168 #define MRVL_XSTATS_TBL_ENTRY(name) { \ 169 #name, offsetof(struct pp2_ppio_statistics, name), \ 170 sizeof(((struct pp2_ppio_statistics *)0)->name) \ 171 } 172 173 /* Table with xstats data */ 174 static struct { 175 const char *name; 176 unsigned int offset; 177 unsigned int size; 178 } mrvl_xstats_tbl[] = { 179 MRVL_XSTATS_TBL_ENTRY(rx_bytes), 180 MRVL_XSTATS_TBL_ENTRY(rx_packets), 181 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), 182 MRVL_XSTATS_TBL_ENTRY(rx_errors), 183 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), 184 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), 185 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), 186 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), 187 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), 188 MRVL_XSTATS_TBL_ENTRY(tx_bytes), 189 MRVL_XSTATS_TBL_ENTRY(tx_packets), 190 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), 191 MRVL_XSTATS_TBL_ENTRY(tx_errors) 192 }; 193 194 static inline int 195 mrvl_reserve_bit(int *bitmap, int max) 196 { 197 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); 198 199 if (n >= max) 200 return -1; 201 202 *bitmap |= 1 << n; 203 204 return n; 205 } 206 207 static int 208 mrvl_pp2_fixup_init(void) 209 { 210 struct pp2_bpool_params bpool_params; 211 char name[15]; 212 int err, i; 213 214 memset(dummy_pool, 0, sizeof(dummy_pool)); 215 for (i = 0; i < pp2_get_num_inst(); i++) { 216 dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i], 217 PP2_BPOOL_NUM_POOLS); 218 if (dummy_pool_id[i] < 0) { 219 MRVL_LOG(ERR, "Can't find free pool\n"); 220 return -1; 221 } 222 223 memset(name, 0, sizeof(name)); 
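		/*
		 * Illustrative note: mrvl_reserve_bit() above picks the bit
		 * just above the highest one currently set. With
		 * used_bpools[i] starting at MRVL_MUSDK_BPOOLS_RESERVED
		 * (0x07), __builtin_clz() returns 29, so n = 32 - 29 = 3:
		 * bit 3 is set (0x0f) and pool id 3 is returned; the next
		 * reservation yields 4, and so on. A bit freed below the
		 * current highest set bit is therefore not handed out again.
		 */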
224 snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]); 225 memset(&bpool_params, 0, sizeof(bpool_params)); 226 bpool_params.match = name; 227 bpool_params.buff_len = MRVL_PKT_OFFS; 228 bpool_params.dummy_short_pool = 1; 229 err = pp2_bpool_init(&bpool_params, &dummy_pool[i]); 230 if (err != 0 || !dummy_pool[i]) { 231 MRVL_LOG(ERR, "BPool init failed!\n"); 232 used_bpools[i] &= ~(1 << dummy_pool_id[i]); 233 return -1; 234 } 235 } 236 237 return 0; 238 } 239 240 /** 241 * Initialize packet processor. 242 * 243 * @return 244 * 0 on success, negative error value otherwise. 245 */ 246 static int 247 mrvl_init_pp2(void) 248 { 249 struct pp2_init_params init_params; 250 int err; 251 252 memset(&init_params, 0, sizeof(init_params)); 253 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; 254 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; 255 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; 256 if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs) 257 memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs, 258 sizeof(struct pp2_parse_udfs)); 259 err = pp2_init(&init_params); 260 if (err != 0) { 261 MRVL_LOG(ERR, "PP2 init failed"); 262 return -1; 263 } 264 265 err = mrvl_pp2_fixup_init(); 266 if (err != 0) { 267 MRVL_LOG(ERR, "PP2 fixup init failed"); 268 return -1; 269 } 270 271 return 0; 272 } 273 274 static void 275 mrvl_pp2_fixup_deinit(void) 276 { 277 int i; 278 279 for (i = 0; i < PP2_NUM_PKT_PROC; i++) { 280 if (!dummy_pool[i]) 281 continue; 282 pp2_bpool_deinit(dummy_pool[i]); 283 used_bpools[i] &= ~(1 << dummy_pool_id[i]); 284 } 285 } 286 287 /** 288 * Deinitialize packet processor. 289 * 290 * @return 291 * 0 on success, negative error value otherwise. 292 */ 293 static void 294 mrvl_deinit_pp2(void) 295 { 296 mrvl_pp2_fixup_deinit(); 297 pp2_deinit(); 298 } 299 300 static inline void 301 mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf) 302 { 303 sq->ent[sq->head].buff.cookie = (uint64_t)buf; 304 sq->ent[sq->head].buff.addr = buf ? 305 rte_mbuf_data_iova_default(buf) : 0; 306 307 sq->ent[sq->head].bpool = 308 (unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS || 309 buf->refcnt > 1)) ? NULL : 310 mrvl_port_to_bpool_lookup[buf->port]; 311 312 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; 313 sq->size++; 314 } 315 316 /** 317 * Deinitialize per-lcore MUSDK hardware interfaces (hifs). 
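 *
 * hif handles are created lazily, one per lcore, by mrvl_get_hif() (under
 * priv->lock); mrvl_lcore_first/mrvl_lcore_last track the range of lcore
 * ids that may hold an initialized hif, so only that range is walked here
 * before used_hifs is reset to the reserved bitmap.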
318 */ 319 static void 320 mrvl_deinit_hifs(void) 321 { 322 int i; 323 324 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { 325 if (hifs[i]) 326 pp2_hif_deinit(hifs[i]); 327 } 328 used_hifs = MRVL_MUSDK_HIFS_RESERVED; 329 memset(hifs, 0, sizeof(hifs)); 330 } 331 332 static inline void 333 mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf) 334 { 335 pp2_ppio_outq_desc_reset(desc); 336 pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf)); 337 pp2_ppio_outq_desc_set_pkt_offset(desc, 0); 338 pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf)); 339 } 340 341 static inline int 342 mrvl_get_bpool_size(int pp2_id, int pool_id) 343 { 344 int i; 345 int size = 0; 346 347 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) 348 size += mrvl_port_bpool_size[pp2_id][pool_id][i]; 349 350 return size; 351 } 352 353 static int 354 mrvl_init_hif(int core_id) 355 { 356 struct pp2_hif_params params; 357 char match[MRVL_MATCH_LEN]; 358 int ret; 359 360 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); 361 if (ret < 0) { 362 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); 363 return ret; 364 } 365 366 snprintf(match, sizeof(match), "hif-%d", ret); 367 memset(¶ms, 0, sizeof(params)); 368 params.match = match; 369 params.out_size = MRVL_PP2_AGGR_TXQD_MAX; 370 ret = pp2_hif_init(¶ms, &hifs[core_id]); 371 if (ret) { 372 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id); 373 return ret; 374 } 375 376 return 0; 377 } 378 379 static inline struct pp2_hif* 380 mrvl_get_hif(struct mrvl_priv *priv, int core_id) 381 { 382 int ret; 383 384 if (likely(hifs[core_id] != NULL)) 385 return hifs[core_id]; 386 387 rte_spinlock_lock(&priv->lock); 388 389 ret = mrvl_init_hif(core_id); 390 if (ret < 0) { 391 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id); 392 goto out; 393 } 394 395 if (core_id < mrvl_lcore_first) 396 mrvl_lcore_first = core_id; 397 398 if (core_id > mrvl_lcore_last) 399 mrvl_lcore_last = core_id; 400 out: 401 rte_spinlock_unlock(&priv->lock); 402 403 return hifs[core_id]; 404 } 405 406 /** 407 * Set tx burst function according to offload flag 408 * 409 * @param dev 410 * Pointer to Ethernet device structure. 411 */ 412 static void 413 mrvl_set_tx_function(struct rte_eth_dev *dev) 414 { 415 struct mrvl_priv *priv = dev->data->dev_private; 416 417 /* Use a simple Tx queue (no offloads, no multi segs) if possible */ 418 if (priv->multiseg) { 419 RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n"); 420 dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst; 421 } else { 422 RTE_LOG(INFO, PMD, "Using single-segment tx callback\n"); 423 dev->tx_pkt_burst = mrvl_tx_pkt_burst; 424 } 425 } 426 427 /** 428 * Configure rss based on dpdk rss configuration. 429 * 430 * @param priv 431 * Pointer to private structure. 432 * @param rss_conf 433 * Pointer to RSS configuration. 434 * 435 * @return 436 * 0 on success, negative error value otherwise. 
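 *
 * Mapping applied below (first matching flag wins):
 *   rss_hf == 0               -> PP2_PPIO_HASH_T_NONE
 *   ETH_RSS_IPV4              -> PP2_PPIO_HASH_T_2_TUPLE
 *   ETH_RSS_NONFRAG_IPV4_TCP  -> PP2_PPIO_HASH_T_5_TUPLE, rss_hf_tcp = 1
 *   ETH_RSS_NONFRAG_IPV4_UDP  -> PP2_PPIO_HASH_T_5_TUPLE, rss_hf_tcp = 0
 * Any other combination is rejected with -EINVAL; a user-supplied hash key
 * only triggers a warning, as changing the key is not supported.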
437 */ 438 static int 439 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) 440 { 441 if (rss_conf->rss_key) 442 MRVL_LOG(WARNING, "Changing hash key is not supported"); 443 444 if (rss_conf->rss_hf == 0) { 445 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 446 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { 447 priv->ppio_params.inqs_params.hash_type = 448 PP2_PPIO_HASH_T_2_TUPLE; 449 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 450 priv->ppio_params.inqs_params.hash_type = 451 PP2_PPIO_HASH_T_5_TUPLE; 452 priv->rss_hf_tcp = 1; 453 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { 454 priv->ppio_params.inqs_params.hash_type = 455 PP2_PPIO_HASH_T_5_TUPLE; 456 priv->rss_hf_tcp = 0; 457 } else { 458 return -EINVAL; 459 } 460 461 return 0; 462 } 463 464 /** 465 * Ethernet device configuration. 466 * 467 * Prepare the driver for a given number of TX and RX queues and 468 * configure RSS. 469 * 470 * @param dev 471 * Pointer to Ethernet device structure. 472 * 473 * @return 474 * 0 on success, negative error value otherwise. 475 */ 476 static int 477 mrvl_dev_configure(struct rte_eth_dev *dev) 478 { 479 struct mrvl_priv *priv = dev->data->dev_private; 480 int ret; 481 482 if (priv->ppio) { 483 MRVL_LOG(INFO, "Device reconfiguration is not supported"); 484 return -EINVAL; 485 } 486 487 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && 488 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { 489 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d", 490 dev->data->dev_conf.rxmode.mq_mode); 491 return -EINVAL; 492 } 493 494 if (dev->data->dev_conf.rxmode.split_hdr_size) { 495 MRVL_LOG(INFO, "Split headers not supported"); 496 return -EINVAL; 497 } 498 499 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 500 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - 501 MRVL_PP2_ETH_HDRS_LEN; 502 if (dev->data->mtu > priv->max_mtu) { 503 MRVL_LOG(ERR, "inherit MTU %u from max_rx_pkt_len %u is larger than max_mtu %u\n", 504 dev->data->mtu, 505 dev->data->dev_conf.rxmode.max_rx_pkt_len, 506 priv->max_mtu); 507 return -EINVAL; 508 } 509 } 510 511 if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) 512 priv->multiseg = 1; 513 514 ret = mrvl_configure_rxqs(priv, dev->data->port_id, 515 dev->data->nb_rx_queues); 516 if (ret < 0) 517 return ret; 518 519 ret = mrvl_configure_txqs(priv, dev->data->port_id, 520 dev->data->nb_tx_queues); 521 if (ret < 0) 522 return ret; 523 524 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; 525 priv->ppio_params.maintain_stats = 1; 526 priv->nb_rx_queues = dev->data->nb_rx_queues; 527 528 ret = mrvl_tm_init(dev); 529 if (ret < 0) 530 return ret; 531 532 if (dev->data->nb_rx_queues == 1 && 533 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 534 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue"); 535 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; 536 priv->configured = 1; 537 return 0; 538 } 539 540 ret = mrvl_configure_rss(priv, 541 &dev->data->dev_conf.rx_adv_conf.rss_conf); 542 if (ret < 0) 543 return ret; 544 545 priv->configured = 1; 546 547 return 0; 548 } 549 550 /** 551 * DPDK callback to change the MTU. 552 * 553 * Setting the MTU affects hardware MRU (packets larger than the MRU 554 * will be dropped). 555 * 556 * @param dev 557 * Pointer to Ethernet device structure. 558 * @param mtu 559 * New MTU. 560 * 561 * @return 562 * 0 on success, negative error value otherwise. 
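 *
 * Sketch of the clamping performed below (scattered Rx is not supported, so
 * a full frame must fit into a single mbuf):
 *   mru = MRVL_PP2_MTU_TO_MRU(mtu);
 *   if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
 *           mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
 *           mtu = MRVL_PP2_MRU_TO_MTU(mru);
 *   }
 * The resulting mtu/mru pair is then validated against RTE_ETHER_MIN_MTU and
 * MRVL_PKT_SIZE_MAX before being programmed into the ppio.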
563 */ 564 static int 565 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 566 { 567 struct mrvl_priv *priv = dev->data->dev_private; 568 uint16_t mru; 569 uint16_t mbuf_data_size = 0; /* SW buffer size */ 570 int ret; 571 572 mru = MRVL_PP2_MTU_TO_MRU(mtu); 573 /* 574 * min_rx_buf_size is equal to mbuf data size 575 * if pmd didn't set it differently 576 */ 577 mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; 578 /* Prevent PMD from: 579 * - setting mru greater than the mbuf size resulting in 580 * hw and sw buffer size mismatch 581 * - setting mtu that requires the support of scattered packets 582 * when this feature has not been enabled/supported so far 583 * (TODO check scattered_rx flag here once scattered RX is supported). 584 */ 585 if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) { 586 mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS; 587 mtu = MRVL_PP2_MRU_TO_MTU(mru); 588 MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted " 589 "by current mbuf size: %u. Set MTU to %u, MRU to %u", 590 mbuf_data_size, mtu, mru); 591 } 592 593 if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) { 594 MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru); 595 return -EINVAL; 596 } 597 598 dev->data->mtu = mtu; 599 dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE; 600 601 if (!priv->ppio) 602 return 0; 603 604 ret = pp2_ppio_set_mru(priv->ppio, mru); 605 if (ret) { 606 MRVL_LOG(ERR, "Failed to change MRU"); 607 return ret; 608 } 609 610 ret = pp2_ppio_set_mtu(priv->ppio, mtu); 611 if (ret) { 612 MRVL_LOG(ERR, "Failed to change MTU"); 613 return ret; 614 } 615 616 return 0; 617 } 618 619 /** 620 * DPDK callback to bring the link up. 621 * 622 * @param dev 623 * Pointer to Ethernet device structure. 624 * 625 * @return 626 * 0 on success, negative error value otherwise. 627 */ 628 static int 629 mrvl_dev_set_link_up(struct rte_eth_dev *dev) 630 { 631 struct mrvl_priv *priv = dev->data->dev_private; 632 int ret; 633 634 if (!priv->ppio) { 635 dev->data->dev_link.link_status = ETH_LINK_UP; 636 return 0; 637 } 638 639 ret = pp2_ppio_enable(priv->ppio); 640 if (ret) 641 return ret; 642 643 /* 644 * mtu/mru can be updated if pp2_ppio_enable() was called at least once 645 * as pp2_ppio_enable() changes port->t_mode from default 0 to 646 * PP2_TRAFFIC_INGRESS_EGRESS. 647 * 648 * Set mtu to default DPDK value here. 649 */ 650 ret = mrvl_mtu_set(dev, dev->data->mtu); 651 if (ret) { 652 pp2_ppio_disable(priv->ppio); 653 return ret; 654 } 655 656 dev->data->dev_link.link_status = ETH_LINK_UP; 657 return 0; 658 } 659 660 /** 661 * DPDK callback to bring the link down. 662 * 663 * @param dev 664 * Pointer to Ethernet device structure. 665 * 666 * @return 667 * 0 on success, negative error value otherwise. 668 */ 669 static int 670 mrvl_dev_set_link_down(struct rte_eth_dev *dev) 671 { 672 struct mrvl_priv *priv = dev->data->dev_private; 673 int ret; 674 675 if (!priv->ppio) { 676 dev->data->dev_link.link_status = ETH_LINK_DOWN; 677 return 0; 678 } 679 ret = pp2_ppio_disable(priv->ppio); 680 if (ret) 681 return ret; 682 683 dev->data->dev_link.link_status = ETH_LINK_DOWN; 684 return 0; 685 } 686 687 /** 688 * DPDK callback to start tx queue. 689 * 690 * @param dev 691 * Pointer to Ethernet device structure. 692 * @param queue_id 693 * Transmit queue index. 694 * 695 * @return 696 * 0 on success, negative error value otherwise. 
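 *
 * Note: mrvl_dev_start() marks every Tx queue as started and then calls
 * mrvl_tx_queue_stop() for queues configured with tx_deferred_start, so
 * such queues remain stopped until the application invokes this callback.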
697 */ 698 static int 699 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id) 700 { 701 struct mrvl_priv *priv = dev->data->dev_private; 702 int ret; 703 704 if (!priv) 705 return -EPERM; 706 707 /* passing 1 enables given tx queue */ 708 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1); 709 if (ret) { 710 MRVL_LOG(ERR, "Failed to start txq %d", queue_id); 711 return ret; 712 } 713 714 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 715 716 return 0; 717 } 718 719 /** 720 * DPDK callback to stop tx queue. 721 * 722 * @param dev 723 * Pointer to Ethernet device structure. 724 * @param queue_id 725 * Transmit queue index. 726 * 727 * @return 728 * 0 on success, negative error value otherwise. 729 */ 730 static int 731 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id) 732 { 733 struct mrvl_priv *priv = dev->data->dev_private; 734 int ret; 735 736 if (!priv->ppio) 737 return -EPERM; 738 739 /* passing 0 disables given tx queue */ 740 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0); 741 if (ret) { 742 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id); 743 return ret; 744 } 745 746 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 747 748 return 0; 749 } 750 751 /** 752 * Populate VLAN Filter configuration. 753 * 754 * @param dev 755 * Pointer to Ethernet device structure. 756 * @param on 757 * Toggle filter. 758 * 759 * @return 760 * 0 on success, negative error value otherwise. 761 */ 762 static int mrvl_populate_vlan_table(struct rte_eth_dev *dev, int on) 763 { 764 uint32_t j; 765 int ret; 766 struct rte_vlan_filter_conf *vfc; 767 768 vfc = &dev->data->vlan_filter_conf; 769 for (j = 0; j < RTE_DIM(vfc->ids); j++) { 770 uint64_t vlan; 771 uint64_t vbit; 772 uint64_t ids = vfc->ids[j]; 773 774 if (ids == 0) 775 continue; 776 777 while (ids) { 778 vlan = 64 * j; 779 /* count trailing zeroes */ 780 vbit = ~ids & (ids - 1); 781 /* clear least significant bit set */ 782 ids ^= (ids ^ (ids - 1)) ^ vbit; 783 for (; vbit; vlan++) 784 vbit >>= 1; 785 ret = mrvl_vlan_filter_set(dev, vlan, on); 786 if (ret) { 787 MRVL_LOG(ERR, "Failed to setup VLAN filter\n"); 788 return ret; 789 } 790 } 791 } 792 793 return 0; 794 } 795 796 /** 797 * DPDK callback to start the device. 798 * 799 * @param dev 800 * Pointer to Ethernet device structure. 801 * 802 * @return 803 * 0 on success, negative errno value on failure. 804 */ 805 static int 806 mrvl_dev_start(struct rte_eth_dev *dev) 807 { 808 struct mrvl_priv *priv = dev->data->dev_private; 809 char match[MRVL_MATCH_LEN]; 810 int ret = 0, i, def_init_size; 811 struct rte_ether_addr *mac_addr; 812 813 if (priv->ppio) 814 return mrvl_dev_set_link_up(dev); 815 816 snprintf(match, sizeof(match), "ppio-%d:%d", 817 priv->pp_id, priv->ppio_id); 818 priv->ppio_params.match = match; 819 priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH; 820 priv->forward_bad_frames = 0; 821 priv->fill_bpool_buffs = MRVL_BURST_SIZE; 822 823 if (mrvl_cfg) { 824 priv->ppio_params.eth_start_hdr = 825 mrvl_cfg->port[dev->data->port_id].eth_start_hdr; 826 priv->forward_bad_frames = 827 mrvl_cfg->port[dev->data->port_id].forward_bad_frames; 828 priv->fill_bpool_buffs = 829 mrvl_cfg->port[dev->data->port_id].fill_bpool_buffs; 830 } 831 832 /* 833 * Calculate the minimum bpool size for refill feature as follows: 834 * 2 default burst sizes multiply by number of rx queues. 835 * If the bpool size will be below this value, new buffers will 836 * be added to the pool. 
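 *
 * Worked example (illustrative): with 4 Rx queues configured,
 *   bpool_min_size = 4 * MRVL_BURST_SIZE * 2
 * and, just below, the pool is topped up to
 *   def_init_size  = bpool_min_size + MRVL_BURST_SIZE * 2
 * whenever the size accumulated during Rx queue setup was smaller.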
837 */ 838 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; 839 840 /* In case initial bpool size configured in queues setup is 841 * smaller than minimum size add more buffers 842 */ 843 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; 844 if (priv->bpool_init_size < def_init_size) { 845 int buffs_to_add = def_init_size - priv->bpool_init_size; 846 847 priv->bpool_init_size += buffs_to_add; 848 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); 849 if (ret) 850 MRVL_LOG(ERR, "Failed to add buffers to bpool"); 851 } 852 853 /* 854 * Calculate the maximum bpool size for refill feature as follows: 855 * maximum number of descriptors in rx queue multiply by number 856 * of rx queues plus minimum bpool size. 857 * In case the bpool size will exceed this value, superfluous buffers 858 * will be removed 859 */ 860 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + 861 priv->bpool_min_size; 862 863 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); 864 if (ret) { 865 MRVL_LOG(ERR, "Failed to init ppio"); 866 return ret; 867 } 868 869 /* 870 * In case there are some some stale uc/mc mac addresses flush them 871 * here. It cannot be done during mrvl_dev_close() as port information 872 * is already gone at that point (due to pp2_ppio_deinit() in 873 * mrvl_dev_stop()). 874 */ 875 if (!priv->uc_mc_flushed) { 876 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); 877 if (ret) { 878 MRVL_LOG(ERR, 879 "Failed to flush uc/mc filter list"); 880 goto out; 881 } 882 priv->uc_mc_flushed = 1; 883 } 884 885 ret = mrvl_mtu_set(dev, dev->data->mtu); 886 if (ret) 887 MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu); 888 889 if (!rte_is_zero_ether_addr(&dev->data->mac_addrs[0])) 890 mrvl_mac_addr_set(dev, &dev->data->mac_addrs[0]); 891 892 for (i = 1; i < MRVL_MAC_ADDRS_MAX; i++) { 893 mac_addr = &dev->data->mac_addrs[i]; 894 895 /* skip zero address */ 896 if (rte_is_zero_ether_addr(mac_addr)) 897 continue; 898 899 mrvl_mac_addr_add(dev, mac_addr, i, 0); 900 } 901 902 if (dev->data->all_multicast == 1) 903 mrvl_allmulticast_enable(dev); 904 905 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 906 ret = mrvl_populate_vlan_table(dev, 1); 907 if (ret) { 908 MRVL_LOG(ERR, "Failed to populate VLAN table"); 909 goto out; 910 } 911 } 912 913 /* For default QoS config, don't start classifier. 
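 * QoS mapping is started only when a configuration (typically supplied via
 * the MRVL_CFG_ARG devarg) is present and clears use_qos_global_defaults
 * for this port; with the built-in defaults the step below is skipped.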
*/ 914 if (mrvl_cfg && 915 mrvl_cfg->port[dev->data->port_id].use_qos_global_defaults == 0) { 916 ret = mrvl_start_qos_mapping(priv); 917 if (ret) { 918 MRVL_LOG(ERR, "Failed to setup QoS mapping"); 919 goto out; 920 } 921 } 922 923 ret = pp2_ppio_set_loopback(priv->ppio, dev->data->dev_conf.lpbk_mode); 924 if (ret) { 925 MRVL_LOG(ERR, "Failed to set loopback"); 926 goto out; 927 } 928 929 if (dev->data->promiscuous == 1) 930 mrvl_promiscuous_enable(dev); 931 932 if (priv->flow_ctrl) { 933 ret = mrvl_flow_ctrl_set(dev, &priv->fc_conf); 934 if (ret) { 935 MRVL_LOG(ERR, "Failed to configure flow control"); 936 goto out; 937 } 938 priv->flow_ctrl = 0; 939 } 940 941 if (dev->data->dev_link.link_status == ETH_LINK_UP) { 942 ret = mrvl_dev_set_link_up(dev); 943 if (ret) { 944 MRVL_LOG(ERR, "Failed to set link up"); 945 dev->data->dev_link.link_status = ETH_LINK_DOWN; 946 goto out; 947 } 948 } 949 950 /* start tx queues */ 951 for (i = 0; i < dev->data->nb_tx_queues; i++) { 952 struct mrvl_txq *txq = dev->data->tx_queues[i]; 953 954 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 955 956 if (!txq->tx_deferred_start) 957 continue; 958 959 /* 960 * All txqs are started by default. Stop them 961 * so that tx_deferred_start works as expected. 962 */ 963 ret = mrvl_tx_queue_stop(dev, i); 964 if (ret) 965 goto out; 966 } 967 968 mrvl_flow_init(dev); 969 mrvl_mtr_init(dev); 970 mrvl_set_tx_function(dev); 971 972 return 0; 973 out: 974 MRVL_LOG(ERR, "Failed to start device"); 975 pp2_ppio_deinit(priv->ppio); 976 return ret; 977 } 978 979 /** 980 * Flush receive queues. 981 * 982 * @param dev 983 * Pointer to Ethernet device structure. 984 */ 985 static void 986 mrvl_flush_rx_queues(struct rte_eth_dev *dev) 987 { 988 int i; 989 990 MRVL_LOG(INFO, "Flushing rx queues"); 991 for (i = 0; i < dev->data->nb_rx_queues; i++) { 992 int ret, num; 993 994 do { 995 struct mrvl_rxq *q = dev->data->rx_queues[i]; 996 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; 997 998 num = MRVL_PP2_RXD_MAX; 999 ret = pp2_ppio_recv(q->priv->ppio, 1000 q->priv->rxq_map[q->queue_id].tc, 1001 q->priv->rxq_map[q->queue_id].inq, 1002 descs, (uint16_t *)&num); 1003 } while (ret == 0 && num); 1004 } 1005 } 1006 1007 /** 1008 * Flush transmit shadow queues. 1009 * 1010 * @param dev 1011 * Pointer to Ethernet device structure. 1012 */ 1013 static void 1014 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) 1015 { 1016 int i, j; 1017 struct mrvl_txq *txq; 1018 1019 MRVL_LOG(INFO, "Flushing tx shadow queues"); 1020 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1021 txq = (struct mrvl_txq *)dev->data->tx_queues[i]; 1022 1023 for (j = 0; j < RTE_MAX_LCORE; j++) { 1024 struct mrvl_shadow_txq *sq; 1025 1026 if (!hifs[j]) 1027 continue; 1028 1029 sq = &txq->shadow_txqs[j]; 1030 mrvl_free_sent_buffers(txq->priv->ppio, 1031 hifs[j], j, sq, txq->queue_id, 1); 1032 while (sq->tail != sq->head) { 1033 uint64_t addr = cookie_addr_high | 1034 sq->ent[sq->tail].buff.cookie; 1035 rte_pktmbuf_free( 1036 (struct rte_mbuf *)addr); 1037 sq->tail = (sq->tail + 1) & 1038 MRVL_PP2_TX_SHADOWQ_MASK; 1039 } 1040 memset(sq, 0, sizeof(*sq)); 1041 } 1042 } 1043 } 1044 1045 /** 1046 * Flush hardware bpool (buffer-pool). 1047 * 1048 * @param dev 1049 * Pointer to Ethernet device structure. 
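 *
 * Buffers are reclaimed by popping them from the bpool and rebuilding the
 * mbuf virtual address from the 32-bit cookie:
 *   addr = cookie_addr_high | inf.cookie;
 * cookie_addr_high caches the upper bits (MRVL_COOKIE_HIGH_ADDR_MASK) of the
 * first mbuf handed to mrvl_fill_bpool(), on the assumption that all mbufs
 * used by the port share those upper address bits.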
1050 */ 1051 static void 1052 mrvl_flush_bpool(struct rte_eth_dev *dev) 1053 { 1054 struct mrvl_priv *priv = dev->data->dev_private; 1055 struct pp2_hif *hif; 1056 uint32_t num; 1057 int ret; 1058 unsigned int core_id = rte_lcore_id(); 1059 1060 if (core_id == LCORE_ID_ANY) 1061 core_id = rte_get_main_lcore(); 1062 1063 hif = mrvl_get_hif(priv, core_id); 1064 1065 ret = pp2_bpool_get_num_buffs(priv->bpool, &num); 1066 if (ret) { 1067 MRVL_LOG(ERR, "Failed to get bpool buffers number"); 1068 return; 1069 } 1070 1071 while (num--) { 1072 struct pp2_buff_inf inf; 1073 uint64_t addr; 1074 1075 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); 1076 if (ret) 1077 break; 1078 1079 addr = cookie_addr_high | inf.cookie; 1080 rte_pktmbuf_free((struct rte_mbuf *)addr); 1081 } 1082 } 1083 1084 /** 1085 * DPDK callback to stop the device. 1086 * 1087 * @param dev 1088 * Pointer to Ethernet device structure. 1089 */ 1090 static int 1091 mrvl_dev_stop(struct rte_eth_dev *dev) 1092 { 1093 return mrvl_dev_set_link_down(dev); 1094 } 1095 1096 /** 1097 * DPDK callback to close the device. 1098 * 1099 * @param dev 1100 * Pointer to Ethernet device structure. 1101 */ 1102 static int 1103 mrvl_dev_close(struct rte_eth_dev *dev) 1104 { 1105 struct mrvl_priv *priv = dev->data->dev_private; 1106 size_t i; 1107 1108 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1109 return 0; 1110 1111 mrvl_flush_rx_queues(dev); 1112 mrvl_flush_tx_shadow_queues(dev); 1113 mrvl_flow_deinit(dev); 1114 mrvl_mtr_deinit(dev); 1115 1116 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { 1117 struct pp2_ppio_tc_params *tc_params = 1118 &priv->ppio_params.inqs_params.tcs_params[i]; 1119 1120 if (tc_params->inqs_params) { 1121 rte_free(tc_params->inqs_params); 1122 tc_params->inqs_params = NULL; 1123 } 1124 } 1125 1126 if (priv->cls_tbl) { 1127 pp2_cls_tbl_deinit(priv->cls_tbl); 1128 priv->cls_tbl = NULL; 1129 } 1130 1131 if (priv->qos_tbl) { 1132 pp2_cls_qos_tbl_deinit(priv->qos_tbl); 1133 priv->qos_tbl = NULL; 1134 } 1135 1136 mrvl_flush_bpool(dev); 1137 mrvl_tm_deinit(dev); 1138 1139 if (priv->ppio) { 1140 pp2_ppio_deinit(priv->ppio); 1141 priv->ppio = NULL; 1142 } 1143 1144 /* policer must be released after ppio deinitialization */ 1145 if (priv->default_policer) { 1146 pp2_cls_plcr_deinit(priv->default_policer); 1147 priv->default_policer = NULL; 1148 } 1149 1150 1151 if (priv->bpool) { 1152 pp2_bpool_deinit(priv->bpool); 1153 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); 1154 priv->bpool = NULL; 1155 } 1156 1157 mrvl_dev_num--; 1158 1159 if (mrvl_dev_num == 0) { 1160 MRVL_LOG(INFO, "Perform MUSDK deinit"); 1161 mrvl_deinit_hifs(); 1162 mrvl_deinit_pp2(); 1163 rte_mvep_deinit(MVEP_MOD_T_PP2); 1164 } 1165 1166 return 0; 1167 } 1168 1169 /** 1170 * DPDK callback to retrieve physical link information. 1171 * 1172 * @param dev 1173 * Pointer to Ethernet device structure. 1174 * @param wait_to_complete 1175 * Wait for request completion (ignored). 1176 * 1177 * @return 1178 * 0 on success, negative error value otherwise. 
1179 */ 1180 static int 1181 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 1182 { 1183 /* 1184 * TODO 1185 * once MUSDK provides necessary API use it here 1186 */ 1187 struct mrvl_priv *priv = dev->data->dev_private; 1188 struct ethtool_cmd edata; 1189 struct ifreq req; 1190 int ret, fd, link_up; 1191 1192 if (!priv->ppio) 1193 return -EPERM; 1194 1195 edata.cmd = ETHTOOL_GSET; 1196 1197 strcpy(req.ifr_name, dev->data->name); 1198 req.ifr_data = (void *)&edata; 1199 1200 fd = socket(AF_INET, SOCK_DGRAM, 0); 1201 if (fd == -1) 1202 return -EFAULT; 1203 1204 ret = ioctl(fd, SIOCETHTOOL, &req); 1205 if (ret == -1) { 1206 close(fd); 1207 return -EFAULT; 1208 } 1209 1210 close(fd); 1211 1212 switch (ethtool_cmd_speed(&edata)) { 1213 case SPEED_10: 1214 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; 1215 break; 1216 case SPEED_100: 1217 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; 1218 break; 1219 case SPEED_1000: 1220 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; 1221 break; 1222 case SPEED_2500: 1223 dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G; 1224 break; 1225 case SPEED_10000: 1226 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; 1227 break; 1228 default: 1229 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; 1230 } 1231 1232 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : 1233 ETH_LINK_HALF_DUPLEX; 1234 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : 1235 ETH_LINK_FIXED; 1236 pp2_ppio_get_link_state(priv->ppio, &link_up); 1237 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1238 1239 return 0; 1240 } 1241 1242 /** 1243 * DPDK callback to enable promiscuous mode. 1244 * 1245 * @param dev 1246 * Pointer to Ethernet device structure. 1247 * 1248 * @return 1249 * 0 on success, negative error value otherwise. 1250 */ 1251 static int 1252 mrvl_promiscuous_enable(struct rte_eth_dev *dev) 1253 { 1254 struct mrvl_priv *priv = dev->data->dev_private; 1255 int ret; 1256 1257 if (priv->isolated) 1258 return -ENOTSUP; 1259 1260 if (!priv->ppio) 1261 return 0; 1262 1263 ret = pp2_ppio_set_promisc(priv->ppio, 1); 1264 if (ret) { 1265 MRVL_LOG(ERR, "Failed to enable promiscuous mode"); 1266 return -EAGAIN; 1267 } 1268 1269 return 0; 1270 } 1271 1272 /** 1273 * DPDK callback to enable allmulti mode. 1274 * 1275 * @param dev 1276 * Pointer to Ethernet device structure. 1277 * 1278 * @return 1279 * 0 on success, negative error value otherwise. 1280 */ 1281 static int 1282 mrvl_allmulticast_enable(struct rte_eth_dev *dev) 1283 { 1284 struct mrvl_priv *priv = dev->data->dev_private; 1285 int ret; 1286 1287 if (priv->isolated) 1288 return -ENOTSUP; 1289 1290 if (!priv->ppio) 1291 return 0; 1292 1293 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); 1294 if (ret) { 1295 MRVL_LOG(ERR, "Failed enable all-multicast mode"); 1296 return -EAGAIN; 1297 } 1298 1299 return 0; 1300 } 1301 1302 /** 1303 * DPDK callback to disable promiscuous mode. 1304 * 1305 * @param dev 1306 * Pointer to Ethernet device structure. 1307 * 1308 * @return 1309 * 0 on success, negative error value otherwise. 
1310 */ 1311 static int 1312 mrvl_promiscuous_disable(struct rte_eth_dev *dev) 1313 { 1314 struct mrvl_priv *priv = dev->data->dev_private; 1315 int ret; 1316 1317 if (priv->isolated) 1318 return -ENOTSUP; 1319 1320 if (!priv->ppio) 1321 return 0; 1322 1323 ret = pp2_ppio_set_promisc(priv->ppio, 0); 1324 if (ret) { 1325 MRVL_LOG(ERR, "Failed to disable promiscuous mode"); 1326 return -EAGAIN; 1327 } 1328 1329 return 0; 1330 } 1331 1332 /** 1333 * DPDK callback to disable allmulticast mode. 1334 * 1335 * @param dev 1336 * Pointer to Ethernet device structure. 1337 * 1338 * @return 1339 * 0 on success, negative error value otherwise. 1340 */ 1341 static int 1342 mrvl_allmulticast_disable(struct rte_eth_dev *dev) 1343 { 1344 struct mrvl_priv *priv = dev->data->dev_private; 1345 int ret; 1346 1347 if (priv->isolated) 1348 return -ENOTSUP; 1349 1350 if (!priv->ppio) 1351 return 0; 1352 1353 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); 1354 if (ret) { 1355 MRVL_LOG(ERR, "Failed to disable all-multicast mode"); 1356 return -EAGAIN; 1357 } 1358 1359 return 0; 1360 } 1361 1362 /** 1363 * DPDK callback to remove a MAC address. 1364 * 1365 * @param dev 1366 * Pointer to Ethernet device structure. 1367 * @param index 1368 * MAC address index. 1369 */ 1370 static void 1371 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 1372 { 1373 struct mrvl_priv *priv = dev->data->dev_private; 1374 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 1375 int ret; 1376 1377 if (priv->isolated) 1378 return; 1379 1380 if (!priv->ppio) 1381 return; 1382 1383 ret = pp2_ppio_remove_mac_addr(priv->ppio, 1384 dev->data->mac_addrs[index].addr_bytes); 1385 if (ret) { 1386 rte_ether_format_addr(buf, sizeof(buf), 1387 &dev->data->mac_addrs[index]); 1388 MRVL_LOG(ERR, "Failed to remove mac %s", buf); 1389 } 1390 } 1391 1392 /** 1393 * DPDK callback to add a MAC address. 1394 * 1395 * @param dev 1396 * Pointer to Ethernet device structure. 1397 * @param mac_addr 1398 * MAC address to register. 1399 * @param index 1400 * MAC address index. 1401 * @param vmdq 1402 * VMDq pool index to associate address with (unused). 1403 * 1404 * @return 1405 * 0 on success, negative error value otherwise. 1406 */ 1407 static int 1408 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1409 uint32_t index, uint32_t vmdq __rte_unused) 1410 { 1411 struct mrvl_priv *priv = dev->data->dev_private; 1412 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 1413 int ret; 1414 1415 if (priv->isolated) 1416 return -ENOTSUP; 1417 1418 if (!priv->ppio) 1419 return 0; 1420 1421 if (index == 0) 1422 /* For setting index 0, mrvl_mac_addr_set() should be used.*/ 1423 return -1; 1424 1425 /* 1426 * Maximum number of uc addresses can be tuned via kernel module mvpp2x 1427 * parameter uc_filter_max. Maximum number of mc addresses is then 1428 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and 1429 * 21 respectively. 1430 * 1431 * If more than uc_filter_max uc addresses were added to filter list 1432 * then NIC will switch to promiscuous mode automatically. 1433 * 1434 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses 1435 * were added to filter list then NIC will switch to all-multicast mode 1436 * automatically. 1437 */ 1438 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); 1439 if (ret) { 1440 rte_ether_format_addr(buf, sizeof(buf), mac_addr); 1441 MRVL_LOG(ERR, "Failed to add mac %s", buf); 1442 return -1; 1443 } 1444 1445 return 0; 1446 } 1447 1448 /** 1449 * DPDK callback to set the primary MAC address. 
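 *
 * Index 0 (the primary address) is programmed with pp2_ppio_set_mac_addr();
 * additional unicast/multicast filter entries go through mrvl_mac_addr_add(),
 * which uses pp2_ppio_add_mac_addr() and explicitly rejects index 0.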
1450 * 1451 * @param dev 1452 * Pointer to Ethernet device structure. 1453 * @param mac_addr 1454 * MAC address to register. 1455 * 1456 * @return 1457 * 0 on success, negative error value otherwise. 1458 */ 1459 static int 1460 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) 1461 { 1462 struct mrvl_priv *priv = dev->data->dev_private; 1463 int ret; 1464 1465 if (priv->isolated) 1466 return -ENOTSUP; 1467 1468 if (!priv->ppio) 1469 return 0; 1470 1471 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); 1472 if (ret) { 1473 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 1474 rte_ether_format_addr(buf, sizeof(buf), mac_addr); 1475 MRVL_LOG(ERR, "Failed to set mac to %s", buf); 1476 } 1477 1478 return ret; 1479 } 1480 1481 /** 1482 * DPDK callback to get device statistics. 1483 * 1484 * @param dev 1485 * Pointer to Ethernet device structure. 1486 * @param stats 1487 * Stats structure output buffer. 1488 * 1489 * @return 1490 * 0 on success, negative error value otherwise. 1491 */ 1492 static int 1493 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 1494 { 1495 struct mrvl_priv *priv = dev->data->dev_private; 1496 struct pp2_ppio_statistics ppio_stats; 1497 uint64_t drop_mac = 0; 1498 unsigned int i, idx, ret; 1499 1500 if (!priv->ppio) 1501 return -EPERM; 1502 1503 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1504 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1505 struct pp2_ppio_inq_statistics rx_stats; 1506 1507 if (!rxq) 1508 continue; 1509 1510 idx = rxq->queue_id; 1511 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1512 MRVL_LOG(ERR, 1513 "rx queue %d stats out of range (0 - %d)", 1514 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1515 continue; 1516 } 1517 1518 ret = pp2_ppio_inq_get_statistics(priv->ppio, 1519 priv->rxq_map[idx].tc, 1520 priv->rxq_map[idx].inq, 1521 &rx_stats, 0); 1522 if (unlikely(ret)) { 1523 MRVL_LOG(ERR, 1524 "Failed to update rx queue %d stats", idx); 1525 break; 1526 } 1527 1528 stats->q_ibytes[idx] = rxq->bytes_recv; 1529 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; 1530 stats->q_errors[idx] = rx_stats.drop_early + 1531 rx_stats.drop_fullq + 1532 rx_stats.drop_bm + 1533 rxq->drop_mac; 1534 stats->ibytes += rxq->bytes_recv; 1535 drop_mac += rxq->drop_mac; 1536 } 1537 1538 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1539 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1540 struct pp2_ppio_outq_statistics tx_stats; 1541 1542 if (!txq) 1543 continue; 1544 1545 idx = txq->queue_id; 1546 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { 1547 MRVL_LOG(ERR, 1548 "tx queue %d stats out of range (0 - %d)", 1549 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); 1550 } 1551 1552 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, 1553 &tx_stats, 0); 1554 if (unlikely(ret)) { 1555 MRVL_LOG(ERR, 1556 "Failed to update tx queue %d stats", idx); 1557 break; 1558 } 1559 1560 stats->q_opackets[idx] = tx_stats.deq_desc; 1561 stats->q_obytes[idx] = txq->bytes_sent; 1562 stats->obytes += txq->bytes_sent; 1563 } 1564 1565 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1566 if (unlikely(ret)) { 1567 MRVL_LOG(ERR, "Failed to update port statistics"); 1568 return ret; 1569 } 1570 1571 stats->ipackets += ppio_stats.rx_packets - drop_mac; 1572 stats->opackets += ppio_stats.tx_packets; 1573 stats->imissed += ppio_stats.rx_fullq_dropped + 1574 ppio_stats.rx_bm_dropped + 1575 ppio_stats.rx_early_dropped + 1576 ppio_stats.rx_fifo_dropped + 1577 ppio_stats.rx_cls_dropped; 1578 stats->ierrors = drop_mac; 1579 1580 return 0; 
1581 } 1582 1583 /** 1584 * DPDK callback to clear device statistics. 1585 * 1586 * @param dev 1587 * Pointer to Ethernet device structure. 1588 * 1589 * @return 1590 * 0 on success, negative error value otherwise. 1591 */ 1592 static int 1593 mrvl_stats_reset(struct rte_eth_dev *dev) 1594 { 1595 struct mrvl_priv *priv = dev->data->dev_private; 1596 int i; 1597 1598 if (!priv->ppio) 1599 return 0; 1600 1601 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1602 struct mrvl_rxq *rxq = dev->data->rx_queues[i]; 1603 1604 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, 1605 priv->rxq_map[i].inq, NULL, 1); 1606 rxq->bytes_recv = 0; 1607 rxq->drop_mac = 0; 1608 } 1609 1610 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1611 struct mrvl_txq *txq = dev->data->tx_queues[i]; 1612 1613 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); 1614 txq->bytes_sent = 0; 1615 } 1616 1617 return pp2_ppio_get_statistics(priv->ppio, NULL, 1); 1618 } 1619 1620 /** 1621 * DPDK callback to get extended statistics. 1622 * 1623 * @param dev 1624 * Pointer to Ethernet device structure. 1625 * @param stats 1626 * Pointer to xstats table. 1627 * @param n 1628 * Number of entries in xstats table. 1629 * @return 1630 * Negative value on error, number of read xstats otherwise. 1631 */ 1632 static int 1633 mrvl_xstats_get(struct rte_eth_dev *dev, 1634 struct rte_eth_xstat *stats, unsigned int n) 1635 { 1636 struct mrvl_priv *priv = dev->data->dev_private; 1637 struct pp2_ppio_statistics ppio_stats; 1638 unsigned int i; 1639 1640 if (!stats) 1641 return 0; 1642 1643 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); 1644 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { 1645 uint64_t val; 1646 1647 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) 1648 val = *(uint32_t *)((uint8_t *)&ppio_stats + 1649 mrvl_xstats_tbl[i].offset); 1650 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) 1651 val = *(uint64_t *)((uint8_t *)&ppio_stats + 1652 mrvl_xstats_tbl[i].offset); 1653 else 1654 return -EINVAL; 1655 1656 stats[i].id = i; 1657 stats[i].value = val; 1658 } 1659 1660 return n; 1661 } 1662 1663 /** 1664 * DPDK callback to reset extended statistics. 1665 * 1666 * @param dev 1667 * Pointer to Ethernet device structure. 1668 * 1669 * @return 1670 * 0 on success, negative error value otherwise. 1671 */ 1672 static int 1673 mrvl_xstats_reset(struct rte_eth_dev *dev) 1674 { 1675 return mrvl_stats_reset(dev); 1676 } 1677 1678 /** 1679 * DPDK callback to get extended statistics names. 1680 * 1681 * @param dev (unused) 1682 * Pointer to Ethernet device structure. 1683 * @param xstats_names 1684 * Pointer to xstats names table. 1685 * @param size 1686 * Size of the xstats names table. 1687 * @return 1688 * Number of read names. 1689 */ 1690 static int 1691 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, 1692 struct rte_eth_xstat_name *xstats_names, 1693 unsigned int size) 1694 { 1695 unsigned int i; 1696 1697 if (!xstats_names) 1698 return RTE_DIM(mrvl_xstats_tbl); 1699 1700 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) 1701 strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name, 1702 RTE_ETH_XSTATS_NAME_SIZE); 1703 1704 return size; 1705 } 1706 1707 /** 1708 * DPDK callback to get information about the device. 1709 * 1710 * @param dev 1711 * Pointer to Ethernet device structure (unused). 1712 * @param info 1713 * Info structure output buffer. 
1714 */ 1715 static int 1716 mrvl_dev_infos_get(struct rte_eth_dev *dev, 1717 struct rte_eth_dev_info *info) 1718 { 1719 struct mrvl_priv *priv = dev->data->dev_private; 1720 1721 info->speed_capa = ETH_LINK_SPEED_10M | 1722 ETH_LINK_SPEED_100M | 1723 ETH_LINK_SPEED_1G | 1724 ETH_LINK_SPEED_2_5G | 1725 ETH_LINK_SPEED_10G; 1726 1727 info->max_rx_queues = MRVL_PP2_RXQ_MAX; 1728 info->max_tx_queues = MRVL_PP2_TXQ_MAX; 1729 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; 1730 1731 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; 1732 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; 1733 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; 1734 1735 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; 1736 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; 1737 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; 1738 1739 info->rx_offload_capa = MRVL_RX_OFFLOADS; 1740 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; 1741 1742 info->tx_offload_capa = MRVL_TX_OFFLOADS; 1743 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; 1744 1745 info->flow_type_rss_offloads = ETH_RSS_IPV4 | 1746 ETH_RSS_NONFRAG_IPV4_TCP | 1747 ETH_RSS_NONFRAG_IPV4_UDP; 1748 1749 /* By default packets are dropped if no descriptors are available */ 1750 info->default_rxconf.rx_drop_en = 1; 1751 1752 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; 1753 info->max_mtu = priv->max_mtu; 1754 1755 return 0; 1756 } 1757 1758 /** 1759 * Return supported packet types. 1760 * 1761 * @param dev 1762 * Pointer to Ethernet device structure (unused). 1763 * 1764 * @return 1765 * Const pointer to the table with supported packet types. 1766 */ 1767 static const uint32_t * 1768 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) 1769 { 1770 static const uint32_t ptypes[] = { 1771 RTE_PTYPE_L2_ETHER, 1772 RTE_PTYPE_L2_ETHER_VLAN, 1773 RTE_PTYPE_L2_ETHER_QINQ, 1774 RTE_PTYPE_L3_IPV4, 1775 RTE_PTYPE_L3_IPV4_EXT, 1776 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 1777 RTE_PTYPE_L3_IPV6, 1778 RTE_PTYPE_L3_IPV6_EXT, 1779 RTE_PTYPE_L2_ETHER_ARP, 1780 RTE_PTYPE_L4_TCP, 1781 RTE_PTYPE_L4_UDP 1782 }; 1783 1784 return ptypes; 1785 } 1786 1787 /** 1788 * DPDK callback to get information about specific receive queue. 1789 * 1790 * @param dev 1791 * Pointer to Ethernet device structure. 1792 * @param rx_queue_id 1793 * Receive queue index. 1794 * @param qinfo 1795 * Receive queue information structure. 1796 */ 1797 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1798 struct rte_eth_rxq_info *qinfo) 1799 { 1800 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; 1801 struct mrvl_priv *priv = dev->data->dev_private; 1802 int inq = priv->rxq_map[rx_queue_id].inq; 1803 int tc = priv->rxq_map[rx_queue_id].tc; 1804 struct pp2_ppio_tc_params *tc_params = 1805 &priv->ppio_params.inqs_params.tcs_params[tc]; 1806 1807 qinfo->mp = q->mp; 1808 qinfo->nb_desc = tc_params->inqs_params[inq].size; 1809 } 1810 1811 /** 1812 * DPDK callback to get information about specific transmit queue. 1813 * 1814 * @param dev 1815 * Pointer to Ethernet device structure. 1816 * @param tx_queue_id 1817 * Transmit queue index. 1818 * @param qinfo 1819 * Transmit queue information structure. 
1820 */ 1821 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1822 struct rte_eth_txq_info *qinfo) 1823 { 1824 struct mrvl_priv *priv = dev->data->dev_private; 1825 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; 1826 1827 qinfo->nb_desc = 1828 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; 1829 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1830 } 1831 1832 /** 1833 * DPDK callback to Configure a VLAN filter. 1834 * 1835 * @param dev 1836 * Pointer to Ethernet device structure. 1837 * @param vlan_id 1838 * VLAN ID to filter. 1839 * @param on 1840 * Toggle filter. 1841 * 1842 * @return 1843 * 0 on success, negative error value otherwise. 1844 */ 1845 static int 1846 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1847 { 1848 struct mrvl_priv *priv = dev->data->dev_private; 1849 1850 if (priv->isolated) 1851 return -ENOTSUP; 1852 1853 if (!priv->ppio) 1854 return 0; 1855 1856 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : 1857 pp2_ppio_remove_vlan(priv->ppio, vlan_id); 1858 } 1859 1860 /** 1861 * DPDK callback to Configure VLAN offload. 1862 * 1863 * @param dev 1864 * Pointer to Ethernet device structure. 1865 * @param mask 1866 * VLAN offload mask. 1867 * 1868 * @return 1869 * 0 on success, negative error value otherwise. 1870 */ 1871 static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask) 1872 { 1873 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 1874 int ret; 1875 1876 if (mask & ETH_VLAN_STRIP_MASK) { 1877 MRVL_LOG(ERR, "VLAN stripping is not supported\n"); 1878 return -ENOTSUP; 1879 } 1880 1881 if (mask & ETH_VLAN_FILTER_MASK) { 1882 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 1883 ret = mrvl_populate_vlan_table(dev, 1); 1884 else 1885 ret = mrvl_populate_vlan_table(dev, 0); 1886 1887 if (ret) 1888 return ret; 1889 } 1890 1891 if (mask & ETH_VLAN_EXTEND_MASK) { 1892 MRVL_LOG(ERR, "Extend VLAN not supported\n"); 1893 return -ENOTSUP; 1894 } 1895 1896 return 0; 1897 } 1898 1899 /** 1900 * Release buffers to hardware bpool (buffer-pool) 1901 * 1902 * @param rxq 1903 * Receive queue pointer. 1904 * @param num 1905 * Number of buffers to release to bpool. 1906 * 1907 * @return 1908 * 0 on success, negative error value otherwise. 
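 *
 * Buffers are allocated from the queue's mempool in bulk and handed to the
 * hardware pool via pp2_bpool_put_buffs(); the count actually accepted is
 * added to the per-lcore counter
 *   mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id]
 * which mrvl_get_bpool_size() sums across lcores when the refill/shrink
 * thresholds (bpool_min_size/bpool_max_size) are evaluated.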
1909 */ 1910 static int 1911 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) 1912 { 1913 struct buff_release_entry entries[num]; 1914 struct rte_mbuf *mbufs[num]; 1915 int i, ret; 1916 unsigned int core_id; 1917 struct pp2_hif *hif; 1918 struct pp2_bpool *bpool; 1919 1920 core_id = rte_lcore_id(); 1921 if (core_id == LCORE_ID_ANY) 1922 core_id = rte_get_main_lcore(); 1923 1924 hif = mrvl_get_hif(rxq->priv, core_id); 1925 if (!hif) 1926 return -1; 1927 1928 bpool = rxq->priv->bpool; 1929 1930 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); 1931 if (ret) 1932 return ret; 1933 1934 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) 1935 cookie_addr_high = 1936 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; 1937 1938 for (i = 0; i < num; i++) { 1939 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) 1940 != cookie_addr_high) { 1941 MRVL_LOG(ERR, 1942 "mbuf virtual addr high is out of range " 1943 "0x%x instead of 0x%x\n", 1944 (uint32_t)((uint64_t)mbufs[i] >> 32), 1945 (uint32_t)(cookie_addr_high >> 32)); 1946 goto out; 1947 } 1948 1949 entries[i].buff.addr = 1950 rte_mbuf_data_iova_default(mbufs[i]); 1951 entries[i].buff.cookie = (uintptr_t)mbufs[i]; 1952 entries[i].bpool = bpool; 1953 } 1954 1955 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); 1956 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; 1957 1958 if (i != num) 1959 goto out; 1960 1961 return 0; 1962 out: 1963 for (; i < num; i++) 1964 rte_pktmbuf_free(mbufs[i]); 1965 1966 return -1; 1967 } 1968 1969 /** 1970 * DPDK callback to configure the receive queue. 1971 * 1972 * @param dev 1973 * Pointer to Ethernet device structure. 1974 * @param idx 1975 * RX queue index. 1976 * @param desc 1977 * Number of descriptors to configure in queue. 1978 * @param socket 1979 * NUMA socket on which memory must be allocated. 1980 * @param conf 1981 * Thresholds parameters. 1982 * @param mp 1983 * Memory pool for buffer allocations. 1984 * 1985 * @return 1986 * 0 on success, negative error value otherwise. 1987 */ 1988 static int 1989 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1990 unsigned int socket, 1991 const struct rte_eth_rxconf *conf, 1992 struct rte_mempool *mp) 1993 { 1994 struct mrvl_priv *priv = dev->data->dev_private; 1995 struct mrvl_rxq *rxq; 1996 uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp); 1997 uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; 1998 int ret, tc, inq; 1999 uint64_t offloads; 2000 2001 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; 2002 2003 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { 2004 /* 2005 * Unknown TC mapping, mapping will not have a correct queue. 
2006 */ 2007 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu", 2008 idx, priv->ppio_id); 2009 return -EFAULT; 2010 } 2011 2012 frame_size = buf_size - RTE_PKTMBUF_HEADROOM - 2013 MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN; 2014 if (frame_size < max_rx_pkt_len) { 2015 MRVL_LOG(WARNING, 2016 "Mbuf size must be increased to %u bytes to hold up " 2017 "to %u bytes of data.", 2018 buf_size + max_rx_pkt_len - frame_size, 2019 max_rx_pkt_len); 2020 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2021 MRVL_LOG(INFO, "Setting max rx pkt len to %u", 2022 dev->data->dev_conf.rxmode.max_rx_pkt_len); 2023 } 2024 2025 if (dev->data->rx_queues[idx]) { 2026 rte_free(dev->data->rx_queues[idx]); 2027 dev->data->rx_queues[idx] = NULL; 2028 } 2029 2030 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); 2031 if (!rxq) 2032 return -ENOMEM; 2033 2034 rxq->priv = priv; 2035 rxq->mp = mp; 2036 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; 2037 rxq->queue_id = idx; 2038 rxq->port_id = dev->data->port_id; 2039 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; 2040 2041 tc = priv->rxq_map[rxq->queue_id].tc, 2042 inq = priv->rxq_map[rxq->queue_id].inq; 2043 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = 2044 desc; 2045 2046 ret = mrvl_fill_bpool(rxq, desc); 2047 if (ret) { 2048 rte_free(rxq); 2049 return ret; 2050 } 2051 2052 priv->bpool_init_size += desc; 2053 2054 dev->data->rx_queues[idx] = rxq; 2055 2056 return 0; 2057 } 2058 2059 /** 2060 * DPDK callback to release the receive queue. 2061 * 2062 * @param rxq 2063 * Generic receive queue pointer. 2064 */ 2065 static void 2066 mrvl_rx_queue_release(void *rxq) 2067 { 2068 struct mrvl_rxq *q = rxq; 2069 struct pp2_ppio_tc_params *tc_params; 2070 int i, num, tc, inq; 2071 struct pp2_hif *hif; 2072 unsigned int core_id = rte_lcore_id(); 2073 2074 if (core_id == LCORE_ID_ANY) 2075 core_id = rte_get_main_lcore(); 2076 2077 if (!q) 2078 return; 2079 2080 hif = mrvl_get_hif(q->priv, core_id); 2081 2082 if (!hif) 2083 return; 2084 2085 tc = q->priv->rxq_map[q->queue_id].tc; 2086 inq = q->priv->rxq_map[q->queue_id].inq; 2087 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; 2088 num = tc_params->inqs_params[inq].size; 2089 for (i = 0; i < num; i++) { 2090 struct pp2_buff_inf inf; 2091 uint64_t addr; 2092 2093 pp2_bpool_get_buff(hif, q->priv->bpool, &inf); 2094 addr = cookie_addr_high | inf.cookie; 2095 rte_pktmbuf_free((struct rte_mbuf *)addr); 2096 } 2097 2098 rte_free(q); 2099 } 2100 2101 /** 2102 * DPDK callback to configure the transmit queue. 2103 * 2104 * @param dev 2105 * Pointer to Ethernet device structure. 2106 * @param idx 2107 * Transmit queue index. 2108 * @param desc 2109 * Number of descriptors to configure in the queue. 2110 * @param socket 2111 * NUMA socket on which memory must be allocated. 2112 * @param conf 2113 * Tx queue configuration parameters. 2114 * 2115 * @return 2116 * 0 on success, negative error value otherwise. 
2117 */ 2118 static int 2119 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 2120 unsigned int socket, 2121 const struct rte_eth_txconf *conf) 2122 { 2123 struct mrvl_priv *priv = dev->data->dev_private; 2124 struct mrvl_txq *txq; 2125 2126 if (dev->data->tx_queues[idx]) { 2127 rte_free(dev->data->tx_queues[idx]); 2128 dev->data->tx_queues[idx] = NULL; 2129 } 2130 2131 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); 2132 if (!txq) 2133 return -ENOMEM; 2134 2135 txq->priv = priv; 2136 txq->queue_id = idx; 2137 txq->port_id = dev->data->port_id; 2138 txq->tx_deferred_start = conf->tx_deferred_start; 2139 dev->data->tx_queues[idx] = txq; 2140 2141 priv->ppio_params.outqs_params.outqs_params[idx].size = desc; 2142 2143 return 0; 2144 } 2145 2146 /** 2147 * DPDK callback to release the transmit queue. 2148 * 2149 * @param txq 2150 * Generic transmit queue pointer. 2151 */ 2152 static void 2153 mrvl_tx_queue_release(void *txq) 2154 { 2155 struct mrvl_txq *q = txq; 2156 2157 if (!q) 2158 return; 2159 2160 rte_free(q); 2161 } 2162 2163 /** 2164 * DPDK callback to get flow control configuration. 2165 * 2166 * @param dev 2167 * Pointer to Ethernet device structure. 2168 * @param fc_conf 2169 * Pointer to the flow control configuration. 2170 * 2171 * @return 2172 * 0 on success, negative error value otherwise. 2173 */ 2174 static int 2175 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2176 { 2177 struct mrvl_priv *priv = dev->data->dev_private; 2178 int ret, en; 2179 2180 if (!priv->ppio) { 2181 memcpy(fc_conf, &priv->fc_conf, sizeof(struct rte_eth_fc_conf)); 2182 return 0; 2183 } 2184 2185 fc_conf->autoneg = 1; 2186 ret = pp2_ppio_get_rx_pause(priv->ppio, &en); 2187 if (ret) { 2188 MRVL_LOG(ERR, "Failed to read rx pause state"); 2189 return ret; 2190 } 2191 2192 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE; 2193 2194 ret = pp2_ppio_get_tx_pause(priv->ppio, &en); 2195 if (ret) { 2196 MRVL_LOG(ERR, "Failed to read tx pause state"); 2197 return ret; 2198 } 2199 2200 if (en) { 2201 if (fc_conf->mode == RTE_FC_NONE) 2202 fc_conf->mode = RTE_FC_TX_PAUSE; 2203 else 2204 fc_conf->mode = RTE_FC_FULL; 2205 } 2206 2207 return 0; 2208 } 2209 2210 /** 2211 * DPDK callback to set flow control configuration. 2212 * 2213 * @param dev 2214 * Pointer to Ethernet device structure. 2215 * @param fc_conf 2216 * Pointer to the flow control configuration. 2217 * 2218 * @return 2219 * 0 on success, negative error value otherwise. 
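 *
 * Supported modes map onto the ppio pause controls as follows:
 *   RTE_FC_NONE      rx_en = 0, tx_en = 0
 *   RTE_FC_RX_PAUSE  rx_en = 1, tx_en = 0
 *   RTE_FC_TX_PAUSE  rx_en = 0, tx_en = 1
 *   RTE_FC_FULL      rx_en = 1, tx_en = 1
 * Non-zero high_water/low_water/pause_time/mac_ctrl_frame_fwd and
 * autoneg == 0 are rejected with -EINVAL. If the ppio is not initialized
 * yet, the request is cached in priv->fc_conf and applied later from
 * mrvl_dev_start().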
2220 */ 2221 static int 2222 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2223 { 2224 struct mrvl_priv *priv = dev->data->dev_private; 2225 struct pp2_ppio_tx_pause_params mrvl_pause_params; 2226 int ret; 2227 int rx_en, tx_en; 2228 2229 if (fc_conf->high_water || 2230 fc_conf->low_water || 2231 fc_conf->pause_time || 2232 fc_conf->mac_ctrl_frame_fwd) { 2233 MRVL_LOG(ERR, "Flowctrl parameter is not supported"); 2234 2235 return -EINVAL; 2236 } 2237 2238 if (fc_conf->autoneg == 0) { 2239 MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported"); 2240 return -EINVAL; 2241 } 2242 2243 if (!priv->ppio) { 2244 memcpy(&priv->fc_conf, fc_conf, sizeof(struct rte_eth_fc_conf)); 2245 priv->flow_ctrl = 1; 2246 return 0; 2247 } 2248 2249 switch (fc_conf->mode) { 2250 case RTE_FC_FULL: 2251 rx_en = 1; 2252 tx_en = 1; 2253 break; 2254 case RTE_FC_TX_PAUSE: 2255 rx_en = 0; 2256 tx_en = 1; 2257 break; 2258 case RTE_FC_RX_PAUSE: 2259 rx_en = 1; 2260 tx_en = 0; 2261 break; 2262 case RTE_FC_NONE: 2263 rx_en = 0; 2264 tx_en = 0; 2265 break; 2266 default: 2267 MRVL_LOG(ERR, "Incorrect Flow control flag (%d)", 2268 fc_conf->mode); 2269 return -EINVAL; 2270 } 2271 2272 /* Set RX flow control */ 2273 ret = pp2_ppio_set_rx_pause(priv->ppio, rx_en); 2274 if (ret) { 2275 MRVL_LOG(ERR, "Failed to change RX flowctrl"); 2276 return ret; 2277 } 2278 2279 /* Set TX flow control */ 2280 mrvl_pause_params.en = tx_en; 2281 /* all inqs participate in xon/xoff decision */ 2282 mrvl_pause_params.use_tc_pause_inqs = 0; 2283 ret = pp2_ppio_set_tx_pause(priv->ppio, &mrvl_pause_params); 2284 if (ret) { 2285 MRVL_LOG(ERR, "Failed to change TX flowctrl"); 2286 return ret; 2287 } 2288 2289 return 0; 2290 } 2291 2292 /** 2293 * Update RSS hash configuration 2294 * 2295 * @param dev 2296 * Pointer to Ethernet device structure. 2297 * @param rss_conf 2298 * Pointer to RSS configuration. 2299 * 2300 * @return 2301 * 0 on success, negative error value otherwise. 2302 */ 2303 static int 2304 mrvl_rss_hash_update(struct rte_eth_dev *dev, 2305 struct rte_eth_rss_conf *rss_conf) 2306 { 2307 struct mrvl_priv *priv = dev->data->dev_private; 2308 2309 if (priv->isolated) 2310 return -ENOTSUP; 2311 2312 return mrvl_configure_rss(priv, rss_conf); 2313 } 2314 2315 /** 2316 * DPDK callback to get RSS hash configuration. 2317 * 2318 * @param dev 2319 * Pointer to Ethernet device structure. 2320 * @rss_conf 2321 * Pointer to RSS configuration. 2322 * 2323 * @return 2324 * Always 0. 2325 */ 2326 static int 2327 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, 2328 struct rte_eth_rss_conf *rss_conf) 2329 { 2330 struct mrvl_priv *priv = dev->data->dev_private; 2331 enum pp2_ppio_hash_type hash_type = 2332 priv->ppio_params.inqs_params.hash_type; 2333 2334 rss_conf->rss_key = NULL; 2335 2336 if (hash_type == PP2_PPIO_HASH_T_NONE) 2337 rss_conf->rss_hf = 0; 2338 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) 2339 rss_conf->rss_hf = ETH_RSS_IPV4; 2340 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) 2341 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; 2342 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) 2343 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; 2344 2345 return 0; 2346 } 2347 2348 /** 2349 * DPDK callback to get rte_flow callbacks. 2350 * 2351 * @param dev 2352 * Pointer to the device structure. 2353 * @param ops 2354 * Pointer to pass the flow ops. 2355 * 2356 * @return 2357 * 0 on success, negative error value otherwise. 
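 *
 * This table is not called directly by applications; it backs the generic
 * rte_flow API, i.e. calls such as rte_flow_validate() and rte_flow_create()
 * are dispatched to the handlers in mrvl_flow_ops.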
2358 */ 2359 static int 2360 mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 2361 const struct rte_flow_ops **ops) 2362 { 2363 *ops = &mrvl_flow_ops; 2364 return 0; 2365 } 2366 2367 /** 2368 * DPDK callback to get rte_mtr callbacks. 2369 * 2370 * @param dev 2371 * Pointer to the device structure. 2372 * @param ops 2373 * Pointer to pass the mtr ops. 2374 * 2375 * @return 2376 * Always 0. 2377 */ 2378 static int 2379 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2380 { 2381 *(const void **)ops = &mrvl_mtr_ops; 2382 2383 return 0; 2384 } 2385 2386 /** 2387 * DPDK callback to get rte_tm callbacks. 2388 * 2389 * @param dev 2390 * Pointer to the device structure. 2391 * @param ops 2392 * Pointer to pass the tm ops. 2393 * 2394 * @return 2395 * Always 0. 2396 */ 2397 static int 2398 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2399 { 2400 *(const void **)ops = &mrvl_tm_ops; 2401 2402 return 0; 2403 } 2404 2405 static const struct eth_dev_ops mrvl_ops = { 2406 .dev_configure = mrvl_dev_configure, 2407 .dev_start = mrvl_dev_start, 2408 .dev_stop = mrvl_dev_stop, 2409 .dev_set_link_up = mrvl_dev_set_link_up, 2410 .dev_set_link_down = mrvl_dev_set_link_down, 2411 .dev_close = mrvl_dev_close, 2412 .link_update = mrvl_link_update, 2413 .promiscuous_enable = mrvl_promiscuous_enable, 2414 .allmulticast_enable = mrvl_allmulticast_enable, 2415 .promiscuous_disable = mrvl_promiscuous_disable, 2416 .allmulticast_disable = mrvl_allmulticast_disable, 2417 .mac_addr_remove = mrvl_mac_addr_remove, 2418 .mac_addr_add = mrvl_mac_addr_add, 2419 .mac_addr_set = mrvl_mac_addr_set, 2420 .mtu_set = mrvl_mtu_set, 2421 .stats_get = mrvl_stats_get, 2422 .stats_reset = mrvl_stats_reset, 2423 .xstats_get = mrvl_xstats_get, 2424 .xstats_reset = mrvl_xstats_reset, 2425 .xstats_get_names = mrvl_xstats_get_names, 2426 .dev_infos_get = mrvl_dev_infos_get, 2427 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, 2428 .rxq_info_get = mrvl_rxq_info_get, 2429 .txq_info_get = mrvl_txq_info_get, 2430 .vlan_filter_set = mrvl_vlan_filter_set, 2431 .vlan_offload_set = mrvl_vlan_offload_set, 2432 .tx_queue_start = mrvl_tx_queue_start, 2433 .tx_queue_stop = mrvl_tx_queue_stop, 2434 .rx_queue_setup = mrvl_rx_queue_setup, 2435 .rx_queue_release = mrvl_rx_queue_release, 2436 .tx_queue_setup = mrvl_tx_queue_setup, 2437 .tx_queue_release = mrvl_tx_queue_release, 2438 .flow_ctrl_get = mrvl_flow_ctrl_get, 2439 .flow_ctrl_set = mrvl_flow_ctrl_set, 2440 .rss_hash_update = mrvl_rss_hash_update, 2441 .rss_hash_conf_get = mrvl_rss_hash_conf_get, 2442 .flow_ops_get = mrvl_eth_flow_ops_get, 2443 .mtr_ops_get = mrvl_mtr_ops_get, 2444 .tm_ops_get = mrvl_tm_ops_get, 2445 }; 2446 2447 /** 2448 * Return packet type information and l3/l4 offsets. 2449 * 2450 * @param desc 2451 * Pointer to the received packet descriptor. 2452 * @param l3_offset 2453 * l3 packet offset. 2454 * @param l4_offset 2455 * l4 packet offset. 2456 * 2457 * @return 2458 * Packet type information. 
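 *
 * For example (illustrative), an untagged IPv4/TCP frame is reported as
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, with
 * *l3_offset and *l4_offset pointing at the start of the IP and TCP headers
 * respectively.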
2459 */ 2460 static inline uint64_t 2461 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, 2462 uint8_t *l3_offset, uint8_t *l4_offset) 2463 { 2464 enum pp2_inq_l3_type l3_type; 2465 enum pp2_inq_l4_type l4_type; 2466 enum pp2_inq_vlan_tag vlan_tag; 2467 uint64_t packet_type; 2468 2469 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); 2470 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); 2471 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag); 2472 2473 packet_type = RTE_PTYPE_L2_ETHER; 2474 2475 switch (vlan_tag) { 2476 case PP2_INQ_VLAN_TAG_SINGLE: 2477 packet_type |= RTE_PTYPE_L2_ETHER_VLAN; 2478 break; 2479 case PP2_INQ_VLAN_TAG_DOUBLE: 2480 case PP2_INQ_VLAN_TAG_TRIPLE: 2481 packet_type |= RTE_PTYPE_L2_ETHER_QINQ; 2482 break; 2483 default: 2484 break; 2485 } 2486 2487 switch (l3_type) { 2488 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: 2489 packet_type |= RTE_PTYPE_L3_IPV4; 2490 break; 2491 case PP2_INQ_L3_TYPE_IPV4_OK: 2492 packet_type |= RTE_PTYPE_L3_IPV4_EXT; 2493 break; 2494 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: 2495 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 2496 break; 2497 case PP2_INQ_L3_TYPE_IPV6_NO_EXT: 2498 packet_type |= RTE_PTYPE_L3_IPV6; 2499 break; 2500 case PP2_INQ_L3_TYPE_IPV6_EXT: 2501 packet_type |= RTE_PTYPE_L3_IPV6_EXT; 2502 break; 2503 case PP2_INQ_L3_TYPE_ARP: 2504 packet_type |= RTE_PTYPE_L2_ETHER_ARP; 2505 /* 2506 * In case of ARP l4_offset is set to wrong value. 2507 * Set it to proper one so that later on mbuf->l3_len can be 2508 * calculated subtracting l4_offset and l3_offset. 2509 */ 2510 *l4_offset = *l3_offset + MRVL_ARP_LENGTH; 2511 break; 2512 default: 2513 break; 2514 } 2515 2516 switch (l4_type) { 2517 case PP2_INQ_L4_TYPE_TCP: 2518 packet_type |= RTE_PTYPE_L4_TCP; 2519 break; 2520 case PP2_INQ_L4_TYPE_UDP: 2521 packet_type |= RTE_PTYPE_L4_UDP; 2522 break; 2523 default: 2524 break; 2525 } 2526 2527 return packet_type; 2528 } 2529 2530 /** 2531 * Get offload information from the received packet descriptor. 2532 * 2533 * @param desc 2534 * Pointer to the received packet descriptor. 2535 * 2536 * @return 2537 * Mbuf offload flags. 2538 */ 2539 static inline uint64_t 2540 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc, uint64_t packet_type) 2541 { 2542 uint64_t flags = 0; 2543 enum pp2_inq_desc_status status; 2544 2545 if (RTE_ETH_IS_IPV4_HDR(packet_type)) { 2546 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); 2547 if (unlikely(status != PP2_DESC_ERR_OK)) 2548 flags |= PKT_RX_IP_CKSUM_BAD; 2549 else 2550 flags |= PKT_RX_IP_CKSUM_GOOD; 2551 } 2552 2553 if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) || 2554 ((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) { 2555 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); 2556 if (unlikely(status != PP2_DESC_ERR_OK)) 2557 flags |= PKT_RX_L4_CKSUM_BAD; 2558 else 2559 flags |= PKT_RX_L4_CKSUM_GOOD; 2560 } 2561 2562 return flags; 2563 } 2564 2565 /** 2566 * DPDK callback for receive. 2567 * 2568 * @param rxq 2569 * Generic pointer to the receive queue. 2570 * @param rx_pkts 2571 * Array to store received packets. 2572 * @param nb_pkts 2573 * Maximum number of packets in array. 2574 * 2575 * @return 2576 * Number of packets successfully received. 
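 *
 * Application-side sketch (hypothetical port/queue ids and burst size):
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t n = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *   // each returned mbuf has packet_type and l2_len/l3_len filled in and,
 *   // when Rx checksum offload is enabled, PKT_RX_*_CKSUM_* flags set by
 *   // this routine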
2577 */ 2578 static uint16_t 2579 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 2580 { 2581 struct mrvl_rxq *q = rxq; 2582 struct pp2_ppio_desc descs[nb_pkts]; 2583 struct pp2_bpool *bpool; 2584 int i, ret, rx_done = 0; 2585 int num; 2586 struct pp2_hif *hif; 2587 unsigned int core_id = rte_lcore_id(); 2588 2589 hif = mrvl_get_hif(q->priv, core_id); 2590 2591 if (unlikely(!q->priv->ppio || !hif)) 2592 return 0; 2593 2594 bpool = q->priv->bpool; 2595 2596 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, 2597 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); 2598 if (unlikely(ret < 0)) 2599 return 0; 2600 2601 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; 2602 2603 for (i = 0; i < nb_pkts; i++) { 2604 struct rte_mbuf *mbuf; 2605 uint8_t l3_offset, l4_offset; 2606 enum pp2_inq_desc_status status; 2607 uint64_t addr; 2608 2609 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2610 struct pp2_ppio_desc *pref_desc; 2611 u64 pref_addr; 2612 2613 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2614 pref_addr = cookie_addr_high | 2615 pp2_ppio_inq_desc_get_cookie(pref_desc); 2616 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); 2617 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); 2618 } 2619 2620 addr = cookie_addr_high | 2621 pp2_ppio_inq_desc_get_cookie(&descs[i]); 2622 mbuf = (struct rte_mbuf *)addr; 2623 rte_pktmbuf_reset(mbuf); 2624 2625 /* drop packet in case of mac, overrun or resource error */ 2626 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); 2627 if ((unlikely(status != PP2_DESC_ERR_OK)) && 2628 !(q->priv->forward_bad_frames)) { 2629 struct pp2_buff_inf binf = { 2630 .addr = rte_mbuf_data_iova_default(mbuf), 2631 .cookie = (uint64_t)mbuf, 2632 }; 2633 2634 pp2_bpool_put_buff(hif, bpool, &binf); 2635 mrvl_port_bpool_size 2636 [bpool->pp2_id][bpool->id][core_id]++; 2637 q->drop_mac++; 2638 continue; 2639 } 2640 2641 mbuf->data_off += MRVL_PKT_EFFEC_OFFS; 2642 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); 2643 mbuf->data_len = mbuf->pkt_len; 2644 mbuf->port = q->port_id; 2645 mbuf->packet_type = 2646 mrvl_desc_to_packet_type_and_offset(&descs[i], 2647 &l3_offset, 2648 &l4_offset); 2649 mbuf->l2_len = l3_offset; 2650 mbuf->l3_len = l4_offset - l3_offset; 2651 2652 if (likely(q->cksum_enabled)) 2653 mbuf->ol_flags = 2654 mrvl_desc_to_ol_flags(&descs[i], 2655 mbuf->packet_type); 2656 2657 rx_pkts[rx_done++] = mbuf; 2658 q->bytes_recv += mbuf->pkt_len; 2659 } 2660 2661 if (rte_spinlock_trylock(&q->priv->lock) == 1) { 2662 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); 2663 2664 if (unlikely(num <= q->priv->bpool_min_size || 2665 (!rx_done && num < q->priv->bpool_init_size))) { 2666 mrvl_fill_bpool(q, q->priv->fill_bpool_buffs); 2667 } else if (unlikely(num > q->priv->bpool_max_size)) { 2668 int i; 2669 int pkt_to_remove = num - q->priv->bpool_init_size; 2670 struct rte_mbuf *mbuf; 2671 struct pp2_buff_inf buff; 2672 2673 for (i = 0; i < pkt_to_remove; i++) { 2674 ret = pp2_bpool_get_buff(hif, bpool, &buff); 2675 if (ret) 2676 break; 2677 mbuf = (struct rte_mbuf *) 2678 (cookie_addr_high | buff.cookie); 2679 rte_pktmbuf_free(mbuf); 2680 } 2681 mrvl_port_bpool_size 2682 [bpool->pp2_id][bpool->id][core_id] -= i; 2683 } 2684 rte_spinlock_unlock(&q->priv->lock); 2685 } 2686 2687 return rx_done; 2688 } 2689 2690 /** 2691 * Prepare offload information. 2692 * 2693 * @param ol_flags 2694 * Offload flags. 2695 * @param l3_type 2696 * Pointer to the pp2_ouq_l3_type structure. 
2697 * @param l4_type 2698 * Pointer to the pp2_outq_l4_type structure. 2699 * @param gen_l3_cksum 2700 * Will be set to 1 in case l3 checksum is computed. 2701 * @param l4_cksum 2702 * Will be set to 1 in case l4 checksum is computed. 2703 */ 2704 static inline void 2705 mrvl_prepare_proto_info(uint64_t ol_flags, 2706 enum pp2_outq_l3_type *l3_type, 2707 enum pp2_outq_l4_type *l4_type, 2708 int *gen_l3_cksum, 2709 int *gen_l4_cksum) 2710 { 2711 /* 2712 * Based on ol_flags prepare information 2713 * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor 2714 * for offloading. 2715 * in most of the checksum cases ipv4 must be set, so this is the 2716 * default value 2717 */ 2718 *l3_type = PP2_OUTQ_L3_TYPE_IPV4; 2719 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; 2720 2721 if (ol_flags & PKT_TX_IPV6) { 2722 *l3_type = PP2_OUTQ_L3_TYPE_IPV6; 2723 /* no checksum for ipv6 header */ 2724 *gen_l3_cksum = 0; 2725 } 2726 2727 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) { 2728 *l4_type = PP2_OUTQ_L4_TYPE_TCP; 2729 *gen_l4_cksum = 1; 2730 } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) { 2731 *l4_type = PP2_OUTQ_L4_TYPE_UDP; 2732 *gen_l4_cksum = 1; 2733 } else { 2734 *l4_type = PP2_OUTQ_L4_TYPE_OTHER; 2735 /* no checksum for other type */ 2736 *gen_l4_cksum = 0; 2737 } 2738 } 2739 2740 /** 2741 * Release already sent buffers to bpool (buffer-pool). 2742 * 2743 * @param ppio 2744 * Pointer to the port structure. 2745 * @param hif 2746 * Pointer to the MUSDK hardware interface. 2747 * @param sq 2748 * Pointer to the shadow queue. 2749 * @param qid 2750 * Queue id number. 2751 * @param force 2752 * Force releasing packets. 2753 */ 2754 static inline void 2755 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, 2756 unsigned int core_id, struct mrvl_shadow_txq *sq, 2757 int qid, int force) 2758 { 2759 struct buff_release_entry *entry; 2760 uint16_t nb_done = 0, num = 0, skip_bufs = 0; 2761 int i; 2762 2763 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); 2764 2765 sq->num_to_release += nb_done; 2766 2767 if (likely(!force && 2768 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) 2769 return; 2770 2771 nb_done = sq->num_to_release; 2772 sq->num_to_release = 0; 2773 2774 for (i = 0; i < nb_done; i++) { 2775 entry = &sq->ent[sq->tail + num]; 2776 if (unlikely(!entry->buff.addr)) { 2777 MRVL_LOG(ERR, 2778 "Shadow memory @%d: cookie(%lx), pa(%lx)!", 2779 sq->tail, (u64)entry->buff.cookie, 2780 (u64)entry->buff.addr); 2781 skip_bufs = 1; 2782 goto skip; 2783 } 2784 2785 if (unlikely(!entry->bpool)) { 2786 struct rte_mbuf *mbuf; 2787 2788 mbuf = (struct rte_mbuf *)entry->buff.cookie; 2789 rte_pktmbuf_free(mbuf); 2790 skip_bufs = 1; 2791 goto skip; 2792 } 2793 2794 mrvl_port_bpool_size 2795 [entry->bpool->pp2_id][entry->bpool->id][core_id]++; 2796 num++; 2797 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) 2798 goto skip; 2799 continue; 2800 skip: 2801 if (likely(num)) 2802 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2803 num += skip_bufs; 2804 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2805 sq->size -= num; 2806 num = 0; 2807 skip_bufs = 0; 2808 } 2809 2810 if (likely(num)) { 2811 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2812 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2813 sq->size -= num; 2814 } 2815 } 2816 2817 /** 2818 * DPDK callback for transmit. 2819 * 2820 * @param txq 2821 * Generic pointer transmit queue. 2822 * @param tx_pkts 2823 * Packets to transmit. 
2824 * @param nb_pkts 2825 * Number of packets in array. 2826 * 2827 * @return 2828 * Number of packets successfully transmitted. 2829 */ 2830 static uint16_t 2831 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 2832 { 2833 struct mrvl_txq *q = txq; 2834 struct mrvl_shadow_txq *sq; 2835 struct pp2_hif *hif; 2836 struct pp2_ppio_desc descs[nb_pkts]; 2837 unsigned int core_id = rte_lcore_id(); 2838 int i, bytes_sent = 0; 2839 uint16_t num, sq_free_size; 2840 uint64_t addr; 2841 2842 hif = mrvl_get_hif(q->priv, core_id); 2843 sq = &q->shadow_txqs[core_id]; 2844 2845 if (unlikely(!q->priv->ppio || !hif)) 2846 return 0; 2847 2848 if (sq->size) 2849 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, 2850 sq, q->queue_id, 0); 2851 2852 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; 2853 if (unlikely(nb_pkts > sq_free_size)) 2854 nb_pkts = sq_free_size; 2855 2856 for (i = 0; i < nb_pkts; i++) { 2857 struct rte_mbuf *mbuf = tx_pkts[i]; 2858 int gen_l3_cksum, gen_l4_cksum; 2859 enum pp2_outq_l3_type l3_type; 2860 enum pp2_outq_l4_type l4_type; 2861 2862 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2863 struct rte_mbuf *pref_pkt_hdr; 2864 2865 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2866 rte_mbuf_prefetch_part1(pref_pkt_hdr); 2867 rte_mbuf_prefetch_part2(pref_pkt_hdr); 2868 } 2869 2870 mrvl_fill_shadowq(sq, mbuf); 2871 mrvl_fill_desc(&descs[i], mbuf); 2872 2873 bytes_sent += rte_pktmbuf_pkt_len(mbuf); 2874 /* 2875 * in case unsupported ol_flags were passed 2876 * do not update descriptor offload information 2877 */ 2878 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS)) 2879 continue; 2880 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, 2881 &gen_l3_cksum, &gen_l4_cksum); 2882 2883 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, 2884 mbuf->l2_len, 2885 mbuf->l2_len + mbuf->l3_len, 2886 gen_l3_cksum, gen_l4_cksum); 2887 } 2888 2889 num = nb_pkts; 2890 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); 2891 /* number of packets that were not sent */ 2892 if (unlikely(num > nb_pkts)) { 2893 for (i = nb_pkts; i < num; i++) { 2894 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & 2895 MRVL_PP2_TX_SHADOWQ_MASK; 2896 addr = sq->ent[sq->head].buff.cookie; 2897 bytes_sent -= 2898 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); 2899 } 2900 sq->size -= num - nb_pkts; 2901 } 2902 2903 q->bytes_sent += bytes_sent; 2904 2905 return nb_pkts; 2906 } 2907 2908 /** DPDK callback for S/G transmit. 2909 * 2910 * @param txq 2911 * Generic pointer transmit queue. 2912 * @param tx_pkts 2913 * Packets to transmit. 2914 * @param nb_pkts 2915 * Number of packets in array. 2916 * 2917 * @return 2918 * Number of packets successfully transmitted. 
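 *
 * Each mbuf segment consumes one hardware descriptor; a packet with more
 * than PP2_PPIO_DESC_NUM_FRAGS segments is rejected and the burst is cut
 * short at that packet. Application-side sketch (hypothetical ids and
 * count):
 *
 *   // pkts[] may hold multi-segment (chained) mbufs
 *   uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, n);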
2919 */ 2920 static uint16_t 2921 mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, 2922 uint16_t nb_pkts) 2923 { 2924 struct mrvl_txq *q = txq; 2925 struct mrvl_shadow_txq *sq; 2926 struct pp2_hif *hif; 2927 struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS]; 2928 struct pp2_ppio_sg_pkts pkts; 2929 uint8_t frags[nb_pkts]; 2930 unsigned int core_id = rte_lcore_id(); 2931 int i, j, bytes_sent = 0; 2932 int tail, tail_first; 2933 uint16_t num, sq_free_size; 2934 uint16_t nb_segs, total_descs = 0; 2935 uint64_t addr; 2936 2937 hif = mrvl_get_hif(q->priv, core_id); 2938 sq = &q->shadow_txqs[core_id]; 2939 pkts.frags = frags; 2940 pkts.num = 0; 2941 2942 if (unlikely(!q->priv->ppio || !hif)) 2943 return 0; 2944 2945 if (sq->size) 2946 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, 2947 sq, q->queue_id, 0); 2948 2949 /* Save shadow queue free size */ 2950 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; 2951 2952 tail = 0; 2953 for (i = 0; i < nb_pkts; i++) { 2954 struct rte_mbuf *mbuf = tx_pkts[i]; 2955 struct rte_mbuf *seg = NULL; 2956 int gen_l3_cksum, gen_l4_cksum; 2957 enum pp2_outq_l3_type l3_type; 2958 enum pp2_outq_l4_type l4_type; 2959 2960 nb_segs = mbuf->nb_segs; 2961 tail_first = tail; 2962 total_descs += nb_segs; 2963 2964 /* 2965 * Check if total_descs does not exceed 2966 * shadow queue free size 2967 */ 2968 if (unlikely(total_descs > sq_free_size)) { 2969 total_descs -= nb_segs; 2970 break; 2971 } 2972 2973 /* Check if nb_segs does not exceed the max nb of desc per 2974 * fragmented packet 2975 */ 2976 if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) { 2977 total_descs -= nb_segs; 2978 RTE_LOG(ERR, PMD, 2979 "Too many segments. Packet won't be sent.\n"); 2980 break; 2981 } 2982 2983 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2984 struct rte_mbuf *pref_pkt_hdr; 2985 2986 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2987 rte_mbuf_prefetch_part1(pref_pkt_hdr); 2988 rte_mbuf_prefetch_part2(pref_pkt_hdr); 2989 } 2990 2991 pkts.frags[pkts.num] = nb_segs; 2992 pkts.num++; 2993 2994 seg = mbuf; 2995 for (j = 0; j < nb_segs - 1; j++) { 2996 /* For the subsequent segments, set shadow queue 2997 * buffer to NULL 2998 */ 2999 mrvl_fill_shadowq(sq, NULL); 3000 mrvl_fill_desc(&descs[tail], seg); 3001 3002 tail++; 3003 seg = seg->next; 3004 } 3005 /* Put first mbuf info in last shadow queue entry */ 3006 mrvl_fill_shadowq(sq, mbuf); 3007 /* Update descriptor with last segment */ 3008 mrvl_fill_desc(&descs[tail++], seg); 3009 3010 bytes_sent += rte_pktmbuf_pkt_len(mbuf); 3011 /* In case unsupported ol_flags were passed 3012 * do not update descriptor offload information 3013 */ 3014 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS)) 3015 continue; 3016 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, 3017 &gen_l3_cksum, &gen_l4_cksum); 3018 3019 pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type, 3020 l4_type, mbuf->l2_len, 3021 mbuf->l2_len + mbuf->l3_len, 3022 gen_l3_cksum, gen_l4_cksum); 3023 } 3024 3025 num = total_descs; 3026 pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs, 3027 &total_descs, &pkts); 3028 /* number of packets that were not sent */ 3029 if (unlikely(num > total_descs)) { 3030 for (i = total_descs; i < num; i++) { 3031 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & 3032 MRVL_PP2_TX_SHADOWQ_MASK; 3033 3034 addr = sq->ent[sq->head].buff.cookie; 3035 if (addr) 3036 bytes_sent -= 3037 rte_pktmbuf_pkt_len((struct rte_mbuf *) 3038 (cookie_addr_high | addr)); 3039 } 3040 sq->size -= num - total_descs; 3041 
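		/*
		 * pkts.num reflects how many whole packets were actually
		 * taken by pp2_ppio_send_sg(), so return that count rather
		 * than the descriptor total.
		 */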
nb_pkts = pkts.num; 3042 } 3043 3044 q->bytes_sent += bytes_sent; 3045 3046 return nb_pkts; 3047 } 3048 3049 /** 3050 * Create private device structure. 3051 * 3052 * @param dev_name 3053 * Pointer to the port name passed in the initialization parameters. 3054 * 3055 * @return 3056 * Pointer to the newly allocated private device structure. 3057 */ 3058 static struct mrvl_priv * 3059 mrvl_priv_create(const char *dev_name) 3060 { 3061 struct pp2_bpool_params bpool_params; 3062 char match[MRVL_MATCH_LEN]; 3063 struct mrvl_priv *priv; 3064 uint16_t max_frame_size; 3065 int ret, bpool_bit; 3066 3067 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); 3068 if (!priv) 3069 return NULL; 3070 3071 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, 3072 &priv->pp_id, &priv->ppio_id); 3073 if (ret) 3074 goto out_free_priv; 3075 3076 ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id, 3077 &max_frame_size); 3078 if (ret) 3079 goto out_free_priv; 3080 3081 priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN - 3082 MRVL_PP2_ETH_HDRS_LEN; 3083 3084 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], 3085 PP2_BPOOL_NUM_POOLS); 3086 if (bpool_bit < 0) 3087 goto out_free_priv; 3088 priv->bpool_bit = bpool_bit; 3089 3090 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, 3091 priv->bpool_bit); 3092 memset(&bpool_params, 0, sizeof(bpool_params)); 3093 bpool_params.match = match; 3094 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; 3095 ret = pp2_bpool_init(&bpool_params, &priv->bpool); 3096 if (ret) 3097 goto out_clear_bpool_bit; 3098 3099 priv->ppio_params.type = PP2_PPIO_T_NIC; 3100 rte_spinlock_init(&priv->lock); 3101 3102 return priv; 3103 out_clear_bpool_bit: 3104 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); 3105 out_free_priv: 3106 rte_free(priv); 3107 return NULL; 3108 } 3109 3110 /** 3111 * Create device representing Ethernet port. 3112 * 3113 * @param name 3114 * Pointer to the port's name. 3115 * 3116 * @return 3117 * 0 on success, negative error value otherwise. 
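 *
 * @note The vdev argument is the backing virtual device; it is stored in
 *   eth_dev->device so the port can later be matched by
 *   rte_pmd_mrvl_remove().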
3118 */ 3119 static int 3120 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) 3121 { 3122 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); 3123 struct rte_eth_dev *eth_dev; 3124 struct mrvl_priv *priv; 3125 struct ifreq req; 3126 3127 eth_dev = rte_eth_dev_allocate(name); 3128 if (!eth_dev) 3129 return -ENOMEM; 3130 3131 priv = mrvl_priv_create(name); 3132 if (!priv) { 3133 ret = -ENOMEM; 3134 goto out_free; 3135 } 3136 eth_dev->data->dev_private = priv; 3137 3138 eth_dev->data->mac_addrs = 3139 rte_zmalloc("mac_addrs", 3140 RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); 3141 if (!eth_dev->data->mac_addrs) { 3142 MRVL_LOG(ERR, "Failed to allocate space for eth addrs"); 3143 ret = -ENOMEM; 3144 goto out_free; 3145 } 3146 3147 memset(&req, 0, sizeof(req)); 3148 strcpy(req.ifr_name, name); 3149 ret = ioctl(fd, SIOCGIFHWADDR, &req); 3150 if (ret) 3151 goto out_free; 3152 3153 memcpy(eth_dev->data->mac_addrs[0].addr_bytes, 3154 req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN); 3155 3156 eth_dev->device = &vdev->device; 3157 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; 3158 mrvl_set_tx_function(eth_dev); 3159 eth_dev->dev_ops = &mrvl_ops; 3160 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 3161 3162 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 3163 3164 rte_eth_dev_probing_finish(eth_dev); 3165 return 0; 3166 out_free: 3167 rte_eth_dev_release_port(eth_dev); 3168 3169 return ret; 3170 } 3171 3172 /** 3173 * Callback used by rte_kvargs_process() during argument parsing. 3174 * 3175 * @param key 3176 * Pointer to the parsed key (unused). 3177 * @param value 3178 * Pointer to the parsed value. 3179 * @param extra_args 3180 * Pointer to the extra arguments which contains address of the 3181 * table of pointers to parsed interface names. 3182 * 3183 * @return 3184 * Always 0. 3185 */ 3186 static int 3187 mrvl_get_ifnames(const char *key __rte_unused, const char *value, 3188 void *extra_args) 3189 { 3190 struct mrvl_ifnames *ifnames = extra_args; 3191 3192 ifnames->names[ifnames->idx++] = value; 3193 3194 return 0; 3195 } 3196 3197 /** 3198 * DPDK callback to register the virtual device. 3199 * 3200 * @param vdev 3201 * Pointer to the virtual device. 3202 * 3203 * @return 3204 * 0 on success, negative error value otherwise. 
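 *
 * Devargs sketch (interface names and config path are illustrative only):
 *
 *   --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/mrvl.conf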
3205 */ 3206 static int 3207 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) 3208 { 3209 struct rte_kvargs *kvlist; 3210 struct mrvl_ifnames ifnames; 3211 int ret = -EINVAL; 3212 uint32_t i, ifnum, cfgnum; 3213 const char *params; 3214 3215 params = rte_vdev_device_args(vdev); 3216 if (!params) 3217 return -EINVAL; 3218 3219 kvlist = rte_kvargs_parse(params, valid_args); 3220 if (!kvlist) 3221 return -EINVAL; 3222 3223 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); 3224 if (ifnum > RTE_DIM(ifnames.names)) 3225 goto out_free_kvlist; 3226 3227 ifnames.idx = 0; 3228 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, 3229 mrvl_get_ifnames, &ifnames); 3230 3231 3232 /* 3233 * The below system initialization should be done only once, 3234 * on the first provided configuration file 3235 */ 3236 if (!mrvl_cfg) { 3237 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); 3238 MRVL_LOG(INFO, "Parsing config file!"); 3239 if (cfgnum > 1) { 3240 MRVL_LOG(ERR, "Cannot handle more than one config file!"); 3241 goto out_free_kvlist; 3242 } else if (cfgnum == 1) { 3243 rte_kvargs_process(kvlist, MRVL_CFG_ARG, 3244 mrvl_get_cfg, &mrvl_cfg); 3245 } 3246 } 3247 3248 if (mrvl_dev_num) 3249 goto init_devices; 3250 3251 MRVL_LOG(INFO, "Perform MUSDK initializations"); 3252 3253 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist); 3254 if (ret) 3255 goto out_free_kvlist; 3256 3257 ret = mrvl_init_pp2(); 3258 if (ret) { 3259 MRVL_LOG(ERR, "Failed to init PP!"); 3260 rte_mvep_deinit(MVEP_MOD_T_PP2); 3261 goto out_free_kvlist; 3262 } 3263 3264 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); 3265 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); 3266 3267 mrvl_lcore_first = RTE_MAX_LCORE; 3268 mrvl_lcore_last = 0; 3269 3270 init_devices: 3271 for (i = 0; i < ifnum; i++) { 3272 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]); 3273 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); 3274 if (ret) 3275 goto out_cleanup; 3276 mrvl_dev_num++; 3277 } 3278 3279 rte_kvargs_free(kvlist); 3280 3281 return 0; 3282 out_cleanup: 3283 rte_pmd_mrvl_remove(vdev); 3284 3285 out_free_kvlist: 3286 rte_kvargs_free(kvlist); 3287 3288 return ret; 3289 } 3290 3291 /** 3292 * DPDK callback to remove virtual device. 3293 * 3294 * @param vdev 3295 * Pointer to the removed virtual device. 3296 * 3297 * @return 3298 * 0 on success, negative error value otherwise. 3299 */ 3300 static int 3301 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) 3302 { 3303 uint16_t port_id; 3304 int ret = 0; 3305 3306 RTE_ETH_FOREACH_DEV(port_id) { 3307 if (rte_eth_devices[port_id].device != &vdev->device) 3308 continue; 3309 ret |= rte_eth_dev_close(port_id); 3310 } 3311 3312 return ret == 0 ? 0 : -EIO; 3313 } 3314 3315 static struct rte_vdev_driver pmd_mrvl_drv = { 3316 .probe = rte_pmd_mrvl_probe, 3317 .remove = rte_pmd_mrvl_remove, 3318 }; 3319 3320 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); 3321 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); 3322 RTE_LOG_REGISTER_DEFAULT(mrvl_logtype, NOTICE); 3323