/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2021 Marvell International Ltd.
 * Copyright(c) 2017-2021 Semihalf.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <rte_mvep_common.h>
#include "mrvl_ethdev.h"
#include "mrvl_qos.h"
#include "mrvl_flow.h"
#include "mrvl_mtr.h"
#include "mrvl_tm.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x0F
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

/* TCAM has 25 entries reserved for uc/mc filter entries
 * + 1 for primary mac address
 */
#define MRVL_MAC_ADDRS_MAX (1 + 25)
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL
#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000

/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
			  DEV_RX_OFFLOAD_JUMBO_FRAME | \
			  DEV_RX_OFFLOAD_CHECKSUM)

/** Port Tx offloads capabilities */
#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
				  DEV_TX_OFFLOAD_UDP_CKSUM | \
				  DEV_TX_OFFLOAD_TCP_CKSUM)
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
			  DEV_TX_OFFLOAD_MULTI_SEGS)

#define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
			      PKT_TX_TCP_CKSUM | \
			      PKT_TX_UDP_CKSUM)

static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	[0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
};

static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
static int dummy_pool_id[PP2_NUM_PKT_PROC];
struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0};

struct mrvl_ifnames {
	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int idx;
};

/*
 * To support buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 *
 * Before a packet is sent, the related buffer information (pp2_buff_inf) is
 * stored in the shadow queue. After the packet is transmitted, the no longer
 * used packet buffer is released back to its original hardware pool,
 * provided it originated from an interface.
 * If it was generated by the application itself, i.e. the mbuf->port field is
 * 0xff, then it is released to the software mempool.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of descriptors sent, that can be
			     * released
			     */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};

struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
	int cksum_enabled;
	uint64_t bytes_recv;
	uint64_t drop_mac;
};

struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
	uint64_t bytes_sent;
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
	int tx_deferred_start;
};

static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;

static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
			struct pp2_hif *hif, unsigned int core_id,
			struct mrvl_shadow_txq *sq, int qid, int force);

static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
				     uint16_t nb_pkts);
static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev);
static void mrvl_deinit_pp2(void);
static void mrvl_deinit_hifs(void);

static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused);
static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int mrvl_promiscuous_enable(struct rte_eth_dev *dev);
static int mrvl_allmulticast_enable(struct rte_eth_dev *dev);
static int
mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);

#define MRVL_XSTATS_TBL_ENTRY(name) { \
	#name, offsetof(struct pp2_ppio_statistics, name), \
	sizeof(((struct pp2_ppio_statistics *)0)->name) \
}

/* Table with xstats data */
static struct {
	const char *name;
	unsigned int offset;
	unsigned int size;
} mrvl_xstats_tbl[] = {
	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
	MRVL_XSTATS_TBL_ENTRY(rx_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_errors),
	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
	MRVL_XSTATS_TBL_ENTRY(tx_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_errors)
};

/* Reserve the next bit position above the highest bit already set in *bitmap
 * and return its index, or -1 if that index is not below max.
 */
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}

static int
mrvl_pp2_fixup_init(void)
{
	struct pp2_bpool_params bpool_params;
	char name[15];
	int err, i;

	memset(dummy_pool, 0, sizeof(dummy_pool));
	for (i = 0; i < pp2_get_num_inst(); i++) {
		dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i],
						    PP2_BPOOL_NUM_POOLS);
		if (dummy_pool_id[i] < 0) {
			MRVL_LOG(ERR, "Can't find free pool\n");
			return -1;
		}

		memset(name, 0, sizeof(name));
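		/* match string "pool-<pp_index>:<pool_id>" ties the dummy
		 * pool to this packet processor instance
		 */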
		snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]);
		memset(&bpool_params, 0, sizeof(bpool_params));
		bpool_params.match = name;
		bpool_params.buff_len = MRVL_PKT_OFFS;
		bpool_params.dummy_short_pool = 1;
		err = pp2_bpool_init(&bpool_params, &dummy_pool[i]);
		if (err != 0 || !dummy_pool[i]) {
			MRVL_LOG(ERR, "BPool init failed!\n");
			used_bpools[i] &= ~(1 << dummy_pool_id[i]);
			return -1;
		}
	}

	return 0;
}

/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;
	int err;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
	if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
		memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
		       sizeof(struct pp2_parse_udfs));
	err = pp2_init(&init_params);
	if (err != 0) {
		MRVL_LOG(ERR, "PP2 init failed");
		return -1;
	}

	err = mrvl_pp2_fixup_init();
	if (err != 0) {
		MRVL_LOG(ERR, "PP2 fixup init failed");
		return -1;
	}

	return 0;
}

static void
mrvl_pp2_fixup_deinit(void)
{
	int i;

	for (i = 0; i < PP2_NUM_PKT_PROC; i++) {
		if (!dummy_pool[i])
			continue;
		pp2_bpool_deinit(dummy_pool[i]);
		used_bpools[i] &= ~(1 << dummy_pool_id[i]);
	}
}

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	mrvl_pp2_fixup_deinit();
	pp2_deinit();
}

static inline void
mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
{
	sq->ent[sq->head].buff.cookie = (uint64_t)buf;
	sq->ent[sq->head].buff.addr = buf ?
		rte_mbuf_data_iova_default(buf) : 0;

	sq->ent[sq->head].bpool =
		(unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
			  buf->refcnt > 1)) ? NULL :
			 mrvl_port_to_bpool_lookup[buf->port];

	sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
	sq->size++;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
	used_hifs = MRVL_MUSDK_HIFS_RESERVED;
	memset(hifs, 0, sizeof(hifs));
}

static inline void
mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
{
	pp2_ppio_outq_desc_reset(desc);
	pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
	pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
	pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
}

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}

static int
mrvl_init_hif(int core_id)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int ret;

	ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
	if (ret < 0) {
		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
		return ret;
	}

	snprintf(match, sizeof(match), "hif-%d", ret);
	memset(&params, 0, sizeof(params));
	params.match = match;
	params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
	ret = pp2_hif_init(&params, &hifs[core_id]);
	if (ret) {
		MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
		return ret;
	}

	return 0;
}

static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
{
	int ret;

	if (likely(hifs[core_id] != NULL))
		return hifs[core_id];

	rte_spinlock_lock(&priv->lock);

	ret = mrvl_init_hif(core_id);
	if (ret < 0) {
		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
		goto out;
	}

	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
out:
	rte_spinlock_unlock(&priv->lock);

	return hifs[core_id];
}

/**
 * Set tx burst function according to offload flag
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_set_tx_function(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (priv->multiseg) {
		RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n");
		dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
	} else {
		RTE_LOG(INFO, PMD, "Using single-segment tx callback\n");
		dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	}
}

/**
 * Configure rss based on dpdk rss configuration.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
	if (rss_conf->rss_key)
		MRVL_LOG(WARNING, "Changing hash key is not supported");

	if (rss_conf->rss_hf == 0) {
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_2_TUPLE;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 1;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 0;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues and
 * configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->ppio) {
		MRVL_LOG(INFO, "Device reconfiguration is not supported");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		MRVL_LOG(INFO, "Split headers not supported");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 MRVL_PP2_ETH_HDRS_LEN;
		if (dev->data->mtu > priv->max_mtu) {
			MRVL_LOG(ERR, "inherit MTU %u from max_rx_pkt_len %u is larger than max_mtu %u\n",
				 dev->data->mtu,
				 dev->data->dev_conf.rxmode.max_rx_pkt_len,
				 priv->max_mtu);
			return -EINVAL;
		}
	}

	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		priv->multiseg = 1;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	ret = mrvl_configure_txqs(priv, dev->data->port_id,
				  dev->data->nb_tx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->ppio_params.maintain_stats = 1;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	ret = mrvl_tm_init(dev);
	if (ret < 0)
		return ret;

	if (dev->data->nb_rx_queues == 1 &&
	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
		priv->configured = 1;
		return 0;
	}

	ret = mrvl_configure_rss(priv,
				 &dev->data->dev_conf.rx_adv_conf.rss_conf);
	if (ret < 0)
		return ret;

	priv->configured = 1;

	return 0;
}

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint16_t mru;
	uint16_t mbuf_data_size = 0; /* SW buffer size */
	int ret;

	mru = MRVL_PP2_MTU_TO_MRU(mtu);
	/*
	 * min_rx_buf_size is equal to mbuf data size
	 * if pmd didn't set it differently
	 */
	mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	/* Prevent PMD from:
	 * - setting mru greater than the mbuf size resulting in
	 *   hw and sw buffer size mismatch
	 * - setting mtu that requires the support of scattered packets
	 *   when this feature has not been enabled/supported so far
	 *   (TODO check scattered_rx flag here once scattered RX is supported).
	 */
	if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
		mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
		mtu = MRVL_PP2_MRU_TO_MTU(mru);
		MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
			"by current mbuf size: %u. Set MTU to %u, MRU to %u",
			mbuf_data_size, mtu, mru);
	}

	if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
		MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
		return -EINVAL;
	}

	dev->data->mtu = mtu;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MRU");
		return ret;
	}

	ret = pp2_ppio_set_mtu(priv->ppio, mtu);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MTU");
		return ret;
	}

	return 0;
}

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio) {
		dev->data->dev_link.link_status = ETH_LINK_UP;
		return 0;
	}

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret) {
		pp2_ppio_disable(priv->ppio);
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio) {
		dev->data->dev_link.link_status = ETH_LINK_DOWN;
		return 0;
	}
	ret = pp2_ppio_disable(priv->ppio);
	if (ret)
		return ret;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

/**
 * DPDK callback to start tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_id
 *   Transmit queue index.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv)
		return -EPERM;

	/* passing 1 enables given tx queue */
	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
		return ret;
	}

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/**
 * DPDK callback to stop tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_id
 *   Transmit queue index.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio)
		return -EPERM;

	/* passing 0 disables given tx queue */
	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
		return ret;
	}

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/**
 * Populate VLAN Filter configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int mrvl_populate_vlan_table(struct rte_eth_dev *dev, int on)
{
	uint32_t j;
	int ret;
	struct rte_vlan_filter_conf *vfc;

	vfc = &dev->data->vlan_filter_conf;
	for (j = 0; j < RTE_DIM(vfc->ids); j++) {
		uint64_t vlan;
		uint64_t vbit;
		uint64_t ids = vfc->ids[j];

		if (ids == 0)
			continue;

		while (ids) {
			vlan = 64 * j;
			/* count trailing zeroes */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			for (; vbit; vlan++)
				vbit >>= 1;
			ret = mrvl_vlan_filter_set(dev, vlan, on);
			if (ret) {
				MRVL_LOG(ERR, "Failed to setup VLAN filter\n");
				return ret;
			}
		}
	}

	return 0;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret = 0, i, def_init_size;
	struct rte_ether_addr *mac_addr;

	if (priv->ppio)
		return mrvl_dev_set_link_up(dev);

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;
	priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH;
	priv->forward_bad_frames = 0;
	priv->fill_bpool_buffs = MRVL_BURST_SIZE;

	if (mrvl_cfg) {
		priv->ppio_params.eth_start_hdr =
			mrvl_cfg->port[dev->data->port_id].eth_start_hdr;
		priv->forward_bad_frames =
			mrvl_cfg->port[dev->data->port_id].forward_bad_frames;
		priv->fill_bpool_buffs =
			mrvl_cfg->port[dev->data->port_id].fill_bpool_buffs;
	}

	/*
	 * Calculate the minimum bpool size for the refill feature as follows:
	 * 2 default burst sizes multiplied by the number of rx queues.
	 * If the bpool size drops below this value, new buffers will
	 * be added to the pool.
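	 * For example, with 4 rx queues this evaluates to
	 * 4 * MRVL_BURST_SIZE * 2 buffers.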
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;

	/* In case the initial bpool size configured during queue setup is
	 * smaller than the minimum size, add more buffers
	 */
	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
	if (priv->bpool_init_size < def_init_size) {
		int buffs_to_add = def_init_size - priv->bpool_init_size;

		priv->bpool_init_size += buffs_to_add;
		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
		if (ret)
			MRVL_LOG(ERR, "Failed to add buffers to bpool");
	}

	/*
	 * Calculate the maximum bpool size for the refill feature as follows:
	 * the maximum number of descriptors in an rx queue multiplied by the
	 * number of rx queues, plus the minimum bpool size.
	 * If the bpool size exceeds this value, superfluous buffers
	 * will be removed
	 */
	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
				priv->bpool_min_size;

	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret) {
		MRVL_LOG(ERR, "Failed to init ppio");
		return ret;
	}

	/*
	 * In case there are some stale uc/mc mac addresses flush them
	 * here. It cannot be done during mrvl_dev_close() as port information
	 * is already gone at that point (due to pp2_ppio_deinit() in
	 * mrvl_dev_stop()).
	 */
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
		if (ret) {
			MRVL_LOG(ERR,
				"Failed to flush uc/mc filter list");
			goto out;
		}
		priv->uc_mc_flushed = 1;
	}

	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret)
		MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);

	if (!rte_is_zero_ether_addr(&dev->data->mac_addrs[0]))
		mrvl_mac_addr_set(dev, &dev->data->mac_addrs[0]);

	for (i = 1; i < MRVL_MAC_ADDRS_MAX; i++) {
		mac_addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(mac_addr))
			continue;

		mrvl_mac_addr_add(dev, mac_addr, i, 0);
	}

	if (dev->data->all_multicast == 1)
		mrvl_allmulticast_enable(dev);

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
		ret = mrvl_populate_vlan_table(dev, 1);
		if (ret) {
			MRVL_LOG(ERR, "Failed to populate VLAN table");
			goto out;
		}
	}

	/* For default QoS config, don't start classifier.
	 */
	if (mrvl_cfg &&
	    mrvl_cfg->port[dev->data->port_id].use_qos_global_defaults == 0) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			MRVL_LOG(ERR, "Failed to setup QoS mapping");
			goto out;
		}
	}

	ret = pp2_ppio_set_loopback(priv->ppio, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		MRVL_LOG(ERR, "Failed to set loopback");
		goto out;
	}

	if (dev->data->promiscuous == 1)
		mrvl_promiscuous_enable(dev);

	if (priv->flow_ctrl) {
		ret = mrvl_flow_ctrl_set(dev, &priv->fc_conf);
		if (ret) {
			MRVL_LOG(ERR, "Failed to configure flow control");
			goto out;
		}
		priv->flow_ctrl = 0;
	}

	if (dev->data->dev_link.link_status == ETH_LINK_UP) {
		ret = mrvl_dev_set_link_up(dev);
		if (ret) {
			MRVL_LOG(ERR, "Failed to set link up");
			dev->data->dev_link.link_status = ETH_LINK_DOWN;
			goto out;
		}
	}

	/* start tx queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

		if (!txq->tx_deferred_start)
			continue;

		/*
		 * All txqs are started by default. Stop them
		 * so that tx_deferred_start works as expected.
		 */
		ret = mrvl_tx_queue_stop(dev, i);
		if (ret)
			goto out;
	}

	mrvl_flow_init(dev);
	mrvl_mtr_init(dev);
	mrvl_set_tx_function(dev);

	return 0;
out:
	MRVL_LOG(ERR, "Failed to start device");
	pp2_ppio_deinit(priv->ppio);
	return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
	int i;

	MRVL_LOG(INFO, "Flushing rx queues");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		int ret, num;

		do {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
	}
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
	int i, j;
	struct mrvl_txq *txq;

	MRVL_LOG(INFO, "Flushing tx shadow queues");
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = (struct mrvl_txq *)dev->data->tx_queues[i];

		for (j = 0; j < RTE_MAX_LCORE; j++) {
			struct mrvl_shadow_txq *sq;

			if (!hifs[j])
				continue;

			sq = &txq->shadow_txqs[j];
			mrvl_free_sent_buffers(txq->priv->ppio,
				hifs[j], j, sq, txq->queue_id, 1);
			while (sq->tail != sq->head) {
				uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
				rte_pktmbuf_free(
					(struct rte_mbuf *)addr);
				sq->tail = (sq->tail + 1) &
					    MRVL_PP2_TX_SHADOWQ_MASK;
			}
			memset(sq, 0, sizeof(*sq));
		}
	}
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_hif *hif;
	uint32_t num;
	int ret;
	unsigned int core_id = rte_lcore_id();

	if (core_id == LCORE_ID_ANY)
		core_id = rte_get_main_lcore();

	hif = mrvl_get_hif(priv, core_id);

	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
	if (ret) {
		MRVL_LOG(ERR, "Failed to get bpool buffers number");
		return;
	}

	while (num--) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
		if (ret)
			break;

		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	return mrvl_dev_set_link_down(dev);
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	mrvl_flow_deinit(dev);
	mrvl_mtr_deinit(dev);

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	if (priv->cls_tbl) {
		pp2_cls_tbl_deinit(priv->cls_tbl);
		priv->cls_tbl = NULL;
	}

	if (priv->qos_tbl) {
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
		priv->qos_tbl = NULL;
	}

	mrvl_flush_bpool(dev);
	mrvl_tm_deinit(dev);

	if (priv->ppio) {
		pp2_ppio_deinit(priv->ppio);
		priv->ppio = NULL;
	}

	/* policer must be released after ppio deinitialization */
	if (priv->default_policer) {
		pp2_cls_plcr_deinit(priv->default_policer);
		priv->default_policer = NULL;
	}

	if (priv->bpool) {
		pp2_bpool_deinit(priv->bpool);
		used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
		priv->bpool = NULL;
	}

	mrvl_dev_num--;

	if (mrvl_dev_num == 0) {
		MRVL_LOG(INFO, "Perform MUSDK deinit");
		mrvl_deinit_hifs();
		mrvl_deinit_pp2();
		rte_mvep_deinit(MVEP_MOD_T_PP2);
	}

	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO
	 * once MUSDK provides necessary API use it here
	 */
	struct mrvl_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd, link_up;

	if (!priv->ppio)
		return -EPERM;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case SPEED_2500:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
							   ETH_LINK_FIXED;
	pp2_ppio_get_link_state(priv->ppio, &link_up);
	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_promisc(priv->ppio, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to enable promiscuous mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to enable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to enable all-multicast mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_promisc(priv->ppio, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to disable promiscuous mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to disable all-multicast mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
static void
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (priv->isolated)
		return;

	if (!priv->ppio)
		return;

	ret = pp2_ppio_remove_mac_addr(priv->ppio,
				       dev->data->mac_addrs[index].addr_bytes);
	if (ret) {
		rte_ether_format_addr(buf, sizeof(buf),
				      &dev->data->mac_addrs[index]);
		MRVL_LOG(ERR, "Failed to remove mac %s", buf);
	}
}

/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 * @param vmdq
 *   VMDq pool index to associate address with (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	if (index == 0)
		/* For setting index 0, mrvl_mac_addr_set() should be used.*/
		return -1;

	/*
	 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
	 * parameter uc_filter_max. Maximum number of mc addresses is then
	 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently they default to 4 and
	 * 21 respectively.
	 *
	 * If more than uc_filter_max uc addresses are added to the filter
	 * list, the NIC switches to promiscuous mode automatically.
	 *
	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are
	 * added to the filter list, the NIC switches to all-multicast mode
	 * automatically.
	 */
	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
		MRVL_LOG(ERR, "Failed to add mac %s", buf);
		return -1;
	}

	return 0;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		char buf[RTE_ETHER_ADDR_FMT_SIZE];
		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
		MRVL_LOG(ERR, "Failed to set mac to %s", buf);
	}

	return ret;
}

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	uint64_t drop_mac = 0;
	unsigned int i, idx, ret;

	if (!priv->ppio)
		return -EPERM;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
		struct pp2_ppio_inq_statistics rx_stats;

		if (!rxq)
			continue;

		idx = rxq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			MRVL_LOG(ERR,
				"rx queue %d stats out of range (0 - %d)",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
			continue;
		}

		ret = pp2_ppio_inq_get_statistics(priv->ppio,
						  priv->rxq_map[idx].tc,
						  priv->rxq_map[idx].inq,
						  &rx_stats, 0);
		if (unlikely(ret)) {
			MRVL_LOG(ERR,
				"Failed to update rx queue %d stats", idx);
			break;
		}

		stats->q_ibytes[idx] = rxq->bytes_recv;
		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
		stats->q_errors[idx] = rx_stats.drop_early +
				       rx_stats.drop_fullq +
				       rx_stats.drop_bm +
				       rxq->drop_mac;
		stats->ibytes += rxq->bytes_recv;
		drop_mac += rxq->drop_mac;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];
		struct pp2_ppio_outq_statistics tx_stats;

		if (!txq)
			continue;

		idx = txq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			MRVL_LOG(ERR,
				"tx queue %d stats out of range (0 - %d)",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		}

		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
						   &tx_stats, 0);
		if (unlikely(ret)) {
			MRVL_LOG(ERR,
				"Failed to update tx queue %d stats", idx);
			break;
		}

		stats->q_opackets[idx] = tx_stats.deq_desc;
		stats->q_obytes[idx] = txq->bytes_sent;
		stats->obytes += txq->bytes_sent;
	}

	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	if (unlikely(ret)) {
		MRVL_LOG(ERR, "Failed to update port statistics");
		return ret;
	}

	stats->ipackets += ppio_stats.rx_packets - drop_mac;
	stats->opackets += ppio_stats.tx_packets;
	stats->imissed += ppio_stats.rx_fullq_dropped +
			  ppio_stats.rx_bm_dropped +
			  ppio_stats.rx_early_dropped +
			  ppio_stats.rx_fifo_dropped +
			  ppio_stats.rx_cls_dropped;
	stats->ierrors = drop_mac;

	return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_reset(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int i;

	if (!priv->ppio)
		return 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];

		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
					    priv->rxq_map[i].inq, NULL, 1);
		rxq->bytes_recv = 0;
		rxq->drop_mac = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
		txq->bytes_sent = 0;
	}

	return pp2_ppio_get_statistics(priv->ppio, NULL, 1);
}

/**
 * DPDK callback to get extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Pointer to xstats table.
 * @param n
 *   Number of entries in xstats table.
 * @return
 *   Negative value on error, number of read xstats otherwise.
 */
static int
mrvl_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats, unsigned int n)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	unsigned int i;

	if (!stats)
		return 0;

	pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
		uint64_t val;

		if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
			val = *(uint32_t *)((uint8_t *)&ppio_stats +
					    mrvl_xstats_tbl[i].offset);
		else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
			val = *(uint64_t *)((uint8_t *)&ppio_stats +
					    mrvl_xstats_tbl[i].offset);
		else
			return -EINVAL;

		stats[i].id = i;
		stats[i].value = val;
	}

	return n;
}

/**
 * DPDK callback to reset extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_xstats_reset(struct rte_eth_dev *dev)
{
	return mrvl_stats_reset(dev);
}

/**
 * DPDK callback to get extended statistics names.
 *
 * @param dev (unused)
 *   Pointer to Ethernet device structure.
 * @param xstats_names
 *   Pointer to xstats names table.
 * @param size
 *   Size of the xstats names table.
 * @return
 *   Number of read names.
 */
static int
mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int size)
{
	unsigned int i;

	if (!xstats_names)
		return RTE_DIM(mrvl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);

	return size;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param info
 *   Info structure output buffer.
 */
static int
mrvl_dev_infos_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *info)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	info->speed_capa = ETH_LINK_SPEED_10M |
			   ETH_LINK_SPEED_100M |
			   ETH_LINK_SPEED_1G |
			   ETH_LINK_SPEED_2_5G |
			   ETH_LINK_SPEED_10G;

	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

	info->rx_offload_capa = MRVL_RX_OFFLOADS;
	info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;

	info->tx_offload_capa = MRVL_TX_OFFLOADS;
	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;

	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
				       ETH_RSS_NONFRAG_IPV4_TCP |
				       ETH_RSS_NONFRAG_IPV4_UDP;

	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;

	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
	info->max_mtu = priv->max_mtu;

	return 0;
}

/**
 * Return supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 *
 * @return
 *   Const pointer to the table with supported packet types.
 */
static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP
	};

	return ptypes;
}

/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	qinfo->mp = q->mp;
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}

/**
 * DPDK callback to configure VLAN offload.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   VLAN offload mask.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	int ret;

	if (mask & ETH_VLAN_STRIP_MASK) {
		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
		return -ENOTSUP;
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = mrvl_populate_vlan_table(dev, 1);
		else
			ret = mrvl_populate_vlan_table(dev, 0);

		if (ret)
			return ret;
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		MRVL_LOG(ERR, "Extend VLAN not supported\n");
		return -ENOTSUP;
	}

	return 0;
}

/**
 * Release buffers to hardware bpool (buffer-pool)
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
	struct buff_release_entry entries[num];
	struct rte_mbuf *mbufs[num];
	int i, ret;
	unsigned int core_id;
	struct pp2_hif *hif;
	struct pp2_bpool *bpool;

	core_id = rte_lcore_id();
	if (core_id == LCORE_ID_ANY)
		core_id = rte_get_main_lcore();

	hif = mrvl_get_hif(rxq->priv, core_id);
	if (!hif)
		return -1;

	bpool = rxq->priv->bpool;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (ret)
		return ret;

	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
		cookie_addr_high =
			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

	for (i = 0; i < num; i++) {
		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
			!= cookie_addr_high) {
			MRVL_LOG(ERR,
				"mbuf virtual addr high is out of range "
				"0x%x instead of 0x%x\n",
				(uint32_t)((uint64_t)mbufs[i] >> 32),
				(uint32_t)(cookie_addr_high >> 32));
			goto out;
		}

		entries[i].buff.addr =
			rte_mbuf_data_iova_default(mbufs[i]);
		entries[i].buff.cookie = (uintptr_t)mbufs[i];
		entries[i].bpool = bpool;
	}

	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

	if (i != num)
		goto out;

	return 0;
out:
	for (; i < num; i++)
		rte_pktmbuf_free(mbufs[i]);

	return -1;
}

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq;
	uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
	uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	int ret, tc, inq;
	uint64_t offloads;

	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;

	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
		/*
		 * Unknown TC mapping, mapping will not have a correct queue.
		 */
		MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
			idx, priv->ppio_id);
		return -EFAULT;
	}

	frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
		     MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
	if (frame_size < max_rx_pkt_len) {
		MRVL_LOG(WARNING,
			"Mbuf size must be increased to %u bytes to hold up "
			"to %u bytes of data.",
			buf_size + max_rx_pkt_len - frame_size,
			max_rx_pkt_len);
		dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
		MRVL_LOG(INFO, "Setting max rx pkt len to %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len);
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

	tc = priv->rxq_map[rxq->queue_id].tc,
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
		desc;

	ret = mrvl_fill_bpool(rxq, desc);
	if (ret) {
		rte_free(rxq);
		return ret;
	}

	priv->bpool_init_size += desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
static void
mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mrvl_rxq *q = dev->data->rx_queues[qid];
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();

	if (core_id == LCORE_ID_ANY)
		core_id = rte_get_main_lcore();

	if (!q)
		return;

	hif = mrvl_get_hif(q->priv, core_id);

	if (!hif)
		return;

	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}

	rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Tx queue configuration parameters.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_txconf *conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq;

	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	if (!txq)
		return -ENOMEM;

	txq->priv = priv;
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	txq->tx_deferred_start = conf->tx_deferred_start;
	dev->data->tx_queues[idx] = txq;

	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;

	return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
static void
mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mrvl_txq *q = dev->data->tx_queues[qid];

	if (!q)
		return;

	rte_free(q);
}

/**
 * DPDK callback to get flow control configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param fc_conf
 *   Pointer to the flow control configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret, en;

	if (!priv->ppio) {
		memcpy(fc_conf, &priv->fc_conf, sizeof(struct rte_eth_fc_conf));
		return 0;
	}

	fc_conf->autoneg = 1;
	ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
	if (ret) {
		MRVL_LOG(ERR, "Failed to read rx pause state");
		return ret;
	}

	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;

	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
	if (ret) {
		MRVL_LOG(ERR, "Failed to read tx pause state");
		return ret;
	}

	if (en) {
		if (fc_conf->mode == RTE_FC_NONE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_FULL;
	}

	return 0;
}

/**
 * DPDK callback to set flow control configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param fc_conf
 *   Pointer to the flow control configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
2224 */ 2225 static int 2226 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2227 { 2228 struct mrvl_priv *priv = dev->data->dev_private; 2229 struct pp2_ppio_tx_pause_params mrvl_pause_params; 2230 int ret; 2231 int rx_en, tx_en; 2232 2233 if (fc_conf->high_water || 2234 fc_conf->low_water || 2235 fc_conf->pause_time || 2236 fc_conf->mac_ctrl_frame_fwd) { 2237 MRVL_LOG(ERR, "Flowctrl parameter is not supported"); 2238 2239 return -EINVAL; 2240 } 2241 2242 if (fc_conf->autoneg == 0) { 2243 MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported"); 2244 return -EINVAL; 2245 } 2246 2247 if (!priv->ppio) { 2248 memcpy(&priv->fc_conf, fc_conf, sizeof(struct rte_eth_fc_conf)); 2249 priv->flow_ctrl = 1; 2250 return 0; 2251 } 2252 2253 switch (fc_conf->mode) { 2254 case RTE_FC_FULL: 2255 rx_en = 1; 2256 tx_en = 1; 2257 break; 2258 case RTE_FC_TX_PAUSE: 2259 rx_en = 0; 2260 tx_en = 1; 2261 break; 2262 case RTE_FC_RX_PAUSE: 2263 rx_en = 1; 2264 tx_en = 0; 2265 break; 2266 case RTE_FC_NONE: 2267 rx_en = 0; 2268 tx_en = 0; 2269 break; 2270 default: 2271 MRVL_LOG(ERR, "Incorrect Flow control flag (%d)", 2272 fc_conf->mode); 2273 return -EINVAL; 2274 } 2275 2276 /* Set RX flow control */ 2277 ret = pp2_ppio_set_rx_pause(priv->ppio, rx_en); 2278 if (ret) { 2279 MRVL_LOG(ERR, "Failed to change RX flowctrl"); 2280 return ret; 2281 } 2282 2283 /* Set TX flow control */ 2284 mrvl_pause_params.en = tx_en; 2285 /* all inqs participate in xon/xoff decision */ 2286 mrvl_pause_params.use_tc_pause_inqs = 0; 2287 ret = pp2_ppio_set_tx_pause(priv->ppio, &mrvl_pause_params); 2288 if (ret) { 2289 MRVL_LOG(ERR, "Failed to change TX flowctrl"); 2290 return ret; 2291 } 2292 2293 return 0; 2294 } 2295 2296 /** 2297 * Update RSS hash configuration 2298 * 2299 * @param dev 2300 * Pointer to Ethernet device structure. 2301 * @param rss_conf 2302 * Pointer to RSS configuration. 2303 * 2304 * @return 2305 * 0 on success, negative error value otherwise. 2306 */ 2307 static int 2308 mrvl_rss_hash_update(struct rte_eth_dev *dev, 2309 struct rte_eth_rss_conf *rss_conf) 2310 { 2311 struct mrvl_priv *priv = dev->data->dev_private; 2312 2313 if (priv->isolated) 2314 return -ENOTSUP; 2315 2316 return mrvl_configure_rss(priv, rss_conf); 2317 } 2318 2319 /** 2320 * DPDK callback to get RSS hash configuration. 2321 * 2322 * @param dev 2323 * Pointer to Ethernet device structure. 2324 * @rss_conf 2325 * Pointer to RSS configuration. 2326 * 2327 * @return 2328 * Always 0. 2329 */ 2330 static int 2331 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, 2332 struct rte_eth_rss_conf *rss_conf) 2333 { 2334 struct mrvl_priv *priv = dev->data->dev_private; 2335 enum pp2_ppio_hash_type hash_type = 2336 priv->ppio_params.inqs_params.hash_type; 2337 2338 rss_conf->rss_key = NULL; 2339 2340 if (hash_type == PP2_PPIO_HASH_T_NONE) 2341 rss_conf->rss_hf = 0; 2342 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) 2343 rss_conf->rss_hf = ETH_RSS_IPV4; 2344 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) 2345 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; 2346 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) 2347 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; 2348 2349 return 0; 2350 } 2351 2352 /** 2353 * DPDK callback to get rte_flow callbacks. 2354 * 2355 * @param dev 2356 * Pointer to the device structure. 2357 * @param ops 2358 * Pointer to pass the flow ops. 2359 * 2360 * @return 2361 * 0 on success, negative error value otherwise. 
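 *
 * For the RSS callbacks above, only a few hash profiles map onto the PP2
 * hardware (none, a 2-tuple profile reported as ETH_RSS_IPV4 and a
 * 5-tuple profile reported as ETH_RSS_NONFRAG_IPV4_TCP/UDP), so an
 * application typically requests one of those; an illustrative call:
 *
 *   struct rte_eth_rss_conf rss = {
 *       .rss_key = NULL,
 *       .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
 *   };
 *
 *   if (rte_eth_dev_rss_hash_update(port_id, &rss) != 0)
 *       printf("RSS hash update failed\n");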
2362 */ 2363 static int 2364 mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 2365 const struct rte_flow_ops **ops) 2366 { 2367 *ops = &mrvl_flow_ops; 2368 return 0; 2369 } 2370 2371 /** 2372 * DPDK callback to get rte_mtr callbacks. 2373 * 2374 * @param dev 2375 * Pointer to the device structure. 2376 * @param ops 2377 * Pointer to pass the mtr ops. 2378 * 2379 * @return 2380 * Always 0. 2381 */ 2382 static int 2383 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2384 { 2385 *(const void **)ops = &mrvl_mtr_ops; 2386 2387 return 0; 2388 } 2389 2390 /** 2391 * DPDK callback to get rte_tm callbacks. 2392 * 2393 * @param dev 2394 * Pointer to the device structure. 2395 * @param ops 2396 * Pointer to pass the tm ops. 2397 * 2398 * @return 2399 * Always 0. 2400 */ 2401 static int 2402 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops) 2403 { 2404 *(const void **)ops = &mrvl_tm_ops; 2405 2406 return 0; 2407 } 2408 2409 static const struct eth_dev_ops mrvl_ops = { 2410 .dev_configure = mrvl_dev_configure, 2411 .dev_start = mrvl_dev_start, 2412 .dev_stop = mrvl_dev_stop, 2413 .dev_set_link_up = mrvl_dev_set_link_up, 2414 .dev_set_link_down = mrvl_dev_set_link_down, 2415 .dev_close = mrvl_dev_close, 2416 .link_update = mrvl_link_update, 2417 .promiscuous_enable = mrvl_promiscuous_enable, 2418 .allmulticast_enable = mrvl_allmulticast_enable, 2419 .promiscuous_disable = mrvl_promiscuous_disable, 2420 .allmulticast_disable = mrvl_allmulticast_disable, 2421 .mac_addr_remove = mrvl_mac_addr_remove, 2422 .mac_addr_add = mrvl_mac_addr_add, 2423 .mac_addr_set = mrvl_mac_addr_set, 2424 .mtu_set = mrvl_mtu_set, 2425 .stats_get = mrvl_stats_get, 2426 .stats_reset = mrvl_stats_reset, 2427 .xstats_get = mrvl_xstats_get, 2428 .xstats_reset = mrvl_xstats_reset, 2429 .xstats_get_names = mrvl_xstats_get_names, 2430 .dev_infos_get = mrvl_dev_infos_get, 2431 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, 2432 .rxq_info_get = mrvl_rxq_info_get, 2433 .txq_info_get = mrvl_txq_info_get, 2434 .vlan_filter_set = mrvl_vlan_filter_set, 2435 .vlan_offload_set = mrvl_vlan_offload_set, 2436 .tx_queue_start = mrvl_tx_queue_start, 2437 .tx_queue_stop = mrvl_tx_queue_stop, 2438 .rx_queue_setup = mrvl_rx_queue_setup, 2439 .rx_queue_release = mrvl_rx_queue_release, 2440 .tx_queue_setup = mrvl_tx_queue_setup, 2441 .tx_queue_release = mrvl_tx_queue_release, 2442 .flow_ctrl_get = mrvl_flow_ctrl_get, 2443 .flow_ctrl_set = mrvl_flow_ctrl_set, 2444 .rss_hash_update = mrvl_rss_hash_update, 2445 .rss_hash_conf_get = mrvl_rss_hash_conf_get, 2446 .flow_ops_get = mrvl_eth_flow_ops_get, 2447 .mtr_ops_get = mrvl_mtr_ops_get, 2448 .tm_ops_get = mrvl_tm_ops_get, 2449 }; 2450 2451 /** 2452 * Return packet type information and l3/l4 offsets. 2453 * 2454 * @param desc 2455 * Pointer to the received packet descriptor. 2456 * @param l3_offset 2457 * l3 packet offset. 2458 * @param l4_offset 2459 * l4 packet offset. 2460 * 2461 * @return 2462 * Packet type information. 
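 *
 * The Rx burst path below consumes the two offsets directly, i.e.
 *
 *   mbuf->l2_len = l3_offset;
 *   mbuf->l3_len = l4_offset - l3_offset;
 *
 * which is also why the ARP case patches l4_offset: without it the
 * computed l3_len would be wrong for ARP frames.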
2463 */ 2464 static inline uint64_t 2465 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, 2466 uint8_t *l3_offset, uint8_t *l4_offset) 2467 { 2468 enum pp2_inq_l3_type l3_type; 2469 enum pp2_inq_l4_type l4_type; 2470 enum pp2_inq_vlan_tag vlan_tag; 2471 uint64_t packet_type; 2472 2473 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); 2474 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); 2475 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag); 2476 2477 packet_type = RTE_PTYPE_L2_ETHER; 2478 2479 switch (vlan_tag) { 2480 case PP2_INQ_VLAN_TAG_SINGLE: 2481 packet_type |= RTE_PTYPE_L2_ETHER_VLAN; 2482 break; 2483 case PP2_INQ_VLAN_TAG_DOUBLE: 2484 case PP2_INQ_VLAN_TAG_TRIPLE: 2485 packet_type |= RTE_PTYPE_L2_ETHER_QINQ; 2486 break; 2487 default: 2488 break; 2489 } 2490 2491 switch (l3_type) { 2492 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: 2493 packet_type |= RTE_PTYPE_L3_IPV4; 2494 break; 2495 case PP2_INQ_L3_TYPE_IPV4_OK: 2496 packet_type |= RTE_PTYPE_L3_IPV4_EXT; 2497 break; 2498 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: 2499 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; 2500 break; 2501 case PP2_INQ_L3_TYPE_IPV6_NO_EXT: 2502 packet_type |= RTE_PTYPE_L3_IPV6; 2503 break; 2504 case PP2_INQ_L3_TYPE_IPV6_EXT: 2505 packet_type |= RTE_PTYPE_L3_IPV6_EXT; 2506 break; 2507 case PP2_INQ_L3_TYPE_ARP: 2508 packet_type |= RTE_PTYPE_L2_ETHER_ARP; 2509 /* 2510 * In case of ARP l4_offset is set to wrong value. 2511 * Set it to proper one so that later on mbuf->l3_len can be 2512 * calculated subtracting l4_offset and l3_offset. 2513 */ 2514 *l4_offset = *l3_offset + MRVL_ARP_LENGTH; 2515 break; 2516 default: 2517 break; 2518 } 2519 2520 switch (l4_type) { 2521 case PP2_INQ_L4_TYPE_TCP: 2522 packet_type |= RTE_PTYPE_L4_TCP; 2523 break; 2524 case PP2_INQ_L4_TYPE_UDP: 2525 packet_type |= RTE_PTYPE_L4_UDP; 2526 break; 2527 default: 2528 break; 2529 } 2530 2531 return packet_type; 2532 } 2533 2534 /** 2535 * Get offload information from the received packet descriptor. 2536 * 2537 * @param desc 2538 * Pointer to the received packet descriptor. 2539 * 2540 * @return 2541 * Mbuf offload flags. 2542 */ 2543 static inline uint64_t 2544 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc, uint64_t packet_type) 2545 { 2546 uint64_t flags = 0; 2547 enum pp2_inq_desc_status status; 2548 2549 if (RTE_ETH_IS_IPV4_HDR(packet_type)) { 2550 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); 2551 if (unlikely(status != PP2_DESC_ERR_OK)) 2552 flags |= PKT_RX_IP_CKSUM_BAD; 2553 else 2554 flags |= PKT_RX_IP_CKSUM_GOOD; 2555 } 2556 2557 if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) || 2558 ((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) { 2559 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); 2560 if (unlikely(status != PP2_DESC_ERR_OK)) 2561 flags |= PKT_RX_L4_CKSUM_BAD; 2562 else 2563 flags |= PKT_RX_L4_CKSUM_GOOD; 2564 } 2565 2566 return flags; 2567 } 2568 2569 /** 2570 * DPDK callback for receive. 2571 * 2572 * @param rxq 2573 * Generic pointer to the receive queue. 2574 * @param rx_pkts 2575 * Array to store received packets. 2576 * @param nb_pkts 2577 * Maximum number of packets in array. 2578 * 2579 * @return 2580 * Number of packets successfully received. 
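 *
 * This function is installed as dev->rx_pkt_burst, so applications reach
 * it through the usual polling loop; a minimal sketch (queue 0, a burst
 * of 32 and process() are illustrative placeholders):
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t nb, i;
 *
 *   nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *   for (i = 0; i < nb; i++) {
 *       process(pkts[i]);
 *       rte_pktmbuf_free(pkts[i]);
 *   }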
2581 */ 2582 static uint16_t 2583 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 2584 { 2585 struct mrvl_rxq *q = rxq; 2586 struct pp2_ppio_desc descs[nb_pkts]; 2587 struct pp2_bpool *bpool; 2588 int i, ret, rx_done = 0; 2589 int num; 2590 struct pp2_hif *hif; 2591 unsigned int core_id = rte_lcore_id(); 2592 2593 hif = mrvl_get_hif(q->priv, core_id); 2594 2595 if (unlikely(!q->priv->ppio || !hif)) 2596 return 0; 2597 2598 bpool = q->priv->bpool; 2599 2600 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, 2601 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); 2602 if (unlikely(ret < 0)) 2603 return 0; 2604 2605 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; 2606 2607 for (i = 0; i < nb_pkts; i++) { 2608 struct rte_mbuf *mbuf; 2609 uint8_t l3_offset, l4_offset; 2610 enum pp2_inq_desc_status status; 2611 uint64_t addr; 2612 2613 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2614 struct pp2_ppio_desc *pref_desc; 2615 u64 pref_addr; 2616 2617 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2618 pref_addr = cookie_addr_high | 2619 pp2_ppio_inq_desc_get_cookie(pref_desc); 2620 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); 2621 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); 2622 } 2623 2624 addr = cookie_addr_high | 2625 pp2_ppio_inq_desc_get_cookie(&descs[i]); 2626 mbuf = (struct rte_mbuf *)addr; 2627 rte_pktmbuf_reset(mbuf); 2628 2629 /* drop packet in case of mac, overrun or resource error */ 2630 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); 2631 if ((unlikely(status != PP2_DESC_ERR_OK)) && 2632 !(q->priv->forward_bad_frames)) { 2633 struct pp2_buff_inf binf = { 2634 .addr = rte_mbuf_data_iova_default(mbuf), 2635 .cookie = (uint64_t)mbuf, 2636 }; 2637 2638 pp2_bpool_put_buff(hif, bpool, &binf); 2639 mrvl_port_bpool_size 2640 [bpool->pp2_id][bpool->id][core_id]++; 2641 q->drop_mac++; 2642 continue; 2643 } 2644 2645 mbuf->data_off += MRVL_PKT_EFFEC_OFFS; 2646 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); 2647 mbuf->data_len = mbuf->pkt_len; 2648 mbuf->port = q->port_id; 2649 mbuf->packet_type = 2650 mrvl_desc_to_packet_type_and_offset(&descs[i], 2651 &l3_offset, 2652 &l4_offset); 2653 mbuf->l2_len = l3_offset; 2654 mbuf->l3_len = l4_offset - l3_offset; 2655 2656 if (likely(q->cksum_enabled)) 2657 mbuf->ol_flags = 2658 mrvl_desc_to_ol_flags(&descs[i], 2659 mbuf->packet_type); 2660 2661 rx_pkts[rx_done++] = mbuf; 2662 q->bytes_recv += mbuf->pkt_len; 2663 } 2664 2665 if (rte_spinlock_trylock(&q->priv->lock) == 1) { 2666 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); 2667 2668 if (unlikely(num <= q->priv->bpool_min_size || 2669 (!rx_done && num < q->priv->bpool_init_size))) { 2670 mrvl_fill_bpool(q, q->priv->fill_bpool_buffs); 2671 } else if (unlikely(num > q->priv->bpool_max_size)) { 2672 int i; 2673 int pkt_to_remove = num - q->priv->bpool_init_size; 2674 struct rte_mbuf *mbuf; 2675 struct pp2_buff_inf buff; 2676 2677 for (i = 0; i < pkt_to_remove; i++) { 2678 ret = pp2_bpool_get_buff(hif, bpool, &buff); 2679 if (ret) 2680 break; 2681 mbuf = (struct rte_mbuf *) 2682 (cookie_addr_high | buff.cookie); 2683 rte_pktmbuf_free(mbuf); 2684 } 2685 mrvl_port_bpool_size 2686 [bpool->pp2_id][bpool->id][core_id] -= i; 2687 } 2688 rte_spinlock_unlock(&q->priv->lock); 2689 } 2690 2691 return rx_done; 2692 } 2693 2694 /** 2695 * Prepare offload information. 2696 * 2697 * @param ol_flags 2698 * Offload flags. 2699 * @param l3_type 2700 * Pointer to the pp2_ouq_l3_type structure. 
2701 * @param l4_type 2702 * Pointer to the pp2_outq_l4_type structure. 2703 * @param gen_l3_cksum 2704 * Will be set to 1 in case l3 checksum is computed. 2705 * @param l4_cksum 2706 * Will be set to 1 in case l4 checksum is computed. 2707 */ 2708 static inline void 2709 mrvl_prepare_proto_info(uint64_t ol_flags, 2710 enum pp2_outq_l3_type *l3_type, 2711 enum pp2_outq_l4_type *l4_type, 2712 int *gen_l3_cksum, 2713 int *gen_l4_cksum) 2714 { 2715 /* 2716 * Based on ol_flags prepare information 2717 * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor 2718 * for offloading. 2719 * in most of the checksum cases ipv4 must be set, so this is the 2720 * default value 2721 */ 2722 *l3_type = PP2_OUTQ_L3_TYPE_IPV4; 2723 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; 2724 2725 if (ol_flags & PKT_TX_IPV6) { 2726 *l3_type = PP2_OUTQ_L3_TYPE_IPV6; 2727 /* no checksum for ipv6 header */ 2728 *gen_l3_cksum = 0; 2729 } 2730 2731 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) { 2732 *l4_type = PP2_OUTQ_L4_TYPE_TCP; 2733 *gen_l4_cksum = 1; 2734 } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) { 2735 *l4_type = PP2_OUTQ_L4_TYPE_UDP; 2736 *gen_l4_cksum = 1; 2737 } else { 2738 *l4_type = PP2_OUTQ_L4_TYPE_OTHER; 2739 /* no checksum for other type */ 2740 *gen_l4_cksum = 0; 2741 } 2742 } 2743 2744 /** 2745 * Release already sent buffers to bpool (buffer-pool). 2746 * 2747 * @param ppio 2748 * Pointer to the port structure. 2749 * @param hif 2750 * Pointer to the MUSDK hardware interface. 2751 * @param sq 2752 * Pointer to the shadow queue. 2753 * @param qid 2754 * Queue id number. 2755 * @param force 2756 * Force releasing packets. 2757 */ 2758 static inline void 2759 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, 2760 unsigned int core_id, struct mrvl_shadow_txq *sq, 2761 int qid, int force) 2762 { 2763 struct buff_release_entry *entry; 2764 uint16_t nb_done = 0, num = 0, skip_bufs = 0; 2765 int i; 2766 2767 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); 2768 2769 sq->num_to_release += nb_done; 2770 2771 if (likely(!force && 2772 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) 2773 return; 2774 2775 nb_done = sq->num_to_release; 2776 sq->num_to_release = 0; 2777 2778 for (i = 0; i < nb_done; i++) { 2779 entry = &sq->ent[sq->tail + num]; 2780 if (unlikely(!entry->buff.addr)) { 2781 MRVL_LOG(ERR, 2782 "Shadow memory @%d: cookie(%lx), pa(%lx)!", 2783 sq->tail, (u64)entry->buff.cookie, 2784 (u64)entry->buff.addr); 2785 skip_bufs = 1; 2786 goto skip; 2787 } 2788 2789 if (unlikely(!entry->bpool)) { 2790 struct rte_mbuf *mbuf; 2791 2792 mbuf = (struct rte_mbuf *)entry->buff.cookie; 2793 rte_pktmbuf_free(mbuf); 2794 skip_bufs = 1; 2795 goto skip; 2796 } 2797 2798 mrvl_port_bpool_size 2799 [entry->bpool->pp2_id][entry->bpool->id][core_id]++; 2800 num++; 2801 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) 2802 goto skip; 2803 continue; 2804 skip: 2805 if (likely(num)) 2806 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2807 num += skip_bufs; 2808 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2809 sq->size -= num; 2810 num = 0; 2811 skip_bufs = 0; 2812 } 2813 2814 if (likely(num)) { 2815 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); 2816 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; 2817 sq->size -= num; 2818 } 2819 } 2820 2821 /** 2822 * DPDK callback for transmit. 2823 * 2824 * @param txq 2825 * Generic pointer transmit queue. 2826 * @param tx_pkts 2827 * Packets to transmit. 
2828 * @param nb_pkts 2829 * Number of packets in array. 2830 * 2831 * @return 2832 * Number of packets successfully transmitted. 2833 */ 2834 static uint16_t 2835 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 2836 { 2837 struct mrvl_txq *q = txq; 2838 struct mrvl_shadow_txq *sq; 2839 struct pp2_hif *hif; 2840 struct pp2_ppio_desc descs[nb_pkts]; 2841 unsigned int core_id = rte_lcore_id(); 2842 int i, bytes_sent = 0; 2843 uint16_t num, sq_free_size; 2844 uint64_t addr; 2845 2846 hif = mrvl_get_hif(q->priv, core_id); 2847 sq = &q->shadow_txqs[core_id]; 2848 2849 if (unlikely(!q->priv->ppio || !hif)) 2850 return 0; 2851 2852 if (sq->size) 2853 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, 2854 sq, q->queue_id, 0); 2855 2856 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; 2857 if (unlikely(nb_pkts > sq_free_size)) 2858 nb_pkts = sq_free_size; 2859 2860 for (i = 0; i < nb_pkts; i++) { 2861 struct rte_mbuf *mbuf = tx_pkts[i]; 2862 int gen_l3_cksum, gen_l4_cksum; 2863 enum pp2_outq_l3_type l3_type; 2864 enum pp2_outq_l4_type l4_type; 2865 2866 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2867 struct rte_mbuf *pref_pkt_hdr; 2868 2869 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2870 rte_mbuf_prefetch_part1(pref_pkt_hdr); 2871 rte_mbuf_prefetch_part2(pref_pkt_hdr); 2872 } 2873 2874 mrvl_fill_shadowq(sq, mbuf); 2875 mrvl_fill_desc(&descs[i], mbuf); 2876 2877 bytes_sent += rte_pktmbuf_pkt_len(mbuf); 2878 /* 2879 * in case unsupported ol_flags were passed 2880 * do not update descriptor offload information 2881 */ 2882 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS)) 2883 continue; 2884 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, 2885 &gen_l3_cksum, &gen_l4_cksum); 2886 2887 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, 2888 mbuf->l2_len, 2889 mbuf->l2_len + mbuf->l3_len, 2890 gen_l3_cksum, gen_l4_cksum); 2891 } 2892 2893 num = nb_pkts; 2894 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); 2895 /* number of packets that were not sent */ 2896 if (unlikely(num > nb_pkts)) { 2897 for (i = nb_pkts; i < num; i++) { 2898 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & 2899 MRVL_PP2_TX_SHADOWQ_MASK; 2900 addr = sq->ent[sq->head].buff.cookie; 2901 bytes_sent -= 2902 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); 2903 } 2904 sq->size -= num - nb_pkts; 2905 } 2906 2907 q->bytes_sent += bytes_sent; 2908 2909 return nb_pkts; 2910 } 2911 2912 /** DPDK callback for S/G transmit. 2913 * 2914 * @param txq 2915 * Generic pointer transmit queue. 2916 * @param tx_pkts 2917 * Packets to transmit. 2918 * @param nb_pkts 2919 * Number of packets in array. 2920 * 2921 * @return 2922 * Number of packets successfully transmitted. 
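 *
 * This variant handles multi-segment mbufs: every segment gets its own
 * descriptor, while only the last shadow-queue entry keeps the head mbuf,
 * so freeing that head later releases the whole chain. A caller hands it
 * a chained packet, e.g. (mempool name and segment contents are
 * illustrative):
 *
 *   struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
 *   struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);
 *
 *   ... fill both segments ...
 *
 *   if (rte_pktmbuf_chain(head, tail) == 0)
 *       rte_eth_tx_burst(port_id, 0, &head, 1);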
2923 */ 2924 static uint16_t 2925 mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, 2926 uint16_t nb_pkts) 2927 { 2928 struct mrvl_txq *q = txq; 2929 struct mrvl_shadow_txq *sq; 2930 struct pp2_hif *hif; 2931 struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS]; 2932 struct pp2_ppio_sg_pkts pkts; 2933 uint8_t frags[nb_pkts]; 2934 unsigned int core_id = rte_lcore_id(); 2935 int i, j, bytes_sent = 0; 2936 int tail, tail_first; 2937 uint16_t num, sq_free_size; 2938 uint16_t nb_segs, total_descs = 0; 2939 uint64_t addr; 2940 2941 hif = mrvl_get_hif(q->priv, core_id); 2942 sq = &q->shadow_txqs[core_id]; 2943 pkts.frags = frags; 2944 pkts.num = 0; 2945 2946 if (unlikely(!q->priv->ppio || !hif)) 2947 return 0; 2948 2949 if (sq->size) 2950 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, 2951 sq, q->queue_id, 0); 2952 2953 /* Save shadow queue free size */ 2954 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; 2955 2956 tail = 0; 2957 for (i = 0; i < nb_pkts; i++) { 2958 struct rte_mbuf *mbuf = tx_pkts[i]; 2959 struct rte_mbuf *seg = NULL; 2960 int gen_l3_cksum, gen_l4_cksum; 2961 enum pp2_outq_l3_type l3_type; 2962 enum pp2_outq_l4_type l4_type; 2963 2964 nb_segs = mbuf->nb_segs; 2965 tail_first = tail; 2966 total_descs += nb_segs; 2967 2968 /* 2969 * Check if total_descs does not exceed 2970 * shadow queue free size 2971 */ 2972 if (unlikely(total_descs > sq_free_size)) { 2973 total_descs -= nb_segs; 2974 break; 2975 } 2976 2977 /* Check if nb_segs does not exceed the max nb of desc per 2978 * fragmented packet 2979 */ 2980 if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) { 2981 total_descs -= nb_segs; 2982 RTE_LOG(ERR, PMD, 2983 "Too many segments. Packet won't be sent.\n"); 2984 break; 2985 } 2986 2987 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { 2988 struct rte_mbuf *pref_pkt_hdr; 2989 2990 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; 2991 rte_mbuf_prefetch_part1(pref_pkt_hdr); 2992 rte_mbuf_prefetch_part2(pref_pkt_hdr); 2993 } 2994 2995 pkts.frags[pkts.num] = nb_segs; 2996 pkts.num++; 2997 2998 seg = mbuf; 2999 for (j = 0; j < nb_segs - 1; j++) { 3000 /* For the subsequent segments, set shadow queue 3001 * buffer to NULL 3002 */ 3003 mrvl_fill_shadowq(sq, NULL); 3004 mrvl_fill_desc(&descs[tail], seg); 3005 3006 tail++; 3007 seg = seg->next; 3008 } 3009 /* Put first mbuf info in last shadow queue entry */ 3010 mrvl_fill_shadowq(sq, mbuf); 3011 /* Update descriptor with last segment */ 3012 mrvl_fill_desc(&descs[tail++], seg); 3013 3014 bytes_sent += rte_pktmbuf_pkt_len(mbuf); 3015 /* In case unsupported ol_flags were passed 3016 * do not update descriptor offload information 3017 */ 3018 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS)) 3019 continue; 3020 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type, 3021 &gen_l3_cksum, &gen_l4_cksum); 3022 3023 pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type, 3024 l4_type, mbuf->l2_len, 3025 mbuf->l2_len + mbuf->l3_len, 3026 gen_l3_cksum, gen_l4_cksum); 3027 } 3028 3029 num = total_descs; 3030 pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs, 3031 &total_descs, &pkts); 3032 /* number of packets that were not sent */ 3033 if (unlikely(num > total_descs)) { 3034 for (i = total_descs; i < num; i++) { 3035 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & 3036 MRVL_PP2_TX_SHADOWQ_MASK; 3037 3038 addr = sq->ent[sq->head].buff.cookie; 3039 if (addr) 3040 bytes_sent -= 3041 rte_pktmbuf_pkt_len((struct rte_mbuf *) 3042 (cookie_addr_high | addr)); 3043 } 3044 sq->size -= num - total_descs; 3045 
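		/*
		 * pp2_ppio_send_sg() is expected to have updated pkts.num to
		 * the number of whole packets it actually enqueued, so report
		 * that count to the caller instead of the requested nb_pkts.
		 */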
nb_pkts = pkts.num; 3046 } 3047 3048 q->bytes_sent += bytes_sent; 3049 3050 return nb_pkts; 3051 } 3052 3053 /** 3054 * Create private device structure. 3055 * 3056 * @param dev_name 3057 * Pointer to the port name passed in the initialization parameters. 3058 * 3059 * @return 3060 * Pointer to the newly allocated private device structure. 3061 */ 3062 static struct mrvl_priv * 3063 mrvl_priv_create(const char *dev_name) 3064 { 3065 struct pp2_bpool_params bpool_params; 3066 char match[MRVL_MATCH_LEN]; 3067 struct mrvl_priv *priv; 3068 uint16_t max_frame_size; 3069 int ret, bpool_bit; 3070 3071 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); 3072 if (!priv) 3073 return NULL; 3074 3075 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, 3076 &priv->pp_id, &priv->ppio_id); 3077 if (ret) 3078 goto out_free_priv; 3079 3080 ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id, 3081 &max_frame_size); 3082 if (ret) 3083 goto out_free_priv; 3084 3085 priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN - 3086 MRVL_PP2_ETH_HDRS_LEN; 3087 3088 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], 3089 PP2_BPOOL_NUM_POOLS); 3090 if (bpool_bit < 0) 3091 goto out_free_priv; 3092 priv->bpool_bit = bpool_bit; 3093 3094 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, 3095 priv->bpool_bit); 3096 memset(&bpool_params, 0, sizeof(bpool_params)); 3097 bpool_params.match = match; 3098 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; 3099 ret = pp2_bpool_init(&bpool_params, &priv->bpool); 3100 if (ret) 3101 goto out_clear_bpool_bit; 3102 3103 priv->ppio_params.type = PP2_PPIO_T_NIC; 3104 rte_spinlock_init(&priv->lock); 3105 3106 return priv; 3107 out_clear_bpool_bit: 3108 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); 3109 out_free_priv: 3110 rte_free(priv); 3111 return NULL; 3112 } 3113 3114 /** 3115 * Create device representing Ethernet port. 3116 * 3117 * @param name 3118 * Pointer to the port's name. 3119 * 3120 * @return 3121 * 0 on success, negative error value otherwise. 
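 *
 * Note that the initial MAC address is read from the kernel netdevice of
 * the same name via an SIOCGIFHWADDR ioctl, so the interface has to exist
 * and be visible to the process when the device is created.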
3122 */ 3123 static int 3124 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) 3125 { 3126 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); 3127 struct rte_eth_dev *eth_dev; 3128 struct mrvl_priv *priv; 3129 struct ifreq req; 3130 3131 eth_dev = rte_eth_dev_allocate(name); 3132 if (!eth_dev) 3133 return -ENOMEM; 3134 3135 priv = mrvl_priv_create(name); 3136 if (!priv) { 3137 ret = -ENOMEM; 3138 goto out_free; 3139 } 3140 eth_dev->data->dev_private = priv; 3141 3142 eth_dev->data->mac_addrs = 3143 rte_zmalloc("mac_addrs", 3144 RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); 3145 if (!eth_dev->data->mac_addrs) { 3146 MRVL_LOG(ERR, "Failed to allocate space for eth addrs"); 3147 ret = -ENOMEM; 3148 goto out_free; 3149 } 3150 3151 memset(&req, 0, sizeof(req)); 3152 strcpy(req.ifr_name, name); 3153 ret = ioctl(fd, SIOCGIFHWADDR, &req); 3154 if (ret) 3155 goto out_free; 3156 3157 memcpy(eth_dev->data->mac_addrs[0].addr_bytes, 3158 req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN); 3159 3160 eth_dev->device = &vdev->device; 3161 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; 3162 mrvl_set_tx_function(eth_dev); 3163 eth_dev->dev_ops = &mrvl_ops; 3164 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 3165 3166 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 3167 3168 rte_eth_dev_probing_finish(eth_dev); 3169 return 0; 3170 out_free: 3171 rte_eth_dev_release_port(eth_dev); 3172 3173 return ret; 3174 } 3175 3176 /** 3177 * Callback used by rte_kvargs_process() during argument parsing. 3178 * 3179 * @param key 3180 * Pointer to the parsed key (unused). 3181 * @param value 3182 * Pointer to the parsed value. 3183 * @param extra_args 3184 * Pointer to the extra arguments which contains address of the 3185 * table of pointers to parsed interface names. 3186 * 3187 * @return 3188 * Always 0. 3189 */ 3190 static int 3191 mrvl_get_ifnames(const char *key __rte_unused, const char *value, 3192 void *extra_args) 3193 { 3194 struct mrvl_ifnames *ifnames = extra_args; 3195 3196 ifnames->names[ifnames->idx++] = value; 3197 3198 return 0; 3199 } 3200 3201 /** 3202 * DPDK callback to register the virtual device. 3203 * 3204 * @param vdev 3205 * Pointer to the virtual device. 3206 * 3207 * @return 3208 * 0 on success, negative error value otherwise. 
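 *
 * The probe consumes the "iface" and "cfg" kvargs; a typical devargs
 * string (interface names and config path are illustrative) is
 *
 *   --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos_config.txt
 *
 * MUSDK and the packet processors are initialized once, on the first
 * device probed, and reused by devices probed afterwards.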
3209 */ 3210 static int 3211 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) 3212 { 3213 struct rte_kvargs *kvlist; 3214 struct mrvl_ifnames ifnames; 3215 int ret = -EINVAL; 3216 uint32_t i, ifnum, cfgnum; 3217 const char *params; 3218 3219 params = rte_vdev_device_args(vdev); 3220 if (!params) 3221 return -EINVAL; 3222 3223 kvlist = rte_kvargs_parse(params, valid_args); 3224 if (!kvlist) 3225 return -EINVAL; 3226 3227 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); 3228 if (ifnum > RTE_DIM(ifnames.names)) 3229 goto out_free_kvlist; 3230 3231 ifnames.idx = 0; 3232 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, 3233 mrvl_get_ifnames, &ifnames); 3234 3235 3236 /* 3237 * The below system initialization should be done only once, 3238 * on the first provided configuration file 3239 */ 3240 if (!mrvl_cfg) { 3241 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); 3242 MRVL_LOG(INFO, "Parsing config file!"); 3243 if (cfgnum > 1) { 3244 MRVL_LOG(ERR, "Cannot handle more than one config file!"); 3245 goto out_free_kvlist; 3246 } else if (cfgnum == 1) { 3247 rte_kvargs_process(kvlist, MRVL_CFG_ARG, 3248 mrvl_get_cfg, &mrvl_cfg); 3249 } 3250 } 3251 3252 if (mrvl_dev_num) 3253 goto init_devices; 3254 3255 MRVL_LOG(INFO, "Perform MUSDK initializations"); 3256 3257 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist); 3258 if (ret) 3259 goto out_free_kvlist; 3260 3261 ret = mrvl_init_pp2(); 3262 if (ret) { 3263 MRVL_LOG(ERR, "Failed to init PP!"); 3264 rte_mvep_deinit(MVEP_MOD_T_PP2); 3265 goto out_free_kvlist; 3266 } 3267 3268 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); 3269 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); 3270 3271 mrvl_lcore_first = RTE_MAX_LCORE; 3272 mrvl_lcore_last = 0; 3273 3274 init_devices: 3275 for (i = 0; i < ifnum; i++) { 3276 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]); 3277 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); 3278 if (ret) 3279 goto out_cleanup; 3280 mrvl_dev_num++; 3281 } 3282 3283 rte_kvargs_free(kvlist); 3284 3285 return 0; 3286 out_cleanup: 3287 rte_pmd_mrvl_remove(vdev); 3288 3289 out_free_kvlist: 3290 rte_kvargs_free(kvlist); 3291 3292 return ret; 3293 } 3294 3295 /** 3296 * DPDK callback to remove virtual device. 3297 * 3298 * @param vdev 3299 * Pointer to the removed virtual device. 3300 * 3301 * @return 3302 * 0 on success, negative error value otherwise. 3303 */ 3304 static int 3305 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) 3306 { 3307 uint16_t port_id; 3308 int ret = 0; 3309 3310 RTE_ETH_FOREACH_DEV(port_id) { 3311 if (rte_eth_devices[port_id].device != &vdev->device) 3312 continue; 3313 ret |= rte_eth_dev_close(port_id); 3314 } 3315 3316 return ret == 0 ? 0 : -EIO; 3317 } 3318 3319 static struct rte_vdev_driver pmd_mrvl_drv = { 3320 .probe = rte_pmd_mrvl_probe, 3321 .remove = rte_pmd_mrvl_remove, 3322 }; 3323 3324 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); 3325 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); 3326 RTE_LOG_REGISTER_DEFAULT(mrvl_logtype, NOTICE); 3327