xref: /dpdk/drivers/net/mvpp2/mrvl_ethdev.c (revision 8df71650e9fdc6346f09b7a57e86cded7b553152)
1fe939687SNatalie Samsonov /* SPDX-License-Identifier: BSD-3-Clause
25147cc75SYuri Chipchev  * Copyright(c) 2017-2021 Marvell International Ltd.
35147cc75SYuri Chipchev  * Copyright(c) 2017-2021 Semihalf.
4fe939687SNatalie Samsonov  * All rights reserved.
5fe939687SNatalie Samsonov  */
6fe939687SNatalie Samsonov 
76723c0fcSBruce Richardson #include <rte_string_fns.h>
8df96fd0dSBruce Richardson #include <ethdev_driver.h>
9fe939687SNatalie Samsonov #include <rte_kvargs.h>
10fe939687SNatalie Samsonov #include <rte_log.h>
11fe939687SNatalie Samsonov #include <rte_malloc.h>
124851ef2bSDavid Marchand #include <bus_vdev_driver.h>
13fe939687SNatalie Samsonov 
14fe939687SNatalie Samsonov #include <fcntl.h>
15fe939687SNatalie Samsonov #include <linux/ethtool.h>
16fe939687SNatalie Samsonov #include <linux/sockios.h>
17fe939687SNatalie Samsonov #include <net/if.h>
18fe939687SNatalie Samsonov #include <net/if_arp.h>
19fe939687SNatalie Samsonov #include <sys/ioctl.h>
20fe939687SNatalie Samsonov #include <sys/socket.h>
21fe939687SNatalie Samsonov #include <sys/stat.h>
22fe939687SNatalie Samsonov #include <sys/types.h>
23fe939687SNatalie Samsonov 
244b4ab496SLiron Himi #include <rte_mvep_common.h>
25fe939687SNatalie Samsonov #include "mrvl_ethdev.h"
26fe939687SNatalie Samsonov #include "mrvl_qos.h"
27a1f83becSTomasz Duszynski #include "mrvl_flow.h"
28cdb53f8dSTomasz Duszynski #include "mrvl_mtr.h"
29429c3944STomasz Duszynski #include "mrvl_tm.h"
30fe939687SNatalie Samsonov 
31fe939687SNatalie Samsonov /* bitmask with reserved hifs */
32fe939687SNatalie Samsonov #define MRVL_MUSDK_HIFS_RESERVED 0x0F
33fe939687SNatalie Samsonov /* bitmask with reserved bpools */
34fe939687SNatalie Samsonov #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
35fe939687SNatalie Samsonov /* bitmask with reserved kernel RSS tables */
3625ddbf19SYuri Chipchev #define MRVL_MUSDK_RSS_RESERVED 0x0F
37fe939687SNatalie Samsonov /* maximum number of available hifs */
38fe939687SNatalie Samsonov #define MRVL_MUSDK_HIFS_MAX 9
39fe939687SNatalie Samsonov 
40fe939687SNatalie Samsonov /* prefetch shift */
41fe939687SNatalie Samsonov #define MRVL_MUSDK_PREFETCH_SHIFT 2
42fe939687SNatalie Samsonov 
437da1aed6SLiron Himi /* TCAM has 25 entries reserved for uc/mc filter entries
447da1aed6SLiron Himi  * + 1 for primary mac address
457da1aed6SLiron Himi  */
467da1aed6SLiron Himi #define MRVL_MAC_ADDRS_MAX (1 + 25)
47fe939687SNatalie Samsonov #define MRVL_MATCH_LEN 16
48fe939687SNatalie Samsonov #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
49fe939687SNatalie Samsonov /* Maximum allowable packet size */
50fe939687SNatalie Samsonov #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
51fe939687SNatalie Samsonov 
/* devargs keys accepted by this driver (see valid_args[] below) */
#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

/* length of an IPv4-over-Ethernet ARP payload in bytes */
#define MRVL_ARP_LENGTH 28

/* sentinel meaning the cookie high-address part has not been set yet */
#define MRVL_COOKIE_ADDR_INVALID ~0ULL
/* bits of a buffer cookie kept in cookie_addr_high — presumably the upper
 * part of the buffer virtual address; TODO confirm against the rx path
 */
#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000

/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
			  RTE_ETH_RX_OFFLOAD_CHECKSUM)

/** Port Tx offloads capabilities */
#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM  | \
				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

/* per-mbuf ol_flags corresponding to the checksum Tx offloads above */
#define MRVL_TX_PKT_OFFLOADS (RTE_MBUF_F_TX_IP_CKSUM | \
			      RTE_MBUF_F_TX_TCP_CKSUM | \
			      RTE_MBUF_F_TX_UDP_CKSUM)
7445ea4c59SLiron Himi 
/* devargs keys recognized by the probe, NULL-terminated */
static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

/* bitmap of hif slots in use; low bits pre-marked as reserved */
static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
/* per-lcore hardware interface handles, created lazily (see mrvl_get_hif) */
static struct pp2_hif *hifs[RTE_MAX_LCORE];
/* per-packet-processor bitmap of bpools in use; low bits pre-reserved */
static int used_bpools[PP2_NUM_PKT_PROC] = {
	[0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
};

/* maps a DPDK port id to the hardware buffer pool its mbufs came from */
static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
/* per-lcore count of buffers contributed to each bpool */
static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
/* cached upper part of the buffer cookie; invalid until first use */
static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
/* dummy short pools created by mrvl_pp2_fixup_init(), one per pkt proc */
static int dummy_pool_id[PP2_NUM_PKT_PROC];
struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0};
92fe939687SNatalie Samsonov 
/* interface names collected from the "iface" devargs */
struct mrvl_ifnames {
	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int idx;	/* number of valid entries in names[] */
};
97fe939687SNatalie Samsonov 
/*
 * To use buffer harvesting based on loopback port shadow queue structure
 * was introduced for buffers information bookkeeping.
 *
 * Before sending the packet, related buffer information (pp2_buff_inf) is
 * stored in shadow queue. After packet is transmitted no longer used
 * packet buffer is released back to its original hardware pool,
 * on condition it originated from interface.
 * In case it was generated by application itself i.e: mbuf->port field is
 * 0xff then it is released to software mempool.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of descriptors sent, that can be
			     * released
			     */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
118fe939687SNatalie Samsonov 
/* Per-queue Rx context. */
struct mrvl_rxq {
	struct mrvl_priv *priv;	/* owning port's private data */
	struct rte_mempool *mp;	/* software mempool backing this queue */
	int queue_id;
	int port_id;
	int cksum_enabled;	/* non-zero when Rx checksum offload is enabled */
	uint64_t bytes_recv;	/* byte counter for statistics */
	uint64_t drop_mac;	/* drop counter; exact condition not visible here -
				 * see the rx burst path
				 */
};
128fe939687SNatalie Samsonov 
/* Per-queue Tx context. */
struct mrvl_txq {
	struct mrvl_priv *priv;	/* owning port's private data */
	int queue_id;
	int port_id;
	uint64_t bytes_sent;	/* byte counter for statistics */
	/* one shadow queue per lcore for lock-free buffer bookkeeping */
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
	int tx_deferred_start;	/* non-zero if queue start is deferred */
};
137fe939687SNatalie Samsonov 
138fe939687SNatalie Samsonov static int mrvl_lcore_first;
139fe939687SNatalie Samsonov static int mrvl_lcore_last;
140fe939687SNatalie Samsonov static int mrvl_dev_num;
141fe939687SNatalie Samsonov 
142fe939687SNatalie Samsonov static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
143fe939687SNatalie Samsonov static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
144fe939687SNatalie Samsonov 			struct pp2_hif *hif, unsigned int core_id,
145fe939687SNatalie Samsonov 			struct mrvl_shadow_txq *sq, int qid, int force);
146fe939687SNatalie Samsonov 
1479e79d810SZyta Szpak static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
1489e79d810SZyta Szpak 				  uint16_t nb_pkts);
1499e79d810SZyta Szpak static uint16_t mrvl_tx_sg_pkt_burst(void *txq,	struct rte_mbuf **tx_pkts,
1509e79d810SZyta Szpak 				     uint16_t nb_pkts);
151696202caSLiron Himi static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev);
152696202caSLiron Himi static void mrvl_deinit_pp2(void);
153696202caSLiron Himi static void mrvl_deinit_hifs(void);
1549e79d810SZyta Szpak 
1555147cc75SYuri Chipchev static int
1565147cc75SYuri Chipchev mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1575147cc75SYuri Chipchev 		  uint32_t index, uint32_t vmdq __rte_unused);
1585147cc75SYuri Chipchev static int
1595147cc75SYuri Chipchev mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
1605147cc75SYuri Chipchev static int
1615147cc75SYuri Chipchev mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
1625147cc75SYuri Chipchev static int mrvl_promiscuous_enable(struct rte_eth_dev *dev);
1635147cc75SYuri Chipchev static int mrvl_allmulticast_enable(struct rte_eth_dev *dev);
1647f2ae5ddSLiron Himi static int
1657f2ae5ddSLiron Himi mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
1669e79d810SZyta Szpak 
/* Build one xstats table row: stat name plus byte offset and size of the
 * matching field inside struct pp2_ppio_statistics.
 */
#define MRVL_XSTATS_TBL_ENTRY(name) { \
	#name, offsetof(struct pp2_ppio_statistics, name),	\
	sizeof(((struct pp2_ppio_statistics *)0)->name)		\
}

/* Table with xstats data */
static struct {
	const char *name;	/* stat name exposed to the application */
	unsigned int offset;	/* field offset within pp2_ppio_statistics */
	unsigned int size;	/* field size in bytes */
} mrvl_xstats_tbl[] = {
	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
	MRVL_XSTATS_TBL_ENTRY(rx_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_errors),
	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
	MRVL_XSTATS_TBL_ENTRY(tx_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_errors)
};
192fe939687SNatalie Samsonov 
/*
 * Reserve the next free slot in @bitmap: the bit just above the highest
 * bit currently set.
 *
 * @returns the reserved bit index, or -1 when it would exceed @max.
 */
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int pos = sizeof(*bitmap) * 8 - rte_clz32(*bitmap);

	if (pos >= max)
		return -1;

	*bitmap |= 1 << pos;

	return pos;
}
20530d30720SLiron Himi 
20630d30720SLiron Himi static int
20730d30720SLiron Himi mrvl_pp2_fixup_init(void)
20830d30720SLiron Himi {
20930d30720SLiron Himi 	struct pp2_bpool_params bpool_params;
21030d30720SLiron Himi 	char			name[15];
21130d30720SLiron Himi 	int			err, i;
21230d30720SLiron Himi 
21330d30720SLiron Himi 	memset(dummy_pool, 0, sizeof(dummy_pool));
21430d30720SLiron Himi 	for (i = 0; i < pp2_get_num_inst(); i++) {
21530d30720SLiron Himi 		dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i],
21630d30720SLiron Himi 					     PP2_BPOOL_NUM_POOLS);
21730d30720SLiron Himi 		if (dummy_pool_id[i] < 0) {
218*8df71650SJerin Jacob 			MRVL_LOG(ERR, "Can't find free pool");
21930d30720SLiron Himi 			return -1;
22030d30720SLiron Himi 		}
22130d30720SLiron Himi 
22230d30720SLiron Himi 		memset(name, 0, sizeof(name));
22330d30720SLiron Himi 		snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]);
22430d30720SLiron Himi 		memset(&bpool_params, 0, sizeof(bpool_params));
22530d30720SLiron Himi 		bpool_params.match = name;
22630d30720SLiron Himi 		bpool_params.buff_len = MRVL_PKT_OFFS;
22730d30720SLiron Himi 		bpool_params.dummy_short_pool = 1;
22830d30720SLiron Himi 		err = pp2_bpool_init(&bpool_params, &dummy_pool[i]);
22930d30720SLiron Himi 		if (err != 0 || !dummy_pool[i]) {
230*8df71650SJerin Jacob 			MRVL_LOG(ERR, "BPool init failed!");
23130d30720SLiron Himi 			used_bpools[i] &= ~(1 << dummy_pool_id[i]);
23230d30720SLiron Himi 			return -1;
23330d30720SLiron Himi 		}
23430d30720SLiron Himi 	}
23530d30720SLiron Himi 
23630d30720SLiron Himi 	return 0;
23730d30720SLiron Himi }
23830d30720SLiron Himi 
239f85c08f7SLiron Himi /**
240f85c08f7SLiron Himi  * Initialize packet processor.
241f85c08f7SLiron Himi  *
242f85c08f7SLiron Himi  * @return
243f85c08f7SLiron Himi  *   0 on success, negative error value otherwise.
244f85c08f7SLiron Himi  */
245f85c08f7SLiron Himi static int
246f85c08f7SLiron Himi mrvl_init_pp2(void)
247f85c08f7SLiron Himi {
248f85c08f7SLiron Himi 	struct pp2_init_params	init_params;
24930d30720SLiron Himi 	int			err;
250f85c08f7SLiron Himi 
251f85c08f7SLiron Himi 	memset(&init_params, 0, sizeof(init_params));
252f85c08f7SLiron Himi 	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
253f85c08f7SLiron Himi 	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
254f85c08f7SLiron Himi 	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
255f85c08f7SLiron Himi 	if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
256f85c08f7SLiron Himi 		memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
257f85c08f7SLiron Himi 		       sizeof(struct pp2_parse_udfs));
25830d30720SLiron Himi 	err = pp2_init(&init_params);
25930d30720SLiron Himi 	if (err != 0) {
26030d30720SLiron Himi 		MRVL_LOG(ERR, "PP2 init failed");
26130d30720SLiron Himi 		return -1;
26230d30720SLiron Himi 	}
26330d30720SLiron Himi 
26430d30720SLiron Himi 	err = mrvl_pp2_fixup_init();
26530d30720SLiron Himi 	if (err != 0) {
26630d30720SLiron Himi 		MRVL_LOG(ERR, "PP2 fixup init failed");
26730d30720SLiron Himi 		return -1;
26830d30720SLiron Himi 	}
26930d30720SLiron Himi 
27030d30720SLiron Himi 	return 0;
27130d30720SLiron Himi }
27230d30720SLiron Himi 
27330d30720SLiron Himi static void
27430d30720SLiron Himi mrvl_pp2_fixup_deinit(void)
27530d30720SLiron Himi {
27630d30720SLiron Himi 	int i;
27730d30720SLiron Himi 
27830d30720SLiron Himi 	for (i = 0; i < PP2_NUM_PKT_PROC; i++) {
27930d30720SLiron Himi 		if (!dummy_pool[i])
28030d30720SLiron Himi 			continue;
28130d30720SLiron Himi 		pp2_bpool_deinit(dummy_pool[i]);
28230d30720SLiron Himi 		used_bpools[i] &= ~(1 << dummy_pool_id[i]);
28330d30720SLiron Himi 	}
284f85c08f7SLiron Himi }
285f85c08f7SLiron Himi 
/**
 * Deinitialize packet processor.
 *
 * Destroys the dummy pools first, then shuts down the pp2 subsystem.
 */
static void
mrvl_deinit_pp2(void)
{
	mrvl_pp2_fixup_deinit();
	pp2_deinit();
}
298f85c08f7SLiron Himi 
2999e79d810SZyta Szpak static inline void
3009e79d810SZyta Szpak mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
3019e79d810SZyta Szpak {
3029e79d810SZyta Szpak 	sq->ent[sq->head].buff.cookie = (uint64_t)buf;
3039e79d810SZyta Szpak 	sq->ent[sq->head].buff.addr = buf ?
3049e79d810SZyta Szpak 		rte_mbuf_data_iova_default(buf) : 0;
3059e79d810SZyta Szpak 
3069e79d810SZyta Szpak 	sq->ent[sq->head].bpool =
3079e79d810SZyta Szpak 		(unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
3089e79d810SZyta Szpak 		 buf->refcnt > 1)) ? NULL :
3099e79d810SZyta Szpak 		 mrvl_port_to_bpool_lookup[buf->port];
3109e79d810SZyta Szpak 
3119e79d810SZyta Szpak 	sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
3129e79d810SZyta Szpak 	sq->size++;
3139e79d810SZyta Szpak }
3149e79d810SZyta Szpak 
315f85c08f7SLiron Himi /**
316f85c08f7SLiron Himi  * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
317f85c08f7SLiron Himi  */
318f85c08f7SLiron Himi static void
319f85c08f7SLiron Himi mrvl_deinit_hifs(void)
320f85c08f7SLiron Himi {
321f85c08f7SLiron Himi 	int i;
322f85c08f7SLiron Himi 
323f85c08f7SLiron Himi 	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
324f85c08f7SLiron Himi 		if (hifs[i])
325f85c08f7SLiron Himi 			pp2_hif_deinit(hifs[i]);
326f85c08f7SLiron Himi 	}
327f85c08f7SLiron Himi 	used_hifs = MRVL_MUSDK_HIFS_RESERVED;
328f85c08f7SLiron Himi 	memset(hifs, 0, sizeof(hifs));
329f85c08f7SLiron Himi }
330f85c08f7SLiron Himi 
/* Prepare one outq descriptor for @buf: reset it, then set the physical
 * address, a zero packet offset and the data length.
 */
static inline void
mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
{
	uint16_t data_len = rte_pktmbuf_data_len(buf);

	pp2_ppio_outq_desc_reset(desc);
	pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
	pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
	pp2_ppio_outq_desc_set_pkt_len(desc, data_len);
}
3399e79d810SZyta Szpak 
340fe939687SNatalie Samsonov static inline int
341fe939687SNatalie Samsonov mrvl_get_bpool_size(int pp2_id, int pool_id)
342fe939687SNatalie Samsonov {
343fe939687SNatalie Samsonov 	int i;
344fe939687SNatalie Samsonov 	int size = 0;
345fe939687SNatalie Samsonov 
346fe939687SNatalie Samsonov 	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
347fe939687SNatalie Samsonov 		size += mrvl_port_bpool_size[pp2_id][pool_id][i];
348fe939687SNatalie Samsonov 
349fe939687SNatalie Samsonov 	return size;
350fe939687SNatalie Samsonov }
351fe939687SNatalie Samsonov 
352fe939687SNatalie Samsonov static int
353fe939687SNatalie Samsonov mrvl_init_hif(int core_id)
354fe939687SNatalie Samsonov {
355fe939687SNatalie Samsonov 	struct pp2_hif_params params;
356fe939687SNatalie Samsonov 	char match[MRVL_MATCH_LEN];
357fe939687SNatalie Samsonov 	int ret;
358fe939687SNatalie Samsonov 
359fe939687SNatalie Samsonov 	ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
360fe939687SNatalie Samsonov 	if (ret < 0) {
361acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
362fe939687SNatalie Samsonov 		return ret;
363fe939687SNatalie Samsonov 	}
364fe939687SNatalie Samsonov 
365fe939687SNatalie Samsonov 	snprintf(match, sizeof(match), "hif-%d", ret);
366fe939687SNatalie Samsonov 	memset(&params, 0, sizeof(params));
367fe939687SNatalie Samsonov 	params.match = match;
368fe939687SNatalie Samsonov 	params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
369fe939687SNatalie Samsonov 	ret = pp2_hif_init(&params, &hifs[core_id]);
370fe939687SNatalie Samsonov 	if (ret) {
371acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
372fe939687SNatalie Samsonov 		return ret;
373fe939687SNatalie Samsonov 	}
374fe939687SNatalie Samsonov 
375fe939687SNatalie Samsonov 	return 0;
376fe939687SNatalie Samsonov }
377fe939687SNatalie Samsonov 
378fe939687SNatalie Samsonov static inline struct pp2_hif*
379fe939687SNatalie Samsonov mrvl_get_hif(struct mrvl_priv *priv, int core_id)
380fe939687SNatalie Samsonov {
381fe939687SNatalie Samsonov 	int ret;
382fe939687SNatalie Samsonov 
383fe939687SNatalie Samsonov 	if (likely(hifs[core_id] != NULL))
384fe939687SNatalie Samsonov 		return hifs[core_id];
385fe939687SNatalie Samsonov 
386fe939687SNatalie Samsonov 	rte_spinlock_lock(&priv->lock);
387fe939687SNatalie Samsonov 
388fe939687SNatalie Samsonov 	ret = mrvl_init_hif(core_id);
389fe939687SNatalie Samsonov 	if (ret < 0) {
390acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
391fe939687SNatalie Samsonov 		goto out;
392fe939687SNatalie Samsonov 	}
393fe939687SNatalie Samsonov 
394fe939687SNatalie Samsonov 	if (core_id < mrvl_lcore_first)
395fe939687SNatalie Samsonov 		mrvl_lcore_first = core_id;
396fe939687SNatalie Samsonov 
397fe939687SNatalie Samsonov 	if (core_id > mrvl_lcore_last)
398fe939687SNatalie Samsonov 		mrvl_lcore_last = core_id;
399fe939687SNatalie Samsonov out:
400fe939687SNatalie Samsonov 	rte_spinlock_unlock(&priv->lock);
401fe939687SNatalie Samsonov 
402fe939687SNatalie Samsonov 	return hifs[core_id];
403fe939687SNatalie Samsonov }
404fe939687SNatalie Samsonov 
405fe939687SNatalie Samsonov /**
4069e79d810SZyta Szpak  * Set tx burst function according to offload flag
4079e79d810SZyta Szpak  *
4089e79d810SZyta Szpak  * @param dev
4099e79d810SZyta Szpak  *   Pointer to Ethernet device structure.
4109e79d810SZyta Szpak  */
4119e79d810SZyta Szpak static void
4129e79d810SZyta Szpak mrvl_set_tx_function(struct rte_eth_dev *dev)
4139e79d810SZyta Szpak {
4149e79d810SZyta Szpak 	struct mrvl_priv *priv = dev->data->dev_private;
4159e79d810SZyta Szpak 
4169e79d810SZyta Szpak 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
4179e79d810SZyta Szpak 	if (priv->multiseg) {
418a247fcd9SStephen Hemminger 		MRVL_LOG(INFO, "Using multi-segment tx callback");
4199e79d810SZyta Szpak 		dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
4209e79d810SZyta Szpak 	} else {
421a247fcd9SStephen Hemminger 		MRVL_LOG(INFO, "Using single-segment tx callback");
4229e79d810SZyta Szpak 		dev->tx_pkt_burst = mrvl_tx_pkt_burst;
4239e79d810SZyta Szpak 	}
4249e79d810SZyta Szpak }
4259e79d810SZyta Szpak 
4269e79d810SZyta Szpak /**
427fe939687SNatalie Samsonov  * Configure rss based on dpdk rss configuration.
428fe939687SNatalie Samsonov  *
429fe939687SNatalie Samsonov  * @param priv
430fe939687SNatalie Samsonov  *   Pointer to private structure.
431fe939687SNatalie Samsonov  * @param rss_conf
432fe939687SNatalie Samsonov  *   Pointer to RSS configuration.
433fe939687SNatalie Samsonov  *
434fe939687SNatalie Samsonov  * @return
435fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
436fe939687SNatalie Samsonov  */
437fe939687SNatalie Samsonov static int
438fe939687SNatalie Samsonov mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
439fe939687SNatalie Samsonov {
440fe939687SNatalie Samsonov 	if (rss_conf->rss_key)
441acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "Changing hash key is not supported");
442fe939687SNatalie Samsonov 
443fe939687SNatalie Samsonov 	if (rss_conf->rss_hf == 0) {
444fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
445295968d1SFerruh Yigit 	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
446fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type =
447fe939687SNatalie Samsonov 			PP2_PPIO_HASH_T_2_TUPLE;
448295968d1SFerruh Yigit 	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
449fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type =
450fe939687SNatalie Samsonov 			PP2_PPIO_HASH_T_5_TUPLE;
451fe939687SNatalie Samsonov 		priv->rss_hf_tcp = 1;
452295968d1SFerruh Yigit 	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
453fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type =
454fe939687SNatalie Samsonov 			PP2_PPIO_HASH_T_5_TUPLE;
455fe939687SNatalie Samsonov 		priv->rss_hf_tcp = 0;
456fe939687SNatalie Samsonov 	} else {
457fe939687SNatalie Samsonov 		return -EINVAL;
458fe939687SNatalie Samsonov 	}
459fe939687SNatalie Samsonov 
460fe939687SNatalie Samsonov 	return 0;
461fe939687SNatalie Samsonov }
462fe939687SNatalie Samsonov 
463fe939687SNatalie Samsonov /**
464fe939687SNatalie Samsonov  * Ethernet device configuration.
465fe939687SNatalie Samsonov  *
466fe939687SNatalie Samsonov  * Prepare the driver for a given number of TX and RX queues and
467fe939687SNatalie Samsonov  * configure RSS.
468fe939687SNatalie Samsonov  *
469fe939687SNatalie Samsonov  * @param dev
470fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
471fe939687SNatalie Samsonov  *
472fe939687SNatalie Samsonov  * @return
473fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
474fe939687SNatalie Samsonov  */
475fe939687SNatalie Samsonov static int
476fe939687SNatalie Samsonov mrvl_dev_configure(struct rte_eth_dev *dev)
477fe939687SNatalie Samsonov {
478fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
479fe939687SNatalie Samsonov 	int ret;
480fe939687SNatalie Samsonov 
4815997b0a8SNatalie Samsonov 	if (priv->ppio) {
4825997b0a8SNatalie Samsonov 		MRVL_LOG(INFO, "Device reconfiguration is not supported");
4835997b0a8SNatalie Samsonov 		return -EINVAL;
4845997b0a8SNatalie Samsonov 	}
4855997b0a8SNatalie Samsonov 
486295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
487295968d1SFerruh Yigit 	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
488acab7d58STomasz Duszynski 		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
489fe939687SNatalie Samsonov 			dev->data->dev_conf.rxmode.mq_mode);
490fe939687SNatalie Samsonov 		return -EINVAL;
491fe939687SNatalie Samsonov 	}
492fe939687SNatalie Samsonov 
4931bb4a528SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
494*8df71650SJerin Jacob 		MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u",
4951bb4a528SFerruh Yigit 			 dev->data->dev_conf.rxmode.mtu,
496949cdeddSLiron Himi 			 priv->max_mtu);
497949cdeddSLiron Himi 		return -EINVAL;
498949cdeddSLiron Himi 	}
499fe939687SNatalie Samsonov 
500295968d1SFerruh Yigit 	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
5019e79d810SZyta Szpak 		priv->multiseg = 1;
5029e79d810SZyta Szpak 
503fe939687SNatalie Samsonov 	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
504fe939687SNatalie Samsonov 				  dev->data->nb_rx_queues);
505fe939687SNatalie Samsonov 	if (ret < 0)
506fe939687SNatalie Samsonov 		return ret;
507fe939687SNatalie Samsonov 
508fe939687SNatalie Samsonov 	ret = mrvl_configure_txqs(priv, dev->data->port_id,
509fe939687SNatalie Samsonov 				  dev->data->nb_tx_queues);
510fe939687SNatalie Samsonov 	if (ret < 0)
511fe939687SNatalie Samsonov 		return ret;
512fe939687SNatalie Samsonov 
513fe939687SNatalie Samsonov 	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
514fe939687SNatalie Samsonov 	priv->ppio_params.maintain_stats = 1;
515fe939687SNatalie Samsonov 	priv->nb_rx_queues = dev->data->nb_rx_queues;
516fe939687SNatalie Samsonov 
517429c3944STomasz Duszynski 	ret = mrvl_tm_init(dev);
518429c3944STomasz Duszynski 	if (ret < 0)
519429c3944STomasz Duszynski 		return ret;
520429c3944STomasz Duszynski 
521fe939687SNatalie Samsonov 	if (dev->data->nb_rx_queues == 1 &&
522295968d1SFerruh Yigit 	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
523acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
524fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
525e622c1a8SDana Vardi 		priv->configured = 1;
526fe939687SNatalie Samsonov 		return 0;
527fe939687SNatalie Samsonov 	}
528fe939687SNatalie Samsonov 
529e622c1a8SDana Vardi 	ret = mrvl_configure_rss(priv,
530fe939687SNatalie Samsonov 			&dev->data->dev_conf.rx_adv_conf.rss_conf);
531e622c1a8SDana Vardi 	if (ret < 0)
532e622c1a8SDana Vardi 		return ret;
533e622c1a8SDana Vardi 
534e622c1a8SDana Vardi 	priv->configured = 1;
535e622c1a8SDana Vardi 
536e622c1a8SDana Vardi 	return 0;
537fe939687SNatalie Samsonov }
538fe939687SNatalie Samsonov 
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint16_t mru;
	uint16_t mbuf_data_size = 0; /* SW buffer size */
	int ret;

	mru = MRVL_PP2_MTU_TO_MRU(mtu);
	/*
	 * min_rx_buf_size is equal to mbuf data size
	 * if pmd didn't set it differently
	 */
	mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	/* Prevent PMD from:
	 * - setting mru greater than the mbuf size resulting in
	 * hw and sw buffer size mismatch
	 * - setting mtu that requires the support of scattered packets
	 * when this feature has not been enabled/supported so far
	 * (TODO check scattered_rx flag here once scattered RX is supported).
	 */
	if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
		/* clamp MRU to what one mbuf can hold, then derive the
		 * matching MTU from the clamped MRU
		 */
		mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
		mtu = MRVL_PP2_MRU_TO_MTU(mru);
		MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
			"by current mbuf size: %u. Set MTU to %u, MRU to %u",
			mbuf_data_size, mtu, mru);
	}

	if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
		MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
		return -EINVAL;
	}

	/* no ppio yet - values take effect when the port is started */
	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MRU");
		return ret;
	}

	/* NOTE(review): if this call fails the MRU set above is left in
	 * place - confirm whether a rollback is required here.
	 */
	ret = pp2_ppio_set_mtu(priv->ppio, mtu);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MTU");
		return ret;
	}

	return 0;
}
604fe939687SNatalie Samsonov 
/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	/* no ppio yet - just record the requested link state */
	if (!priv->ppio) {
		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
		return 0;
	}

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret) {
		/* roll back the enable so the port stays consistently down */
		pp2_ppio_disable(priv->ppio);
		return ret;
	}

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}
6455147cc75SYuri Chipchev 
646fe939687SNatalie Samsonov /**
647fe939687SNatalie Samsonov  * DPDK callback to bring the link down.
648fe939687SNatalie Samsonov  *
649fe939687SNatalie Samsonov  * @param dev
650fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
651fe939687SNatalie Samsonov  *
652fe939687SNatalie Samsonov  * @return
653fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
654fe939687SNatalie Samsonov  */
655fe939687SNatalie Samsonov static int
656fe939687SNatalie Samsonov mrvl_dev_set_link_down(struct rte_eth_dev *dev)
657fe939687SNatalie Samsonov {
658fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
6595147cc75SYuri Chipchev 	int ret;
660fe939687SNatalie Samsonov 
6615147cc75SYuri Chipchev 	if (!priv->ppio) {
662295968d1SFerruh Yigit 		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
6635147cc75SYuri Chipchev 		return 0;
6645147cc75SYuri Chipchev 	}
6655147cc75SYuri Chipchev 	ret = pp2_ppio_disable(priv->ppio);
6665147cc75SYuri Chipchev 	if (ret)
6675147cc75SYuri Chipchev 		return ret;
668fe939687SNatalie Samsonov 
669295968d1SFerruh Yigit 	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
6705147cc75SYuri Chipchev 	return 0;
671fe939687SNatalie Samsonov }
672fe939687SNatalie Samsonov 
673fe939687SNatalie Samsonov /**
674fe939687SNatalie Samsonov  * DPDK callback to start tx queue.
675fe939687SNatalie Samsonov  *
676fe939687SNatalie Samsonov  * @param dev
677fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
678fe939687SNatalie Samsonov  * @param queue_id
679fe939687SNatalie Samsonov  *   Transmit queue index.
680fe939687SNatalie Samsonov  *
681fe939687SNatalie Samsonov  * @return
682fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
683fe939687SNatalie Samsonov  */
684fe939687SNatalie Samsonov static int
685fe939687SNatalie Samsonov mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
686fe939687SNatalie Samsonov {
687fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
688fe939687SNatalie Samsonov 	int ret;
689fe939687SNatalie Samsonov 
690fe939687SNatalie Samsonov 	if (!priv)
691fe939687SNatalie Samsonov 		return -EPERM;
692fe939687SNatalie Samsonov 
693fe939687SNatalie Samsonov 	/* passing 1 enables given tx queue */
694fe939687SNatalie Samsonov 	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
695fe939687SNatalie Samsonov 	if (ret) {
696acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
697fe939687SNatalie Samsonov 		return ret;
698fe939687SNatalie Samsonov 	}
699fe939687SNatalie Samsonov 
700fe939687SNatalie Samsonov 	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
701fe939687SNatalie Samsonov 
702fe939687SNatalie Samsonov 	return 0;
703fe939687SNatalie Samsonov }
704fe939687SNatalie Samsonov 
705fe939687SNatalie Samsonov /**
706fe939687SNatalie Samsonov  * DPDK callback to stop tx queue.
707fe939687SNatalie Samsonov  *
708fe939687SNatalie Samsonov  * @param dev
709fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
710fe939687SNatalie Samsonov  * @param queue_id
711fe939687SNatalie Samsonov  *   Transmit queue index.
712fe939687SNatalie Samsonov  *
713fe939687SNatalie Samsonov  * @return
714fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
715fe939687SNatalie Samsonov  */
716fe939687SNatalie Samsonov static int
717fe939687SNatalie Samsonov mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
718fe939687SNatalie Samsonov {
719fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
720fe939687SNatalie Samsonov 	int ret;
721fe939687SNatalie Samsonov 
722fe939687SNatalie Samsonov 	if (!priv->ppio)
723fe939687SNatalie Samsonov 		return -EPERM;
724fe939687SNatalie Samsonov 
725fe939687SNatalie Samsonov 	/* passing 0 disables given tx queue */
726fe939687SNatalie Samsonov 	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
727fe939687SNatalie Samsonov 	if (ret) {
728acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
729fe939687SNatalie Samsonov 		return ret;
730fe939687SNatalie Samsonov 	}
731fe939687SNatalie Samsonov 
732fe939687SNatalie Samsonov 	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
733fe939687SNatalie Samsonov 
734fe939687SNatalie Samsonov 	return 0;
735fe939687SNatalie Samsonov }
736fe939687SNatalie Samsonov 
737fe939687SNatalie Samsonov /**
738ff0b8b10SYuri Chipchev  * Populate VLAN Filter configuration.
739ff0b8b10SYuri Chipchev  *
740ff0b8b10SYuri Chipchev  * @param dev
741ff0b8b10SYuri Chipchev  *   Pointer to Ethernet device structure.
742ff0b8b10SYuri Chipchev  * @param on
743ff0b8b10SYuri Chipchev  *   Toggle filter.
744ff0b8b10SYuri Chipchev  *
745ff0b8b10SYuri Chipchev  * @return
746ff0b8b10SYuri Chipchev  *   0 on success, negative error value otherwise.
747ff0b8b10SYuri Chipchev  */
748ff0b8b10SYuri Chipchev static int mrvl_populate_vlan_table(struct rte_eth_dev *dev, int on)
749ff0b8b10SYuri Chipchev {
750ff0b8b10SYuri Chipchev 	uint32_t j;
751ff0b8b10SYuri Chipchev 	int ret;
752ff0b8b10SYuri Chipchev 	struct rte_vlan_filter_conf *vfc;
753ff0b8b10SYuri Chipchev 
754ff0b8b10SYuri Chipchev 	vfc = &dev->data->vlan_filter_conf;
755ff0b8b10SYuri Chipchev 	for (j = 0; j < RTE_DIM(vfc->ids); j++) {
756ff0b8b10SYuri Chipchev 		uint64_t vlan;
757ff0b8b10SYuri Chipchev 		uint64_t vbit;
758ff0b8b10SYuri Chipchev 		uint64_t ids = vfc->ids[j];
759ff0b8b10SYuri Chipchev 
760ff0b8b10SYuri Chipchev 		if (ids == 0)
761ff0b8b10SYuri Chipchev 			continue;
762ff0b8b10SYuri Chipchev 
763ff0b8b10SYuri Chipchev 		while (ids) {
764ff0b8b10SYuri Chipchev 			vlan = 64 * j;
765ff0b8b10SYuri Chipchev 			/* count trailing zeroes */
766ff0b8b10SYuri Chipchev 			vbit = ~ids & (ids - 1);
767ff0b8b10SYuri Chipchev 			/* clear least significant bit set */
768ff0b8b10SYuri Chipchev 			ids ^= (ids ^ (ids - 1)) ^ vbit;
769ff0b8b10SYuri Chipchev 			for (; vbit; vlan++)
770ff0b8b10SYuri Chipchev 				vbit >>= 1;
771ff0b8b10SYuri Chipchev 			ret = mrvl_vlan_filter_set(dev, vlan, on);
772ff0b8b10SYuri Chipchev 			if (ret) {
773*8df71650SJerin Jacob 				MRVL_LOG(ERR, "Failed to setup VLAN filter");
774ff0b8b10SYuri Chipchev 				return ret;
775ff0b8b10SYuri Chipchev 			}
776ff0b8b10SYuri Chipchev 		}
777ff0b8b10SYuri Chipchev 	}
778ff0b8b10SYuri Chipchev 
779ff0b8b10SYuri Chipchev 	return 0;
780ff0b8b10SYuri Chipchev }
781ff0b8b10SYuri Chipchev 
782ff0b8b10SYuri Chipchev /**
783fe939687SNatalie Samsonov  * DPDK callback to start the device.
784fe939687SNatalie Samsonov  *
785fe939687SNatalie Samsonov  * @param dev
786fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
787fe939687SNatalie Samsonov  *
788fe939687SNatalie Samsonov  * @return
789fe939687SNatalie Samsonov  *   0 on success, negative errno value on failure.
790fe939687SNatalie Samsonov  */
791fe939687SNatalie Samsonov static int
792fe939687SNatalie Samsonov mrvl_dev_start(struct rte_eth_dev *dev)
793fe939687SNatalie Samsonov {
794fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
795fe939687SNatalie Samsonov 	char match[MRVL_MATCH_LEN];
796fe939687SNatalie Samsonov 	int ret = 0, i, def_init_size;
7975147cc75SYuri Chipchev 	struct rte_ether_addr *mac_addr;
798fe939687SNatalie Samsonov 
7995997b0a8SNatalie Samsonov 	if (priv->ppio)
8005997b0a8SNatalie Samsonov 		return mrvl_dev_set_link_up(dev);
8015997b0a8SNatalie Samsonov 
802fe939687SNatalie Samsonov 	snprintf(match, sizeof(match), "ppio-%d:%d",
803fe939687SNatalie Samsonov 		 priv->pp_id, priv->ppio_id);
804fe939687SNatalie Samsonov 	priv->ppio_params.match = match;
805c2b5ae61SLiron Himi 	priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH;
806ef08031fSDana Vardi 	priv->forward_bad_frames = 0;
8074b86050aSDana Vardi 	priv->fill_bpool_buffs = MRVL_BURST_SIZE;
808ef08031fSDana Vardi 
809ef08031fSDana Vardi 	if (mrvl_cfg) {
810c2b5ae61SLiron Himi 		priv->ppio_params.eth_start_hdr =
811d7eb4fb2SLiron Himi 			mrvl_cfg->port[dev->data->port_id].eth_start_hdr;
812ef08031fSDana Vardi 		priv->forward_bad_frames =
813ef08031fSDana Vardi 			mrvl_cfg->port[dev->data->port_id].forward_bad_frames;
8144b86050aSDana Vardi 		priv->fill_bpool_buffs =
8154b86050aSDana Vardi 			mrvl_cfg->port[dev->data->port_id].fill_bpool_buffs;
816ef08031fSDana Vardi 	}
817fe939687SNatalie Samsonov 
818fe939687SNatalie Samsonov 	/*
819fe939687SNatalie Samsonov 	 * Calculate the minimum bpool size for refill feature as follows:
820fe939687SNatalie Samsonov 	 * 2 default burst sizes multiply by number of rx queues.
821fe939687SNatalie Samsonov 	 * If the bpool size will be below this value, new buffers will
822fe939687SNatalie Samsonov 	 * be added to the pool.
823fe939687SNatalie Samsonov 	 */
824fe939687SNatalie Samsonov 	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
825fe939687SNatalie Samsonov 
826fe939687SNatalie Samsonov 	/* In case initial bpool size configured in queues setup is
827fe939687SNatalie Samsonov 	 * smaller than minimum size add more buffers
828fe939687SNatalie Samsonov 	 */
829fe939687SNatalie Samsonov 	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
830fe939687SNatalie Samsonov 	if (priv->bpool_init_size < def_init_size) {
831fe939687SNatalie Samsonov 		int buffs_to_add = def_init_size - priv->bpool_init_size;
832fe939687SNatalie Samsonov 
833fe939687SNatalie Samsonov 		priv->bpool_init_size += buffs_to_add;
834fe939687SNatalie Samsonov 		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
835fe939687SNatalie Samsonov 		if (ret)
836acab7d58STomasz Duszynski 			MRVL_LOG(ERR, "Failed to add buffers to bpool");
837fe939687SNatalie Samsonov 	}
838fe939687SNatalie Samsonov 
839fe939687SNatalie Samsonov 	/*
840fe939687SNatalie Samsonov 	 * Calculate the maximum bpool size for refill feature as follows:
841fe939687SNatalie Samsonov 	 * maximum number of descriptors in rx queue multiply by number
842fe939687SNatalie Samsonov 	 * of rx queues plus minimum bpool size.
843fe939687SNatalie Samsonov 	 * In case the bpool size will exceed this value, superfluous buffers
844fe939687SNatalie Samsonov 	 * will be removed
845fe939687SNatalie Samsonov 	 */
846fe939687SNatalie Samsonov 	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
847fe939687SNatalie Samsonov 				priv->bpool_min_size;
848fe939687SNatalie Samsonov 
849fe939687SNatalie Samsonov 	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
850fe939687SNatalie Samsonov 	if (ret) {
851acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to init ppio");
852fe939687SNatalie Samsonov 		return ret;
853fe939687SNatalie Samsonov 	}
854fe939687SNatalie Samsonov 
855fe939687SNatalie Samsonov 	/*
85623f3dac4SStephen Hemminger 	 * In case there are some stale uc/mc mac addresses flush them
857fe939687SNatalie Samsonov 	 * here. It cannot be done during mrvl_dev_close() as port information
858fe939687SNatalie Samsonov 	 * is already gone at that point (due to pp2_ppio_deinit() in
859fe939687SNatalie Samsonov 	 * mrvl_dev_stop()).
860fe939687SNatalie Samsonov 	 */
861fe939687SNatalie Samsonov 	if (!priv->uc_mc_flushed) {
862fe939687SNatalie Samsonov 		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
863fe939687SNatalie Samsonov 		if (ret) {
864acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
865acab7d58STomasz Duszynski 				"Failed to flush uc/mc filter list");
866fe939687SNatalie Samsonov 			goto out;
867fe939687SNatalie Samsonov 		}
868fe939687SNatalie Samsonov 		priv->uc_mc_flushed = 1;
869fe939687SNatalie Samsonov 	}
870fe939687SNatalie Samsonov 
87179ec6202SNatalie Samsonov 	ret = mrvl_mtu_set(dev, dev->data->mtu);
87279ec6202SNatalie Samsonov 	if (ret)
87379ec6202SNatalie Samsonov 		MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);
874fe939687SNatalie Samsonov 
8755147cc75SYuri Chipchev 	if (!rte_is_zero_ether_addr(&dev->data->mac_addrs[0]))
8765147cc75SYuri Chipchev 		mrvl_mac_addr_set(dev, &dev->data->mac_addrs[0]);
8775147cc75SYuri Chipchev 
8785147cc75SYuri Chipchev 	for (i = 1; i < MRVL_MAC_ADDRS_MAX; i++) {
8795147cc75SYuri Chipchev 		mac_addr = &dev->data->mac_addrs[i];
8805147cc75SYuri Chipchev 
8815147cc75SYuri Chipchev 		/* skip zero address */
8825147cc75SYuri Chipchev 		if (rte_is_zero_ether_addr(mac_addr))
8835147cc75SYuri Chipchev 			continue;
8845147cc75SYuri Chipchev 
8855147cc75SYuri Chipchev 		mrvl_mac_addr_add(dev, mac_addr, i, 0);
8865147cc75SYuri Chipchev 	}
8875147cc75SYuri Chipchev 
8885147cc75SYuri Chipchev 	if (dev->data->all_multicast == 1)
8895147cc75SYuri Chipchev 		mrvl_allmulticast_enable(dev);
8905147cc75SYuri Chipchev 
891295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
892ff0b8b10SYuri Chipchev 		ret = mrvl_populate_vlan_table(dev, 1);
8935147cc75SYuri Chipchev 		if (ret) {
894ff0b8b10SYuri Chipchev 			MRVL_LOG(ERR, "Failed to populate VLAN table");
8955147cc75SYuri Chipchev 			goto out;
8965147cc75SYuri Chipchev 		}
8975147cc75SYuri Chipchev 	}
8985147cc75SYuri Chipchev 
899fe939687SNatalie Samsonov 	/* For default QoS config, don't start classifier. */
900d7eb4fb2SLiron Himi 	if (mrvl_cfg  &&
90141c60f74SDana Vardi 	    mrvl_cfg->port[dev->data->port_id].use_qos_global_defaults == 0) {
902fe939687SNatalie Samsonov 		ret = mrvl_start_qos_mapping(priv);
903fe939687SNatalie Samsonov 		if (ret) {
904acab7d58STomasz Duszynski 			MRVL_LOG(ERR, "Failed to setup QoS mapping");
905fe939687SNatalie Samsonov 			goto out;
906fe939687SNatalie Samsonov 		}
907fe939687SNatalie Samsonov 	}
908fe939687SNatalie Samsonov 
909fc026401SYuri Chipchev 	ret = pp2_ppio_set_loopback(priv->ppio, dev->data->dev_conf.lpbk_mode);
910fc026401SYuri Chipchev 	if (ret) {
911fc026401SYuri Chipchev 		MRVL_LOG(ERR, "Failed to set loopback");
912fc026401SYuri Chipchev 		goto out;
913fc026401SYuri Chipchev 	}
914fc026401SYuri Chipchev 
9155147cc75SYuri Chipchev 	if (dev->data->promiscuous == 1)
9165147cc75SYuri Chipchev 		mrvl_promiscuous_enable(dev);
9175147cc75SYuri Chipchev 
9187f2ae5ddSLiron Himi 	if (priv->flow_ctrl) {
9197f2ae5ddSLiron Himi 		ret = mrvl_flow_ctrl_set(dev, &priv->fc_conf);
9207f2ae5ddSLiron Himi 		if (ret) {
9217f2ae5ddSLiron Himi 			MRVL_LOG(ERR, "Failed to configure flow control");
9227f2ae5ddSLiron Himi 			goto out;
9237f2ae5ddSLiron Himi 		}
9247f2ae5ddSLiron Himi 		priv->flow_ctrl = 0;
9257f2ae5ddSLiron Himi 	}
9267f2ae5ddSLiron Himi 
927295968d1SFerruh Yigit 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
928fe939687SNatalie Samsonov 		ret = mrvl_dev_set_link_up(dev);
929fe939687SNatalie Samsonov 		if (ret) {
930acab7d58STomasz Duszynski 			MRVL_LOG(ERR, "Failed to set link up");
931295968d1SFerruh Yigit 			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
932fe939687SNatalie Samsonov 			goto out;
933fe939687SNatalie Samsonov 		}
9345147cc75SYuri Chipchev 	}
935fe939687SNatalie Samsonov 
936fe939687SNatalie Samsonov 	/* start tx queues */
937fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
938fe939687SNatalie Samsonov 		struct mrvl_txq *txq = dev->data->tx_queues[i];
939fe939687SNatalie Samsonov 
940fe939687SNatalie Samsonov 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
941fe939687SNatalie Samsonov 
942fe939687SNatalie Samsonov 		if (!txq->tx_deferred_start)
943fe939687SNatalie Samsonov 			continue;
944fe939687SNatalie Samsonov 
945fe939687SNatalie Samsonov 		/*
946fe939687SNatalie Samsonov 		 * All txqs are started by default. Stop them
947fe939687SNatalie Samsonov 		 * so that tx_deferred_start works as expected.
948fe939687SNatalie Samsonov 		 */
949fe939687SNatalie Samsonov 		ret = mrvl_tx_queue_stop(dev, i);
950fe939687SNatalie Samsonov 		if (ret)
951fe939687SNatalie Samsonov 			goto out;
952fe939687SNatalie Samsonov 	}
953fe939687SNatalie Samsonov 
95422805693SJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
95522805693SJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
95622805693SJie Hai 
957a1f83becSTomasz Duszynski 	mrvl_flow_init(dev);
958cdb53f8dSTomasz Duszynski 	mrvl_mtr_init(dev);
9599e79d810SZyta Szpak 	mrvl_set_tx_function(dev);
960cdb53f8dSTomasz Duszynski 
961fe939687SNatalie Samsonov 	return 0;
962fe939687SNatalie Samsonov out:
963acab7d58STomasz Duszynski 	MRVL_LOG(ERR, "Failed to start device");
964fe939687SNatalie Samsonov 	pp2_ppio_deinit(priv->ppio);
965fe939687SNatalie Samsonov 	return ret;
966fe939687SNatalie Samsonov }
967fe939687SNatalie Samsonov 
968fe939687SNatalie Samsonov /**
969fe939687SNatalie Samsonov  * Flush receive queues.
970fe939687SNatalie Samsonov  *
971fe939687SNatalie Samsonov  * @param dev
972fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
973fe939687SNatalie Samsonov  */
974fe939687SNatalie Samsonov static void
975fe939687SNatalie Samsonov mrvl_flush_rx_queues(struct rte_eth_dev *dev)
976fe939687SNatalie Samsonov {
977fe939687SNatalie Samsonov 	int i;
978fe939687SNatalie Samsonov 
979acab7d58STomasz Duszynski 	MRVL_LOG(INFO, "Flushing rx queues");
980fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
981fe939687SNatalie Samsonov 		int ret, num;
982fe939687SNatalie Samsonov 
983fe939687SNatalie Samsonov 		do {
984fe939687SNatalie Samsonov 			struct mrvl_rxq *q = dev->data->rx_queues[i];
985fe939687SNatalie Samsonov 			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
986fe939687SNatalie Samsonov 
987fe939687SNatalie Samsonov 			num = MRVL_PP2_RXD_MAX;
988fe939687SNatalie Samsonov 			ret = pp2_ppio_recv(q->priv->ppio,
989fe939687SNatalie Samsonov 					    q->priv->rxq_map[q->queue_id].tc,
990fe939687SNatalie Samsonov 					    q->priv->rxq_map[q->queue_id].inq,
991fe939687SNatalie Samsonov 					    descs, (uint16_t *)&num);
992fe939687SNatalie Samsonov 		} while (ret == 0 && num);
993fe939687SNatalie Samsonov 	}
994fe939687SNatalie Samsonov }
995fe939687SNatalie Samsonov 
996fe939687SNatalie Samsonov /**
997fe939687SNatalie Samsonov  * Flush transmit shadow queues.
998fe939687SNatalie Samsonov  *
999fe939687SNatalie Samsonov  * @param dev
1000fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1001fe939687SNatalie Samsonov  */
1002fe939687SNatalie Samsonov static void
1003fe939687SNatalie Samsonov mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
1004fe939687SNatalie Samsonov {
1005fe939687SNatalie Samsonov 	int i, j;
1006fe939687SNatalie Samsonov 	struct mrvl_txq *txq;
1007fe939687SNatalie Samsonov 
1008acab7d58STomasz Duszynski 	MRVL_LOG(INFO, "Flushing tx shadow queues");
1009fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1010fe939687SNatalie Samsonov 		txq = (struct mrvl_txq *)dev->data->tx_queues[i];
1011fe939687SNatalie Samsonov 
1012fe939687SNatalie Samsonov 		for (j = 0; j < RTE_MAX_LCORE; j++) {
1013fe939687SNatalie Samsonov 			struct mrvl_shadow_txq *sq;
1014fe939687SNatalie Samsonov 
1015fe939687SNatalie Samsonov 			if (!hifs[j])
1016fe939687SNatalie Samsonov 				continue;
1017fe939687SNatalie Samsonov 
1018fe939687SNatalie Samsonov 			sq = &txq->shadow_txqs[j];
1019fe939687SNatalie Samsonov 			mrvl_free_sent_buffers(txq->priv->ppio,
1020fe939687SNatalie Samsonov 				hifs[j], j, sq, txq->queue_id, 1);
1021fe939687SNatalie Samsonov 			while (sq->tail != sq->head) {
1022fe939687SNatalie Samsonov 				uint64_t addr = cookie_addr_high |
1023fe939687SNatalie Samsonov 					sq->ent[sq->tail].buff.cookie;
1024fe939687SNatalie Samsonov 				rte_pktmbuf_free(
1025fe939687SNatalie Samsonov 					(struct rte_mbuf *)addr);
1026fe939687SNatalie Samsonov 				sq->tail = (sq->tail + 1) &
1027fe939687SNatalie Samsonov 					    MRVL_PP2_TX_SHADOWQ_MASK;
1028fe939687SNatalie Samsonov 			}
1029fe939687SNatalie Samsonov 			memset(sq, 0, sizeof(*sq));
1030fe939687SNatalie Samsonov 		}
1031fe939687SNatalie Samsonov 	}
1032fe939687SNatalie Samsonov }
1033fe939687SNatalie Samsonov 
1034fe939687SNatalie Samsonov /**
1035fe939687SNatalie Samsonov  * Flush hardware bpool (buffer-pool).
1036fe939687SNatalie Samsonov  *
1037fe939687SNatalie Samsonov  * @param dev
1038fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1039fe939687SNatalie Samsonov  */
1040fe939687SNatalie Samsonov static void
1041fe939687SNatalie Samsonov mrvl_flush_bpool(struct rte_eth_dev *dev)
1042fe939687SNatalie Samsonov {
1043fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1044fe939687SNatalie Samsonov 	struct pp2_hif *hif;
1045fe939687SNatalie Samsonov 	uint32_t num;
1046fe939687SNatalie Samsonov 	int ret;
1047fe939687SNatalie Samsonov 	unsigned int core_id = rte_lcore_id();
1048fe939687SNatalie Samsonov 
1049fe939687SNatalie Samsonov 	if (core_id == LCORE_ID_ANY)
1050cb056611SStephen Hemminger 		core_id = rte_get_main_lcore();
1051fe939687SNatalie Samsonov 
1052fe939687SNatalie Samsonov 	hif = mrvl_get_hif(priv, core_id);
1053fe939687SNatalie Samsonov 
1054fe939687SNatalie Samsonov 	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
1055fe939687SNatalie Samsonov 	if (ret) {
1056acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to get bpool buffers number");
1057fe939687SNatalie Samsonov 		return;
1058fe939687SNatalie Samsonov 	}
1059fe939687SNatalie Samsonov 
1060fe939687SNatalie Samsonov 	while (num--) {
1061fe939687SNatalie Samsonov 		struct pp2_buff_inf inf;
1062fe939687SNatalie Samsonov 		uint64_t addr;
1063fe939687SNatalie Samsonov 
1064fe939687SNatalie Samsonov 		ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
1065fe939687SNatalie Samsonov 		if (ret)
1066fe939687SNatalie Samsonov 			break;
1067fe939687SNatalie Samsonov 
1068fe939687SNatalie Samsonov 		addr = cookie_addr_high | inf.cookie;
1069fe939687SNatalie Samsonov 		rte_pktmbuf_free((struct rte_mbuf *)addr);
1070fe939687SNatalie Samsonov 	}
1071fe939687SNatalie Samsonov }
1072fe939687SNatalie Samsonov 
1073fe939687SNatalie Samsonov /**
1074fe939687SNatalie Samsonov  * DPDK callback to stop the device.
1075fe939687SNatalie Samsonov  *
1076fe939687SNatalie Samsonov  * @param dev
1077fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1078fe939687SNatalie Samsonov  */
107962024eb8SIvan Ilchenko static int
1080fe939687SNatalie Samsonov mrvl_dev_stop(struct rte_eth_dev *dev)
1081fe939687SNatalie Samsonov {
108222805693SJie Hai 	uint16_t i;
108322805693SJie Hai 
108422805693SJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
108522805693SJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
108622805693SJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
108722805693SJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
108822805693SJie Hai 
108962024eb8SIvan Ilchenko 	return mrvl_dev_set_link_down(dev);
1090fe939687SNatalie Samsonov }
1091fe939687SNatalie Samsonov 
1092fe939687SNatalie Samsonov /**
1093fe939687SNatalie Samsonov  * DPDK callback to close the device.
1094fe939687SNatalie Samsonov  *
1095fe939687SNatalie Samsonov  * @param dev
1096fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1097fe939687SNatalie Samsonov  */
1098b142387bSThomas Monjalon static int
1099fe939687SNatalie Samsonov mrvl_dev_close(struct rte_eth_dev *dev)
1100fe939687SNatalie Samsonov {
1101fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1102fe939687SNatalie Samsonov 	size_t i;
1103fe939687SNatalie Samsonov 
110430410493SThomas Monjalon 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
110530410493SThomas Monjalon 		return 0;
110630410493SThomas Monjalon 
11075997b0a8SNatalie Samsonov 	mrvl_flush_rx_queues(dev);
11085997b0a8SNatalie Samsonov 	mrvl_flush_tx_shadow_queues(dev);
1109a1f83becSTomasz Duszynski 	mrvl_flow_deinit(dev);
1110cdb53f8dSTomasz Duszynski 	mrvl_mtr_deinit(dev);
11115997b0a8SNatalie Samsonov 
1112fe939687SNatalie Samsonov 	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
1113fe939687SNatalie Samsonov 		struct pp2_ppio_tc_params *tc_params =
1114fe939687SNatalie Samsonov 			&priv->ppio_params.inqs_params.tcs_params[i];
1115fe939687SNatalie Samsonov 
1116fe939687SNatalie Samsonov 		if (tc_params->inqs_params) {
1117fe939687SNatalie Samsonov 			rte_free(tc_params->inqs_params);
1118fe939687SNatalie Samsonov 			tc_params->inqs_params = NULL;
1119fe939687SNatalie Samsonov 		}
1120fe939687SNatalie Samsonov 	}
1121fe939687SNatalie Samsonov 
11225997b0a8SNatalie Samsonov 	if (priv->cls_tbl) {
11235997b0a8SNatalie Samsonov 		pp2_cls_tbl_deinit(priv->cls_tbl);
11245997b0a8SNatalie Samsonov 		priv->cls_tbl = NULL;
11255997b0a8SNatalie Samsonov 	}
11265997b0a8SNatalie Samsonov 
11275997b0a8SNatalie Samsonov 	if (priv->qos_tbl) {
11285997b0a8SNatalie Samsonov 		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
11295997b0a8SNatalie Samsonov 		priv->qos_tbl = NULL;
11305997b0a8SNatalie Samsonov 	}
11315997b0a8SNatalie Samsonov 
1132fe939687SNatalie Samsonov 	mrvl_flush_bpool(dev);
1133429c3944STomasz Duszynski 	mrvl_tm_deinit(dev);
11345997b0a8SNatalie Samsonov 
11355997b0a8SNatalie Samsonov 	if (priv->ppio) {
11365997b0a8SNatalie Samsonov 		pp2_ppio_deinit(priv->ppio);
11375997b0a8SNatalie Samsonov 		priv->ppio = NULL;
11385997b0a8SNatalie Samsonov 	}
11395997b0a8SNatalie Samsonov 
11405997b0a8SNatalie Samsonov 	/* policer must be released after ppio deinitialization */
1141e97d8874STomasz Duszynski 	if (priv->default_policer) {
1142e97d8874STomasz Duszynski 		pp2_cls_plcr_deinit(priv->default_policer);
1143e97d8874STomasz Duszynski 		priv->default_policer = NULL;
11445997b0a8SNatalie Samsonov 	}
1145696202caSLiron Himi 
1146696202caSLiron Himi 
1147696202caSLiron Himi 	if (priv->bpool) {
1148696202caSLiron Himi 		pp2_bpool_deinit(priv->bpool);
1149696202caSLiron Himi 		used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
1150696202caSLiron Himi 		priv->bpool = NULL;
1151696202caSLiron Himi 	}
1152696202caSLiron Himi 
1153696202caSLiron Himi 	mrvl_dev_num--;
1154696202caSLiron Himi 
1155696202caSLiron Himi 	if (mrvl_dev_num == 0) {
1156696202caSLiron Himi 		MRVL_LOG(INFO, "Perform MUSDK deinit");
1157696202caSLiron Himi 		mrvl_deinit_hifs();
1158696202caSLiron Himi 		mrvl_deinit_pp2();
1159696202caSLiron Himi 		rte_mvep_deinit(MVEP_MOD_T_PP2);
1160696202caSLiron Himi 	}
1161b142387bSThomas Monjalon 
1162b142387bSThomas Monjalon 	return 0;
1163fe939687SNatalie Samsonov }
1164fe939687SNatalie Samsonov 
1165fe939687SNatalie Samsonov /**
1166fe939687SNatalie Samsonov  * DPDK callback to retrieve physical link information.
1167fe939687SNatalie Samsonov  *
1168fe939687SNatalie Samsonov  * @param dev
1169fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1170fe939687SNatalie Samsonov  * @param wait_to_complete
1171fe939687SNatalie Samsonov  *   Wait for request completion (ignored).
1172fe939687SNatalie Samsonov  *
1173fe939687SNatalie Samsonov  * @return
1174fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1175fe939687SNatalie Samsonov  */
1176fe939687SNatalie Samsonov static int
1177fe939687SNatalie Samsonov mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
1178fe939687SNatalie Samsonov {
1179fe939687SNatalie Samsonov 	/*
1180fe939687SNatalie Samsonov 	 * TODO
1181fe939687SNatalie Samsonov 	 * once MUSDK provides necessary API use it here
1182fe939687SNatalie Samsonov 	 */
1183fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1184fe939687SNatalie Samsonov 	struct ethtool_cmd edata;
1185fe939687SNatalie Samsonov 	struct ifreq req;
1186fe939687SNatalie Samsonov 	int ret, fd, link_up;
1187fe939687SNatalie Samsonov 
1188fe939687SNatalie Samsonov 	if (!priv->ppio)
1189fe939687SNatalie Samsonov 		return -EPERM;
1190fe939687SNatalie Samsonov 
1191fe939687SNatalie Samsonov 	edata.cmd = ETHTOOL_GSET;
1192fe939687SNatalie Samsonov 
1193fe939687SNatalie Samsonov 	strcpy(req.ifr_name, dev->data->name);
1194fe939687SNatalie Samsonov 	req.ifr_data = (void *)&edata;
1195fe939687SNatalie Samsonov 
1196fe939687SNatalie Samsonov 	fd = socket(AF_INET, SOCK_DGRAM, 0);
1197fe939687SNatalie Samsonov 	if (fd == -1)
1198fe939687SNatalie Samsonov 		return -EFAULT;
1199fe939687SNatalie Samsonov 
1200fe939687SNatalie Samsonov 	ret = ioctl(fd, SIOCETHTOOL, &req);
1201fe939687SNatalie Samsonov 	if (ret == -1) {
1202fe939687SNatalie Samsonov 		close(fd);
1203fe939687SNatalie Samsonov 		return -EFAULT;
1204fe939687SNatalie Samsonov 	}
1205fe939687SNatalie Samsonov 
1206fe939687SNatalie Samsonov 	close(fd);
1207fe939687SNatalie Samsonov 
1208fe939687SNatalie Samsonov 	switch (ethtool_cmd_speed(&edata)) {
1209fe939687SNatalie Samsonov 	case SPEED_10:
1210295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
1211fe939687SNatalie Samsonov 		break;
1212fe939687SNatalie Samsonov 	case SPEED_100:
1213295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
1214fe939687SNatalie Samsonov 		break;
1215fe939687SNatalie Samsonov 	case SPEED_1000:
1216295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
1217fe939687SNatalie Samsonov 		break;
1218528ec03aSMeir Levi 	case SPEED_2500:
1219295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
1220528ec03aSMeir Levi 		break;
1221fe939687SNatalie Samsonov 	case SPEED_10000:
1222295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
1223fe939687SNatalie Samsonov 		break;
1224fe939687SNatalie Samsonov 	default:
1225295968d1SFerruh Yigit 		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1226fe939687SNatalie Samsonov 	}
1227fe939687SNatalie Samsonov 
1228295968d1SFerruh Yigit 	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
1229295968d1SFerruh Yigit 							 RTE_ETH_LINK_HALF_DUPLEX;
1230295968d1SFerruh Yigit 	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
1231295968d1SFerruh Yigit 							   RTE_ETH_LINK_FIXED;
1232fe939687SNatalie Samsonov 	pp2_ppio_get_link_state(priv->ppio, &link_up);
1233295968d1SFerruh Yigit 	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1234fe939687SNatalie Samsonov 
1235fe939687SNatalie Samsonov 	return 0;
1236fe939687SNatalie Samsonov }
1237fe939687SNatalie Samsonov 
1238fe939687SNatalie Samsonov /**
1239fe939687SNatalie Samsonov  * DPDK callback to enable promiscuous mode.
1240fe939687SNatalie Samsonov  *
1241fe939687SNatalie Samsonov  * @param dev
1242fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
12439039c812SAndrew Rybchenko  *
12449039c812SAndrew Rybchenko  * @return
12459039c812SAndrew Rybchenko  *   0 on success, negative error value otherwise.
1246fe939687SNatalie Samsonov  */
12479039c812SAndrew Rybchenko static int
1248fe939687SNatalie Samsonov mrvl_promiscuous_enable(struct rte_eth_dev *dev)
1249fe939687SNatalie Samsonov {
1250fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1251fe939687SNatalie Samsonov 	int ret;
1252fe939687SNatalie Samsonov 
1253fe939687SNatalie Samsonov 	if (priv->isolated)
1254d2c37f73SYuri Chipchev 		return -ENOTSUP;
1255d2c37f73SYuri Chipchev 
1256d2c37f73SYuri Chipchev 	if (!priv->ppio)
12579039c812SAndrew Rybchenko 		return 0;
1258fe939687SNatalie Samsonov 
1259fe939687SNatalie Samsonov 	ret = pp2_ppio_set_promisc(priv->ppio, 1);
12609039c812SAndrew Rybchenko 	if (ret) {
1261acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to enable promiscuous mode");
12629039c812SAndrew Rybchenko 		return -EAGAIN;
12639039c812SAndrew Rybchenko 	}
12649039c812SAndrew Rybchenko 
12659039c812SAndrew Rybchenko 	return 0;
1266fe939687SNatalie Samsonov }
1267fe939687SNatalie Samsonov 
1268fe939687SNatalie Samsonov /**
1269fe939687SNatalie Samsonov  * DPDK callback to enable allmulti mode.
1270fe939687SNatalie Samsonov  *
1271fe939687SNatalie Samsonov  * @param dev
1272fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1273ca041cd4SIvan Ilchenko  *
1274ca041cd4SIvan Ilchenko  * @return
1275ca041cd4SIvan Ilchenko  *   0 on success, negative error value otherwise.
1276fe939687SNatalie Samsonov  */
1277ca041cd4SIvan Ilchenko static int
1278fe939687SNatalie Samsonov mrvl_allmulticast_enable(struct rte_eth_dev *dev)
1279fe939687SNatalie Samsonov {
1280fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1281fe939687SNatalie Samsonov 	int ret;
1282fe939687SNatalie Samsonov 
1283fe939687SNatalie Samsonov 	if (priv->isolated)
1284d2c37f73SYuri Chipchev 		return -ENOTSUP;
1285d2c37f73SYuri Chipchev 
1286d2c37f73SYuri Chipchev 	if (!priv->ppio)
1287ca041cd4SIvan Ilchenko 		return 0;
1288fe939687SNatalie Samsonov 
1289fe939687SNatalie Samsonov 	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
1290ca041cd4SIvan Ilchenko 	if (ret) {
1291acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed enable all-multicast mode");
1292ca041cd4SIvan Ilchenko 		return -EAGAIN;
1293ca041cd4SIvan Ilchenko 	}
1294ca041cd4SIvan Ilchenko 
1295ca041cd4SIvan Ilchenko 	return 0;
1296fe939687SNatalie Samsonov }
1297fe939687SNatalie Samsonov 
1298fe939687SNatalie Samsonov /**
1299fe939687SNatalie Samsonov  * DPDK callback to disable promiscuous mode.
1300fe939687SNatalie Samsonov  *
1301fe939687SNatalie Samsonov  * @param dev
1302fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
13039039c812SAndrew Rybchenko  *
13049039c812SAndrew Rybchenko  * @return
13059039c812SAndrew Rybchenko  *   0 on success, negative error value otherwise.
1306fe939687SNatalie Samsonov  */
13079039c812SAndrew Rybchenko static int
1308fe939687SNatalie Samsonov mrvl_promiscuous_disable(struct rte_eth_dev *dev)
1309fe939687SNatalie Samsonov {
1310fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1311fe939687SNatalie Samsonov 	int ret;
1312fe939687SNatalie Samsonov 
1313d2c37f73SYuri Chipchev 	if (priv->isolated)
1314d2c37f73SYuri Chipchev 		return -ENOTSUP;
1315d2c37f73SYuri Chipchev 
1316fe939687SNatalie Samsonov 	if (!priv->ppio)
13179039c812SAndrew Rybchenko 		return 0;
1318fe939687SNatalie Samsonov 
1319fe939687SNatalie Samsonov 	ret = pp2_ppio_set_promisc(priv->ppio, 0);
13209039c812SAndrew Rybchenko 	if (ret) {
1321acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to disable promiscuous mode");
13229039c812SAndrew Rybchenko 		return -EAGAIN;
13239039c812SAndrew Rybchenko 	}
13249039c812SAndrew Rybchenko 
13259039c812SAndrew Rybchenko 	return 0;
1326fe939687SNatalie Samsonov }
1327fe939687SNatalie Samsonov 
1328fe939687SNatalie Samsonov /**
1329fe939687SNatalie Samsonov  * DPDK callback to disable allmulticast mode.
1330fe939687SNatalie Samsonov  *
1331fe939687SNatalie Samsonov  * @param dev
1332fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1333ca041cd4SIvan Ilchenko  *
1334ca041cd4SIvan Ilchenko  * @return
1335ca041cd4SIvan Ilchenko  *   0 on success, negative error value otherwise.
1336fe939687SNatalie Samsonov  */
1337ca041cd4SIvan Ilchenko static int
1338fe939687SNatalie Samsonov mrvl_allmulticast_disable(struct rte_eth_dev *dev)
1339fe939687SNatalie Samsonov {
1340fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1341fe939687SNatalie Samsonov 	int ret;
1342fe939687SNatalie Samsonov 
1343d2c37f73SYuri Chipchev 	if (priv->isolated)
1344d2c37f73SYuri Chipchev 		return -ENOTSUP;
1345d2c37f73SYuri Chipchev 
1346fe939687SNatalie Samsonov 	if (!priv->ppio)
1347ca041cd4SIvan Ilchenko 		return 0;
1348fe939687SNatalie Samsonov 
1349fe939687SNatalie Samsonov 	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
1350ca041cd4SIvan Ilchenko 	if (ret) {
1351acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to disable all-multicast mode");
1352ca041cd4SIvan Ilchenko 		return -EAGAIN;
1353ca041cd4SIvan Ilchenko 	}
1354ca041cd4SIvan Ilchenko 
1355ca041cd4SIvan Ilchenko 	return 0;
1356fe939687SNatalie Samsonov }
1357fe939687SNatalie Samsonov 
1358fe939687SNatalie Samsonov /**
1359fe939687SNatalie Samsonov  * DPDK callback to remove a MAC address.
1360fe939687SNatalie Samsonov  *
1361fe939687SNatalie Samsonov  * @param dev
1362fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1363fe939687SNatalie Samsonov  * @param index
1364fe939687SNatalie Samsonov  *   MAC address index.
1365fe939687SNatalie Samsonov  */
1366fe939687SNatalie Samsonov static void
1367fe939687SNatalie Samsonov mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1368fe939687SNatalie Samsonov {
1369fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
137035b2d13fSOlivier Matz 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1371fe939687SNatalie Samsonov 	int ret;
1372fe939687SNatalie Samsonov 
1373d2c37f73SYuri Chipchev 	if (priv->isolated)
1374fe939687SNatalie Samsonov 		return;
1375fe939687SNatalie Samsonov 
1376d2c37f73SYuri Chipchev 	if (!priv->ppio)
1377fe939687SNatalie Samsonov 		return;
1378fe939687SNatalie Samsonov 
1379fe939687SNatalie Samsonov 	ret = pp2_ppio_remove_mac_addr(priv->ppio,
1380fe939687SNatalie Samsonov 				       dev->data->mac_addrs[index].addr_bytes);
1381fe939687SNatalie Samsonov 	if (ret) {
1382538da7a1SOlivier Matz 		rte_ether_format_addr(buf, sizeof(buf),
1383fe939687SNatalie Samsonov 				  &dev->data->mac_addrs[index]);
1384acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to remove mac %s", buf);
1385fe939687SNatalie Samsonov 	}
1386fe939687SNatalie Samsonov }
1387fe939687SNatalie Samsonov 
1388fe939687SNatalie Samsonov /**
1389fe939687SNatalie Samsonov  * DPDK callback to add a MAC address.
1390fe939687SNatalie Samsonov  *
1391fe939687SNatalie Samsonov  * @param dev
1392fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1393fe939687SNatalie Samsonov  * @param mac_addr
1394fe939687SNatalie Samsonov  *   MAC address to register.
1395fe939687SNatalie Samsonov  * @param index
1396fe939687SNatalie Samsonov  *   MAC address index.
1397fe939687SNatalie Samsonov  * @param vmdq
1398fe939687SNatalie Samsonov  *   VMDq pool index to associate address with (unused).
1399fe939687SNatalie Samsonov  *
1400fe939687SNatalie Samsonov  * @return
1401fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1402fe939687SNatalie Samsonov  */
1403fe939687SNatalie Samsonov static int
14046d13ea8eSOlivier Matz mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1405fe939687SNatalie Samsonov 		  uint32_t index, uint32_t vmdq __rte_unused)
1406fe939687SNatalie Samsonov {
1407fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
140835b2d13fSOlivier Matz 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
1409fe939687SNatalie Samsonov 	int ret;
1410fe939687SNatalie Samsonov 
1411fe939687SNatalie Samsonov 	if (priv->isolated)
1412fe939687SNatalie Samsonov 		return -ENOTSUP;
1413fe939687SNatalie Samsonov 
1414d2c37f73SYuri Chipchev 	if (!priv->ppio)
1415d2c37f73SYuri Chipchev 		return 0;
1416d2c37f73SYuri Chipchev 
1417fe939687SNatalie Samsonov 	if (index == 0)
1418fe939687SNatalie Samsonov 		/* For setting index 0, mrvl_mac_addr_set() should be used.*/
1419fe939687SNatalie Samsonov 		return -1;
1420fe939687SNatalie Samsonov 
1421fe939687SNatalie Samsonov 	/*
1422fe939687SNatalie Samsonov 	 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
1423fe939687SNatalie Samsonov 	 * parameter uc_filter_max. Maximum number of mc addresses is then
1424fe939687SNatalie Samsonov 	 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
1425fe939687SNatalie Samsonov 	 * 21 respectively.
1426fe939687SNatalie Samsonov 	 *
1427fe939687SNatalie Samsonov 	 * If more than uc_filter_max uc addresses were added to filter list
1428fe939687SNatalie Samsonov 	 * then NIC will switch to promiscuous mode automatically.
1429fe939687SNatalie Samsonov 	 *
1430fe939687SNatalie Samsonov 	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses
1431fe939687SNatalie Samsonov 	 * were added to filter list then NIC will switch to all-multicast mode
1432fe939687SNatalie Samsonov 	 * automatically.
1433fe939687SNatalie Samsonov 	 */
1434fe939687SNatalie Samsonov 	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1435fe939687SNatalie Samsonov 	if (ret) {
1436538da7a1SOlivier Matz 		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1437acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to add mac %s", buf);
1438fe939687SNatalie Samsonov 		return -1;
1439fe939687SNatalie Samsonov 	}
1440fe939687SNatalie Samsonov 
1441fe939687SNatalie Samsonov 	return 0;
1442fe939687SNatalie Samsonov }
1443fe939687SNatalie Samsonov 
1444fe939687SNatalie Samsonov /**
1445fe939687SNatalie Samsonov  * DPDK callback to set the primary MAC address.
1446fe939687SNatalie Samsonov  *
1447fe939687SNatalie Samsonov  * @param dev
1448fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1449fe939687SNatalie Samsonov  * @param mac_addr
1450fe939687SNatalie Samsonov  *   MAC address to register.
1451caccf8b3SOlivier Matz  *
1452caccf8b3SOlivier Matz  * @return
1453caccf8b3SOlivier Matz  *   0 on success, negative error value otherwise.
1454fe939687SNatalie Samsonov  */
1455caccf8b3SOlivier Matz static int
14566d13ea8eSOlivier Matz mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1457fe939687SNatalie Samsonov {
1458fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1459fe939687SNatalie Samsonov 	int ret;
1460fe939687SNatalie Samsonov 
1461fe939687SNatalie Samsonov 	if (priv->isolated)
1462caccf8b3SOlivier Matz 		return -ENOTSUP;
1463fe939687SNatalie Samsonov 
1464d2c37f73SYuri Chipchev 	if (!priv->ppio)
1465d2c37f73SYuri Chipchev 		return 0;
1466d2c37f73SYuri Chipchev 
1467fe939687SNatalie Samsonov 	ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1468fe939687SNatalie Samsonov 	if (ret) {
146935b2d13fSOlivier Matz 		char buf[RTE_ETHER_ADDR_FMT_SIZE];
1470538da7a1SOlivier Matz 		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1471acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1472fe939687SNatalie Samsonov 	}
1473caccf8b3SOlivier Matz 
1474caccf8b3SOlivier Matz 	return ret;
1475fe939687SNatalie Samsonov }
1476fe939687SNatalie Samsonov 
1477fe939687SNatalie Samsonov /**
1478fe939687SNatalie Samsonov  * DPDK callback to get device statistics.
1479fe939687SNatalie Samsonov  *
1480fe939687SNatalie Samsonov  * @param dev
1481fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1482fe939687SNatalie Samsonov  * @param stats
1483fe939687SNatalie Samsonov  *   Stats structure output buffer.
1484fe939687SNatalie Samsonov  *
1485fe939687SNatalie Samsonov  * @return
1486fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1487fe939687SNatalie Samsonov  */
1488fe939687SNatalie Samsonov static int
1489fe939687SNatalie Samsonov mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1490fe939687SNatalie Samsonov {
1491fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1492fe939687SNatalie Samsonov 	struct pp2_ppio_statistics ppio_stats;
1493fe939687SNatalie Samsonov 	uint64_t drop_mac = 0;
1494fe939687SNatalie Samsonov 	unsigned int i, idx, ret;
1495fe939687SNatalie Samsonov 
1496fe939687SNatalie Samsonov 	if (!priv->ppio)
1497fe939687SNatalie Samsonov 		return -EPERM;
1498fe939687SNatalie Samsonov 
1499fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1500fe939687SNatalie Samsonov 		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1501fe939687SNatalie Samsonov 		struct pp2_ppio_inq_statistics rx_stats;
1502fe939687SNatalie Samsonov 
1503fe939687SNatalie Samsonov 		if (!rxq)
1504fe939687SNatalie Samsonov 			continue;
1505fe939687SNatalie Samsonov 
1506fe939687SNatalie Samsonov 		idx = rxq->queue_id;
1507fe939687SNatalie Samsonov 		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1508acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
1509acab7d58STomasz Duszynski 				"rx queue %d stats out of range (0 - %d)",
1510fe939687SNatalie Samsonov 				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1511fe939687SNatalie Samsonov 			continue;
1512fe939687SNatalie Samsonov 		}
1513fe939687SNatalie Samsonov 
1514fe939687SNatalie Samsonov 		ret = pp2_ppio_inq_get_statistics(priv->ppio,
1515fe939687SNatalie Samsonov 						  priv->rxq_map[idx].tc,
1516fe939687SNatalie Samsonov 						  priv->rxq_map[idx].inq,
1517fe939687SNatalie Samsonov 						  &rx_stats, 0);
1518fe939687SNatalie Samsonov 		if (unlikely(ret)) {
1519acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
1520acab7d58STomasz Duszynski 				"Failed to update rx queue %d stats", idx);
1521fe939687SNatalie Samsonov 			break;
1522fe939687SNatalie Samsonov 		}
1523fe939687SNatalie Samsonov 
1524fe939687SNatalie Samsonov 		stats->q_ibytes[idx] = rxq->bytes_recv;
1525fe939687SNatalie Samsonov 		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1526fe939687SNatalie Samsonov 		stats->q_errors[idx] = rx_stats.drop_early +
1527fe939687SNatalie Samsonov 				       rx_stats.drop_fullq +
1528fe939687SNatalie Samsonov 				       rx_stats.drop_bm +
1529fe939687SNatalie Samsonov 				       rxq->drop_mac;
1530fe939687SNatalie Samsonov 		stats->ibytes += rxq->bytes_recv;
1531fe939687SNatalie Samsonov 		drop_mac += rxq->drop_mac;
1532fe939687SNatalie Samsonov 	}
1533fe939687SNatalie Samsonov 
1534fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1535fe939687SNatalie Samsonov 		struct mrvl_txq *txq = dev->data->tx_queues[i];
1536fe939687SNatalie Samsonov 		struct pp2_ppio_outq_statistics tx_stats;
1537fe939687SNatalie Samsonov 
1538fe939687SNatalie Samsonov 		if (!txq)
1539fe939687SNatalie Samsonov 			continue;
1540fe939687SNatalie Samsonov 
1541fe939687SNatalie Samsonov 		idx = txq->queue_id;
1542fe939687SNatalie Samsonov 		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1543acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
1544acab7d58STomasz Duszynski 				"tx queue %d stats out of range (0 - %d)",
1545fe939687SNatalie Samsonov 				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1546fe939687SNatalie Samsonov 		}
1547fe939687SNatalie Samsonov 
1548fe939687SNatalie Samsonov 		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1549fe939687SNatalie Samsonov 						   &tx_stats, 0);
1550fe939687SNatalie Samsonov 		if (unlikely(ret)) {
1551acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
1552acab7d58STomasz Duszynski 				"Failed to update tx queue %d stats", idx);
1553fe939687SNatalie Samsonov 			break;
1554fe939687SNatalie Samsonov 		}
1555fe939687SNatalie Samsonov 
1556fe939687SNatalie Samsonov 		stats->q_opackets[idx] = tx_stats.deq_desc;
1557fe939687SNatalie Samsonov 		stats->q_obytes[idx] = txq->bytes_sent;
1558fe939687SNatalie Samsonov 		stats->obytes += txq->bytes_sent;
1559fe939687SNatalie Samsonov 	}
1560fe939687SNatalie Samsonov 
1561fe939687SNatalie Samsonov 	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1562fe939687SNatalie Samsonov 	if (unlikely(ret)) {
1563acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to update port statistics");
1564fe939687SNatalie Samsonov 		return ret;
1565fe939687SNatalie Samsonov 	}
1566fe939687SNatalie Samsonov 
1567fe939687SNatalie Samsonov 	stats->ipackets += ppio_stats.rx_packets - drop_mac;
1568fe939687SNatalie Samsonov 	stats->opackets += ppio_stats.tx_packets;
1569fe939687SNatalie Samsonov 	stats->imissed += ppio_stats.rx_fullq_dropped +
1570fe939687SNatalie Samsonov 			  ppio_stats.rx_bm_dropped +
1571fe939687SNatalie Samsonov 			  ppio_stats.rx_early_dropped +
1572fe939687SNatalie Samsonov 			  ppio_stats.rx_fifo_dropped +
1573fe939687SNatalie Samsonov 			  ppio_stats.rx_cls_dropped;
1574fe939687SNatalie Samsonov 	stats->ierrors = drop_mac;
1575fe939687SNatalie Samsonov 
1576fe939687SNatalie Samsonov 	return 0;
1577fe939687SNatalie Samsonov }
1578fe939687SNatalie Samsonov 
1579fe939687SNatalie Samsonov /**
1580fe939687SNatalie Samsonov  * DPDK callback to clear device statistics.
1581fe939687SNatalie Samsonov  *
1582fe939687SNatalie Samsonov  * @param dev
1583fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
15849970a9adSIgor Romanov  *
15859970a9adSIgor Romanov  * @return
15869970a9adSIgor Romanov  *   0 on success, negative error value otherwise.
1587fe939687SNatalie Samsonov  */
15889970a9adSIgor Romanov static int
1589fe939687SNatalie Samsonov mrvl_stats_reset(struct rte_eth_dev *dev)
1590fe939687SNatalie Samsonov {
1591fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1592fe939687SNatalie Samsonov 	int i;
1593fe939687SNatalie Samsonov 
1594fe939687SNatalie Samsonov 	if (!priv->ppio)
15959970a9adSIgor Romanov 		return 0;
1596fe939687SNatalie Samsonov 
1597fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1598fe939687SNatalie Samsonov 		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1599fe939687SNatalie Samsonov 
1600fe939687SNatalie Samsonov 		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1601fe939687SNatalie Samsonov 					    priv->rxq_map[i].inq, NULL, 1);
1602fe939687SNatalie Samsonov 		rxq->bytes_recv = 0;
1603fe939687SNatalie Samsonov 		rxq->drop_mac = 0;
1604fe939687SNatalie Samsonov 	}
1605fe939687SNatalie Samsonov 
1606fe939687SNatalie Samsonov 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1607fe939687SNatalie Samsonov 		struct mrvl_txq *txq = dev->data->tx_queues[i];
1608fe939687SNatalie Samsonov 
1609fe939687SNatalie Samsonov 		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1610fe939687SNatalie Samsonov 		txq->bytes_sent = 0;
1611fe939687SNatalie Samsonov 	}
1612fe939687SNatalie Samsonov 
16139970a9adSIgor Romanov 	return pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1614fe939687SNatalie Samsonov }
1615fe939687SNatalie Samsonov 
1616fe939687SNatalie Samsonov /**
1617fe939687SNatalie Samsonov  * DPDK callback to get extended statistics.
1618fe939687SNatalie Samsonov  *
1619fe939687SNatalie Samsonov  * @param dev
1620fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1621fe939687SNatalie Samsonov  * @param stats
1622fe939687SNatalie Samsonov  *   Pointer to xstats table.
1623fe939687SNatalie Samsonov  * @param n
1624fe939687SNatalie Samsonov  *   Number of entries in xstats table.
1625fe939687SNatalie Samsonov  * @return
1626fe939687SNatalie Samsonov  *   Negative value on error, number of read xstats otherwise.
1627fe939687SNatalie Samsonov  */
1628fe939687SNatalie Samsonov static int
1629fe939687SNatalie Samsonov mrvl_xstats_get(struct rte_eth_dev *dev,
1630fe939687SNatalie Samsonov 		struct rte_eth_xstat *stats, unsigned int n)
1631fe939687SNatalie Samsonov {
1632fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1633fe939687SNatalie Samsonov 	struct pp2_ppio_statistics ppio_stats;
1634d853d24bSChengwen Feng 	unsigned int i, count;
1635fe939687SNatalie Samsonov 
1636d853d24bSChengwen Feng 	count = RTE_DIM(mrvl_xstats_tbl);
1637d853d24bSChengwen Feng 	if (n < count)
1638d853d24bSChengwen Feng 		return count;
1639fe939687SNatalie Samsonov 
1640fe939687SNatalie Samsonov 	pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1641d853d24bSChengwen Feng 	for (i = 0; i < count; i++) {
1642fe939687SNatalie Samsonov 		uint64_t val;
1643fe939687SNatalie Samsonov 
1644fe939687SNatalie Samsonov 		if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1645fe939687SNatalie Samsonov 			val = *(uint32_t *)((uint8_t *)&ppio_stats +
1646fe939687SNatalie Samsonov 					    mrvl_xstats_tbl[i].offset);
1647fe939687SNatalie Samsonov 		else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1648fe939687SNatalie Samsonov 			val = *(uint64_t *)((uint8_t *)&ppio_stats +
1649fe939687SNatalie Samsonov 					    mrvl_xstats_tbl[i].offset);
1650fe939687SNatalie Samsonov 		else
1651fe939687SNatalie Samsonov 			return -EINVAL;
1652fe939687SNatalie Samsonov 
1653fe939687SNatalie Samsonov 		stats[i].id = i;
1654fe939687SNatalie Samsonov 		stats[i].value = val;
1655fe939687SNatalie Samsonov 	}
1656fe939687SNatalie Samsonov 
1657d853d24bSChengwen Feng 	return count;
1658fe939687SNatalie Samsonov }
1659fe939687SNatalie Samsonov 
/**
 * DPDK callback to reset extended statistics.
 *
 * Extended statistics are read from the same HW port counters as the basic
 * statistics, so clearing them is delegated to mrvl_stats_reset().
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_xstats_reset(struct rte_eth_dev *dev)
{
	return mrvl_stats_reset(dev);
}
1674fe939687SNatalie Samsonov 
1675fe939687SNatalie Samsonov /**
1676fe939687SNatalie Samsonov  * DPDK callback to get extended statistics names.
1677fe939687SNatalie Samsonov  *
1678fe939687SNatalie Samsonov  * @param dev (unused)
1679fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1680fe939687SNatalie Samsonov  * @param xstats_names
1681fe939687SNatalie Samsonov  *   Pointer to xstats names table.
1682fe939687SNatalie Samsonov  * @param size
1683fe939687SNatalie Samsonov  *   Size of the xstats names table.
1684fe939687SNatalie Samsonov  * @return
1685fe939687SNatalie Samsonov  *   Number of read names.
1686fe939687SNatalie Samsonov  */
1687fe939687SNatalie Samsonov static int
1688fe939687SNatalie Samsonov mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1689fe939687SNatalie Samsonov 		      struct rte_eth_xstat_name *xstats_names,
1690fe939687SNatalie Samsonov 		      unsigned int size)
1691fe939687SNatalie Samsonov {
1692fe939687SNatalie Samsonov 	unsigned int i;
1693fe939687SNatalie Samsonov 
1694fe939687SNatalie Samsonov 	if (!xstats_names)
1695fe939687SNatalie Samsonov 		return RTE_DIM(mrvl_xstats_tbl);
1696fe939687SNatalie Samsonov 
1697fe939687SNatalie Samsonov 	for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
16986723c0fcSBruce Richardson 		strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name,
16996723c0fcSBruce Richardson 			RTE_ETH_XSTATS_NAME_SIZE);
1700fe939687SNatalie Samsonov 
1701fe939687SNatalie Samsonov 	return size;
1702fe939687SNatalie Samsonov }
1703fe939687SNatalie Samsonov 
1704fe939687SNatalie Samsonov /**
1705fe939687SNatalie Samsonov  * DPDK callback to get information about the device.
1706fe939687SNatalie Samsonov  *
1707fe939687SNatalie Samsonov  * @param dev
1708fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure (unused).
1709fe939687SNatalie Samsonov  * @param info
1710fe939687SNatalie Samsonov  *   Info structure output buffer.
1711fe939687SNatalie Samsonov  */
1712bdad90d1SIvan Ilchenko static int
1713949cdeddSLiron Himi mrvl_dev_infos_get(struct rte_eth_dev *dev,
1714fe939687SNatalie Samsonov 		   struct rte_eth_dev_info *info)
1715fe939687SNatalie Samsonov {
1716949cdeddSLiron Himi 	struct mrvl_priv *priv = dev->data->dev_private;
1717949cdeddSLiron Himi 
17182fe6f1b7SDmitry Kozlyuk 	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
17192fe6f1b7SDmitry Kozlyuk 
1720295968d1SFerruh Yigit 	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
1721295968d1SFerruh Yigit 			   RTE_ETH_LINK_SPEED_100M |
1722295968d1SFerruh Yigit 			   RTE_ETH_LINK_SPEED_1G |
1723295968d1SFerruh Yigit 			   RTE_ETH_LINK_SPEED_2_5G |
1724295968d1SFerruh Yigit 			   RTE_ETH_LINK_SPEED_10G;
1725fe939687SNatalie Samsonov 
1726fe939687SNatalie Samsonov 	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1727fe939687SNatalie Samsonov 	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1728fe939687SNatalie Samsonov 	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1729fe939687SNatalie Samsonov 
1730fe939687SNatalie Samsonov 	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1731fe939687SNatalie Samsonov 	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1732fe939687SNatalie Samsonov 	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1733fe939687SNatalie Samsonov 
1734fe939687SNatalie Samsonov 	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1735fe939687SNatalie Samsonov 	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1736fe939687SNatalie Samsonov 	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1737fe939687SNatalie Samsonov 
1738fe939687SNatalie Samsonov 	info->rx_offload_capa = MRVL_RX_OFFLOADS;
1739fe939687SNatalie Samsonov 	info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1740fe939687SNatalie Samsonov 
1741fe939687SNatalie Samsonov 	info->tx_offload_capa = MRVL_TX_OFFLOADS;
1742fe939687SNatalie Samsonov 	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1743fe939687SNatalie Samsonov 
1744295968d1SFerruh Yigit 	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
1745295968d1SFerruh Yigit 				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
1746295968d1SFerruh Yigit 				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1747fe939687SNatalie Samsonov 
1748fe939687SNatalie Samsonov 	/* By default packets are dropped if no descriptors are available */
1749fe939687SNatalie Samsonov 	info->default_rxconf.rx_drop_en = 1;
1750fe939687SNatalie Samsonov 
1751fe939687SNatalie Samsonov 	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1752949cdeddSLiron Himi 	info->max_mtu = priv->max_mtu;
1753bdad90d1SIvan Ilchenko 
1754bdad90d1SIvan Ilchenko 	return 0;
1755fe939687SNatalie Samsonov }
1756fe939687SNatalie Samsonov 
1757fe939687SNatalie Samsonov /**
1758fe939687SNatalie Samsonov  * Return supported packet types.
1759fe939687SNatalie Samsonov  *
1760fe939687SNatalie Samsonov  * @param dev
1761fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure (unused).
1762fe939687SNatalie Samsonov  *
1763fe939687SNatalie Samsonov  * @return
1764fe939687SNatalie Samsonov  *   Const pointer to the table with supported packet types.
1765fe939687SNatalie Samsonov  */
1766fe939687SNatalie Samsonov static const uint32_t *
1767ba6a168aSSivaramakrishnan Venkat mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
1768ba6a168aSSivaramakrishnan Venkat 			      size_t *no_of_elements)
1769fe939687SNatalie Samsonov {
1770fe939687SNatalie Samsonov 	static const uint32_t ptypes[] = {
1771fe939687SNatalie Samsonov 		RTE_PTYPE_L2_ETHER,
17724943a290SNatalie Samsonov 		RTE_PTYPE_L2_ETHER_VLAN,
17734943a290SNatalie Samsonov 		RTE_PTYPE_L2_ETHER_QINQ,
1774fe939687SNatalie Samsonov 		RTE_PTYPE_L3_IPV4,
1775fe939687SNatalie Samsonov 		RTE_PTYPE_L3_IPV4_EXT,
1776fe939687SNatalie Samsonov 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1777fe939687SNatalie Samsonov 		RTE_PTYPE_L3_IPV6,
1778fe939687SNatalie Samsonov 		RTE_PTYPE_L3_IPV6_EXT,
1779fe939687SNatalie Samsonov 		RTE_PTYPE_L2_ETHER_ARP,
1780fe939687SNatalie Samsonov 		RTE_PTYPE_L4_TCP,
17812e3ddb56SSivaramakrishnan Venkat 		RTE_PTYPE_L4_UDP,
1782fe939687SNatalie Samsonov 	};
1783fe939687SNatalie Samsonov 
1784ba6a168aSSivaramakrishnan Venkat 	*no_of_elements = RTE_DIM(ptypes);
1785fe939687SNatalie Samsonov 	return ptypes;
1786fe939687SNatalie Samsonov }
1787fe939687SNatalie Samsonov 
1788fe939687SNatalie Samsonov /**
1789fe939687SNatalie Samsonov  * DPDK callback to get information about specific receive queue.
1790fe939687SNatalie Samsonov  *
1791fe939687SNatalie Samsonov  * @param dev
1792fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1793fe939687SNatalie Samsonov  * @param rx_queue_id
1794fe939687SNatalie Samsonov  *   Receive queue index.
1795fe939687SNatalie Samsonov  * @param qinfo
1796fe939687SNatalie Samsonov  *   Receive queue information structure.
1797fe939687SNatalie Samsonov  */
1798fe939687SNatalie Samsonov static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1799fe939687SNatalie Samsonov 			      struct rte_eth_rxq_info *qinfo)
1800fe939687SNatalie Samsonov {
1801fe939687SNatalie Samsonov 	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1802fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1803fe939687SNatalie Samsonov 	int inq = priv->rxq_map[rx_queue_id].inq;
1804fe939687SNatalie Samsonov 	int tc = priv->rxq_map[rx_queue_id].tc;
1805fe939687SNatalie Samsonov 	struct pp2_ppio_tc_params *tc_params =
1806fe939687SNatalie Samsonov 		&priv->ppio_params.inqs_params.tcs_params[tc];
1807fe939687SNatalie Samsonov 
1808fe939687SNatalie Samsonov 	qinfo->mp = q->mp;
1809fe939687SNatalie Samsonov 	qinfo->nb_desc = tc_params->inqs_params[inq].size;
1810fe939687SNatalie Samsonov }
1811fe939687SNatalie Samsonov 
1812fe939687SNatalie Samsonov /**
1813fe939687SNatalie Samsonov  * DPDK callback to get information about specific transmit queue.
1814fe939687SNatalie Samsonov  *
1815fe939687SNatalie Samsonov  * @param dev
1816fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1817fe939687SNatalie Samsonov  * @param tx_queue_id
1818fe939687SNatalie Samsonov  *   Transmit queue index.
1819fe939687SNatalie Samsonov  * @param qinfo
1820fe939687SNatalie Samsonov  *   Transmit queue information structure.
1821fe939687SNatalie Samsonov  */
1822fe939687SNatalie Samsonov static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1823fe939687SNatalie Samsonov 			      struct rte_eth_txq_info *qinfo)
1824fe939687SNatalie Samsonov {
1825fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1826fe939687SNatalie Samsonov 	struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1827fe939687SNatalie Samsonov 
1828fe939687SNatalie Samsonov 	qinfo->nb_desc =
1829fe939687SNatalie Samsonov 		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1830fe939687SNatalie Samsonov 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1831fe939687SNatalie Samsonov }
1832fe939687SNatalie Samsonov 
1833fe939687SNatalie Samsonov /**
1834fe939687SNatalie Samsonov  * DPDK callback to Configure a VLAN filter.
1835fe939687SNatalie Samsonov  *
1836fe939687SNatalie Samsonov  * @param dev
1837fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1838fe939687SNatalie Samsonov  * @param vlan_id
1839fe939687SNatalie Samsonov  *   VLAN ID to filter.
1840fe939687SNatalie Samsonov  * @param on
1841fe939687SNatalie Samsonov  *   Toggle filter.
1842fe939687SNatalie Samsonov  *
1843fe939687SNatalie Samsonov  * @return
1844fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1845fe939687SNatalie Samsonov  */
1846fe939687SNatalie Samsonov static int
1847fe939687SNatalie Samsonov mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1848fe939687SNatalie Samsonov {
1849fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1850fe939687SNatalie Samsonov 
1851fe939687SNatalie Samsonov 	if (priv->isolated)
1852fe939687SNatalie Samsonov 		return -ENOTSUP;
1853fe939687SNatalie Samsonov 
1854d2c37f73SYuri Chipchev 	if (!priv->ppio)
1855d2c37f73SYuri Chipchev 		return 0;
1856d2c37f73SYuri Chipchev 
1857fe939687SNatalie Samsonov 	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1858fe939687SNatalie Samsonov 		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1859fe939687SNatalie Samsonov }
1860fe939687SNatalie Samsonov 
1861fe939687SNatalie Samsonov /**
1862ff0b8b10SYuri Chipchev  * DPDK callback to Configure VLAN offload.
1863ff0b8b10SYuri Chipchev  *
1864ff0b8b10SYuri Chipchev  * @param dev
1865ff0b8b10SYuri Chipchev  *   Pointer to Ethernet device structure.
1866ff0b8b10SYuri Chipchev  * @param mask
1867ff0b8b10SYuri Chipchev  *   VLAN offload mask.
1868ff0b8b10SYuri Chipchev  *
1869ff0b8b10SYuri Chipchev  * @return
1870ff0b8b10SYuri Chipchev  *   0 on success, negative error value otherwise.
1871ff0b8b10SYuri Chipchev  */
1872ff0b8b10SYuri Chipchev static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1873ff0b8b10SYuri Chipchev {
1874ff0b8b10SYuri Chipchev 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1875ff0b8b10SYuri Chipchev 	int ret;
1876ff0b8b10SYuri Chipchev 
1877295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1878*8df71650SJerin Jacob 		MRVL_LOG(ERR, "VLAN stripping is not supported");
187971c5085bSMeir Levi 		return -ENOTSUP;
188071c5085bSMeir Levi 	}
1881ff0b8b10SYuri Chipchev 
1882295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1883295968d1SFerruh Yigit 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1884ff0b8b10SYuri Chipchev 			ret = mrvl_populate_vlan_table(dev, 1);
1885ff0b8b10SYuri Chipchev 		else
1886ff0b8b10SYuri Chipchev 			ret = mrvl_populate_vlan_table(dev, 0);
1887ff0b8b10SYuri Chipchev 
1888ff0b8b10SYuri Chipchev 		if (ret)
1889ff0b8b10SYuri Chipchev 			return ret;
1890ff0b8b10SYuri Chipchev 	}
1891ff0b8b10SYuri Chipchev 
1892295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
1893*8df71650SJerin Jacob 		MRVL_LOG(ERR, "Extend VLAN not supported");
189471c5085bSMeir Levi 		return -ENOTSUP;
189571c5085bSMeir Levi 	}
1896ff0b8b10SYuri Chipchev 
1897ff0b8b10SYuri Chipchev 	return 0;
1898ff0b8b10SYuri Chipchev }
1899ff0b8b10SYuri Chipchev 
1900ff0b8b10SYuri Chipchev /**
1901fe939687SNatalie Samsonov  * Release buffers to hardware bpool (buffer-pool)
1902fe939687SNatalie Samsonov  *
1903fe939687SNatalie Samsonov  * @param rxq
1904fe939687SNatalie Samsonov  *   Receive queue pointer.
1905fe939687SNatalie Samsonov  * @param num
1906fe939687SNatalie Samsonov  *   Number of buffers to release to bpool.
1907fe939687SNatalie Samsonov  *
1908fe939687SNatalie Samsonov  * @return
1909fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1910fe939687SNatalie Samsonov  */
1911fe939687SNatalie Samsonov static int
1912fe939687SNatalie Samsonov mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1913fe939687SNatalie Samsonov {
19148ce3c5dbSYuri Chipchev 	struct buff_release_entry entries[num];
19158ce3c5dbSYuri Chipchev 	struct rte_mbuf *mbufs[num];
1916fe939687SNatalie Samsonov 	int i, ret;
1917fe939687SNatalie Samsonov 	unsigned int core_id;
1918fe939687SNatalie Samsonov 	struct pp2_hif *hif;
1919fe939687SNatalie Samsonov 	struct pp2_bpool *bpool;
1920fe939687SNatalie Samsonov 
1921fe939687SNatalie Samsonov 	core_id = rte_lcore_id();
1922fe939687SNatalie Samsonov 	if (core_id == LCORE_ID_ANY)
1923cb056611SStephen Hemminger 		core_id = rte_get_main_lcore();
1924fe939687SNatalie Samsonov 
1925fe939687SNatalie Samsonov 	hif = mrvl_get_hif(rxq->priv, core_id);
1926fe939687SNatalie Samsonov 	if (!hif)
1927fe939687SNatalie Samsonov 		return -1;
1928fe939687SNatalie Samsonov 
1929fe939687SNatalie Samsonov 	bpool = rxq->priv->bpool;
1930fe939687SNatalie Samsonov 
1931fe939687SNatalie Samsonov 	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1932fe939687SNatalie Samsonov 	if (ret)
1933fe939687SNatalie Samsonov 		return ret;
1934fe939687SNatalie Samsonov 
1935fe939687SNatalie Samsonov 	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1936fe939687SNatalie Samsonov 		cookie_addr_high =
1937fe939687SNatalie Samsonov 			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1938fe939687SNatalie Samsonov 
1939fe939687SNatalie Samsonov 	for (i = 0; i < num; i++) {
1940fe939687SNatalie Samsonov 		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1941fe939687SNatalie Samsonov 			!= cookie_addr_high) {
1942acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
194336173503SYuri Chipchev 				"mbuf virtual addr high is out of range "
1944*8df71650SJerin Jacob 				"0x%x instead of 0x%x",
194536173503SYuri Chipchev 				(uint32_t)((uint64_t)mbufs[i] >> 32),
194636173503SYuri Chipchev 				(uint32_t)(cookie_addr_high >> 32));
1947fe939687SNatalie Samsonov 			goto out;
1948fe939687SNatalie Samsonov 		}
1949fe939687SNatalie Samsonov 
1950fe939687SNatalie Samsonov 		entries[i].buff.addr =
1951fe939687SNatalie Samsonov 			rte_mbuf_data_iova_default(mbufs[i]);
195236173503SYuri Chipchev 		entries[i].buff.cookie = (uintptr_t)mbufs[i];
1953fe939687SNatalie Samsonov 		entries[i].bpool = bpool;
1954fe939687SNatalie Samsonov 	}
1955fe939687SNatalie Samsonov 
1956fe939687SNatalie Samsonov 	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1957fe939687SNatalie Samsonov 	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1958fe939687SNatalie Samsonov 
1959fe939687SNatalie Samsonov 	if (i != num)
1960fe939687SNatalie Samsonov 		goto out;
1961fe939687SNatalie Samsonov 
1962fe939687SNatalie Samsonov 	return 0;
1963fe939687SNatalie Samsonov out:
1964fe939687SNatalie Samsonov 	for (; i < num; i++)
1965fe939687SNatalie Samsonov 		rte_pktmbuf_free(mbufs[i]);
1966fe939687SNatalie Samsonov 
1967fe939687SNatalie Samsonov 	return -1;
1968fe939687SNatalie Samsonov }
1969fe939687SNatalie Samsonov 
1970fe939687SNatalie Samsonov /**
1971fe939687SNatalie Samsonov  * DPDK callback to configure the receive queue.
1972fe939687SNatalie Samsonov  *
1973fe939687SNatalie Samsonov  * @param dev
1974fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
1975fe939687SNatalie Samsonov  * @param idx
1976fe939687SNatalie Samsonov  *   RX queue index.
1977fe939687SNatalie Samsonov  * @param desc
1978fe939687SNatalie Samsonov  *   Number of descriptors to configure in queue.
1979fe939687SNatalie Samsonov  * @param socket
1980fe939687SNatalie Samsonov  *   NUMA socket on which memory must be allocated.
1981fe939687SNatalie Samsonov  * @param conf
1982fe939687SNatalie Samsonov  *   Thresholds parameters.
1983fe939687SNatalie Samsonov  * @param mp
1984fe939687SNatalie Samsonov  *   Memory pool for buffer allocations.
1985fe939687SNatalie Samsonov  *
1986fe939687SNatalie Samsonov  * @return
1987fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
1988fe939687SNatalie Samsonov  */
1989fe939687SNatalie Samsonov static int
1990fe939687SNatalie Samsonov mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1991fe939687SNatalie Samsonov 		    unsigned int socket,
1992fe939687SNatalie Samsonov 		    const struct rte_eth_rxconf *conf,
1993fe939687SNatalie Samsonov 		    struct rte_mempool *mp)
1994fe939687SNatalie Samsonov {
1995fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1996fe939687SNatalie Samsonov 	struct mrvl_rxq *rxq;
199779ec6202SNatalie Samsonov 	uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
19981bb4a528SFerruh Yigit 	uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
1999fe939687SNatalie Samsonov 	int ret, tc, inq;
2000a4996bd8SWei Dai 	uint64_t offloads;
2001fe939687SNatalie Samsonov 
2002a4996bd8SWei Dai 	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
2003fe939687SNatalie Samsonov 
2004fe939687SNatalie Samsonov 	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
2005fe939687SNatalie Samsonov 		/*
2006fe939687SNatalie Samsonov 		 * Unknown TC mapping, mapping will not have a correct queue.
2007fe939687SNatalie Samsonov 		 */
2008acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
2009fe939687SNatalie Samsonov 			idx, priv->ppio_id);
2010fe939687SNatalie Samsonov 		return -EFAULT;
2011fe939687SNatalie Samsonov 	}
2012fe939687SNatalie Samsonov 
20131bb4a528SFerruh Yigit 	frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
20141bb4a528SFerruh Yigit 	if (frame_size < max_rx_pktlen) {
201579ec6202SNatalie Samsonov 		MRVL_LOG(WARNING,
201679ec6202SNatalie Samsonov 			"Mbuf size must be increased to %u bytes to hold up "
201779ec6202SNatalie Samsonov 			"to %u bytes of data.",
20181bb4a528SFerruh Yigit 			max_rx_pktlen + buf_size - frame_size,
20191bb4a528SFerruh Yigit 			max_rx_pktlen);
20201bb4a528SFerruh Yigit 		dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
20211bb4a528SFerruh Yigit 		MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
2022fe939687SNatalie Samsonov 	}
2023fe939687SNatalie Samsonov 
2024fe939687SNatalie Samsonov 	if (dev->data->rx_queues[idx]) {
2025fe939687SNatalie Samsonov 		rte_free(dev->data->rx_queues[idx]);
2026fe939687SNatalie Samsonov 		dev->data->rx_queues[idx] = NULL;
2027fe939687SNatalie Samsonov 	}
2028fe939687SNatalie Samsonov 
2029fe939687SNatalie Samsonov 	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
2030fe939687SNatalie Samsonov 	if (!rxq)
2031fe939687SNatalie Samsonov 		return -ENOMEM;
2032fe939687SNatalie Samsonov 
2033fe939687SNatalie Samsonov 	rxq->priv = priv;
2034fe939687SNatalie Samsonov 	rxq->mp = mp;
2035295968d1SFerruh Yigit 	rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
2036fe939687SNatalie Samsonov 	rxq->queue_id = idx;
2037fe939687SNatalie Samsonov 	rxq->port_id = dev->data->port_id;
2038fe939687SNatalie Samsonov 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
2039fe939687SNatalie Samsonov 
2040fe939687SNatalie Samsonov 	tc = priv->rxq_map[rxq->queue_id].tc,
2041fe939687SNatalie Samsonov 	inq = priv->rxq_map[rxq->queue_id].inq;
2042fe939687SNatalie Samsonov 	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
2043fe939687SNatalie Samsonov 		desc;
2044fe939687SNatalie Samsonov 
2045fe939687SNatalie Samsonov 	ret = mrvl_fill_bpool(rxq, desc);
2046fe939687SNatalie Samsonov 	if (ret) {
2047fe939687SNatalie Samsonov 		rte_free(rxq);
2048fe939687SNatalie Samsonov 		return ret;
2049fe939687SNatalie Samsonov 	}
2050fe939687SNatalie Samsonov 
2051fe939687SNatalie Samsonov 	priv->bpool_init_size += desc;
2052fe939687SNatalie Samsonov 
2053fe939687SNatalie Samsonov 	dev->data->rx_queues[idx] = rxq;
2054fe939687SNatalie Samsonov 
2055fe939687SNatalie Samsonov 	return 0;
2056fe939687SNatalie Samsonov }
2057fe939687SNatalie Samsonov 
2058fe939687SNatalie Samsonov /**
2059fe939687SNatalie Samsonov  * DPDK callback to release the receive queue.
2060fe939687SNatalie Samsonov  *
20617483341aSXueming Li  * @param dev
20627483341aSXueming Li  *   Pointer to Ethernet device structure.
20637483341aSXueming Li  * @param qid
20647483341aSXueming Li  *   Receive queue index.
2065fe939687SNatalie Samsonov  */
2066fe939687SNatalie Samsonov static void
20677483341aSXueming Li mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2068fe939687SNatalie Samsonov {
20697483341aSXueming Li 	struct mrvl_rxq *q = dev->data->rx_queues[qid];
2070fe939687SNatalie Samsonov 	struct pp2_ppio_tc_params *tc_params;
2071fe939687SNatalie Samsonov 	int i, num, tc, inq;
2072fe939687SNatalie Samsonov 	struct pp2_hif *hif;
2073fe939687SNatalie Samsonov 	unsigned int core_id = rte_lcore_id();
2074fe939687SNatalie Samsonov 
2075fe939687SNatalie Samsonov 	if (core_id == LCORE_ID_ANY)
2076cb056611SStephen Hemminger 		core_id = rte_get_main_lcore();
2077fe939687SNatalie Samsonov 
207842ab8427STomasz Duszynski 	if (!q)
207942ab8427STomasz Duszynski 		return;
208042ab8427STomasz Duszynski 
2081fe939687SNatalie Samsonov 	hif = mrvl_get_hif(q->priv, core_id);
2082fe939687SNatalie Samsonov 
208342ab8427STomasz Duszynski 	if (!hif)
2084fe939687SNatalie Samsonov 		return;
2085fe939687SNatalie Samsonov 
2086fe939687SNatalie Samsonov 	tc = q->priv->rxq_map[q->queue_id].tc;
2087fe939687SNatalie Samsonov 	inq = q->priv->rxq_map[q->queue_id].inq;
2088fe939687SNatalie Samsonov 	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
2089fe939687SNatalie Samsonov 	num = tc_params->inqs_params[inq].size;
2090fe939687SNatalie Samsonov 	for (i = 0; i < num; i++) {
2091fe939687SNatalie Samsonov 		struct pp2_buff_inf inf;
2092fe939687SNatalie Samsonov 		uint64_t addr;
2093fe939687SNatalie Samsonov 
2094fe939687SNatalie Samsonov 		pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
2095fe939687SNatalie Samsonov 		addr = cookie_addr_high | inf.cookie;
2096fe939687SNatalie Samsonov 		rte_pktmbuf_free((struct rte_mbuf *)addr);
2097fe939687SNatalie Samsonov 	}
2098fe939687SNatalie Samsonov 
2099fe939687SNatalie Samsonov 	rte_free(q);
2100fe939687SNatalie Samsonov }
2101fe939687SNatalie Samsonov 
2102fe939687SNatalie Samsonov /**
2103fe939687SNatalie Samsonov  * DPDK callback to configure the transmit queue.
2104fe939687SNatalie Samsonov  *
2105fe939687SNatalie Samsonov  * @param dev
2106fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
2107fe939687SNatalie Samsonov  * @param idx
2108fe939687SNatalie Samsonov  *   Transmit queue index.
2109fe939687SNatalie Samsonov  * @param desc
2110fe939687SNatalie Samsonov  *   Number of descriptors to configure in the queue.
2111fe939687SNatalie Samsonov  * @param socket
2112fe939687SNatalie Samsonov  *   NUMA socket on which memory must be allocated.
2113fe939687SNatalie Samsonov  * @param conf
2114fe939687SNatalie Samsonov  *   Tx queue configuration parameters.
2115fe939687SNatalie Samsonov  *
2116fe939687SNatalie Samsonov  * @return
2117fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
2118fe939687SNatalie Samsonov  */
2119fe939687SNatalie Samsonov static int
2120fe939687SNatalie Samsonov mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2121fe939687SNatalie Samsonov 		    unsigned int socket,
2122fe939687SNatalie Samsonov 		    const struct rte_eth_txconf *conf)
2123fe939687SNatalie Samsonov {
2124fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
2125fe939687SNatalie Samsonov 	struct mrvl_txq *txq;
2126fe939687SNatalie Samsonov 
2127fe939687SNatalie Samsonov 	if (dev->data->tx_queues[idx]) {
2128fe939687SNatalie Samsonov 		rte_free(dev->data->tx_queues[idx]);
2129fe939687SNatalie Samsonov 		dev->data->tx_queues[idx] = NULL;
2130fe939687SNatalie Samsonov 	}
2131fe939687SNatalie Samsonov 
2132fe939687SNatalie Samsonov 	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
2133fe939687SNatalie Samsonov 	if (!txq)
2134fe939687SNatalie Samsonov 		return -ENOMEM;
2135fe939687SNatalie Samsonov 
2136fe939687SNatalie Samsonov 	txq->priv = priv;
2137fe939687SNatalie Samsonov 	txq->queue_id = idx;
2138fe939687SNatalie Samsonov 	txq->port_id = dev->data->port_id;
2139fe939687SNatalie Samsonov 	txq->tx_deferred_start = conf->tx_deferred_start;
2140fe939687SNatalie Samsonov 	dev->data->tx_queues[idx] = txq;
2141fe939687SNatalie Samsonov 
2142fe939687SNatalie Samsonov 	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
2143fe939687SNatalie Samsonov 
2144fe939687SNatalie Samsonov 	return 0;
2145fe939687SNatalie Samsonov }
2146fe939687SNatalie Samsonov 
2147fe939687SNatalie Samsonov /**
2148fe939687SNatalie Samsonov  * DPDK callback to release the transmit queue.
2149fe939687SNatalie Samsonov  *
21507483341aSXueming Li  * @param dev
21517483341aSXueming Li  *   Pointer to Ethernet device structure.
21527483341aSXueming Li  * @param qid
21537483341aSXueming Li  *   Transmit queue index.
2154fe939687SNatalie Samsonov  */
2155fe939687SNatalie Samsonov static void
21567483341aSXueming Li mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2157fe939687SNatalie Samsonov {
21587483341aSXueming Li 	struct mrvl_txq *q = dev->data->tx_queues[qid];
2159fe939687SNatalie Samsonov 
2160fe939687SNatalie Samsonov 	if (!q)
2161fe939687SNatalie Samsonov 		return;
2162fe939687SNatalie Samsonov 
2163fe939687SNatalie Samsonov 	rte_free(q);
2164fe939687SNatalie Samsonov }
2165fe939687SNatalie Samsonov 
2166fe939687SNatalie Samsonov /**
2167fe939687SNatalie Samsonov  * DPDK callback to get flow control configuration.
2168fe939687SNatalie Samsonov  *
2169fe939687SNatalie Samsonov  * @param dev
2170fe939687SNatalie Samsonov  *  Pointer to Ethernet device structure.
2171fe939687SNatalie Samsonov  * @param fc_conf
2172fe939687SNatalie Samsonov  *  Pointer to the flow control configuration.
2173fe939687SNatalie Samsonov  *
2174fe939687SNatalie Samsonov  * @return
2175fe939687SNatalie Samsonov  *  0 on success, negative error value otherwise.
2176fe939687SNatalie Samsonov  */
2177fe939687SNatalie Samsonov static int
2178fe939687SNatalie Samsonov mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2179fe939687SNatalie Samsonov {
2180fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
2181fe939687SNatalie Samsonov 	int ret, en;
2182fe939687SNatalie Samsonov 
21837f2ae5ddSLiron Himi 	if (!priv->ppio) {
21847f2ae5ddSLiron Himi 		memcpy(fc_conf, &priv->fc_conf, sizeof(struct rte_eth_fc_conf));
21857f2ae5ddSLiron Himi 		return 0;
21867f2ae5ddSLiron Himi 	}
2187fe939687SNatalie Samsonov 
2188c0e5d09eSYuri Chipchev 	fc_conf->autoneg = 1;
2189fe939687SNatalie Samsonov 	ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
2190fe939687SNatalie Samsonov 	if (ret) {
2191acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to read rx pause state");
2192fe939687SNatalie Samsonov 		return ret;
2193fe939687SNatalie Samsonov 	}
2194fe939687SNatalie Samsonov 
2195295968d1SFerruh Yigit 	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
2196fe939687SNatalie Samsonov 
21978537663bSYuri Chipchev 	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
21988537663bSYuri Chipchev 	if (ret) {
21998537663bSYuri Chipchev 		MRVL_LOG(ERR, "Failed to read tx pause state");
22008537663bSYuri Chipchev 		return ret;
22018537663bSYuri Chipchev 	}
22028537663bSYuri Chipchev 
22038537663bSYuri Chipchev 	if (en) {
2204295968d1SFerruh Yigit 		if (fc_conf->mode == RTE_ETH_FC_NONE)
2205295968d1SFerruh Yigit 			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
22068537663bSYuri Chipchev 		else
2207295968d1SFerruh Yigit 			fc_conf->mode = RTE_ETH_FC_FULL;
22088537663bSYuri Chipchev 	}
22098537663bSYuri Chipchev 
2210fe939687SNatalie Samsonov 	return 0;
2211fe939687SNatalie Samsonov }
2212fe939687SNatalie Samsonov 
2213fe939687SNatalie Samsonov /**
2214fe939687SNatalie Samsonov  * DPDK callback to set flow control configuration.
2215fe939687SNatalie Samsonov  *
2216fe939687SNatalie Samsonov  * @param dev
2217fe939687SNatalie Samsonov  *  Pointer to Ethernet device structure.
2218fe939687SNatalie Samsonov  * @param fc_conf
2219fe939687SNatalie Samsonov  *  Pointer to the flow control configuration.
2220fe939687SNatalie Samsonov  *
2221fe939687SNatalie Samsonov  * @return
2222fe939687SNatalie Samsonov  *  0 on success, negative error value otherwise.
2223fe939687SNatalie Samsonov  */
2224fe939687SNatalie Samsonov static int
2225fe939687SNatalie Samsonov mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2226fe939687SNatalie Samsonov {
2227fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
22288537663bSYuri Chipchev 	struct pp2_ppio_tx_pause_params mrvl_pause_params;
22298537663bSYuri Chipchev 	int ret;
22308537663bSYuri Chipchev 	int rx_en, tx_en;
2231fe939687SNatalie Samsonov 
2232fe939687SNatalie Samsonov 	if (fc_conf->high_water ||
2233fe939687SNatalie Samsonov 	    fc_conf->low_water ||
2234fe939687SNatalie Samsonov 	    fc_conf->pause_time ||
2235c0e5d09eSYuri Chipchev 	    fc_conf->mac_ctrl_frame_fwd) {
2236acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Flowctrl parameter is not supported");
2237fe939687SNatalie Samsonov 
2238fe939687SNatalie Samsonov 		return -EINVAL;
2239fe939687SNatalie Samsonov 	}
2240fe939687SNatalie Samsonov 
2241c0e5d09eSYuri Chipchev 	if (fc_conf->autoneg == 0) {
2242c0e5d09eSYuri Chipchev 		MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported");
2243c0e5d09eSYuri Chipchev 		return -EINVAL;
2244c0e5d09eSYuri Chipchev 	}
2245c0e5d09eSYuri Chipchev 
22467f2ae5ddSLiron Himi 	if (!priv->ppio) {
22477f2ae5ddSLiron Himi 		memcpy(&priv->fc_conf, fc_conf, sizeof(struct rte_eth_fc_conf));
22487f2ae5ddSLiron Himi 		priv->flow_ctrl = 1;
22497f2ae5ddSLiron Himi 		return 0;
22507f2ae5ddSLiron Himi 	}
22517f2ae5ddSLiron Himi 
22528537663bSYuri Chipchev 	switch (fc_conf->mode) {
2253295968d1SFerruh Yigit 	case RTE_ETH_FC_FULL:
22548537663bSYuri Chipchev 		rx_en = 1;
22558537663bSYuri Chipchev 		tx_en = 1;
22568537663bSYuri Chipchev 		break;
2257295968d1SFerruh Yigit 	case RTE_ETH_FC_TX_PAUSE:
22588537663bSYuri Chipchev 		rx_en = 0;
22598537663bSYuri Chipchev 		tx_en = 1;
22608537663bSYuri Chipchev 		break;
2261295968d1SFerruh Yigit 	case RTE_ETH_FC_RX_PAUSE:
22628537663bSYuri Chipchev 		rx_en = 1;
22638537663bSYuri Chipchev 		tx_en = 0;
22648537663bSYuri Chipchev 		break;
2265295968d1SFerruh Yigit 	case RTE_ETH_FC_NONE:
22668537663bSYuri Chipchev 		rx_en = 0;
22678537663bSYuri Chipchev 		tx_en = 0;
22688537663bSYuri Chipchev 		break;
22698537663bSYuri Chipchev 	default:
22708537663bSYuri Chipchev 		MRVL_LOG(ERR, "Incorrect Flow control flag (%d)",
22718537663bSYuri Chipchev 			 fc_conf->mode);
22728537663bSYuri Chipchev 		return -EINVAL;
22738537663bSYuri Chipchev 	}
2274fe939687SNatalie Samsonov 
22758537663bSYuri Chipchev 	/* Set RX flow control */
22768537663bSYuri Chipchev 	ret = pp2_ppio_set_rx_pause(priv->ppio, rx_en);
22778537663bSYuri Chipchev 	if (ret) {
22788537663bSYuri Chipchev 		MRVL_LOG(ERR, "Failed to change RX flowctrl");
22798537663bSYuri Chipchev 		return ret;
22808537663bSYuri Chipchev 	}
2281fe939687SNatalie Samsonov 
22828537663bSYuri Chipchev 	/* Set TX flow control */
22838537663bSYuri Chipchev 	mrvl_pause_params.en = tx_en;
22848537663bSYuri Chipchev 	/* all inqs participate in xon/xoff decision */
22858537663bSYuri Chipchev 	mrvl_pause_params.use_tc_pause_inqs = 0;
22868537663bSYuri Chipchev 	ret = pp2_ppio_set_tx_pause(priv->ppio, &mrvl_pause_params);
22878537663bSYuri Chipchev 	if (ret) {
22888537663bSYuri Chipchev 		MRVL_LOG(ERR, "Failed to change TX flowctrl");
2289fe939687SNatalie Samsonov 		return ret;
2290fe939687SNatalie Samsonov 	}
2291fe939687SNatalie Samsonov 
2292fe939687SNatalie Samsonov 	return 0;
2293fe939687SNatalie Samsonov }
2294fe939687SNatalie Samsonov 
2295fe939687SNatalie Samsonov /**
2296fe939687SNatalie Samsonov  * Update RSS hash configuration
2297fe939687SNatalie Samsonov  *
2298fe939687SNatalie Samsonov  * @param dev
2299fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
2300fe939687SNatalie Samsonov  * @param rss_conf
2301fe939687SNatalie Samsonov  *   Pointer to RSS configuration.
2302fe939687SNatalie Samsonov  *
2303fe939687SNatalie Samsonov  * @return
2304fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
2305fe939687SNatalie Samsonov  */
2306fe939687SNatalie Samsonov static int
2307fe939687SNatalie Samsonov mrvl_rss_hash_update(struct rte_eth_dev *dev,
2308fe939687SNatalie Samsonov 		     struct rte_eth_rss_conf *rss_conf)
2309fe939687SNatalie Samsonov {
2310fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
2311fe939687SNatalie Samsonov 
2312fe939687SNatalie Samsonov 	if (priv->isolated)
2313fe939687SNatalie Samsonov 		return -ENOTSUP;
2314fe939687SNatalie Samsonov 
2315fe939687SNatalie Samsonov 	return mrvl_configure_rss(priv, rss_conf);
2316fe939687SNatalie Samsonov }
2317fe939687SNatalie Samsonov 
2318fe939687SNatalie Samsonov /**
2319fe939687SNatalie Samsonov  * DPDK callback to get RSS hash configuration.
2320fe939687SNatalie Samsonov  *
2321fe939687SNatalie Samsonov  * @param dev
2322fe939687SNatalie Samsonov  *   Pointer to Ethernet device structure.
2323fe939687SNatalie Samsonov  * @rss_conf
2324fe939687SNatalie Samsonov  *   Pointer to RSS configuration.
2325fe939687SNatalie Samsonov  *
2326fe939687SNatalie Samsonov  * @return
2327fe939687SNatalie Samsonov  *   Always 0.
2328fe939687SNatalie Samsonov  */
2329fe939687SNatalie Samsonov static int
2330fe939687SNatalie Samsonov mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
2331fe939687SNatalie Samsonov 		       struct rte_eth_rss_conf *rss_conf)
2332fe939687SNatalie Samsonov {
2333fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
2334fe939687SNatalie Samsonov 	enum pp2_ppio_hash_type hash_type =
2335fe939687SNatalie Samsonov 		priv->ppio_params.inqs_params.hash_type;
2336fe939687SNatalie Samsonov 
2337fe939687SNatalie Samsonov 	rss_conf->rss_key = NULL;
2338fe939687SNatalie Samsonov 
2339fe939687SNatalie Samsonov 	if (hash_type == PP2_PPIO_HASH_T_NONE)
2340fe939687SNatalie Samsonov 		rss_conf->rss_hf = 0;
2341fe939687SNatalie Samsonov 	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
2342295968d1SFerruh Yigit 		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
2343fe939687SNatalie Samsonov 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
2344295968d1SFerruh Yigit 		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2345fe939687SNatalie Samsonov 	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
2346295968d1SFerruh Yigit 		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2347fe939687SNatalie Samsonov 
2348fe939687SNatalie Samsonov 	return 0;
2349fe939687SNatalie Samsonov }
2350fe939687SNatalie Samsonov 
2351fe939687SNatalie Samsonov /**
2352fe939687SNatalie Samsonov  * DPDK callback to get rte_flow callbacks.
2353fe939687SNatalie Samsonov  *
2354fe939687SNatalie Samsonov  * @param dev
2355fe939687SNatalie Samsonov  *   Pointer to the device structure.
2356fb7ad441SThomas Monjalon  * @param ops
2357fe939687SNatalie Samsonov  *   Pointer to pass the flow ops.
2358fe939687SNatalie Samsonov  *
2359fe939687SNatalie Samsonov  * @return
2360fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
2361fe939687SNatalie Samsonov  */
2362fe939687SNatalie Samsonov static int
2363fb7ad441SThomas Monjalon mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
2364fb7ad441SThomas Monjalon 		      const struct rte_flow_ops **ops)
2365fe939687SNatalie Samsonov {
2366fb7ad441SThomas Monjalon 	*ops = &mrvl_flow_ops;
2367fe939687SNatalie Samsonov 	return 0;
2368fe939687SNatalie Samsonov }
2369fe939687SNatalie Samsonov 
2370cdb53f8dSTomasz Duszynski /**
2371cdb53f8dSTomasz Duszynski  * DPDK callback to get rte_mtr callbacks.
2372cdb53f8dSTomasz Duszynski  *
2373cdb53f8dSTomasz Duszynski  * @param dev
2374cdb53f8dSTomasz Duszynski  *   Pointer to the device structure.
2375cdb53f8dSTomasz Duszynski  * @param ops
2376cdb53f8dSTomasz Duszynski  *   Pointer to pass the mtr ops.
2377cdb53f8dSTomasz Duszynski  *
2378cdb53f8dSTomasz Duszynski  * @return
2379cdb53f8dSTomasz Duszynski  *   Always 0.
2380cdb53f8dSTomasz Duszynski  */
2381cdb53f8dSTomasz Duszynski static int
2382cdb53f8dSTomasz Duszynski mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2383cdb53f8dSTomasz Duszynski {
2384cdb53f8dSTomasz Duszynski 	*(const void **)ops = &mrvl_mtr_ops;
2385cdb53f8dSTomasz Duszynski 
2386cdb53f8dSTomasz Duszynski 	return 0;
2387cdb53f8dSTomasz Duszynski }
2388cdb53f8dSTomasz Duszynski 
2389429c3944STomasz Duszynski /**
2390429c3944STomasz Duszynski  * DPDK callback to get rte_tm callbacks.
2391429c3944STomasz Duszynski  *
2392429c3944STomasz Duszynski  * @param dev
2393429c3944STomasz Duszynski  *   Pointer to the device structure.
2394429c3944STomasz Duszynski  * @param ops
2395429c3944STomasz Duszynski  *   Pointer to pass the tm ops.
2396429c3944STomasz Duszynski  *
2397429c3944STomasz Duszynski  * @return
2398429c3944STomasz Duszynski  *   Always 0.
2399429c3944STomasz Duszynski  */
2400429c3944STomasz Duszynski static int
2401429c3944STomasz Duszynski mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2402429c3944STomasz Duszynski {
2403429c3944STomasz Duszynski 	*(const void **)ops = &mrvl_tm_ops;
2404429c3944STomasz Duszynski 
2405429c3944STomasz Duszynski 	return 0;
2406429c3944STomasz Duszynski }
2407429c3944STomasz Duszynski 
2408fe939687SNatalie Samsonov static const struct eth_dev_ops mrvl_ops = {
2409fe939687SNatalie Samsonov 	.dev_configure = mrvl_dev_configure,
2410fe939687SNatalie Samsonov 	.dev_start = mrvl_dev_start,
2411fe939687SNatalie Samsonov 	.dev_stop = mrvl_dev_stop,
2412fe939687SNatalie Samsonov 	.dev_set_link_up = mrvl_dev_set_link_up,
2413fe939687SNatalie Samsonov 	.dev_set_link_down = mrvl_dev_set_link_down,
2414fe939687SNatalie Samsonov 	.dev_close = mrvl_dev_close,
2415fe939687SNatalie Samsonov 	.link_update = mrvl_link_update,
2416fe939687SNatalie Samsonov 	.promiscuous_enable = mrvl_promiscuous_enable,
2417fe939687SNatalie Samsonov 	.allmulticast_enable = mrvl_allmulticast_enable,
2418fe939687SNatalie Samsonov 	.promiscuous_disable = mrvl_promiscuous_disable,
2419fe939687SNatalie Samsonov 	.allmulticast_disable = mrvl_allmulticast_disable,
2420fe939687SNatalie Samsonov 	.mac_addr_remove = mrvl_mac_addr_remove,
2421fe939687SNatalie Samsonov 	.mac_addr_add = mrvl_mac_addr_add,
2422fe939687SNatalie Samsonov 	.mac_addr_set = mrvl_mac_addr_set,
2423fe939687SNatalie Samsonov 	.mtu_set = mrvl_mtu_set,
2424fe939687SNatalie Samsonov 	.stats_get = mrvl_stats_get,
2425fe939687SNatalie Samsonov 	.stats_reset = mrvl_stats_reset,
2426fe939687SNatalie Samsonov 	.xstats_get = mrvl_xstats_get,
2427fe939687SNatalie Samsonov 	.xstats_reset = mrvl_xstats_reset,
2428fe939687SNatalie Samsonov 	.xstats_get_names = mrvl_xstats_get_names,
2429fe939687SNatalie Samsonov 	.dev_infos_get = mrvl_dev_infos_get,
2430fe939687SNatalie Samsonov 	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
2431fe939687SNatalie Samsonov 	.rxq_info_get = mrvl_rxq_info_get,
2432fe939687SNatalie Samsonov 	.txq_info_get = mrvl_txq_info_get,
2433fe939687SNatalie Samsonov 	.vlan_filter_set = mrvl_vlan_filter_set,
2434ff0b8b10SYuri Chipchev 	.vlan_offload_set = mrvl_vlan_offload_set,
2435fe939687SNatalie Samsonov 	.tx_queue_start = mrvl_tx_queue_start,
2436fe939687SNatalie Samsonov 	.tx_queue_stop = mrvl_tx_queue_stop,
2437fe939687SNatalie Samsonov 	.rx_queue_setup = mrvl_rx_queue_setup,
2438fe939687SNatalie Samsonov 	.rx_queue_release = mrvl_rx_queue_release,
2439fe939687SNatalie Samsonov 	.tx_queue_setup = mrvl_tx_queue_setup,
2440fe939687SNatalie Samsonov 	.tx_queue_release = mrvl_tx_queue_release,
2441fe939687SNatalie Samsonov 	.flow_ctrl_get = mrvl_flow_ctrl_get,
2442fe939687SNatalie Samsonov 	.flow_ctrl_set = mrvl_flow_ctrl_set,
2443fe939687SNatalie Samsonov 	.rss_hash_update = mrvl_rss_hash_update,
2444fe939687SNatalie Samsonov 	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
2445fb7ad441SThomas Monjalon 	.flow_ops_get = mrvl_eth_flow_ops_get,
2446cdb53f8dSTomasz Duszynski 	.mtr_ops_get = mrvl_mtr_ops_get,
2447429c3944STomasz Duszynski 	.tm_ops_get = mrvl_tm_ops_get,
2448fe939687SNatalie Samsonov };
2449fe939687SNatalie Samsonov 
2450fe939687SNatalie Samsonov /**
2451fe939687SNatalie Samsonov  * Return packet type information and l3/l4 offsets.
2452fe939687SNatalie Samsonov  *
2453fe939687SNatalie Samsonov  * @param desc
2454fe939687SNatalie Samsonov  *   Pointer to the received packet descriptor.
2455fe939687SNatalie Samsonov  * @param l3_offset
2456fe939687SNatalie Samsonov  *   l3 packet offset.
2457fe939687SNatalie Samsonov  * @param l4_offset
2458fe939687SNatalie Samsonov  *   l4 packet offset.
2459fe939687SNatalie Samsonov  *
2460fe939687SNatalie Samsonov  * @return
2461fe939687SNatalie Samsonov  *   Packet type information.
2462fe939687SNatalie Samsonov  */
2463fe939687SNatalie Samsonov static inline uint64_t
2464fe939687SNatalie Samsonov mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
2465fe939687SNatalie Samsonov 				    uint8_t *l3_offset, uint8_t *l4_offset)
2466fe939687SNatalie Samsonov {
2467fe939687SNatalie Samsonov 	enum pp2_inq_l3_type l3_type;
2468fe939687SNatalie Samsonov 	enum pp2_inq_l4_type l4_type;
24694943a290SNatalie Samsonov 	enum pp2_inq_vlan_tag vlan_tag;
2470fe939687SNatalie Samsonov 	uint64_t packet_type;
2471fe939687SNatalie Samsonov 
2472fe939687SNatalie Samsonov 	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
2473fe939687SNatalie Samsonov 	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
24744943a290SNatalie Samsonov 	pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
2475fe939687SNatalie Samsonov 
2476fe939687SNatalie Samsonov 	packet_type = RTE_PTYPE_L2_ETHER;
2477fe939687SNatalie Samsonov 
24784943a290SNatalie Samsonov 	switch (vlan_tag) {
24794943a290SNatalie Samsonov 	case PP2_INQ_VLAN_TAG_SINGLE:
24804943a290SNatalie Samsonov 		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
24814943a290SNatalie Samsonov 		break;
24824943a290SNatalie Samsonov 	case PP2_INQ_VLAN_TAG_DOUBLE:
24834943a290SNatalie Samsonov 	case PP2_INQ_VLAN_TAG_TRIPLE:
24844943a290SNatalie Samsonov 		packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
24854943a290SNatalie Samsonov 		break;
24864943a290SNatalie Samsonov 	default:
24874943a290SNatalie Samsonov 		break;
24884943a290SNatalie Samsonov 	}
24894943a290SNatalie Samsonov 
2490fe939687SNatalie Samsonov 	switch (l3_type) {
2491fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
2492fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L3_IPV4;
2493fe939687SNatalie Samsonov 		break;
2494fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_IPV4_OK:
2495fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2496fe939687SNatalie Samsonov 		break;
2497fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2498fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2499fe939687SNatalie Samsonov 		break;
2500fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2501fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L3_IPV6;
2502fe939687SNatalie Samsonov 		break;
2503fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_IPV6_EXT:
2504fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2505fe939687SNatalie Samsonov 		break;
2506fe939687SNatalie Samsonov 	case PP2_INQ_L3_TYPE_ARP:
2507fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2508fe939687SNatalie Samsonov 		/*
2509fe939687SNatalie Samsonov 		 * In case of ARP l4_offset is set to wrong value.
2510fe939687SNatalie Samsonov 		 * Set it to proper one so that later on mbuf->l3_len can be
2511fe939687SNatalie Samsonov 		 * calculated subtracting l4_offset and l3_offset.
2512fe939687SNatalie Samsonov 		 */
2513fe939687SNatalie Samsonov 		*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2514fe939687SNatalie Samsonov 		break;
2515fe939687SNatalie Samsonov 	default:
2516fe939687SNatalie Samsonov 		break;
2517fe939687SNatalie Samsonov 	}
2518fe939687SNatalie Samsonov 
2519fe939687SNatalie Samsonov 	switch (l4_type) {
2520fe939687SNatalie Samsonov 	case PP2_INQ_L4_TYPE_TCP:
2521fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L4_TCP;
2522fe939687SNatalie Samsonov 		break;
2523fe939687SNatalie Samsonov 	case PP2_INQ_L4_TYPE_UDP:
2524fe939687SNatalie Samsonov 		packet_type |= RTE_PTYPE_L4_UDP;
2525fe939687SNatalie Samsonov 		break;
2526fe939687SNatalie Samsonov 	default:
2527fe939687SNatalie Samsonov 		break;
2528fe939687SNatalie Samsonov 	}
2529fe939687SNatalie Samsonov 
2530fe939687SNatalie Samsonov 	return packet_type;
2531fe939687SNatalie Samsonov }
2532fe939687SNatalie Samsonov 
2533fe939687SNatalie Samsonov /**
2534fe939687SNatalie Samsonov  * Get offload information from the received packet descriptor.
2535fe939687SNatalie Samsonov  *
2536fe939687SNatalie Samsonov  * @param desc
2537fe939687SNatalie Samsonov  *   Pointer to the received packet descriptor.
2538fe939687SNatalie Samsonov  *
2539fe939687SNatalie Samsonov  * @return
2540fe939687SNatalie Samsonov  *   Mbuf offload flags.
2541fe939687SNatalie Samsonov  */
2542fe939687SNatalie Samsonov static inline uint64_t
2543006f6ccbSLiron Himi mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc, uint64_t packet_type)
2544fe939687SNatalie Samsonov {
2545006f6ccbSLiron Himi 	uint64_t flags = 0;
2546fe939687SNatalie Samsonov 	enum pp2_inq_desc_status status;
2547fe939687SNatalie Samsonov 
2548006f6ccbSLiron Himi 	if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
2549fe939687SNatalie Samsonov 		status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2550fe939687SNatalie Samsonov 		if (unlikely(status != PP2_DESC_ERR_OK))
2551daa02b5cSOlivier Matz 			flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
2552fe939687SNatalie Samsonov 		else
2553daa02b5cSOlivier Matz 			flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
2554006f6ccbSLiron Himi 	}
2555fe939687SNatalie Samsonov 
2556006f6ccbSLiron Himi 	if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) ||
2557006f6ccbSLiron Himi 	    ((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) {
2558fe939687SNatalie Samsonov 		status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2559fe939687SNatalie Samsonov 		if (unlikely(status != PP2_DESC_ERR_OK))
2560daa02b5cSOlivier Matz 			flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
2561fe939687SNatalie Samsonov 		else
2562daa02b5cSOlivier Matz 			flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
2563006f6ccbSLiron Himi 	}
2564fe939687SNatalie Samsonov 
2565fe939687SNatalie Samsonov 	return flags;
2566fe939687SNatalie Samsonov }
2567fe939687SNatalie Samsonov 
2568fe939687SNatalie Samsonov /**
2569fe939687SNatalie Samsonov  * DPDK callback for receive.
2570fe939687SNatalie Samsonov  *
2571fe939687SNatalie Samsonov  * @param rxq
2572fe939687SNatalie Samsonov  *   Generic pointer to the receive queue.
2573fe939687SNatalie Samsonov  * @param rx_pkts
2574fe939687SNatalie Samsonov  *   Array to store received packets.
2575fe939687SNatalie Samsonov  * @param nb_pkts
2576fe939687SNatalie Samsonov  *   Maximum number of packets in array.
2577fe939687SNatalie Samsonov  *
2578fe939687SNatalie Samsonov  * @return
2579fe939687SNatalie Samsonov  *   Number of packets successfully received.
2580fe939687SNatalie Samsonov  */
2581fe939687SNatalie Samsonov static uint16_t
2582fe939687SNatalie Samsonov mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2583fe939687SNatalie Samsonov {
2584fe939687SNatalie Samsonov 	struct mrvl_rxq *q = rxq;
2585fe939687SNatalie Samsonov 	struct pp2_ppio_desc descs[nb_pkts];
2586fe939687SNatalie Samsonov 	struct pp2_bpool *bpool;
2587fe939687SNatalie Samsonov 	int i, ret, rx_done = 0;
2588fe939687SNatalie Samsonov 	int num;
2589fe939687SNatalie Samsonov 	struct pp2_hif *hif;
2590fe939687SNatalie Samsonov 	unsigned int core_id = rte_lcore_id();
2591fe939687SNatalie Samsonov 
2592fe939687SNatalie Samsonov 	hif = mrvl_get_hif(q->priv, core_id);
2593fe939687SNatalie Samsonov 
2594fe939687SNatalie Samsonov 	if (unlikely(!q->priv->ppio || !hif))
2595fe939687SNatalie Samsonov 		return 0;
2596fe939687SNatalie Samsonov 
2597fe939687SNatalie Samsonov 	bpool = q->priv->bpool;
2598fe939687SNatalie Samsonov 
2599fe939687SNatalie Samsonov 	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2600fe939687SNatalie Samsonov 			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2601fa876f3aSLiron Himi 	if (unlikely(ret < 0))
2602fe939687SNatalie Samsonov 		return 0;
2603fa876f3aSLiron Himi 
2604fe939687SNatalie Samsonov 	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2605fe939687SNatalie Samsonov 
2606fe939687SNatalie Samsonov 	for (i = 0; i < nb_pkts; i++) {
2607fe939687SNatalie Samsonov 		struct rte_mbuf *mbuf;
2608fe939687SNatalie Samsonov 		uint8_t l3_offset, l4_offset;
2609fe939687SNatalie Samsonov 		enum pp2_inq_desc_status status;
2610fe939687SNatalie Samsonov 		uint64_t addr;
2611fe939687SNatalie Samsonov 
2612fe939687SNatalie Samsonov 		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2613fe939687SNatalie Samsonov 			struct pp2_ppio_desc *pref_desc;
2614fe939687SNatalie Samsonov 			u64 pref_addr;
2615fe939687SNatalie Samsonov 
2616fe939687SNatalie Samsonov 			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2617fe939687SNatalie Samsonov 			pref_addr = cookie_addr_high |
2618fe939687SNatalie Samsonov 				    pp2_ppio_inq_desc_get_cookie(pref_desc);
2619fe939687SNatalie Samsonov 			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2620fe939687SNatalie Samsonov 			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2621fe939687SNatalie Samsonov 		}
2622fe939687SNatalie Samsonov 
2623fe939687SNatalie Samsonov 		addr = cookie_addr_high |
2624fe939687SNatalie Samsonov 		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
2625fe939687SNatalie Samsonov 		mbuf = (struct rte_mbuf *)addr;
2626fe939687SNatalie Samsonov 		rte_pktmbuf_reset(mbuf);
2627fe939687SNatalie Samsonov 
2628fe939687SNatalie Samsonov 		/* drop packet in case of mac, overrun or resource error */
2629fe939687SNatalie Samsonov 		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2630ef08031fSDana Vardi 		if ((unlikely(status != PP2_DESC_ERR_OK)) &&
2631ef08031fSDana Vardi 			!(q->priv->forward_bad_frames)) {
2632fe939687SNatalie Samsonov 			struct pp2_buff_inf binf = {
2633fe939687SNatalie Samsonov 				.addr = rte_mbuf_data_iova_default(mbuf),
2634e04ec42aSTomasz Duszynski 				.cookie = (uint64_t)mbuf,
2635fe939687SNatalie Samsonov 			};
2636fe939687SNatalie Samsonov 
2637fe939687SNatalie Samsonov 			pp2_bpool_put_buff(hif, bpool, &binf);
2638fe939687SNatalie Samsonov 			mrvl_port_bpool_size
2639fe939687SNatalie Samsonov 				[bpool->pp2_id][bpool->id][core_id]++;
2640fe939687SNatalie Samsonov 			q->drop_mac++;
2641fe939687SNatalie Samsonov 			continue;
2642fe939687SNatalie Samsonov 		}
2643fe939687SNatalie Samsonov 
2644fe939687SNatalie Samsonov 		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2645fe939687SNatalie Samsonov 		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2646fe939687SNatalie Samsonov 		mbuf->data_len = mbuf->pkt_len;
2647fe939687SNatalie Samsonov 		mbuf->port = q->port_id;
2648fe939687SNatalie Samsonov 		mbuf->packet_type =
2649fe939687SNatalie Samsonov 			mrvl_desc_to_packet_type_and_offset(&descs[i],
2650fe939687SNatalie Samsonov 							    &l3_offset,
2651fe939687SNatalie Samsonov 							    &l4_offset);
2652fe939687SNatalie Samsonov 		mbuf->l2_len = l3_offset;
2653fe939687SNatalie Samsonov 		mbuf->l3_len = l4_offset - l3_offset;
2654fe939687SNatalie Samsonov 
2655fe939687SNatalie Samsonov 		if (likely(q->cksum_enabled))
2656006f6ccbSLiron Himi 			mbuf->ol_flags =
2657006f6ccbSLiron Himi 				mrvl_desc_to_ol_flags(&descs[i],
2658006f6ccbSLiron Himi 						      mbuf->packet_type);
2659fe939687SNatalie Samsonov 
2660fe939687SNatalie Samsonov 		rx_pkts[rx_done++] = mbuf;
2661fe939687SNatalie Samsonov 		q->bytes_recv += mbuf->pkt_len;
2662fe939687SNatalie Samsonov 	}
2663fe939687SNatalie Samsonov 
2664fe939687SNatalie Samsonov 	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2665fe939687SNatalie Samsonov 		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
2666fe939687SNatalie Samsonov 
2667fe939687SNatalie Samsonov 		if (unlikely(num <= q->priv->bpool_min_size ||
2668fe939687SNatalie Samsonov 			     (!rx_done && num < q->priv->bpool_init_size))) {
26694b86050aSDana Vardi 			mrvl_fill_bpool(q, q->priv->fill_bpool_buffs);
2670fe939687SNatalie Samsonov 		} else if (unlikely(num > q->priv->bpool_max_size)) {
2671fe939687SNatalie Samsonov 			int i;
2672fe939687SNatalie Samsonov 			int pkt_to_remove = num - q->priv->bpool_init_size;
2673fe939687SNatalie Samsonov 			struct rte_mbuf *mbuf;
2674fe939687SNatalie Samsonov 			struct pp2_buff_inf buff;
2675fe939687SNatalie Samsonov 
2676fe939687SNatalie Samsonov 			for (i = 0; i < pkt_to_remove; i++) {
2677fe939687SNatalie Samsonov 				ret = pp2_bpool_get_buff(hif, bpool, &buff);
2678fe939687SNatalie Samsonov 				if (ret)
2679fe939687SNatalie Samsonov 					break;
2680fe939687SNatalie Samsonov 				mbuf = (struct rte_mbuf *)
2681fe939687SNatalie Samsonov 					(cookie_addr_high | buff.cookie);
2682fe939687SNatalie Samsonov 				rte_pktmbuf_free(mbuf);
2683fe939687SNatalie Samsonov 			}
2684fe939687SNatalie Samsonov 			mrvl_port_bpool_size
2685fe939687SNatalie Samsonov 				[bpool->pp2_id][bpool->id][core_id] -= i;
2686fe939687SNatalie Samsonov 		}
2687fe939687SNatalie Samsonov 		rte_spinlock_unlock(&q->priv->lock);
2688fe939687SNatalie Samsonov 	}
2689fe939687SNatalie Samsonov 
2690fe939687SNatalie Samsonov 	return rx_done;
2691fe939687SNatalie Samsonov }
2692fe939687SNatalie Samsonov 
2693fe939687SNatalie Samsonov /**
2694fe939687SNatalie Samsonov  * Prepare offload information.
2695fe939687SNatalie Samsonov  *
2696fe939687SNatalie Samsonov  * @param ol_flags
2697fe939687SNatalie Samsonov  *   Offload flags.
2698fe939687SNatalie Samsonov  * @param l3_type
2699fe939687SNatalie Samsonov  *   Pointer to the pp2_ouq_l3_type structure.
2700fe939687SNatalie Samsonov  * @param l4_type
2701fe939687SNatalie Samsonov  *   Pointer to the pp2_outq_l4_type structure.
2702fe939687SNatalie Samsonov  * @param gen_l3_cksum
2703fe939687SNatalie Samsonov  *   Will be set to 1 in case l3 checksum is computed.
2704fe939687SNatalie Samsonov  * @param l4_cksum
2705fe939687SNatalie Samsonov  *   Will be set to 1 in case l4 checksum is computed.
2706fe939687SNatalie Samsonov  */
270745ea4c59SLiron Himi static inline void
270845ea4c59SLiron Himi mrvl_prepare_proto_info(uint64_t ol_flags,
2709fe939687SNatalie Samsonov 			enum pp2_outq_l3_type *l3_type,
2710fe939687SNatalie Samsonov 			enum pp2_outq_l4_type *l4_type,
2711fe939687SNatalie Samsonov 			int *gen_l3_cksum,
2712fe939687SNatalie Samsonov 			int *gen_l4_cksum)
2713fe939687SNatalie Samsonov {
2714fe939687SNatalie Samsonov 	/*
2715fe939687SNatalie Samsonov 	 * Based on ol_flags prepare information
2716fe939687SNatalie Samsonov 	 * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor
2717fe939687SNatalie Samsonov 	 * for offloading.
271845ea4c59SLiron Himi 	 * in most of the checksum cases ipv4 must be set, so this is the
271945ea4c59SLiron Himi 	 * default value
2720fe939687SNatalie Samsonov 	 */
2721fe939687SNatalie Samsonov 	*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2722daa02b5cSOlivier Matz 	*gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;
272345ea4c59SLiron Himi 
2724daa02b5cSOlivier Matz 	if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2725fe939687SNatalie Samsonov 		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2726fe939687SNatalie Samsonov 		/* no checksum for ipv6 header */
2727fe939687SNatalie Samsonov 		*gen_l3_cksum = 0;
2728fe939687SNatalie Samsonov 	}
2729fe939687SNatalie Samsonov 
2730daa02b5cSOlivier Matz 	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) {
2731fe939687SNatalie Samsonov 		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
2732fe939687SNatalie Samsonov 		*gen_l4_cksum = 1;
2733daa02b5cSOlivier Matz 	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) ==  RTE_MBUF_F_TX_UDP_CKSUM) {
2734fe939687SNatalie Samsonov 		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
2735fe939687SNatalie Samsonov 		*gen_l4_cksum = 1;
2736fe939687SNatalie Samsonov 	} else {
2737fe939687SNatalie Samsonov 		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2738fe939687SNatalie Samsonov 		/* no checksum for other type */
2739fe939687SNatalie Samsonov 		*gen_l4_cksum = 0;
2740fe939687SNatalie Samsonov 	}
2741fe939687SNatalie Samsonov }
2742fe939687SNatalie Samsonov 
2743fe939687SNatalie Samsonov /**
2744fe939687SNatalie Samsonov  * Release already sent buffers to bpool (buffer-pool).
2745fe939687SNatalie Samsonov  *
2746fe939687SNatalie Samsonov  * @param ppio
2747fe939687SNatalie Samsonov  *   Pointer to the port structure.
2748fe939687SNatalie Samsonov  * @param hif
2749fe939687SNatalie Samsonov  *   Pointer to the MUSDK hardware interface.
2750fe939687SNatalie Samsonov  * @param sq
2751fe939687SNatalie Samsonov  *   Pointer to the shadow queue.
2752fe939687SNatalie Samsonov  * @param qid
2753fe939687SNatalie Samsonov  *   Queue id number.
2754fe939687SNatalie Samsonov  * @param force
2755fe939687SNatalie Samsonov  *   Force releasing packets.
2756fe939687SNatalie Samsonov  */
2757fe939687SNatalie Samsonov static inline void
2758fe939687SNatalie Samsonov mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2759fe939687SNatalie Samsonov 		       unsigned int core_id, struct mrvl_shadow_txq *sq,
2760fe939687SNatalie Samsonov 		       int qid, int force)
2761fe939687SNatalie Samsonov {
2762fe939687SNatalie Samsonov 	struct buff_release_entry *entry;
2763fe939687SNatalie Samsonov 	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2764fe939687SNatalie Samsonov 	int i;
2765fe939687SNatalie Samsonov 
2766fe939687SNatalie Samsonov 	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2767fe939687SNatalie Samsonov 
2768fe939687SNatalie Samsonov 	sq->num_to_release += nb_done;
2769fe939687SNatalie Samsonov 
2770fe939687SNatalie Samsonov 	if (likely(!force &&
2771fe939687SNatalie Samsonov 		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2772fe939687SNatalie Samsonov 		return;
2773fe939687SNatalie Samsonov 
2774fe939687SNatalie Samsonov 	nb_done = sq->num_to_release;
2775fe939687SNatalie Samsonov 	sq->num_to_release = 0;
2776fe939687SNatalie Samsonov 
2777fe939687SNatalie Samsonov 	for (i = 0; i < nb_done; i++) {
2778fe939687SNatalie Samsonov 		entry = &sq->ent[sq->tail + num];
2779fe939687SNatalie Samsonov 		if (unlikely(!entry->buff.addr)) {
2780acab7d58STomasz Duszynski 			MRVL_LOG(ERR,
2781acab7d58STomasz Duszynski 				"Shadow memory @%d: cookie(%lx), pa(%lx)!",
2782fe939687SNatalie Samsonov 				sq->tail, (u64)entry->buff.cookie,
2783fe939687SNatalie Samsonov 				(u64)entry->buff.addr);
2784fe939687SNatalie Samsonov 			skip_bufs = 1;
2785fe939687SNatalie Samsonov 			goto skip;
2786fe939687SNatalie Samsonov 		}
2787fe939687SNatalie Samsonov 
2788fe939687SNatalie Samsonov 		if (unlikely(!entry->bpool)) {
2789fe939687SNatalie Samsonov 			struct rte_mbuf *mbuf;
2790fe939687SNatalie Samsonov 
279136173503SYuri Chipchev 			mbuf = (struct rte_mbuf *)entry->buff.cookie;
2792fe939687SNatalie Samsonov 			rte_pktmbuf_free(mbuf);
2793fe939687SNatalie Samsonov 			skip_bufs = 1;
2794fe939687SNatalie Samsonov 			goto skip;
2795fe939687SNatalie Samsonov 		}
2796fe939687SNatalie Samsonov 
2797fe939687SNatalie Samsonov 		mrvl_port_bpool_size
2798fe939687SNatalie Samsonov 			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2799fe939687SNatalie Samsonov 		num++;
2800fe939687SNatalie Samsonov 		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2801fe939687SNatalie Samsonov 			goto skip;
2802fe939687SNatalie Samsonov 		continue;
2803fe939687SNatalie Samsonov skip:
2804fe939687SNatalie Samsonov 		if (likely(num))
2805fe939687SNatalie Samsonov 			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2806fe939687SNatalie Samsonov 		num += skip_bufs;
2807fe939687SNatalie Samsonov 		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2808fe939687SNatalie Samsonov 		sq->size -= num;
2809fe939687SNatalie Samsonov 		num = 0;
2810fe939687SNatalie Samsonov 		skip_bufs = 0;
2811fe939687SNatalie Samsonov 	}
2812fe939687SNatalie Samsonov 
2813fe939687SNatalie Samsonov 	if (likely(num)) {
2814fe939687SNatalie Samsonov 		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2815fe939687SNatalie Samsonov 		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2816fe939687SNatalie Samsonov 		sq->size -= num;
2817fe939687SNatalie Samsonov 	}
2818fe939687SNatalie Samsonov }
2819fe939687SNatalie Samsonov 
2820fe939687SNatalie Samsonov /**
2821fe939687SNatalie Samsonov  * DPDK callback for transmit.
2822fe939687SNatalie Samsonov  *
2823fe939687SNatalie Samsonov  * @param txq
2824fe939687SNatalie Samsonov  *   Generic pointer transmit queue.
2825fe939687SNatalie Samsonov  * @param tx_pkts
2826fe939687SNatalie Samsonov  *   Packets to transmit.
2827fe939687SNatalie Samsonov  * @param nb_pkts
2828fe939687SNatalie Samsonov  *   Number of packets in array.
2829fe939687SNatalie Samsonov  *
2830fe939687SNatalie Samsonov  * @return
2831fe939687SNatalie Samsonov  *   Number of packets successfully transmitted.
2832fe939687SNatalie Samsonov  */
2833fe939687SNatalie Samsonov static uint16_t
2834fe939687SNatalie Samsonov mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2835fe939687SNatalie Samsonov {
2836fe939687SNatalie Samsonov 	struct mrvl_txq *q = txq;
2837fe939687SNatalie Samsonov 	struct mrvl_shadow_txq *sq;
2838fe939687SNatalie Samsonov 	struct pp2_hif *hif;
2839fe939687SNatalie Samsonov 	struct pp2_ppio_desc descs[nb_pkts];
2840fe939687SNatalie Samsonov 	unsigned int core_id = rte_lcore_id();
284145ea4c59SLiron Himi 	int i, bytes_sent = 0;
2842fe939687SNatalie Samsonov 	uint16_t num, sq_free_size;
2843fe939687SNatalie Samsonov 	uint64_t addr;
2844fe939687SNatalie Samsonov 
2845fe939687SNatalie Samsonov 	hif = mrvl_get_hif(q->priv, core_id);
2846fe939687SNatalie Samsonov 	sq = &q->shadow_txqs[core_id];
2847fe939687SNatalie Samsonov 
2848fe939687SNatalie Samsonov 	if (unlikely(!q->priv->ppio || !hif))
2849fe939687SNatalie Samsonov 		return 0;
2850fe939687SNatalie Samsonov 
2851fe939687SNatalie Samsonov 	if (sq->size)
2852fe939687SNatalie Samsonov 		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2853fe939687SNatalie Samsonov 				       sq, q->queue_id, 0);
2854fe939687SNatalie Samsonov 
2855fe939687SNatalie Samsonov 	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2856fa876f3aSLiron Himi 	if (unlikely(nb_pkts > sq_free_size))
2857fe939687SNatalie Samsonov 		nb_pkts = sq_free_size;
2858fe939687SNatalie Samsonov 
2859fe939687SNatalie Samsonov 	for (i = 0; i < nb_pkts; i++) {
2860fe939687SNatalie Samsonov 		struct rte_mbuf *mbuf = tx_pkts[i];
2861fe939687SNatalie Samsonov 		int gen_l3_cksum, gen_l4_cksum;
2862fe939687SNatalie Samsonov 		enum pp2_outq_l3_type l3_type;
2863fe939687SNatalie Samsonov 		enum pp2_outq_l4_type l4_type;
2864fe939687SNatalie Samsonov 
2865fe939687SNatalie Samsonov 		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2866fe939687SNatalie Samsonov 			struct rte_mbuf *pref_pkt_hdr;
2867fe939687SNatalie Samsonov 
2868fe939687SNatalie Samsonov 			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2869fe939687SNatalie Samsonov 			rte_mbuf_prefetch_part1(pref_pkt_hdr);
2870fe939687SNatalie Samsonov 			rte_mbuf_prefetch_part2(pref_pkt_hdr);
2871fe939687SNatalie Samsonov 		}
2872fe939687SNatalie Samsonov 
28739e79d810SZyta Szpak 		mrvl_fill_shadowq(sq, mbuf);
28749e79d810SZyta Szpak 		mrvl_fill_desc(&descs[i], mbuf);
2875fe939687SNatalie Samsonov 
2876fe939687SNatalie Samsonov 		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2877fe939687SNatalie Samsonov 		/*
2878fe939687SNatalie Samsonov 		 * in case unsupported ol_flags were passed
2879fe939687SNatalie Samsonov 		 * do not update descriptor offload information
2880fe939687SNatalie Samsonov 		 */
288145ea4c59SLiron Himi 		if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
2882fe939687SNatalie Samsonov 			continue;
288345ea4c59SLiron Himi 		mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
288445ea4c59SLiron Himi 					&gen_l3_cksum, &gen_l4_cksum);
2885fe939687SNatalie Samsonov 
2886fe939687SNatalie Samsonov 		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2887fe939687SNatalie Samsonov 						  mbuf->l2_len,
2888fe939687SNatalie Samsonov 						  mbuf->l2_len + mbuf->l3_len,
2889fe939687SNatalie Samsonov 						  gen_l3_cksum, gen_l4_cksum);
2890fe939687SNatalie Samsonov 	}
2891fe939687SNatalie Samsonov 
2892fe939687SNatalie Samsonov 	num = nb_pkts;
2893fe939687SNatalie Samsonov 	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2894fe939687SNatalie Samsonov 	/* number of packets that were not sent */
2895fe939687SNatalie Samsonov 	if (unlikely(num > nb_pkts)) {
2896fe939687SNatalie Samsonov 		for (i = nb_pkts; i < num; i++) {
2897fe939687SNatalie Samsonov 			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2898fe939687SNatalie Samsonov 				MRVL_PP2_TX_SHADOWQ_MASK;
289936173503SYuri Chipchev 			addr = sq->ent[sq->head].buff.cookie;
2900fe939687SNatalie Samsonov 			bytes_sent -=
2901fe939687SNatalie Samsonov 				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2902fe939687SNatalie Samsonov 		}
2903fe939687SNatalie Samsonov 		sq->size -= num - nb_pkts;
2904fe939687SNatalie Samsonov 	}
2905fe939687SNatalie Samsonov 
2906fe939687SNatalie Samsonov 	q->bytes_sent += bytes_sent;
2907fe939687SNatalie Samsonov 
2908fe939687SNatalie Samsonov 	return nb_pkts;
2909fe939687SNatalie Samsonov }
2910fe939687SNatalie Samsonov 
29119e79d810SZyta Szpak /** DPDK callback for S/G transmit.
29129e79d810SZyta Szpak  *
29139e79d810SZyta Szpak  * @param txq
29149e79d810SZyta Szpak  *   Generic pointer transmit queue.
29159e79d810SZyta Szpak  * @param tx_pkts
29169e79d810SZyta Szpak  *   Packets to transmit.
29179e79d810SZyta Szpak  * @param nb_pkts
29189e79d810SZyta Szpak  *   Number of packets in array.
29199e79d810SZyta Szpak  *
29209e79d810SZyta Szpak  * @return
29219e79d810SZyta Szpak  *   Number of packets successfully transmitted.
29229e79d810SZyta Szpak  */
29239e79d810SZyta Szpak static uint16_t
29249e79d810SZyta Szpak mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
29259e79d810SZyta Szpak 		     uint16_t nb_pkts)
29269e79d810SZyta Szpak {
29279e79d810SZyta Szpak 	struct mrvl_txq *q = txq;
29289e79d810SZyta Szpak 	struct mrvl_shadow_txq *sq;
29299e79d810SZyta Szpak 	struct pp2_hif *hif;
29309e79d810SZyta Szpak 	struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS];
29319e79d810SZyta Szpak 	struct pp2_ppio_sg_pkts pkts;
29329e79d810SZyta Szpak 	uint8_t frags[nb_pkts];
29339e79d810SZyta Szpak 	unsigned int core_id = rte_lcore_id();
293445ea4c59SLiron Himi 	int i, j, bytes_sent = 0;
29359e79d810SZyta Szpak 	int tail, tail_first;
29369e79d810SZyta Szpak 	uint16_t num, sq_free_size;
29379e79d810SZyta Szpak 	uint16_t nb_segs, total_descs = 0;
29389e79d810SZyta Szpak 	uint64_t addr;
29399e79d810SZyta Szpak 
29409e79d810SZyta Szpak 	hif = mrvl_get_hif(q->priv, core_id);
29419e79d810SZyta Szpak 	sq = &q->shadow_txqs[core_id];
29429e79d810SZyta Szpak 	pkts.frags = frags;
29439e79d810SZyta Szpak 	pkts.num = 0;
29449e79d810SZyta Szpak 
29459e79d810SZyta Szpak 	if (unlikely(!q->priv->ppio || !hif))
29469e79d810SZyta Szpak 		return 0;
29479e79d810SZyta Szpak 
29489e79d810SZyta Szpak 	if (sq->size)
29499e79d810SZyta Szpak 		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
29509e79d810SZyta Szpak 				       sq, q->queue_id, 0);
29519e79d810SZyta Szpak 
29529e79d810SZyta Szpak 	/* Save shadow queue free size */
29539e79d810SZyta Szpak 	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
29549e79d810SZyta Szpak 
29559e79d810SZyta Szpak 	tail = 0;
29569e79d810SZyta Szpak 	for (i = 0; i < nb_pkts; i++) {
29579e79d810SZyta Szpak 		struct rte_mbuf *mbuf = tx_pkts[i];
29589e79d810SZyta Szpak 		struct rte_mbuf *seg = NULL;
29599e79d810SZyta Szpak 		int gen_l3_cksum, gen_l4_cksum;
29609e79d810SZyta Szpak 		enum pp2_outq_l3_type l3_type;
29619e79d810SZyta Szpak 		enum pp2_outq_l4_type l4_type;
29629e79d810SZyta Szpak 
29639e79d810SZyta Szpak 		nb_segs = mbuf->nb_segs;
29649e79d810SZyta Szpak 		tail_first = tail;
29659e79d810SZyta Szpak 		total_descs += nb_segs;
29669e79d810SZyta Szpak 
29679e79d810SZyta Szpak 		/*
29689e79d810SZyta Szpak 		 * Check if total_descs does not exceed
29699e79d810SZyta Szpak 		 * shadow queue free size
29709e79d810SZyta Szpak 		 */
29719e79d810SZyta Szpak 		if (unlikely(total_descs > sq_free_size)) {
29729e79d810SZyta Szpak 			total_descs -= nb_segs;
29739e79d810SZyta Szpak 			break;
29749e79d810SZyta Szpak 		}
29759e79d810SZyta Szpak 
29769e79d810SZyta Szpak 		/* Check if nb_segs does not exceed the max nb of desc per
29779e79d810SZyta Szpak 		 * fragmented packet
29789e79d810SZyta Szpak 		 */
29799e79d810SZyta Szpak 		if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) {
29809e79d810SZyta Szpak 			total_descs -= nb_segs;
2981a247fcd9SStephen Hemminger 			MRVL_LOG(ERR, "Too many segments. Packet won't be sent.");
29829e79d810SZyta Szpak 			break;
29839e79d810SZyta Szpak 		}
29849e79d810SZyta Szpak 
29859e79d810SZyta Szpak 		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
29869e79d810SZyta Szpak 			struct rte_mbuf *pref_pkt_hdr;
29879e79d810SZyta Szpak 
29889e79d810SZyta Szpak 			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
29899e79d810SZyta Szpak 			rte_mbuf_prefetch_part1(pref_pkt_hdr);
29909e79d810SZyta Szpak 			rte_mbuf_prefetch_part2(pref_pkt_hdr);
29919e79d810SZyta Szpak 		}
29929e79d810SZyta Szpak 
29939e79d810SZyta Szpak 		pkts.frags[pkts.num] = nb_segs;
29949e79d810SZyta Szpak 		pkts.num++;
29959e79d810SZyta Szpak 
29969e79d810SZyta Szpak 		seg = mbuf;
29979e79d810SZyta Szpak 		for (j = 0; j < nb_segs - 1; j++) {
29989e79d810SZyta Szpak 			/* For the subsequent segments, set shadow queue
29999e79d810SZyta Szpak 			 * buffer to NULL
30009e79d810SZyta Szpak 			 */
30019e79d810SZyta Szpak 			mrvl_fill_shadowq(sq, NULL);
30029e79d810SZyta Szpak 			mrvl_fill_desc(&descs[tail], seg);
30039e79d810SZyta Szpak 
30049e79d810SZyta Szpak 			tail++;
30059e79d810SZyta Szpak 			seg = seg->next;
30069e79d810SZyta Szpak 		}
30079e79d810SZyta Szpak 		/* Put first mbuf info in last shadow queue entry */
30089e79d810SZyta Szpak 		mrvl_fill_shadowq(sq, mbuf);
30099e79d810SZyta Szpak 		/* Update descriptor with last segment */
30109e79d810SZyta Szpak 		mrvl_fill_desc(&descs[tail++], seg);
30119e79d810SZyta Szpak 
30129e79d810SZyta Szpak 		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
30139e79d810SZyta Szpak 		/* In case unsupported ol_flags were passed
30149e79d810SZyta Szpak 		 * do not update descriptor offload information
30159e79d810SZyta Szpak 		 */
301645ea4c59SLiron Himi 		if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
30179e79d810SZyta Szpak 			continue;
301845ea4c59SLiron Himi 		mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
301945ea4c59SLiron Himi 					&gen_l3_cksum, &gen_l4_cksum);
30209e79d810SZyta Szpak 
30219e79d810SZyta Szpak 		pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type,
30229e79d810SZyta Szpak 						  l4_type, mbuf->l2_len,
30239e79d810SZyta Szpak 						  mbuf->l2_len + mbuf->l3_len,
30249e79d810SZyta Szpak 						  gen_l3_cksum, gen_l4_cksum);
30259e79d810SZyta Szpak 	}
30269e79d810SZyta Szpak 
30279e79d810SZyta Szpak 	num = total_descs;
30289e79d810SZyta Szpak 	pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs,
30299e79d810SZyta Szpak 			 &total_descs, &pkts);
30309e79d810SZyta Szpak 	/* number of packets that were not sent */
30319e79d810SZyta Szpak 	if (unlikely(num > total_descs)) {
30329e79d810SZyta Szpak 		for (i = total_descs; i < num; i++) {
30339e79d810SZyta Szpak 			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
30349e79d810SZyta Szpak 				MRVL_PP2_TX_SHADOWQ_MASK;
30359e79d810SZyta Szpak 
30369e79d810SZyta Szpak 			addr = sq->ent[sq->head].buff.cookie;
30379e79d810SZyta Szpak 			if (addr)
30389e79d810SZyta Szpak 				bytes_sent -=
30399e79d810SZyta Szpak 					rte_pktmbuf_pkt_len((struct rte_mbuf *)
30409e79d810SZyta Szpak 						(cookie_addr_high | addr));
30419e79d810SZyta Szpak 		}
30429e79d810SZyta Szpak 		sq->size -= num - total_descs;
30439e79d810SZyta Szpak 		nb_pkts = pkts.num;
30449e79d810SZyta Szpak 	}
30459e79d810SZyta Szpak 
30469e79d810SZyta Szpak 	q->bytes_sent += bytes_sent;
30479e79d810SZyta Szpak 
30489e79d810SZyta Szpak 	return nb_pkts;
30499e79d810SZyta Szpak }
30509e79d810SZyta Szpak 
3051fe939687SNatalie Samsonov /**
3052fe939687SNatalie Samsonov  * Create private device structure.
3053fe939687SNatalie Samsonov  *
3054fe939687SNatalie Samsonov  * @param dev_name
3055fe939687SNatalie Samsonov  *   Pointer to the port name passed in the initialization parameters.
3056fe939687SNatalie Samsonov  *
3057fe939687SNatalie Samsonov  * @return
3058fe939687SNatalie Samsonov  *   Pointer to the newly allocated private device structure.
3059fe939687SNatalie Samsonov  */
3060fe939687SNatalie Samsonov static struct mrvl_priv *
3061fe939687SNatalie Samsonov mrvl_priv_create(const char *dev_name)
3062fe939687SNatalie Samsonov {
3063fe939687SNatalie Samsonov 	struct pp2_bpool_params bpool_params;
3064fe939687SNatalie Samsonov 	char match[MRVL_MATCH_LEN];
3065fe939687SNatalie Samsonov 	struct mrvl_priv *priv;
3066949cdeddSLiron Himi 	uint16_t max_frame_size;
3067fe939687SNatalie Samsonov 	int ret, bpool_bit;
3068fe939687SNatalie Samsonov 
3069fe939687SNatalie Samsonov 	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
3070fe939687SNatalie Samsonov 	if (!priv)
3071fe939687SNatalie Samsonov 		return NULL;
3072fe939687SNatalie Samsonov 
3073fe939687SNatalie Samsonov 	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
3074fe939687SNatalie Samsonov 				       &priv->pp_id, &priv->ppio_id);
3075fe939687SNatalie Samsonov 	if (ret)
3076fe939687SNatalie Samsonov 		goto out_free_priv;
3077fe939687SNatalie Samsonov 
3078949cdeddSLiron Himi 	ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id,
3079949cdeddSLiron Himi 						   &max_frame_size);
3080949cdeddSLiron Himi 	if (ret)
3081949cdeddSLiron Himi 		goto out_free_priv;
3082949cdeddSLiron Himi 
3083949cdeddSLiron Himi 	priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN -
3084949cdeddSLiron Himi 		MRVL_PP2_ETH_HDRS_LEN;
3085949cdeddSLiron Himi 
3086fe939687SNatalie Samsonov 	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
3087fe939687SNatalie Samsonov 				     PP2_BPOOL_NUM_POOLS);
3088fe939687SNatalie Samsonov 	if (bpool_bit < 0)
3089fe939687SNatalie Samsonov 		goto out_free_priv;
3090fe939687SNatalie Samsonov 	priv->bpool_bit = bpool_bit;
3091fe939687SNatalie Samsonov 
3092fe939687SNatalie Samsonov 	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
3093fe939687SNatalie Samsonov 		 priv->bpool_bit);
3094fe939687SNatalie Samsonov 	memset(&bpool_params, 0, sizeof(bpool_params));
3095fe939687SNatalie Samsonov 	bpool_params.match = match;
3096fe939687SNatalie Samsonov 	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
3097fe939687SNatalie Samsonov 	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
3098fe939687SNatalie Samsonov 	if (ret)
3099fe939687SNatalie Samsonov 		goto out_clear_bpool_bit;
3100fe939687SNatalie Samsonov 
3101fe939687SNatalie Samsonov 	priv->ppio_params.type = PP2_PPIO_T_NIC;
3102fe939687SNatalie Samsonov 	rte_spinlock_init(&priv->lock);
3103fe939687SNatalie Samsonov 
3104fe939687SNatalie Samsonov 	return priv;
3105fe939687SNatalie Samsonov out_clear_bpool_bit:
3106fe939687SNatalie Samsonov 	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
3107fe939687SNatalie Samsonov out_free_priv:
3108fe939687SNatalie Samsonov 	rte_free(priv);
3109fe939687SNatalie Samsonov 	return NULL;
3110fe939687SNatalie Samsonov }
3111fe939687SNatalie Samsonov 
3112fe939687SNatalie Samsonov /**
3113fe939687SNatalie Samsonov  * Create device representing Ethernet port.
3114fe939687SNatalie Samsonov  *
3115fe939687SNatalie Samsonov  * @param name
3116fe939687SNatalie Samsonov  *   Pointer to the port's name.
3117fe939687SNatalie Samsonov  *
3118fe939687SNatalie Samsonov  * @return
3119fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
3120fe939687SNatalie Samsonov  */
3121fe939687SNatalie Samsonov static int
3122fe939687SNatalie Samsonov mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
3123fe939687SNatalie Samsonov {
3124fe939687SNatalie Samsonov 	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
3125fe939687SNatalie Samsonov 	struct rte_eth_dev *eth_dev;
3126fe939687SNatalie Samsonov 	struct mrvl_priv *priv;
3127fe939687SNatalie Samsonov 	struct ifreq req;
3128fe939687SNatalie Samsonov 
3129fe939687SNatalie Samsonov 	eth_dev = rte_eth_dev_allocate(name);
3130fe939687SNatalie Samsonov 	if (!eth_dev)
3131fe939687SNatalie Samsonov 		return -ENOMEM;
3132fe939687SNatalie Samsonov 
3133fe939687SNatalie Samsonov 	priv = mrvl_priv_create(name);
3134fe939687SNatalie Samsonov 	if (!priv) {
3135fe939687SNatalie Samsonov 		ret = -ENOMEM;
3136e16adf08SThomas Monjalon 		goto out_free;
3137fe939687SNatalie Samsonov 	}
3138e16adf08SThomas Monjalon 	eth_dev->data->dev_private = priv;
3139fe939687SNatalie Samsonov 
3140fe939687SNatalie Samsonov 	eth_dev->data->mac_addrs =
3141fe939687SNatalie Samsonov 		rte_zmalloc("mac_addrs",
314235b2d13fSOlivier Matz 			    RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
3143fe939687SNatalie Samsonov 	if (!eth_dev->data->mac_addrs) {
3144acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
3145fe939687SNatalie Samsonov 		ret = -ENOMEM;
3146e16adf08SThomas Monjalon 		goto out_free;
3147fe939687SNatalie Samsonov 	}
3148fe939687SNatalie Samsonov 
3149fe939687SNatalie Samsonov 	memset(&req, 0, sizeof(req));
3150fe939687SNatalie Samsonov 	strcpy(req.ifr_name, name);
3151fe939687SNatalie Samsonov 	ret = ioctl(fd, SIOCGIFHWADDR, &req);
3152fe939687SNatalie Samsonov 	if (ret)
3153e16adf08SThomas Monjalon 		goto out_free;
3154fe939687SNatalie Samsonov 
3155fe939687SNatalie Samsonov 	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
315635b2d13fSOlivier Matz 	       req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);
3157fe939687SNatalie Samsonov 
3158fe939687SNatalie Samsonov 	eth_dev->device = &vdev->device;
31599e79d810SZyta Szpak 	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
31609e79d810SZyta Szpak 	mrvl_set_tx_function(eth_dev);
3161fe939687SNatalie Samsonov 	eth_dev->dev_ops = &mrvl_ops;
3162f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3163fe939687SNatalie Samsonov 
3164295968d1SFerruh Yigit 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
31655147cc75SYuri Chipchev 
3166fbe90cddSThomas Monjalon 	rte_eth_dev_probing_finish(eth_dev);
3167fe939687SNatalie Samsonov 	return 0;
3168e16adf08SThomas Monjalon out_free:
3169fe939687SNatalie Samsonov 	rte_eth_dev_release_port(eth_dev);
3170fe939687SNatalie Samsonov 
3171fe939687SNatalie Samsonov 	return ret;
3172fe939687SNatalie Samsonov }
3173fe939687SNatalie Samsonov 
3174fe939687SNatalie Samsonov /**
3175fe939687SNatalie Samsonov  * Callback used by rte_kvargs_process() during argument parsing.
3176fe939687SNatalie Samsonov  *
3177fe939687SNatalie Samsonov  * @param key
3178fe939687SNatalie Samsonov  *   Pointer to the parsed key (unused).
3179fe939687SNatalie Samsonov  * @param value
3180fe939687SNatalie Samsonov  *   Pointer to the parsed value.
3181fe939687SNatalie Samsonov  * @param extra_args
3182fe939687SNatalie Samsonov  *   Pointer to the extra arguments which contains address of the
3183fe939687SNatalie Samsonov  *   table of pointers to parsed interface names.
3184fe939687SNatalie Samsonov  *
3185fe939687SNatalie Samsonov  * @return
3186fe939687SNatalie Samsonov  *   Always 0.
3187fe939687SNatalie Samsonov  */
3188fe939687SNatalie Samsonov static int
3189fe939687SNatalie Samsonov mrvl_get_ifnames(const char *key __rte_unused, const char *value,
3190fe939687SNatalie Samsonov 		 void *extra_args)
3191fe939687SNatalie Samsonov {
3192fe939687SNatalie Samsonov 	struct mrvl_ifnames *ifnames = extra_args;
3193fe939687SNatalie Samsonov 
3194fe939687SNatalie Samsonov 	ifnames->names[ifnames->idx++] = value;
3195fe939687SNatalie Samsonov 
3196fe939687SNatalie Samsonov 	return 0;
3197fe939687SNatalie Samsonov }
3198fe939687SNatalie Samsonov 
3199fe939687SNatalie Samsonov /**
3200fe939687SNatalie Samsonov  * DPDK callback to register the virtual device.
3201fe939687SNatalie Samsonov  *
3202fe939687SNatalie Samsonov  * @param vdev
3203fe939687SNatalie Samsonov  *   Pointer to the virtual device.
3204fe939687SNatalie Samsonov  *
3205fe939687SNatalie Samsonov  * @return
3206fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
3207fe939687SNatalie Samsonov  */
3208fe939687SNatalie Samsonov static int
3209fe939687SNatalie Samsonov rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
3210fe939687SNatalie Samsonov {
3211fe939687SNatalie Samsonov 	struct rte_kvargs *kvlist;
3212fe939687SNatalie Samsonov 	struct mrvl_ifnames ifnames;
3213fe939687SNatalie Samsonov 	int ret = -EINVAL;
3214fe939687SNatalie Samsonov 	uint32_t i, ifnum, cfgnum;
3215fe939687SNatalie Samsonov 	const char *params;
3216fe939687SNatalie Samsonov 
3217fe939687SNatalie Samsonov 	params = rte_vdev_device_args(vdev);
3218fe939687SNatalie Samsonov 	if (!params)
3219fe939687SNatalie Samsonov 		return -EINVAL;
3220fe939687SNatalie Samsonov 
3221fe939687SNatalie Samsonov 	kvlist = rte_kvargs_parse(params, valid_args);
3222fe939687SNatalie Samsonov 	if (!kvlist)
3223fe939687SNatalie Samsonov 		return -EINVAL;
3224fe939687SNatalie Samsonov 
3225fe939687SNatalie Samsonov 	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
3226fe939687SNatalie Samsonov 	if (ifnum > RTE_DIM(ifnames.names))
3227fe939687SNatalie Samsonov 		goto out_free_kvlist;
3228fe939687SNatalie Samsonov 
3229fe939687SNatalie Samsonov 	ifnames.idx = 0;
3230fe939687SNatalie Samsonov 	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
3231fe939687SNatalie Samsonov 			   mrvl_get_ifnames, &ifnames);
3232fe939687SNatalie Samsonov 
3233fe939687SNatalie Samsonov 
3234fe939687SNatalie Samsonov 	/*
3235fe939687SNatalie Samsonov 	 * The below system initialization should be done only once,
3236fe939687SNatalie Samsonov 	 * on the first provided configuration file
3237fe939687SNatalie Samsonov 	 */
3238d7eb4fb2SLiron Himi 	if (!mrvl_cfg) {
3239fe939687SNatalie Samsonov 		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
3240acab7d58STomasz Duszynski 		MRVL_LOG(INFO, "Parsing config file!");
3241fe939687SNatalie Samsonov 		if (cfgnum > 1) {
3242acab7d58STomasz Duszynski 			MRVL_LOG(ERR, "Cannot handle more than one config file!");
3243fe939687SNatalie Samsonov 			goto out_free_kvlist;
3244fe939687SNatalie Samsonov 		} else if (cfgnum == 1) {
3245fe939687SNatalie Samsonov 			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
3246d7eb4fb2SLiron Himi 					   mrvl_get_cfg, &mrvl_cfg);
3247fe939687SNatalie Samsonov 		}
3248fe939687SNatalie Samsonov 	}
3249fe939687SNatalie Samsonov 
3250fe939687SNatalie Samsonov 	if (mrvl_dev_num)
3251fe939687SNatalie Samsonov 		goto init_devices;
3252fe939687SNatalie Samsonov 
3253acab7d58STomasz Duszynski 	MRVL_LOG(INFO, "Perform MUSDK initializations");
32544b4ab496SLiron Himi 
32554b4ab496SLiron Himi 	ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
32564b4ab496SLiron Himi 	if (ret)
3257fe939687SNatalie Samsonov 		goto out_free_kvlist;
3258fe939687SNatalie Samsonov 
3259fe939687SNatalie Samsonov 	ret = mrvl_init_pp2();
3260fe939687SNatalie Samsonov 	if (ret) {
3261acab7d58STomasz Duszynski 		MRVL_LOG(ERR, "Failed to init PP!");
32624b4ab496SLiron Himi 		rte_mvep_deinit(MVEP_MOD_T_PP2);
32634b4ab496SLiron Himi 		goto out_free_kvlist;
3264fe939687SNatalie Samsonov 	}
3265fe939687SNatalie Samsonov 
3266fe939687SNatalie Samsonov 	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
3267fe939687SNatalie Samsonov 	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
3268fe939687SNatalie Samsonov 
3269fe939687SNatalie Samsonov 	mrvl_lcore_first = RTE_MAX_LCORE;
3270fe939687SNatalie Samsonov 	mrvl_lcore_last = 0;
3271fe939687SNatalie Samsonov 
3272fe939687SNatalie Samsonov init_devices:
3273fe939687SNatalie Samsonov 	for (i = 0; i < ifnum; i++) {
3274acab7d58STomasz Duszynski 		MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
3275fe939687SNatalie Samsonov 		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
3276fe939687SNatalie Samsonov 		if (ret)
3277fe939687SNatalie Samsonov 			goto out_cleanup;
3278696202caSLiron Himi 		mrvl_dev_num++;
3279fe939687SNatalie Samsonov 	}
3280fe939687SNatalie Samsonov 
3281fe939687SNatalie Samsonov 	rte_kvargs_free(kvlist);
3282fe939687SNatalie Samsonov 
3283fe939687SNatalie Samsonov 	return 0;
3284fe939687SNatalie Samsonov out_cleanup:
3285696202caSLiron Himi 	rte_pmd_mrvl_remove(vdev);
3286fe939687SNatalie Samsonov 
3287fe939687SNatalie Samsonov out_free_kvlist:
3288fe939687SNatalie Samsonov 	rte_kvargs_free(kvlist);
3289fe939687SNatalie Samsonov 
3290fe939687SNatalie Samsonov 	return ret;
3291fe939687SNatalie Samsonov }
3292fe939687SNatalie Samsonov 
3293fe939687SNatalie Samsonov /**
3294fe939687SNatalie Samsonov  * DPDK callback to remove virtual device.
3295fe939687SNatalie Samsonov  *
3296fe939687SNatalie Samsonov  * @param vdev
3297fe939687SNatalie Samsonov  *   Pointer to the removed virtual device.
3298fe939687SNatalie Samsonov  *
3299fe939687SNatalie Samsonov  * @return
3300fe939687SNatalie Samsonov  *   0 on success, negative error value otherwise.
3301fe939687SNatalie Samsonov  */
3302fe939687SNatalie Samsonov static int
3303fe939687SNatalie Samsonov rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
3304fe939687SNatalie Samsonov {
3305696202caSLiron Himi 	uint16_t port_id;
33068a5a0aadSThomas Monjalon 	int ret = 0;
3307fe939687SNatalie Samsonov 
3308696202caSLiron Himi 	RTE_ETH_FOREACH_DEV(port_id) {
3309696202caSLiron Himi 		if (rte_eth_devices[port_id].device != &vdev->device)
3310696202caSLiron Himi 			continue;
33118a5a0aadSThomas Monjalon 		ret |= rte_eth_dev_close(port_id);
3312fe939687SNatalie Samsonov 	}
3313fe939687SNatalie Samsonov 
33148a5a0aadSThomas Monjalon 	return ret == 0 ? 0 : -EIO;
3315fe939687SNatalie Samsonov }
3316fe939687SNatalie Samsonov 
3317fe939687SNatalie Samsonov static struct rte_vdev_driver pmd_mrvl_drv = {
3318fe939687SNatalie Samsonov 	.probe = rte_pmd_mrvl_probe,
3319fe939687SNatalie Samsonov 	.remove = rte_pmd_mrvl_remove,
3320fe939687SNatalie Samsonov };
3321fe939687SNatalie Samsonov 
3322fe939687SNatalie Samsonov RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
3323fe939687SNatalie Samsonov RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
3324eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(mrvl_logtype, NOTICE);
3325