xref: /dpdk/drivers/net/cnxk/cnxk_rep_ops.c (revision 51378092a131a30aadbd83bc39ab0cd783eac2e8)
121942175SHarman Kalra /* SPDX-License-Identifier: BSD-3-Clause
221942175SHarman Kalra  * Copyright(C) 2024 Marvell.
321942175SHarman Kalra  */
421942175SHarman Kalra 
521942175SHarman Kalra #include <cnxk_rep.h>
6b2315ae4SHarman Kalra #include <cnxk_rep_msg.h>
7b2315ae4SHarman Kalra 
/* Default sizing for representor vdev resources — presumably consumed by
 * queue/mempool setup elsewhere in this driver; not referenced in this
 * file (TODO confirm against cnxk_rep.c).
 */
#define MEMPOOL_CACHE_SIZE 256  /* per-lcore mempool cache depth */
#define TX_DESC_PER_QUEUE  512  /* default Tx descriptors per queue */
#define RX_DESC_PER_QUEUE  256  /* default Rx descriptors per queue */
#define NB_REP_VDEV_MBUF   1024 /* mbufs in the representor vdev pool */
12b2315ae4SHarman Kalra 
/* Names of the driver-specific xstats exposed by representor ports.
 * They map to the per-queue packet counters maintained in the Rx/Tx
 * burst handlers below (rxq->stats.pkts / txq->stats.pkts).
 */
static const struct rte_eth_xstat_name cnxk_rep_xstats_string[] = {
	{"rep_nb_rx"},
	{"rep_nb_tx"},
};
17dd40e7cfSHarman Kalra 
18b2315ae4SHarman Kalra static uint16_t
cnxk_rep_tx_burst(void * tx_queue,struct rte_mbuf ** tx_pkts,uint16_t nb_pkts)19b2315ae4SHarman Kalra cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
20b2315ae4SHarman Kalra {
21b2315ae4SHarman Kalra 	struct cnxk_rep_txq *txq = tx_queue;
22b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev;
23b2315ae4SHarman Kalra 	uint16_t n_tx;
24b2315ae4SHarman Kalra 
25b2315ae4SHarman Kalra 	if (unlikely(!txq))
26b2315ae4SHarman Kalra 		return 0;
27b2315ae4SHarman Kalra 
28b2315ae4SHarman Kalra 	rep_dev = txq->rep_dev;
29b2315ae4SHarman Kalra 	plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid);
30b2315ae4SHarman Kalra 	n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts,
31b2315ae4SHarman Kalra 					 NIX_TX_OFFLOAD_VLAN_QINQ_F);
32dd40e7cfSHarman Kalra 	txq->stats.pkts += n_tx;
33b2315ae4SHarman Kalra 	return n_tx;
34b2315ae4SHarman Kalra }
35b2315ae4SHarman Kalra 
36b2315ae4SHarman Kalra static uint16_t
cnxk_rep_rx_burst(void * rx_queue,struct rte_mbuf ** rx_pkts,uint16_t nb_pkts)37b2315ae4SHarman Kalra cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
38b2315ae4SHarman Kalra {
39b2315ae4SHarman Kalra 	struct cnxk_rep_rxq *rxq = rx_queue;
40b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev;
41b2315ae4SHarman Kalra 	uint16_t n_rx;
42b2315ae4SHarman Kalra 
43b2315ae4SHarman Kalra 	if (unlikely(!rxq))
44b2315ae4SHarman Kalra 		return 0;
45b2315ae4SHarman Kalra 
46b2315ae4SHarman Kalra 	rep_dev = rxq->rep_dev;
47b2315ae4SHarman Kalra 	n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts);
48b2315ae4SHarman Kalra 	if (n_rx == 0)
49b2315ae4SHarman Kalra 		return 0;
50b2315ae4SHarman Kalra 
51b2315ae4SHarman Kalra 	plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
52dd40e7cfSHarman Kalra 	rxq->stats.pkts += n_rx;
53b2315ae4SHarman Kalra 	return n_rx;
54b2315ae4SHarman Kalra }
55b2315ae4SHarman Kalra 
56b2315ae4SHarman Kalra uint16_t
cnxk_rep_tx_burst_dummy(void * tx_queue,struct rte_mbuf ** tx_pkts,uint16_t nb_pkts)57b2315ae4SHarman Kalra cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
58b2315ae4SHarman Kalra {
59b2315ae4SHarman Kalra 	PLT_SET_USED(tx_queue);
60b2315ae4SHarman Kalra 	PLT_SET_USED(tx_pkts);
61b2315ae4SHarman Kalra 	PLT_SET_USED(nb_pkts);
62b2315ae4SHarman Kalra 
63b2315ae4SHarman Kalra 	return 0;
64b2315ae4SHarman Kalra }
65b2315ae4SHarman Kalra 
66b2315ae4SHarman Kalra uint16_t
cnxk_rep_rx_burst_dummy(void * rx_queue,struct rte_mbuf ** rx_pkts,uint16_t nb_pkts)67b2315ae4SHarman Kalra cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
68b2315ae4SHarman Kalra {
69b2315ae4SHarman Kalra 	PLT_SET_USED(rx_queue);
70b2315ae4SHarman Kalra 	PLT_SET_USED(rx_pkts);
71b2315ae4SHarman Kalra 	PLT_SET_USED(nb_pkts);
72b2315ae4SHarman Kalra 
73b2315ae4SHarman Kalra 	return 0;
74b2315ae4SHarman Kalra }
7521942175SHarman Kalra 
7621942175SHarman Kalra int
cnxk_rep_link_update(struct rte_eth_dev * ethdev,int wait_to_complete)7721942175SHarman Kalra cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
7821942175SHarman Kalra {
79b2315ae4SHarman Kalra 	struct rte_eth_link link;
8021942175SHarman Kalra 	PLT_SET_USED(wait_to_complete);
81b2315ae4SHarman Kalra 
82b2315ae4SHarman Kalra 	memset(&link, 0, sizeof(link));
83b2315ae4SHarman Kalra 	if (ethdev->data->dev_started)
84b2315ae4SHarman Kalra 		link.link_status = RTE_ETH_LINK_UP;
85b2315ae4SHarman Kalra 	else
86b2315ae4SHarman Kalra 		link.link_status = RTE_ETH_LINK_DOWN;
87b2315ae4SHarman Kalra 
88b2315ae4SHarman Kalra 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
89b2315ae4SHarman Kalra 	link.link_autoneg = RTE_ETH_LINK_FIXED;
90b2315ae4SHarman Kalra 	link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
91b2315ae4SHarman Kalra 
92b2315ae4SHarman Kalra 	return rte_eth_linkstatus_set(ethdev, &link);
93b2315ae4SHarman Kalra }
94b2315ae4SHarman Kalra 
/* Fill in capabilities for a representor port. Limits are derived from
 * the parent eswitch NIX device; a representor exposes exactly one Rx
 * and one Tx queue.
 */
int
cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	uint32_t max_rx_pktlen;

	/* Parent NIX max frame size plus CRC, minus room reserved for
	 * VLAN tag insertion actions.
	 */
	max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN -
			 CNXK_NIX_MAX_VTAG_ACT_SIZE);

	dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
	dev_info->max_rx_pktlen = max_rx_pktlen;
	dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix);

	dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA;
	dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA;
	/* No per-queue-only offloads */
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	/* MTU specifics */
	dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
	dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

	/* Switch info specific */
	dev_info->switch_info.name = ethdev->device->name;
	dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
	dev_info->switch_info.port_id = rep_dev->port_id;

	return 0;
}
12821942175SHarman Kalra 
12921942175SHarman Kalra int
cnxk_rep_representor_info_get(struct rte_eth_dev * ethdev,struct rte_eth_representor_info * info)130b2315ae4SHarman Kalra cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info)
13121942175SHarman Kalra {
132b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
133b2315ae4SHarman Kalra 
134b2315ae4SHarman Kalra 	return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info);
135b2315ae4SHarman Kalra }
136b2315ae4SHarman Kalra 
/* Validate a requested ethdev configuration against representor
 * capabilities. Only the plain default configuration is accepted:
 * no fixed link speeds, no effective RSS, no Tx MQ mode, no loopback,
 * no DCB and no LSC/RxQ/removal interrupts.
 *
 * Every violation is logged and checking continues, so all problems
 * are reported in a single pass. Returns 0 on success or -EINVAL if
 * any check failed.
 */
static int
rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	if (conf->link_speeds != 0) {
		plt_err("specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case RTE_ETH_MQ_RX_RSS:
		if (nb_rx_queues != 1) {
			plt_err("Rx RSS is not supported with %u queues", nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		/* RSS mode is tolerated only when it is effectively a no-op:
		 * one queue and no key, key length or hash fields configured.
		 */
		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			plt_err("Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case RTE_ETH_MQ_RX_NONE:
		break;
	default:
		plt_err("Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
		plt_err("Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		plt_err("loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		plt_err("priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		plt_err("link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		plt_err("receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		plt_err("remove interrupt not supported");
		ret = -EINVAL;
	}

	return ret;
}
20321942175SHarman Kalra 
20421942175SHarman Kalra int
cnxk_rep_dev_configure(struct rte_eth_dev * ethdev)20521942175SHarman Kalra cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
20621942175SHarman Kalra {
207b2315ae4SHarman Kalra 	struct rte_eth_dev_data *ethdev_data = ethdev->data;
208b2315ae4SHarman Kalra 	int rc = -1;
209b2315ae4SHarman Kalra 
210b2315ae4SHarman Kalra 	rc = rep_eth_conf_chk(&ethdev_data->dev_conf, ethdev_data->nb_rx_queues);
211b2315ae4SHarman Kalra 	if (rc)
212b2315ae4SHarman Kalra 		goto fail;
213b2315ae4SHarman Kalra 
214b2315ae4SHarman Kalra 	return 0;
215b2315ae4SHarman Kalra fail:
216b2315ae4SHarman Kalra 	return rc;
217b2315ae4SHarman Kalra }
218b2315ae4SHarman Kalra 
/* Enabling promiscuous mode is a no-op on representor ports. */
int
cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
{
	(void)ethdev;

	return 0;
}
225b2315ae4SHarman Kalra 
/* Disabling promiscuous mode is a no-op on representor ports. */
int
cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
{
	(void)ethdev;

	return 0;
}
23221942175SHarman Kalra 
/* Start a representor port.
 *
 * Installs the real Rx/Tx burst handlers, then starts the eswitch Rx
 * and Tx queues backing this representor. The shared parent NIX
 * resources are started exactly once, when the first representor
 * becomes active. If no representee (VF) is active yet, only the burst
 * handlers are installed and the rest is deferred.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc = 0, qid;

	ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
	ethdev->tx_pkt_burst = cnxk_rep_tx_burst;

	/* Nothing more to start until a representee shows up */
	if (!rep_dev->is_vf_active)
		return 0;

	if (!rep_dev->rxq || !rep_dev->txq) {
		plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id);
		rc = -EINVAL;
		goto fail;
	}

	/* Start rx queues */
	qid = rep_dev->rxq->qid;
	rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid);
	if (rc) {
		plt_err("Failed to start rxq %d, rc=%d", qid, rc);
		goto fail;
	}

	/* Start tx queues  */
	qid = rep_dev->txq->qid;
	rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid);
	if (rc) {
		plt_err("Failed to start txq %d, rc=%d", qid, rc);
		goto fail;
	}

	/* Start rep_xport device only once after first representor gets active */
	if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) {
		rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev);
		if (rc) {
			plt_err("Failed to start nix dev, rc %d", rc);
			goto fail;
		}
	}

	/* Single queue pair per representor (see cnxk_rep_dev_info_get) */
	ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	rep_dev->parent_dev->repr_cnt.nb_repr_started++;

	return 0;
fail:
	return rc;
}
28521942175SHarman Kalra 
/* Close callback: tear the representor port down entirely. */
int
cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
{
	int rc;

	rc = cnxk_rep_dev_uninit(ethdev);

	return rc;
}
29121942175SHarman Kalra 
/* Stop a representor port: swap in the dummy burst handlers so no
 * further traffic flows, stop the single Rx/Tx queue pair and drop
 * this port from the parent's started-representor count.
 */
int
cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);

	ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
	ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
	cnxk_rep_rx_queue_stop(ethdev, 0);
	cnxk_rep_tx_queue_stop(ethdev, 0);
	rep_dev->parent_dev->repr_cnt.nb_repr_started--;

	return 0;
}
30521942175SHarman Kalra 
30621942175SHarman Kalra int
cnxk_rep_rx_queue_setup(struct rte_eth_dev * ethdev,uint16_t rx_queue_id,uint16_t nb_rx_desc,unsigned int socket_id,const struct rte_eth_rxconf * rx_conf,struct rte_mempool * mb_pool)30721942175SHarman Kalra cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
30821942175SHarman Kalra 			unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
30921942175SHarman Kalra 			struct rte_mempool *mb_pool)
31021942175SHarman Kalra {
311b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
312b2315ae4SHarman Kalra 	struct cnxk_rep_rxq *rxq = NULL;
313b2315ae4SHarman Kalra 	uint16_t qid = 0;
314b2315ae4SHarman Kalra 	int rc;
315b2315ae4SHarman Kalra 
31621942175SHarman Kalra 	PLT_SET_USED(socket_id);
317b2315ae4SHarman Kalra 	/* If no representee assigned, store the respective rxq parameters */
318b2315ae4SHarman Kalra 	if (!rep_dev->is_vf_active && !rep_dev->rxq) {
319b2315ae4SHarman Kalra 		rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE);
320b2315ae4SHarman Kalra 		if (!rxq) {
321b2315ae4SHarman Kalra 			rc = -ENOMEM;
322b2315ae4SHarman Kalra 			plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id);
323b2315ae4SHarman Kalra 			goto fail;
324b2315ae4SHarman Kalra 		}
325b2315ae4SHarman Kalra 
326b2315ae4SHarman Kalra 		rxq->qid = qid;
327b2315ae4SHarman Kalra 		rxq->nb_desc = nb_rx_desc;
328b2315ae4SHarman Kalra 		rxq->rep_dev = rep_dev;
329b2315ae4SHarman Kalra 		rxq->mpool = mb_pool;
330b2315ae4SHarman Kalra 		rxq->rx_conf = rx_conf;
331b2315ae4SHarman Kalra 		rep_dev->rxq = rxq;
332b2315ae4SHarman Kalra 		ethdev->data->rx_queues[rx_queue_id] = NULL;
333b2315ae4SHarman Kalra 
33421942175SHarman Kalra 		return 0;
33521942175SHarman Kalra 	}
33621942175SHarman Kalra 
337b2315ae4SHarman Kalra 	qid = rep_dev->rep_id;
338b2315ae4SHarman Kalra 	rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool);
339b2315ae4SHarman Kalra 	if (rc) {
340b2315ae4SHarman Kalra 		plt_err("failed to setup eswitch queue id %d", qid);
341b2315ae4SHarman Kalra 		goto fail;
342b2315ae4SHarman Kalra 	}
343b2315ae4SHarman Kalra 
344b2315ae4SHarman Kalra 	rxq = rep_dev->rxq;
345b2315ae4SHarman Kalra 	if (!rxq) {
346b2315ae4SHarman Kalra 		plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id,
347b2315ae4SHarman Kalra 			rep_dev->rep_id);
348b2315ae4SHarman Kalra 		goto free_queue;
349b2315ae4SHarman Kalra 	}
350b2315ae4SHarman Kalra 
351b2315ae4SHarman Kalra 	rxq->qid = qid;
352b2315ae4SHarman Kalra 	ethdev->data->rx_queues[rx_queue_id] = rxq;
353b2315ae4SHarman Kalra 	ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
354b2315ae4SHarman Kalra 	plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id,
355b2315ae4SHarman Kalra 		    ethdev->data->port_id, rxq->qid);
356b2315ae4SHarman Kalra 
357b2315ae4SHarman Kalra 	return 0;
358b2315ae4SHarman Kalra free_queue:
359b2315ae4SHarman Kalra 	cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid);
360b2315ae4SHarman Kalra fail:
361b2315ae4SHarman Kalra 	return rc;
362b2315ae4SHarman Kalra }
363b2315ae4SHarman Kalra 
364b2315ae4SHarman Kalra void
cnxk_rep_rx_queue_stop(struct rte_eth_dev * ethdev,uint16_t queue_id)365b2315ae4SHarman Kalra cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
366b2315ae4SHarman Kalra {
367b2315ae4SHarman Kalra 	struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
368b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
369b2315ae4SHarman Kalra 	int rc;
370b2315ae4SHarman Kalra 
371b2315ae4SHarman Kalra 	if (!rxq)
372b2315ae4SHarman Kalra 		return;
373b2315ae4SHarman Kalra 
374b2315ae4SHarman Kalra 	plt_rep_dbg("Stopping rxq %u", rxq->qid);
375b2315ae4SHarman Kalra 
376b2315ae4SHarman Kalra 	rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid);
377b2315ae4SHarman Kalra 	if (rc)
378b2315ae4SHarman Kalra 		plt_err("Failed to stop rxq %d, rc=%d", rc, rxq->qid);
379b2315ae4SHarman Kalra 
380b2315ae4SHarman Kalra 	ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
381b2315ae4SHarman Kalra }
382b2315ae4SHarman Kalra 
38321942175SHarman Kalra void
cnxk_rep_rx_queue_release(struct rte_eth_dev * ethdev,uint16_t queue_id)38421942175SHarman Kalra cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
38521942175SHarman Kalra {
386b2315ae4SHarman Kalra 	struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
387b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
388b2315ae4SHarman Kalra 	int rc;
389b2315ae4SHarman Kalra 
390b2315ae4SHarman Kalra 	if (!rxq) {
391b2315ae4SHarman Kalra 		plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id);
392b2315ae4SHarman Kalra 		return;
393b2315ae4SHarman Kalra 	}
394b2315ae4SHarman Kalra 
395b2315ae4SHarman Kalra 	plt_rep_dbg("Releasing rxq %u", rxq->qid);
396b2315ae4SHarman Kalra 
397b2315ae4SHarman Kalra 	rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid);
398b2315ae4SHarman Kalra 	if (rc)
399b2315ae4SHarman Kalra 		plt_err("Failed to release rxq %d, rc=%d", rc, rxq->qid);
40021942175SHarman Kalra }
40121942175SHarman Kalra 
40221942175SHarman Kalra int
cnxk_rep_tx_queue_setup(struct rte_eth_dev * ethdev,uint16_t tx_queue_id,uint16_t nb_tx_desc,unsigned int socket_id,const struct rte_eth_txconf * tx_conf)40321942175SHarman Kalra cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
40421942175SHarman Kalra 			unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
40521942175SHarman Kalra {
406b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
407b2315ae4SHarman Kalra 	struct cnxk_rep_txq *txq = NULL;
408b2315ae4SHarman Kalra 	int rc = 0, qid = 0;
409b2315ae4SHarman Kalra 
41021942175SHarman Kalra 	PLT_SET_USED(socket_id);
411b2315ae4SHarman Kalra 	/* If no representee assigned, store the respective rxq parameters */
412b2315ae4SHarman Kalra 	if (!rep_dev->is_vf_active && !rep_dev->txq) {
413b2315ae4SHarman Kalra 		txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE);
414b2315ae4SHarman Kalra 		if (!txq) {
415b2315ae4SHarman Kalra 			rc = -ENOMEM;
416b2315ae4SHarman Kalra 			plt_err("failed to alloc txq for rep id %d", rep_dev->rep_id);
417b2315ae4SHarman Kalra 			goto free_queue;
418b2315ae4SHarman Kalra 		}
419b2315ae4SHarman Kalra 
420b2315ae4SHarman Kalra 		txq->qid = qid;
421b2315ae4SHarman Kalra 		txq->nb_desc = nb_tx_desc;
422b2315ae4SHarman Kalra 		txq->tx_conf = tx_conf;
423b2315ae4SHarman Kalra 		txq->rep_dev = rep_dev;
424b2315ae4SHarman Kalra 		rep_dev->txq = txq;
425b2315ae4SHarman Kalra 
426b2315ae4SHarman Kalra 		ethdev->data->tx_queues[tx_queue_id] = NULL;
427b2315ae4SHarman Kalra 
42821942175SHarman Kalra 		return 0;
42921942175SHarman Kalra 	}
43021942175SHarman Kalra 
431b2315ae4SHarman Kalra 	qid = rep_dev->rep_id;
432b2315ae4SHarman Kalra 	rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf);
433b2315ae4SHarman Kalra 	if (rc) {
434b2315ae4SHarman Kalra 		plt_err("failed to setup eswitch queue id %d", qid);
435b2315ae4SHarman Kalra 		goto fail;
436b2315ae4SHarman Kalra 	}
437b2315ae4SHarman Kalra 
438b2315ae4SHarman Kalra 	txq = rep_dev->txq;
439b2315ae4SHarman Kalra 	if (!txq) {
440b2315ae4SHarman Kalra 		plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id,
441b2315ae4SHarman Kalra 			rep_dev->rep_id);
442b2315ae4SHarman Kalra 		goto free_queue;
443b2315ae4SHarman Kalra 	}
444b2315ae4SHarman Kalra 
445b2315ae4SHarman Kalra 	txq->qid = qid;
446b2315ae4SHarman Kalra 	ethdev->data->tx_queues[tx_queue_id] = txq;
447b2315ae4SHarman Kalra 	ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
448b2315ae4SHarman Kalra 	plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id,
449b2315ae4SHarman Kalra 		    ethdev->data->port_id, txq->qid);
450b2315ae4SHarman Kalra 
451b2315ae4SHarman Kalra 	return 0;
452b2315ae4SHarman Kalra free_queue:
453b2315ae4SHarman Kalra 	cnxk_eswitch_txq_release(rep_dev->parent_dev, qid);
454b2315ae4SHarman Kalra fail:
455b2315ae4SHarman Kalra 	return rc;
456b2315ae4SHarman Kalra }
457b2315ae4SHarman Kalra 
458b2315ae4SHarman Kalra void
cnxk_rep_tx_queue_stop(struct rte_eth_dev * ethdev,uint16_t queue_id)459b2315ae4SHarman Kalra cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
460b2315ae4SHarman Kalra {
461b2315ae4SHarman Kalra 	struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
462b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
463b2315ae4SHarman Kalra 	int rc;
464b2315ae4SHarman Kalra 
465b2315ae4SHarman Kalra 	if (!txq)
466b2315ae4SHarman Kalra 		return;
467b2315ae4SHarman Kalra 
468b2315ae4SHarman Kalra 	plt_rep_dbg("Releasing txq %u", txq->qid);
469b2315ae4SHarman Kalra 
470b2315ae4SHarman Kalra 	rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid);
471b2315ae4SHarman Kalra 	if (rc)
472b2315ae4SHarman Kalra 		plt_err("Failed to stop txq %d, rc=%d", rc, txq->qid);
473b2315ae4SHarman Kalra 
474b2315ae4SHarman Kalra 	ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
475b2315ae4SHarman Kalra }
476b2315ae4SHarman Kalra 
47721942175SHarman Kalra void
cnxk_rep_tx_queue_release(struct rte_eth_dev * ethdev,uint16_t queue_id)47821942175SHarman Kalra cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
47921942175SHarman Kalra {
480b2315ae4SHarman Kalra 	struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
481b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
482b2315ae4SHarman Kalra 	int rc;
483b2315ae4SHarman Kalra 
484b2315ae4SHarman Kalra 	if (!txq) {
485b2315ae4SHarman Kalra 		plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id);
486b2315ae4SHarman Kalra 		return;
487b2315ae4SHarman Kalra 	}
488b2315ae4SHarman Kalra 
489b2315ae4SHarman Kalra 	plt_rep_dbg("Releasing txq %u", txq->qid);
490b2315ae4SHarman Kalra 
491b2315ae4SHarman Kalra 	rc = cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid);
492b2315ae4SHarman Kalra 	if (rc)
493b2315ae4SHarman Kalra 		plt_err("Failed to release txq %d, rc=%d", rc, txq->qid);
49421942175SHarman Kalra }
49521942175SHarman Kalra 
496dd40e7cfSHarman Kalra static int
process_eth_stats(struct cnxk_rep_dev * rep_dev,cnxk_rep_msg_ack_data_t * adata,cnxk_rep_msg_t msg)497dd40e7cfSHarman Kalra process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
498dd40e7cfSHarman Kalra {
499dd40e7cfSHarman Kalra 	cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
500dd40e7cfSHarman Kalra 	uint32_t len = 0, rc;
501dd40e7cfSHarman Kalra 	void *buffer;
502dd40e7cfSHarman Kalra 	size_t size;
503dd40e7cfSHarman Kalra 
504dd40e7cfSHarman Kalra 	size = CNXK_REP_MSG_MAX_BUFFER_SZ;
505dd40e7cfSHarman Kalra 	buffer = plt_zmalloc(size, 0);
506dd40e7cfSHarman Kalra 	if (!buffer) {
507dd40e7cfSHarman Kalra 		plt_err("Failed to allocate mem");
508dd40e7cfSHarman Kalra 		rc = -ENOMEM;
509dd40e7cfSHarman Kalra 		goto fail;
510dd40e7cfSHarman Kalra 	}
511dd40e7cfSHarman Kalra 
512dd40e7cfSHarman Kalra 	cnxk_rep_msg_populate_header(buffer, &len);
513dd40e7cfSHarman Kalra 
514dd40e7cfSHarman Kalra 	msg_st_meta.portid = rep_dev->rep_id;
515dd40e7cfSHarman Kalra 	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
516dd40e7cfSHarman Kalra 					   sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
517dd40e7cfSHarman Kalra 	cnxk_rep_msg_populate_msg_end(buffer, &len);
518dd40e7cfSHarman Kalra 
519dd40e7cfSHarman Kalra 	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
520dd40e7cfSHarman Kalra 	if (rc) {
521dd40e7cfSHarman Kalra 		plt_err("Failed to process the message, err %d", rc);
522dd40e7cfSHarman Kalra 		goto fail;
523dd40e7cfSHarman Kalra 	}
524dd40e7cfSHarman Kalra 
525dd40e7cfSHarman Kalra 	rte_free(buffer);
526dd40e7cfSHarman Kalra 
527dd40e7cfSHarman Kalra 	return 0;
528dd40e7cfSHarman Kalra fail:
529dd40e7cfSHarman Kalra 	rte_free(buffer);
530dd40e7cfSHarman Kalra 	return rc;
531dd40e7cfSHarman Kalra }
532dd40e7cfSHarman Kalra 
533dd40e7cfSHarman Kalra static int
native_repte_eth_stats(struct cnxk_rep_dev * rep_dev,struct rte_eth_stats * stats)534dd40e7cfSHarman Kalra native_repte_eth_stats(struct cnxk_rep_dev *rep_dev, struct rte_eth_stats *stats)
535dd40e7cfSHarman Kalra {
536dd40e7cfSHarman Kalra 	struct roc_nix_stats nix_stats;
537dd40e7cfSHarman Kalra 	int rc = 0;
538dd40e7cfSHarman Kalra 
539dd40e7cfSHarman Kalra 	rc = roc_eswitch_nix_repte_stats(&rep_dev->parent_dev->nix, rep_dev->hw_func, &nix_stats);
540dd40e7cfSHarman Kalra 	if (rc) {
541dd40e7cfSHarman Kalra 		plt_err("Failed to get stats for representee %x, err %d", rep_dev->hw_func, rc);
542dd40e7cfSHarman Kalra 		goto fail;
543dd40e7cfSHarman Kalra 	}
544dd40e7cfSHarman Kalra 
545dd40e7cfSHarman Kalra 	memset(stats, 0, sizeof(struct rte_eth_stats));
546dd40e7cfSHarman Kalra 	stats->opackets = nix_stats.tx_ucast;
547dd40e7cfSHarman Kalra 	stats->opackets += nix_stats.tx_mcast;
548dd40e7cfSHarman Kalra 	stats->opackets += nix_stats.tx_bcast;
549dd40e7cfSHarman Kalra 	stats->oerrors = nix_stats.tx_drop;
550dd40e7cfSHarman Kalra 	stats->obytes = nix_stats.tx_octs;
551dd40e7cfSHarman Kalra 
552dd40e7cfSHarman Kalra 	stats->ipackets = nix_stats.rx_ucast;
553dd40e7cfSHarman Kalra 	stats->ipackets += nix_stats.rx_mcast;
554dd40e7cfSHarman Kalra 	stats->ipackets += nix_stats.rx_bcast;
555dd40e7cfSHarman Kalra 	stats->imissed = nix_stats.rx_drop;
556dd40e7cfSHarman Kalra 	stats->ibytes = nix_stats.rx_octs;
557dd40e7cfSHarman Kalra 	stats->ierrors = nix_stats.rx_err;
558dd40e7cfSHarman Kalra 
559dd40e7cfSHarman Kalra 	return 0;
560dd40e7cfSHarman Kalra fail:
561dd40e7cfSHarman Kalra 	return rc;
562dd40e7cfSHarman Kalra }
563dd40e7cfSHarman Kalra 
56421942175SHarman Kalra int
cnxk_rep_stats_get(struct rte_eth_dev * ethdev,struct rte_eth_stats * stats)56521942175SHarman Kalra cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
56621942175SHarman Kalra {
567dd40e7cfSHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
568dd40e7cfSHarman Kalra 	struct rte_eth_stats vf_stats;
569dd40e7cfSHarman Kalra 	cnxk_rep_msg_ack_data_t adata;
570dd40e7cfSHarman Kalra 	int rc;
571dd40e7cfSHarman Kalra 
572dd40e7cfSHarman Kalra 	/* If representor not representing any active VF, return 0 */
573dd40e7cfSHarman Kalra 	if (!rep_dev->is_vf_active)
57421942175SHarman Kalra 		return 0;
575dd40e7cfSHarman Kalra 
576dd40e7cfSHarman Kalra 	if (rep_dev->native_repte) {
577dd40e7cfSHarman Kalra 		/* For representees which are independent */
578dd40e7cfSHarman Kalra 		rc = native_repte_eth_stats(rep_dev, &vf_stats);
579dd40e7cfSHarman Kalra 		if (rc) {
580dd40e7cfSHarman Kalra 			plt_err("Failed to get stats for vf rep %x (hw_func %x), err %d",
581dd40e7cfSHarman Kalra 				rep_dev->port_id, rep_dev->hw_func, rc);
582dd40e7cfSHarman Kalra 			goto fail;
583dd40e7cfSHarman Kalra 		}
584dd40e7cfSHarman Kalra 	} else {
585dd40e7cfSHarman Kalra 		/* For representees which are part of companian app */
586dd40e7cfSHarman Kalra 		rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);
587dd40e7cfSHarman Kalra 		if (rc || adata.u.sval < 0) {
588dd40e7cfSHarman Kalra 			if (adata.u.sval < 0)
589dd40e7cfSHarman Kalra 				rc = adata.u.sval;
590dd40e7cfSHarman Kalra 
591dd40e7cfSHarman Kalra 			plt_err("Failed to get stats for vf rep %x, err %d", rep_dev->port_id, rc);
592dd40e7cfSHarman Kalra 		}
593dd40e7cfSHarman Kalra 
594dd40e7cfSHarman Kalra 		if (adata.size != sizeof(struct rte_eth_stats)) {
595dd40e7cfSHarman Kalra 			rc = -EINVAL;
596dd40e7cfSHarman Kalra 			plt_err("Incomplete stats received for vf rep %d", rep_dev->port_id);
597dd40e7cfSHarman Kalra 			goto fail;
598dd40e7cfSHarman Kalra 		}
599dd40e7cfSHarman Kalra 
600dd40e7cfSHarman Kalra 		rte_memcpy(&vf_stats, adata.u.data, adata.size);
601dd40e7cfSHarman Kalra 	}
602dd40e7cfSHarman Kalra 
603dd40e7cfSHarman Kalra 	stats->q_ipackets[0] = vf_stats.ipackets;
604dd40e7cfSHarman Kalra 	stats->q_ibytes[0] = vf_stats.ibytes;
605dd40e7cfSHarman Kalra 	stats->ipackets = vf_stats.ipackets;
606dd40e7cfSHarman Kalra 	stats->ibytes = vf_stats.ibytes;
607dd40e7cfSHarman Kalra 
608dd40e7cfSHarman Kalra 	stats->q_opackets[0] = vf_stats.opackets;
609dd40e7cfSHarman Kalra 	stats->q_obytes[0] = vf_stats.obytes;
610dd40e7cfSHarman Kalra 	stats->opackets = vf_stats.opackets;
611dd40e7cfSHarman Kalra 	stats->obytes = vf_stats.obytes;
612dd40e7cfSHarman Kalra 
613dd40e7cfSHarman Kalra 	plt_rep_dbg("Input packets %" PRId64 " Output packets %" PRId64 "", stats->ipackets,
614dd40e7cfSHarman Kalra 		    stats->opackets);
615dd40e7cfSHarman Kalra 
616dd40e7cfSHarman Kalra 	return 0;
617dd40e7cfSHarman Kalra fail:
618dd40e7cfSHarman Kalra 	return rc;
61921942175SHarman Kalra }
62021942175SHarman Kalra 
62121942175SHarman Kalra int
cnxk_rep_stats_reset(struct rte_eth_dev * ethdev)62221942175SHarman Kalra cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
62321942175SHarman Kalra {
624dd40e7cfSHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
625dd40e7cfSHarman Kalra 	cnxk_rep_msg_ack_data_t adata;
626dd40e7cfSHarman Kalra 	int rc = 0;
627dd40e7cfSHarman Kalra 
628dd40e7cfSHarman Kalra 	/* If representor not representing any active VF, return 0 */
629dd40e7cfSHarman Kalra 	if (!rep_dev->is_vf_active)
63021942175SHarman Kalra 		return 0;
631dd40e7cfSHarman Kalra 
632dd40e7cfSHarman Kalra 	if (rep_dev->native_repte)
633dd40e7cfSHarman Kalra 		return -ENOTSUP;
634dd40e7cfSHarman Kalra 
635dd40e7cfSHarman Kalra 	rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);
636dd40e7cfSHarman Kalra 	if (rc || adata.u.sval < 0) {
637dd40e7cfSHarman Kalra 		if (adata.u.sval < 0)
638dd40e7cfSHarman Kalra 			rc = adata.u.sval;
639dd40e7cfSHarman Kalra 
640dd40e7cfSHarman Kalra 		plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->port_id, rc);
641dd40e7cfSHarman Kalra 	}
642dd40e7cfSHarman Kalra 
643dd40e7cfSHarman Kalra 	return rc;
64421942175SHarman Kalra }
64521942175SHarman Kalra 
64621942175SHarman Kalra int
cnxk_rep_flow_ops_get(struct rte_eth_dev * ethdev,const struct rte_flow_ops ** ops)64721942175SHarman Kalra cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
64821942175SHarman Kalra {
64921942175SHarman Kalra 	PLT_SET_USED(ethdev);
650aebe8cf3SHarman Kalra 	*ops = &cnxk_rep_flow_ops;
651aebe8cf3SHarman Kalra 
65221942175SHarman Kalra 	return 0;
65321942175SHarman Kalra }
65421942175SHarman Kalra 
655b2315ae4SHarman Kalra int
cnxk_rep_mac_addr_set(struct rte_eth_dev * eth_dev,struct rte_ether_addr * addr)656b2315ae4SHarman Kalra cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
657b2315ae4SHarman Kalra {
658b2315ae4SHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
659b2315ae4SHarman Kalra 	cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
660b2315ae4SHarman Kalra 	cnxk_rep_msg_ack_data_t adata;
661b2315ae4SHarman Kalra 	uint32_t len = 0, rc;
662b2315ae4SHarman Kalra 	void *buffer;
663b2315ae4SHarman Kalra 	size_t size;
664b2315ae4SHarman Kalra 
665b2315ae4SHarman Kalra 	/* If representor not representing any VF, return 0 */
666b2315ae4SHarman Kalra 	if (!rep_dev->is_vf_active)
667b2315ae4SHarman Kalra 		return 0;
668b2315ae4SHarman Kalra 
669b2315ae4SHarman Kalra 	size = CNXK_REP_MSG_MAX_BUFFER_SZ;
670b2315ae4SHarman Kalra 	buffer = plt_zmalloc(size, 0);
671b2315ae4SHarman Kalra 	if (!buffer) {
672b2315ae4SHarman Kalra 		plt_err("Failed to allocate mem");
673b2315ae4SHarman Kalra 		rc = -ENOMEM;
674b2315ae4SHarman Kalra 		goto fail;
675b2315ae4SHarman Kalra 	}
676b2315ae4SHarman Kalra 
677b2315ae4SHarman Kalra 	cnxk_rep_msg_populate_header(buffer, &len);
678b2315ae4SHarman Kalra 
679b2315ae4SHarman Kalra 	msg_sm_meta.portid = rep_dev->rep_id;
680b2315ae4SHarman Kalra 	rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
681b2315ae4SHarman Kalra 	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
682b2315ae4SHarman Kalra 					   sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
683b2315ae4SHarman Kalra 					   CNXK_REP_MSG_ETH_SET_MAC);
684b2315ae4SHarman Kalra 	cnxk_rep_msg_populate_msg_end(buffer, &len);
685b2315ae4SHarman Kalra 
686b2315ae4SHarman Kalra 	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
687b2315ae4SHarman Kalra 	if (rc) {
688b2315ae4SHarman Kalra 		plt_err("Failed to process the message, err %d", rc);
689b2315ae4SHarman Kalra 		goto fail;
690b2315ae4SHarman Kalra 	}
691b2315ae4SHarman Kalra 
692b2315ae4SHarman Kalra 	if (adata.u.sval < 0) {
693b2315ae4SHarman Kalra 		rc = adata.u.sval;
694b2315ae4SHarman Kalra 		plt_err("Failed to set mac address, err %d", rc);
695b2315ae4SHarman Kalra 		goto fail;
696b2315ae4SHarman Kalra 	}
697b2315ae4SHarman Kalra 
698b2315ae4SHarman Kalra 	rte_free(buffer);
699b2315ae4SHarman Kalra 
700b2315ae4SHarman Kalra 	return 0;
701b2315ae4SHarman Kalra fail:
702b2315ae4SHarman Kalra 	rte_free(buffer);
703b2315ae4SHarman Kalra 	return rc;
704b2315ae4SHarman Kalra }
705b2315ae4SHarman Kalra 
706dd40e7cfSHarman Kalra int
cnxk_rep_xstats_get(struct rte_eth_dev * eth_dev,struct rte_eth_xstat * stats,unsigned int n)707dd40e7cfSHarman Kalra cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n)
708dd40e7cfSHarman Kalra {
709dd40e7cfSHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
710dd40e7cfSHarman Kalra 	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
711dd40e7cfSHarman Kalra 	int cnt = 0;
712dd40e7cfSHarman Kalra 
713dd40e7cfSHarman Kalra 	if (!rep_dev)
714dd40e7cfSHarman Kalra 		return -EINVAL;
715dd40e7cfSHarman Kalra 
716dd40e7cfSHarman Kalra 	if (n < num)
717dd40e7cfSHarman Kalra 		return num;
718dd40e7cfSHarman Kalra 
719dd40e7cfSHarman Kalra 	stats[cnt].id = cnt;
720dd40e7cfSHarman Kalra 	stats[cnt].value = rep_dev->rxq->stats.pkts;
721dd40e7cfSHarman Kalra 	cnt++;
722dd40e7cfSHarman Kalra 	stats[cnt].id = cnt;
723dd40e7cfSHarman Kalra 	stats[cnt].value = rep_dev->txq->stats.pkts;
724dd40e7cfSHarman Kalra 	cnt++;
725dd40e7cfSHarman Kalra 
726dd40e7cfSHarman Kalra 	return cnt;
727dd40e7cfSHarman Kalra }
728dd40e7cfSHarman Kalra 
729dd40e7cfSHarman Kalra int
cnxk_rep_xstats_reset(struct rte_eth_dev * eth_dev)730dd40e7cfSHarman Kalra cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev)
731dd40e7cfSHarman Kalra {
732dd40e7cfSHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
733dd40e7cfSHarman Kalra 	int rc;
734dd40e7cfSHarman Kalra 
735dd40e7cfSHarman Kalra 	if (!rep_dev)
736dd40e7cfSHarman Kalra 		return -EINVAL;
737dd40e7cfSHarman Kalra 
738dd40e7cfSHarman Kalra 	rc = cnxk_rep_stats_reset(eth_dev);
739dd40e7cfSHarman Kalra 	if (rc < 0 && rc != -ENOTSUP)
740dd40e7cfSHarman Kalra 		return rc;
741dd40e7cfSHarman Kalra 
742dd40e7cfSHarman Kalra 	rep_dev->rxq->stats.pkts = 0;
743dd40e7cfSHarman Kalra 	rep_dev->txq->stats.pkts = 0;
744dd40e7cfSHarman Kalra 
745dd40e7cfSHarman Kalra 	return 0;
746dd40e7cfSHarman Kalra }
747dd40e7cfSHarman Kalra 
748dd40e7cfSHarman Kalra int
cnxk_rep_xstats_get_names(__rte_unused struct rte_eth_dev * eth_dev,struct rte_eth_xstat_name * xstats_names,unsigned int n)749dd40e7cfSHarman Kalra cnxk_rep_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev,
750dd40e7cfSHarman Kalra 			  struct rte_eth_xstat_name *xstats_names, unsigned int n)
751dd40e7cfSHarman Kalra {
752dd40e7cfSHarman Kalra 	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
753dd40e7cfSHarman Kalra 	unsigned int i;
754dd40e7cfSHarman Kalra 
755dd40e7cfSHarman Kalra 	if (xstats_names == NULL)
756dd40e7cfSHarman Kalra 		return num;
757dd40e7cfSHarman Kalra 
758dd40e7cfSHarman Kalra 	if (n < num)
759dd40e7cfSHarman Kalra 		return num;
760dd40e7cfSHarman Kalra 
761dd40e7cfSHarman Kalra 	for (i = 0; i < num; i++)
762dd40e7cfSHarman Kalra 		rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[i].name,
763dd40e7cfSHarman Kalra 			    sizeof(xstats_names[i].name));
764dd40e7cfSHarman Kalra 
765dd40e7cfSHarman Kalra 	return num;
766dd40e7cfSHarman Kalra }
767dd40e7cfSHarman Kalra 
768dd40e7cfSHarman Kalra int
cnxk_rep_xstats_get_by_id(struct rte_eth_dev * eth_dev,const uint64_t * ids,uint64_t * values,unsigned int n)769dd40e7cfSHarman Kalra cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values,
770dd40e7cfSHarman Kalra 			  unsigned int n)
771dd40e7cfSHarman Kalra {
772dd40e7cfSHarman Kalra 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
773dd40e7cfSHarman Kalra 	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
774dd40e7cfSHarman Kalra 	unsigned int i;
775dd40e7cfSHarman Kalra 
776dd40e7cfSHarman Kalra 	if (!rep_dev)
777dd40e7cfSHarman Kalra 		return -EINVAL;
778dd40e7cfSHarman Kalra 
779dd40e7cfSHarman Kalra 	if (n < num)
780dd40e7cfSHarman Kalra 		return num;
781dd40e7cfSHarman Kalra 
782dd40e7cfSHarman Kalra 	if (n > num)
783dd40e7cfSHarman Kalra 		return -EINVAL;
784dd40e7cfSHarman Kalra 
785dd40e7cfSHarman Kalra 	for (i = 0; i < n; i++) {
786dd40e7cfSHarman Kalra 		switch (ids[i]) {
787dd40e7cfSHarman Kalra 		case 0:
788dd40e7cfSHarman Kalra 			values[i] = rep_dev->rxq->stats.pkts;
789dd40e7cfSHarman Kalra 			break;
790dd40e7cfSHarman Kalra 		case 1:
791dd40e7cfSHarman Kalra 			values[i] = rep_dev->txq->stats.pkts;
792dd40e7cfSHarman Kalra 			break;
793dd40e7cfSHarman Kalra 		default:
794dd40e7cfSHarman Kalra 			return -EINVAL;
795dd40e7cfSHarman Kalra 		}
796dd40e7cfSHarman Kalra 	}
797dd40e7cfSHarman Kalra 
798dd40e7cfSHarman Kalra 	return n;
799dd40e7cfSHarman Kalra }
800dd40e7cfSHarman Kalra 
801dd40e7cfSHarman Kalra int
cnxk_rep_xstats_get_names_by_id(__rte_unused struct rte_eth_dev * eth_dev,const uint64_t * ids,struct rte_eth_xstat_name * xstats_names,unsigned int n)802dd40e7cfSHarman Kalra cnxk_rep_xstats_get_names_by_id(__rte_unused struct rte_eth_dev *eth_dev, const uint64_t *ids,
803dd40e7cfSHarman Kalra 				struct rte_eth_xstat_name *xstats_names, unsigned int n)
804dd40e7cfSHarman Kalra {
805dd40e7cfSHarman Kalra 	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
806dd40e7cfSHarman Kalra 	unsigned int i;
807dd40e7cfSHarman Kalra 
808dd40e7cfSHarman Kalra 	if (n < num)
809dd40e7cfSHarman Kalra 		return num;
810dd40e7cfSHarman Kalra 
811dd40e7cfSHarman Kalra 	if (n > num)
812dd40e7cfSHarman Kalra 		return -EINVAL;
813dd40e7cfSHarman Kalra 
814dd40e7cfSHarman Kalra 	for (i = 0; i < n; i++) {
815dd40e7cfSHarman Kalra 		if (ids[i] >= num)
816dd40e7cfSHarman Kalra 			return -EINVAL;
817dd40e7cfSHarman Kalra 		rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[ids[i]].name,
818dd40e7cfSHarman Kalra 			    sizeof(xstats_names[i].name));
819dd40e7cfSHarman Kalra 	}
820dd40e7cfSHarman Kalra 
821dd40e7cfSHarman Kalra 	return n;
822dd40e7cfSHarman Kalra }
823dd40e7cfSHarman Kalra 
824*51378092SAnkur Dwivedi int
cnxk_rep_mtu_set(struct rte_eth_dev * eth_dev,uint16_t mtu)825*51378092SAnkur Dwivedi cnxk_rep_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
826*51378092SAnkur Dwivedi {
827*51378092SAnkur Dwivedi 	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
828*51378092SAnkur Dwivedi 	uint32_t frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
829*51378092SAnkur Dwivedi 	int rc = -EINVAL;
830*51378092SAnkur Dwivedi 
831*51378092SAnkur Dwivedi 	/* Check if MTU is within the allowed range */
832*51378092SAnkur Dwivedi 	if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
833*51378092SAnkur Dwivedi 		plt_err("MTU is lesser than minimum");
834*51378092SAnkur Dwivedi 		goto exit;
835*51378092SAnkur Dwivedi 	}
836*51378092SAnkur Dwivedi 
837*51378092SAnkur Dwivedi 	if ((frame_size - RTE_ETHER_CRC_LEN) >
838*51378092SAnkur Dwivedi 	    ((uint32_t)roc_nix_max_pkt_len(&rep_dev->parent_dev->nix))) {
839*51378092SAnkur Dwivedi 		plt_err("MTU is greater than maximum");
840*51378092SAnkur Dwivedi 		goto exit;
841*51378092SAnkur Dwivedi 	}
842*51378092SAnkur Dwivedi 
843*51378092SAnkur Dwivedi 	frame_size -= RTE_ETHER_CRC_LEN;
844*51378092SAnkur Dwivedi 
845*51378092SAnkur Dwivedi 	/* Set frame size on Rx */
846*51378092SAnkur Dwivedi 	rc = roc_nix_mac_max_rx_len_set(&rep_dev->parent_dev->nix, frame_size);
847*51378092SAnkur Dwivedi 	if (rc) {
848*51378092SAnkur Dwivedi 		plt_err("Failed to max Rx frame length, rc=%d", rc);
849*51378092SAnkur Dwivedi 		goto exit;
850*51378092SAnkur Dwivedi 	}
851*51378092SAnkur Dwivedi exit:
852*51378092SAnkur Dwivedi 	return rc;
853*51378092SAnkur Dwivedi }
854*51378092SAnkur Dwivedi 
85521942175SHarman Kalra /* CNXK platform representor dev ops */
85621942175SHarman Kalra struct eth_dev_ops cnxk_rep_dev_ops = {
85721942175SHarman Kalra 	.dev_infos_get = cnxk_rep_dev_info_get,
858b2315ae4SHarman Kalra 	.representor_info_get = cnxk_rep_representor_info_get,
85921942175SHarman Kalra 	.dev_configure = cnxk_rep_dev_configure,
86021942175SHarman Kalra 	.dev_start = cnxk_rep_dev_start,
86121942175SHarman Kalra 	.rx_queue_setup = cnxk_rep_rx_queue_setup,
86221942175SHarman Kalra 	.rx_queue_release = cnxk_rep_rx_queue_release,
86321942175SHarman Kalra 	.tx_queue_setup = cnxk_rep_tx_queue_setup,
86421942175SHarman Kalra 	.tx_queue_release = cnxk_rep_tx_queue_release,
865b2315ae4SHarman Kalra 	.promiscuous_enable   = cnxk_rep_promiscuous_enable,
866b2315ae4SHarman Kalra 	.promiscuous_disable   = cnxk_rep_promiscuous_disable,
867b2315ae4SHarman Kalra 	.mac_addr_set = cnxk_rep_mac_addr_set,
86821942175SHarman Kalra 	.link_update = cnxk_rep_link_update,
86921942175SHarman Kalra 	.dev_close = cnxk_rep_dev_close,
87021942175SHarman Kalra 	.dev_stop = cnxk_rep_dev_stop,
87121942175SHarman Kalra 	.stats_get = cnxk_rep_stats_get,
87221942175SHarman Kalra 	.stats_reset = cnxk_rep_stats_reset,
873dd40e7cfSHarman Kalra 	.flow_ops_get = cnxk_rep_flow_ops_get,
874dd40e7cfSHarman Kalra 	.xstats_get = cnxk_rep_xstats_get,
875dd40e7cfSHarman Kalra 	.xstats_reset = cnxk_rep_xstats_reset,
876dd40e7cfSHarman Kalra 	.xstats_get_names = cnxk_rep_xstats_get_names,
877dd40e7cfSHarman Kalra 	.xstats_get_by_id = cnxk_rep_xstats_get_by_id,
878*51378092SAnkur Dwivedi 	.xstats_get_names_by_id = cnxk_rep_xstats_get_names_by_id,
879*51378092SAnkur Dwivedi 	.mtu_set = cnxk_rep_mtu_set
88021942175SHarman Kalra };
881