/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <cnxk_rep.h>
#include <cnxk_rep_msg.h>

#define MEMPOOL_CACHE_SIZE 256
#define TX_DESC_PER_QUEUE  512
#define RX_DESC_PER_QUEUE  256
#define NB_REP_VDEV_MBUF   1024

static const struct rte_eth_xstat_name cnxk_rep_xstats_string[] = {
	{"rep_nb_rx"},
	{"rep_nb_tx"},
};

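/* Representor Tx burst: forward packets to the parent eswitch device's SQ
 * mapped to this representor queue, with VLAN/QinQ Tx offload flags, and
 * update the per-queue packet count.
 */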
static uint16_t
cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct cnxk_rep_txq *txq = tx_queue;
	struct cnxk_rep_dev *rep_dev;
	uint16_t n_tx;

	if (unlikely(!txq))
		return 0;

	rep_dev = txq->rep_dev;
	plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid);
	n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts,
					 NIX_TX_OFFLOAD_VLAN_QINQ_F);
	txq->stats.pkts += n_tx;
	return n_tx;
}

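/* Representor Rx burst: receive packets from the parent eswitch device's RQ
 * mapped to this representor queue and update the per-queue packet count.
 */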
static uint16_t
cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct cnxk_rep_rxq *rxq = rx_queue;
	struct cnxk_rep_dev *rep_dev;
	uint16_t n_rx;

	if (unlikely(!rxq))
		return 0;

	rep_dev = rxq->rep_dev;
	n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts);
	if (n_rx == 0)
		return 0;

	plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
	rxq->stats.pkts += n_rx;
	return n_rx;
}

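/* Dummy burst handlers installed while the representor port is not started,
 * so that stray Rx/Tx calls are safe no-ops.
 */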
uint16_t
cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	PLT_SET_USED(tx_queue);
	PLT_SET_USED(tx_pkts);
	PLT_SET_USED(nb_pkts);

	return 0;
}

uint16_t
cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PLT_SET_USED(rx_queue);
	PLT_SET_USED(rx_pkts);
	PLT_SET_USED(nb_pkts);

	return 0;
}

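/* Link status simply mirrors whether the port has been started; speed is
 * reported as unknown and duplex/autoneg as fixed full-duplex.
 */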
int
cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
	struct rte_eth_link link;
	PLT_SET_USED(wait_to_complete);

	memset(&link, 0, sizeof(link));
	if (ethdev->data->dev_started)
		link.link_status = RTE_ETH_LINK_UP;
	else
		link.link_status = RTE_ETH_LINK_DOWN;

	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_FIXED;
	link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;

	return rte_eth_linkstatus_set(ethdev, &link);
}

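/* Fill device info from the parent eswitch NIX: Rx/Tx offload capabilities,
 * frame and MTU limits, and switch (domain/port) identification.
 */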
int
cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	uint32_t max_rx_pktlen;

	max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN -
			 CNXK_NIX_MAX_VTAG_ACT_SIZE);

	dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
	dev_info->max_rx_pktlen = max_rx_pktlen;
	dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix);

	dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA;
	dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	/* MTU specifics */
	dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
	dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

	/* Switch info specific */
	dev_info->switch_info.name = ethdev->device->name;
	dev_info->switch_info.domain_id = rep_dev->switch_domain_id;
	dev_info->switch_info.port_id = rep_dev->port_id;

	return 0;
}

int
cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);

	return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info);
}

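/* Validate the requested ethdev configuration. Representor ports support only
 * a minimal feature set, so link speed selection, RSS configuration, non-default
 * MQ modes, loopback, DCB and interrupt modes are all rejected.
 */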
static int
rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	if (conf->link_speeds != 0) {
		plt_err("specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case RTE_ETH_MQ_RX_RSS:
		if (nb_rx_queues != 1) {
			plt_err("Rx RSS is not supported with %u queues", nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			plt_err("Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case RTE_ETH_MQ_RX_NONE:
		break;
	default:
		plt_err("Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
		plt_err("Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		plt_err("loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		plt_err("priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		plt_err("link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		plt_err("receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		plt_err("remove interrupt not supported");
		ret = -EINVAL;
	}

	return ret;
}

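/* Device configure only validates the requested configuration; queue and
 * eswitch resources are set up later in queue setup and dev_start.
 */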
int
cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
{
	struct rte_eth_dev_data *ethdev_data = ethdev->data;
	int rc = -1;

	rc = rep_eth_conf_chk(&ethdev_data->dev_conf, ethdev_data->nb_rx_queues);
	if (rc)
		goto fail;

	return 0;
fail:
	return rc;
}

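/* Promiscuous mode enable/disable are accepted as no-ops on representor ports. */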
int
cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)
{
	PLT_SET_USED(ethdev);
	return 0;
}

int
cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)
{
	PLT_SET_USED(ethdev);
	return 0;
}

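/* Start the representor: install the real burst handlers, start the eswitch
 * Rx/Tx queues backing this representor and, when the first representor
 * becomes active, start the parent NIX resources.
 */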
int
cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc = 0, qid;

	ethdev->rx_pkt_burst = cnxk_rep_rx_burst;
	ethdev->tx_pkt_burst = cnxk_rep_tx_burst;

	if (!rep_dev->is_vf_active)
		return 0;

	if (!rep_dev->rxq || !rep_dev->txq) {
		plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id);
		rc = -EINVAL;
		goto fail;
	}

	/* Start rx queues */
	qid = rep_dev->rxq->qid;
	rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid);
	if (rc) {
		plt_err("Failed to start rxq %d, rc=%d", qid, rc);
		goto fail;
	}

	/* Start tx queues */
	qid = rep_dev->txq->qid;
	rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid);
	if (rc) {
		plt_err("Failed to start txq %d, rc=%d", qid, rc);
		goto fail;
	}

	/* Start rep_xport device only once after first representor gets active */
	if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) {
		rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev);
		if (rc) {
			plt_err("Failed to start nix dev, rc %d", rc);
			goto fail;
		}
	}

	ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	rep_dev->parent_dev->repr_cnt.nb_repr_started++;

	return 0;
fail:
	return rc;
}

int
cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
{
	return cnxk_rep_dev_uninit(ethdev);
}

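/* Stop the representor: switch back to the dummy burst handlers, stop the
 * backing eswitch queues and drop the started-representor count on the parent.
 */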
int
cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);

	ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;
	ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;
	cnxk_rep_rx_queue_stop(ethdev, 0);
	cnxk_rep_tx_queue_stop(ethdev, 0);
	rep_dev->parent_dev->repr_cnt.nb_repr_started--;

	return 0;
}

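/* Set up the representor Rx queue. If no representee is active yet, only the
 * queue parameters are cached; otherwise the corresponding eswitch RQ is
 * configured and exposed through ethdev.
 */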
int
cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
			unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	struct cnxk_rep_rxq *rxq = NULL;
	uint16_t qid = 0;
	int rc;

	PLT_SET_USED(socket_id);
	/* If no representee assigned, store the respective rxq parameters */
	if (!rep_dev->is_vf_active && !rep_dev->rxq) {
		rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE);
		if (!rxq) {
			rc = -ENOMEM;
			plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id);
			goto fail;
		}

		rxq->qid = qid;
		rxq->nb_desc = nb_rx_desc;
		rxq->rep_dev = rep_dev;
		rxq->mpool = mb_pool;
		rxq->rx_conf = rx_conf;
		rep_dev->rxq = rxq;
		ethdev->data->rx_queues[rx_queue_id] = NULL;

		return 0;
	}

	qid = rep_dev->rep_id;
	rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool);
	if (rc) {
		plt_err("Failed to setup eswitch queue id %d", qid);
		goto fail;
	}

	rxq = rep_dev->rxq;
	if (!rxq) {
		plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id,
			rep_dev->rep_id);
		rc = -EINVAL;
		goto free_queue;
	}

	rxq->qid = qid;
	ethdev->data->rx_queues[rx_queue_id] = rxq;
	ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id,
		    ethdev->data->port_id, rxq->qid);

	return 0;
free_queue:
	cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid);
fail:
	return rc;
}

void
cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
	struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc;

	if (!rxq)
		return;

	plt_rep_dbg("Stopping rxq %u", rxq->qid);

	rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid);
	if (rc)
		plt_err("Failed to stop rxq %d, rc=%d", rxq->qid, rc);

	ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}

void
cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
	struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id];
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc;

	if (!rxq) {
		plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id);
		return;
	}

	plt_rep_dbg("Releasing rxq %u", rxq->qid);

	rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid);
	if (rc)
		plt_err("Failed to release rxq %d, rc=%d", rxq->qid, rc);
}

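/* Set up the representor Tx queue, mirroring the Rx queue setup: cache the
 * parameters when no representee is active, otherwise configure the eswitch SQ.
 */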
int
cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
			unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	struct cnxk_rep_txq *txq = NULL;
	int rc = 0, qid = 0;

	PLT_SET_USED(socket_id);
	/* If no representee assigned, store the respective txq parameters */
	if (!rep_dev->is_vf_active && !rep_dev->txq) {
		txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE);
		if (!txq) {
			rc = -ENOMEM;
			plt_err("Failed to alloc txq for rep id %d", rep_dev->rep_id);
			goto fail;
		}

		txq->qid = qid;
		txq->nb_desc = nb_tx_desc;
		txq->tx_conf = tx_conf;
		txq->rep_dev = rep_dev;
		rep_dev->txq = txq;

		ethdev->data->tx_queues[tx_queue_id] = NULL;

		return 0;
	}

	qid = rep_dev->rep_id;
	rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf);
	if (rc) {
		plt_err("Failed to setup eswitch queue id %d", qid);
		goto fail;
	}

	txq = rep_dev->txq;
	if (!txq) {
		plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id,
			rep_dev->rep_id);
		rc = -EINVAL;
		goto free_queue;
	}

	txq->qid = qid;
	ethdev->data->tx_queues[tx_queue_id] = txq;
	ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id,
		    ethdev->data->port_id, txq->qid);

	return 0;
free_queue:
	cnxk_eswitch_txq_release(rep_dev->parent_dev, qid);
fail:
	return rc;
}

void
cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
	struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc;

	if (!txq)
		return;

	plt_rep_dbg("Stopping txq %u", txq->qid);

	rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid);
	if (rc)
		plt_err("Failed to stop txq %d, rc=%d", txq->qid, rc);

	ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}

void
cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
{
	struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id];
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	int rc;

	if (!txq) {
		plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id);
		return;
	}

	plt_rep_dbg("Releasing txq %u", txq->qid);

	rc = cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid);
	if (rc)
		plt_err("Failed to release txq %d, rc=%d", txq->qid, rc);
}

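/* Build a stats request/clear message for the representee and exchange it with
 * the companion application over the representor control channel.
 */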
static int
process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
{
	cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
	uint32_t len = 0;
	void *buffer;
	size_t size;
	int rc;

	size = CNXK_REP_MSG_MAX_BUFFER_SZ;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	msg_st_meta.portid = rep_dev->rep_id;
	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
					   sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	rte_free(buffer);

	return 0;
fail:
	rte_free(buffer);
	return rc;
}

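/* Fetch hardware NIX stats for an independent (native) representee directly
 * via the eswitch and translate them into rte_eth_stats.
 */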
static int
native_repte_eth_stats(struct cnxk_rep_dev *rep_dev, struct rte_eth_stats *stats)
{
	struct roc_nix_stats nix_stats;
	int rc = 0;

	rc = roc_eswitch_nix_repte_stats(&rep_dev->parent_dev->nix, rep_dev->hw_func, &nix_stats);
	if (rc) {
		plt_err("Failed to get stats for representee %x, err %d", rep_dev->hw_func, rc);
		goto fail;
	}

	memset(stats, 0, sizeof(struct rte_eth_stats));
	stats->opackets = nix_stats.tx_ucast;
	stats->opackets += nix_stats.tx_mcast;
	stats->opackets += nix_stats.tx_bcast;
	stats->oerrors = nix_stats.tx_drop;
	stats->obytes = nix_stats.tx_octs;

	stats->ipackets = nix_stats.rx_ucast;
	stats->ipackets += nix_stats.rx_mcast;
	stats->ipackets += nix_stats.rx_bcast;
	stats->imissed = nix_stats.rx_drop;
	stats->ibytes = nix_stats.rx_octs;
	stats->ierrors = nix_stats.rx_err;

	return 0;
fail:
	return rc;
}

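/* Representor stats: fetched directly from hardware for native representees
 * or via the companion application message channel otherwise, then mapped onto
 * queue 0 of the representor port.
 */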
int
cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	struct rte_eth_stats vf_stats;
	cnxk_rep_msg_ack_data_t adata;
	int rc;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active)
		return 0;

	if (rep_dev->native_repte) {
		/* For representees which are independent */
		rc = native_repte_eth_stats(rep_dev, &vf_stats);
		if (rc) {
			plt_err("Failed to get stats for vf rep %x (hw_func %x), err %d",
				rep_dev->port_id, rep_dev->hw_func, rc);
			goto fail;
		}
	} else {
		/* For representees which are part of companion app */
		rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);
		if (rc || adata.u.sval < 0) {
			if (adata.u.sval < 0)
				rc = adata.u.sval;

			plt_err("Failed to get stats for vf rep %x, err %d", rep_dev->port_id, rc);
			goto fail;
		}

		if (adata.size != sizeof(struct rte_eth_stats)) {
			rc = -EINVAL;
			plt_err("Incomplete stats received for vf rep %d", rep_dev->port_id);
			goto fail;
		}

		rte_memcpy(&vf_stats, adata.u.data, adata.size);
	}

	stats->q_ipackets[0] = vf_stats.ipackets;
	stats->q_ibytes[0] = vf_stats.ibytes;
	stats->ipackets = vf_stats.ipackets;
	stats->ibytes = vf_stats.ibytes;

	stats->q_opackets[0] = vf_stats.opackets;
	stats->q_obytes[0] = vf_stats.obytes;
	stats->opackets = vf_stats.opackets;
	stats->obytes = vf_stats.obytes;

	plt_rep_dbg("Input packets %" PRId64 " Output packets %" PRId64 "", stats->ipackets,
		    stats->opackets);

	return 0;
fail:
	return rc;
}

int
cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
	cnxk_rep_msg_ack_data_t adata;
	int rc = 0;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active)
		return 0;

	if (rep_dev->native_repte)
		return -ENOTSUP;

	rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);
	if (rc || adata.u.sval < 0) {
		if (adata.u.sval < 0)
			rc = adata.u.sval;

		plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->port_id, rc);
	}

	return rc;
}

int
cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
{
	PLT_SET_USED(ethdev);
	*ops = &cnxk_rep_flow_ops;

	return 0;
}

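/* Set the representee MAC address by sending a CNXK_REP_MSG_ETH_SET_MAC
 * request to the companion application.
 */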
int
cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;
	cnxk_rep_msg_ack_data_t adata;
	uint32_t len = 0;
	void *buffer;
	size_t size;
	int rc;

	/* If representor not representing any VF, return 0 */
	if (!rep_dev->is_vf_active)
		return 0;

	size = CNXK_REP_MSG_MAX_BUFFER_SZ;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	msg_sm_meta.portid = rep_dev->rep_id;
	rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,
					   sizeof(cnxk_rep_msg_eth_set_mac_meta_t),
					   CNXK_REP_MSG_ETH_SET_MAC);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	if (adata.u.sval < 0) {
		rc = adata.u.sval;
		plt_err("Failed to set mac address, err %d", rc);
		goto fail;
	}

	rte_free(buffer);

	return 0;
fail:
	rte_free(buffer);
	return rc;
}

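/* Extended stats expose the per-representor Rx/Tx packet counters declared in
 * cnxk_rep_xstats_string; the *_by_id and *_names variants follow the same
 * two-entry layout.
 */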
int
cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
	int cnt = 0;

	if (!rep_dev)
		return -EINVAL;

	if (n < num)
		return num;

	stats[cnt].id = cnt;
	stats[cnt].value = rep_dev->rxq->stats.pkts;
	cnt++;
	stats[cnt].id = cnt;
	stats[cnt].value = rep_dev->txq->stats.pkts;
	cnt++;

	return cnt;
}

int
cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	int rc;

	if (!rep_dev)
		return -EINVAL;

	rc = cnxk_rep_stats_reset(eth_dev);
	if (rc < 0 && rc != -ENOTSUP)
		return rc;

	rep_dev->rxq->stats.pkts = 0;
	rep_dev->txq->stats.pkts = 0;

	return 0;
}

int
cnxk_rep_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev,
			  struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
	unsigned int i;

	if (xstats_names == NULL)
		return num;

	if (n < num)
		return num;

	for (i = 0; i < num; i++)
		rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[i].name,
			    sizeof(xstats_names[i].name));

	return num;
}

int
cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values,
			  unsigned int n)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
	unsigned int i;

	if (!rep_dev)
		return -EINVAL;

	if (n < num)
		return num;

	if (n > num)
		return -EINVAL;

	for (i = 0; i < n; i++) {
		switch (ids[i]) {
		case 0:
			values[i] = rep_dev->rxq->stats.pkts;
			break;
		case 1:
			values[i] = rep_dev->txq->stats.pkts;
			break;
		default:
			return -EINVAL;
		}
	}

	return n;
}

int
cnxk_rep_xstats_get_names_by_id(__rte_unused struct rte_eth_dev *eth_dev, const uint64_t *ids,
				struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
	unsigned int i;

	if (n < num)
		return num;

	if (n > num)
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (ids[i] >= num)
			return -EINVAL;
		rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[ids[i]].name,
			    sizeof(xstats_names[i].name));
	}

	return n;
}

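/* Update MTU by programming the max Rx frame length on the parent NIX after
 * validating it against the hardware minimum and maximum frame sizes.
 */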
int
cnxk_rep_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	uint32_t frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
	int rc = -EINVAL;

	/* Check if MTU is within the allowed range */
	if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
		plt_err("MTU is less than minimum");
		goto exit;
	}

	if ((frame_size - RTE_ETHER_CRC_LEN) >
	    ((uint32_t)roc_nix_max_pkt_len(&rep_dev->parent_dev->nix))) {
		plt_err("MTU is greater than maximum");
		goto exit;
	}

	frame_size -= RTE_ETHER_CRC_LEN;

	/* Set frame size on Rx */
	rc = roc_nix_mac_max_rx_len_set(&rep_dev->parent_dev->nix, frame_size);
	if (rc) {
		plt_err("Failed to set max Rx frame length, rc=%d", rc);
		goto exit;
	}
exit:
	return rc;
}

/* CNXK platform representor dev ops */
struct eth_dev_ops cnxk_rep_dev_ops = {
	.dev_infos_get = cnxk_rep_dev_info_get,
	.representor_info_get = cnxk_rep_representor_info_get,
	.dev_configure = cnxk_rep_dev_configure,
	.dev_start = cnxk_rep_dev_start,
	.rx_queue_setup = cnxk_rep_rx_queue_setup,
	.rx_queue_release = cnxk_rep_rx_queue_release,
	.tx_queue_setup = cnxk_rep_tx_queue_setup,
	.tx_queue_release = cnxk_rep_tx_queue_release,
	.promiscuous_enable = cnxk_rep_promiscuous_enable,
	.promiscuous_disable = cnxk_rep_promiscuous_disable,
	.mac_addr_set = cnxk_rep_mac_addr_set,
	.link_update = cnxk_rep_link_update,
	.dev_close = cnxk_rep_dev_close,
	.dev_stop = cnxk_rep_dev_stop,
	.stats_get = cnxk_rep_stats_get,
	.stats_reset = cnxk_rep_stats_reset,
	.flow_ops_get = cnxk_rep_flow_ops_get,
	.xstats_get = cnxk_rep_xstats_get,
	.xstats_reset = cnxk_rep_xstats_reset,
	.xstats_get_names = cnxk_rep_xstats_get_names,
	.xstats_get_by_id = cnxk_rep_xstats_get_by_id,
	.xstats_get_names_by_id = cnxk_rep_xstats_get_names_by_id,
	.mtu_set = cnxk_rep_mtu_set
};