xref: /dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c (revision da7e701151ea8b742d4c38ace3e4fefd1b4507fc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <inttypes.h>
6 #include <ethdev_pci.h>
7 
8 #include "otx_ep_common.h"
9 #include "otx_ep_vf.h"
10 #include "otx2_ep_vf.h"
11 #include "cnxk_ep_vf.h"
12 #include "otx_ep_rxtx.h"
13 #include "otx_ep_mbox.h"
14 
15 #define OTX_EP_DEV(_eth_dev) \
16 	((struct otx_ep_device *)(_eth_dev)->data->dev_private)
17 
18 static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
19 	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
20 	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
21 	.nb_align	= OTX_EP_RXD_ALIGN,
22 };
23 
24 static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
25 	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
26 	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
27 	.nb_align	= OTX_EP_TXD_ALIGN,
28 };
29 
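/* Select the Tx burst handler for the chip generation and the configured
 * Tx offloads; if the port is already started, also update the fast-path
 * ops table so the new handler takes effect immediately.
 */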
30 static void
31 otx_ep_set_tx_func(struct rte_eth_dev *eth_dev)
32 {
33 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
34 
35 	if (otx_epvf->chip_gen == OTX_EP_CN10XX || otx_epvf->chip_gen == OTX_EP_CN9XX) {
36 		eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts;
37 		if (otx_epvf->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
38 			eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts_mseg;
39 	} else {
40 		eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
41 	}
42 
43 	if (eth_dev->data->dev_started)
44 		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
45 			eth_dev->tx_pkt_burst;
46 }
47 
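/* Select the Rx burst handler for the chip generation and the SCATTER
 * offload; if the port is already started, also update the fast-path
 * ops table so the new handler takes effect immediately.
 */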
48 static void
49 otx_ep_set_rx_func(struct rte_eth_dev *eth_dev)
50 {
51 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
52 
53 	if (otx_epvf->chip_gen == OTX_EP_CN10XX) {
54 		eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts;
55 		if (otx_epvf->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
56 			eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts_mseg;
57 	} else if (otx_epvf->chip_gen == OTX_EP_CN9XX) {
58 		eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts;
59 		if (otx_epvf->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
60 			eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts_mseg;
61 	} else {
62 		eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
63 	}
64 
65 	if (eth_dev->data->dev_started)
66 		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
67 			eth_dev->rx_pkt_burst;
68 }
69 
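/* Report device capabilities; the maximum Rx packet length is queried
 * from the PF over the mailbox.
 */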
70 static int
71 otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
72 		    struct rte_eth_dev_info *devinfo)
73 {
74 	struct otx_ep_device *otx_epvf;
75 	int max_rx_pktlen;
76 
77 	otx_epvf = OTX_EP_DEV(eth_dev);
78 
79 	max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
80 	if (!max_rx_pktlen) {
81 		otx_ep_err("Failed to get Max Rx packet length\n");
82 		return -EINVAL;
83 	}
84 
85 	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
86 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
87 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
88 
89 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
90 	devinfo->max_rx_pktlen = max_rx_pktlen;
91 	devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
92 	devinfo->min_mtu = RTE_ETHER_MIN_LEN;
93 	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
94 	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
95 
96 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
97 
98 	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
99 	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
100 
101 	devinfo->default_rxportconf.ring_size = OTX_EP_MIN_OQ_DESCRIPTORS;
102 	devinfo->default_txportconf.ring_size = OTX_EP_MIN_IQ_DESCRIPTORS;
103 
104 	return 0;
105 }
106 
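/* Query link status from the PF over the mailbox and report it to ethdev. */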
107 static int
108 otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
109 {
110 	struct rte_eth_link link;
111 	int ret = 0;
112 
113 	RTE_SET_USED(wait_to_complete);
114 	if (!eth_dev->data->dev_started)
115 		return 0;
116 
117 	memset(&link, 0, sizeof(link));
118 	ret = otx_ep_mbox_get_link_info(eth_dev, &link);
119 	if (ret)
120 		return -EINVAL;
121 	otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
122 		    link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
123 	return rte_eth_linkstatus_set(eth_dev, &link);
124 }
125 
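/* Validate the requested MTU against the device limits and program it
 * through the PF mailbox.
 */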
126 static int
127 otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
128 {
129 	struct rte_eth_dev_info devinfo;
130 	int32_t ret = 0;
131 
132 	if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
133 		otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
134 		return -EPERM;
135 	}
136 
137 	/* Check if MTU is within the allowed range */
138 	if (mtu < devinfo.min_mtu) {
139 		otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
140 		return -EINVAL;
141 	}
142 
143 	if (mtu > devinfo.max_mtu) {
144 		otx_ep_err("Invalid MTU %u: higher than maximum MTU %u", mtu, devinfo.max_mtu);
145 		return -EINVAL;
146 	}
147 
148 	ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
149 	if (ret)
150 		return -EINVAL;
151 
152 	otx_ep_dbg("MTU is set to %u", mtu);
153 
154 	return 0;
155 }
156 
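/* Program the default MAC address through the PF mailbox and mirror it
 * in the ethdev MAC address table.
 */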
157 static int
158 otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
159 				struct rte_ether_addr *mac_addr)
160 {
161 	int ret;
162 
163 	ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
164 	if (ret)
165 		return -EINVAL;
166 	otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
167 		    RTE_ETHER_ADDR_BYTES(mac_addr));
168 	rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
169 	return 0;
170 }
171 
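/* Start the device: enable the IQ/OQ pairs, credit each OQ doorbell with
 * its full ring size, refresh link status and install the burst handlers.
 */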
172 static int
173 otx_ep_dev_start(struct rte_eth_dev *eth_dev)
174 {
175 	struct otx_ep_device *otx_epvf;
176 	unsigned int q;
177 	int ret;
178 
179 	otx_epvf = OTX_EP_DEV(eth_dev);
180 	/* Enable IQ/OQ for this device */
181 	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
182 	if (ret) {
183 		otx_ep_err("IOQ enable failed\n");
184 		return ret;
185 	}
186 
187 	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
188 		rte_write32(otx_epvf->droq[q]->nb_desc,
189 			    otx_epvf->droq[q]->pkts_credit_reg);
190 
191 		rte_wmb();
192 		otx_ep_info("OQ[%d] dbells [%d]\n", q,
193 			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
194 	}
195 
196 	otx_ep_dev_link_update(eth_dev, 0);
197 
198 	otx_ep_set_tx_func(eth_dev);
199 	otx_ep_set_rx_func(eth_dev);
200 
201 	otx_ep_info("dev started\n");
202 
203 	for (q = 0; q < eth_dev->data->nb_rx_queues; q++)
204 		eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED;
205 	for (q = 0; q < eth_dev->data->nb_tx_queues; q++)
206 		eth_dev->data->tx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED;
207 
208 	return 0;
209 }
210 
211 /* Stop device and disable input/output functions */
212 static int
213 otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
214 {
215 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
216 	uint16_t i;
217 
218 	otx_epvf->fn_list.disable_io_queues(otx_epvf);
219 
220 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
221 		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
222 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
223 		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
224 
225 	return 0;
226 }
227 
228 /*
229  * We only need 2 uint32_t locations per IOQ, but separate these so
230  * each IOQ has the variables on its own cache line.
231  */
232 #define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
233 static int
234 otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
235 {
236 	otx_epvf->ism_buffer_mz =
237 		rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
238 					 0, OTX_EP_ISM_BUFFER_SIZE,
239 					 OTX_EP_PCI_RING_ALIGN, 0);
240 
241 	if (otx_epvf->ism_buffer_mz == NULL) {
242 		otx_ep_err("Failed to allocate ISM buffer\n");
243 		return -1;
244 	}
245 	/* Same DMA buffer is shared by OQ and IQ, clear it at start */
246 	memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
247 	otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
248 		    (void *)otx_epvf->ism_buffer_mz->addr,
249 		    otx_epvf->ism_buffer_mz->iova);
250 
251 	return 0;
252 }
253 
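/* Per-chip setup selected by PCI device ID; CN9K/CN10K parts additionally
 * reserve the ISM status buffer.
 */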
254 static int
255 otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
256 {
257 	struct rte_pci_device *pdev = otx_epvf->pdev;
258 	uint32_t dev_id = pdev->id.device_id;
259 	int ret = 0;
260 
261 	switch (dev_id) {
262 	case PCI_DEVID_OCTEONTX_EP_VF:
263 		otx_epvf->chip_id = dev_id;
264 		ret = otx_ep_vf_setup_device(otx_epvf);
265 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
266 		break;
267 	case PCI_DEVID_CN9K_EP_NET_VF:
268 	case PCI_DEVID_CN98XX_EP_NET_VF:
269 	case PCI_DEVID_CNF95N_EP_NET_VF:
270 	case PCI_DEVID_CNF95O_EP_NET_VF:
271 		otx_epvf->chip_id = dev_id;
272 		ret = otx2_ep_vf_setup_device(otx_epvf);
273 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
274 		if (otx_ep_ism_setup(otx_epvf))
275 			ret = -EINVAL;
276 		break;
277 	case PCI_DEVID_CN10KA_EP_NET_VF:
278 	case PCI_DEVID_CN10KB_EP_NET_VF:
279 	case PCI_DEVID_CNF10KA_EP_NET_VF:
280 	case PCI_DEVID_CNF10KB_EP_NET_VF:
281 		otx_epvf->chip_id = dev_id;
282 		ret = cnxk_ep_vf_setup_device(otx_epvf);
283 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
284 		if (otx_ep_ism_setup(otx_epvf))
285 			ret = -EINVAL;
286 		break;
287 	default:
288 		otx_ep_err("Unsupported device\n");
289 		ret = -EINVAL;
290 	}
291 
292 	if (!ret)
293 		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);
294 
295 	return ret;
296 }
297 
298 /* OTX_EP VF device initialization */
299 static int
300 otx_epdev_init(struct otx_ep_device *otx_epvf)
301 {
302 	uint32_t ethdev_queues;
303 	int ret = 0;
304 
305 	ret = otx_ep_chip_specific_setup(otx_epvf);
306 	if (ret) {
307 		otx_ep_err("Chip specific setup failed\n");
308 		goto setup_fail;
309 	}
310 
311 	otx_epvf->fn_list.setup_device_regs(otx_epvf);
312 
313 	otx_epvf->eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts;
314 	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
315 	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
316 		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
317 		otx_epvf->chip_gen = OTX_EP_CN8XX;
318 	} else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
319 		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
320 		 otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
321 		 otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF) {
322 		otx_epvf->eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts;
323 		otx_epvf->chip_gen = OTX_EP_CN9XX;
324 	} else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
325 		   otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
326 		   otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
327 		   otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
328 		otx_epvf->eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts;
329 		otx_epvf->chip_gen = OTX_EP_CN10XX;
330 	} else {
331 		otx_ep_err("Invalid chip_id\n");
332 		ret = -EINVAL;
333 		goto setup_fail;
334 	}
335 	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
336 	otx_epvf->max_rx_queues = ethdev_queues;
337 	otx_epvf->max_tx_queues = ethdev_queues;
338 
339 	otx_ep_info("OTX_EP Device is Ready\n");
340 
341 setup_fail:
342 	return ret;
343 }
344 
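/* Validate the requested queue counts and record the configured Rx/Tx
 * offloads for later burst-handler selection.
 */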
345 static int
346 otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
347 {
348 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
349 	struct rte_eth_dev_data *data = eth_dev->data;
350 	struct rte_eth_rxmode *rxmode;
351 	struct rte_eth_txmode *txmode;
352 	struct rte_eth_conf *conf;
353 
354 	conf = &data->dev_conf;
355 	rxmode = &conf->rxmode;
356 	txmode = &conf->txmode;
357 	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
358 	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
359 		otx_ep_err("Invalid number of Rx/Tx queues\n");
360 		return -EINVAL;
361 	}
362 	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
363 		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);
364 
365 	otx_epvf->rx_offloads = rxmode->offloads;
366 	otx_epvf->tx_offloads = txmode->offloads;
367 
368 	return 0;
369 }
370 
371 /**
372  * Setup our receive queue/ringbuffer. This is the
373  * queue the Octeon uses to send us packets and
374  * responses. We are given a memory pool for our
375  * packet buffers that are used to populate the receive
376  * queue.
377  *
378  * @param eth_dev
379  *    Pointer to the structure rte_eth_dev
380  * @param q_no
381  *    Queue number
382  * @param num_rx_descs
383  *    Number of entries in the queue
384  * @param socket_id
385  *    Where to allocate memory
386  * @param rx_conf
387  *    Pointer to the structure rte_eth_rxconf
388  * @param mp
389  *    Pointer to the packet pool
390  *
391  * @return
392  *    - On success, return 0
393  *    - On failure, return a negative value
394  */
395 static int
396 otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
397 		       uint16_t num_rx_descs, unsigned int socket_id,
398 		       const struct rte_eth_rxconf *rx_conf __rte_unused,
399 		       struct rte_mempool *mp)
400 {
401 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
402 	struct rte_pktmbuf_pool_private *mbp_priv;
403 	uint16_t buf_size;
404 
405 	if (q_no >= otx_epvf->max_rx_queues) {
406 		otx_ep_err("Invalid rx queue number %u\n", q_no);
407 		return -EINVAL;
408 	}
409 
410 	if (num_rx_descs & (num_rx_descs - 1)) {
411 		otx_ep_err("Invalid rx desc number %u: must be a power of 2\n",
412 			   num_rx_descs);
413 		return -EINVAL;
414 	}
415 	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
416 		otx_ep_err("Invalid rx desc number %u: must be at least 8 * wmark (%u)\n",
417 			   num_rx_descs, (SDP_GBL_WMARK * 8));
418 		return -EINVAL;
419 	}
420 
421 	otx_ep_dbg("setting up rx queue %u\n", q_no);
422 
423 	mbp_priv = rte_mempool_get_priv(mp);
424 	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
425 
426 	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
427 			     socket_id)) {
428 		otx_ep_err("droq allocation failed\n");
429 		return -1;
430 	}
431 
432 	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];
433 
434 	return 0;
435 }
436 
437 /**
438  * Release the receive queue/ringbuffer. Called by
439  * the upper layers.
440  *
441  * @param dev
442  *   Pointer to Ethernet device structure.
443  * @param q_no
444  *   Receive queue index.
445  *
446  * @return
447  *    - nothing
448  */
449 static void
450 otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
451 {
452 	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
453 	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
454 	int q_id = rq->q_no;
455 
456 	if (otx_ep_delete_oqs(otx_epvf, q_id))
457 		otx_ep_err("Failed to delete OQ:%d\n", q_id);
458 }
459 
460 /**
461  * Allocate and initialize SW ring. Initialize associated HW registers.
462  *
463  * @param eth_dev
464  *   Pointer to structure rte_eth_dev
465  *
466  * @param q_no
467  *   Queue number
468  *
469  * @param num_tx_descs
470  *   Number of ringbuffer descriptors
471  *
472  * @param socket_id
473  *   NUMA socket id, used for memory allocations
474  *
475  * @param tx_conf
476  *   Pointer to the structure rte_eth_txconf
477  *
478  * @return
479  *   - On success, return 0
480  *   - On failure, return -errno value
481  */
482 static int
483 otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
484 		       uint16_t num_tx_descs, unsigned int socket_id,
485 		       const struct rte_eth_txconf *tx_conf __rte_unused)
486 {
487 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
488 	int retval;
489 
490 	if (q_no >= otx_epvf->max_tx_queues) {
491 		otx_ep_err("Invalid tx queue number %u\n", q_no);
492 		return -EINVAL;
493 	}
494 	if (num_tx_descs & (num_tx_descs - 1)) {
495 		otx_ep_err("Invalid tx desc number %u: must be a power of 2\n",
496 			   num_tx_descs);
497 		return -EINVAL;
498 	}
499 	if (num_tx_descs < (SDP_GBL_WMARK * 8)) {
500 		otx_ep_err("Invalid tx desc number %u: must be at least 8 * wmark (%u)\n",
501 			   num_tx_descs, (SDP_GBL_WMARK * 8));
502 		return -EINVAL;
503 	}
504 
505 	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
506 
507 	if (retval) {
508 		otx_ep_err("IQ(TxQ) creation failed.\n");
509 		return retval;
510 	}
511 
512 	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
513 	otx_ep_dbg("tx queue[%d] setup\n", q_no);
514 	return 0;
515 }
516 
517 /**
518  * Release the transmit queue/ringbuffer. Called by
519  * the upper layers.
520  *
521  * @param dev
522  *    Pointer to Ethernet device structure.
523  * @param q_no
524  *    Transmit queue index.
525  *
526  * @return
527  *    - nothing
528  */
529 static void
530 otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
531 {
532 	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];
533 
534 	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
535 }
536 
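/* Clear the per-queue software statistics. */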
537 static int
538 otx_ep_dev_stats_reset(struct rte_eth_dev *dev)
539 {
540 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
541 	uint32_t i;
542 
543 	for (i = 0; i < otx_epvf->nb_tx_queues; i++)
544 		memset(&otx_epvf->instr_queue[i]->stats, 0,
545 		       sizeof(struct otx_ep_iq_stats));
546 
547 	for (i = 0; i < otx_epvf->nb_rx_queues; i++)
548 		memset(&otx_epvf->droq[i]->stats, 0,
549 		       sizeof(struct otx_ep_droq_stats));
550 
551 	return 0;
552 }
553 
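/* Aggregate the per-queue software statistics into the ethdev stats. */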
554 static int
555 otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
556 				struct rte_eth_stats *stats)
557 {
558 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
559 	struct otx_ep_iq_stats *ostats;
560 	struct otx_ep_droq_stats *istats;
561 	uint32_t i;
562 
563 	memset(stats, 0, sizeof(struct rte_eth_stats));
564 
565 	for (i = 0; i < otx_epvf->nb_tx_queues; i++) {
566 		ostats = &otx_epvf->instr_queue[i]->stats;
567 		stats->q_opackets[i] = ostats->tx_pkts;
568 		stats->q_obytes[i] = ostats->tx_bytes;
569 		stats->opackets += ostats->tx_pkts;
570 		stats->obytes += ostats->tx_bytes;
571 		stats->oerrors += ostats->instr_dropped;
572 	}
573 	for (i = 0; i < otx_epvf->nb_rx_queues; i++) {
574 		istats = &otx_epvf->droq[i]->stats;
575 		stats->q_ipackets[i] = istats->pkts_received;
576 		stats->q_ibytes[i] = istats->bytes_received;
577 		stats->q_errors[i] = istats->rx_err;
578 		stats->ipackets += istats->pkts_received;
579 		stats->ibytes += istats->bytes_received;
580 		stats->imissed += istats->rx_alloc_failure;
581 		stats->ierrors += istats->rx_err;
582 		stats->rx_nombuf += istats->rx_alloc_failure;
583 	}
584 	return 0;
585 }
586 
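/* Notify the PF that the VF is exiting, disable the IO queues and release
 * all OQ/IQ resources along with the ISM DMA zone.
 */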
587 static int
588 otx_ep_dev_close(struct rte_eth_dev *eth_dev)
589 {
590 	struct otx_ep_device *otx_epvf;
591 	uint32_t num_queues, q_no;
592 
593 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
594 		eth_dev->dev_ops = NULL;
595 		eth_dev->rx_pkt_burst = NULL;
596 		eth_dev->tx_pkt_burst = NULL;
597 		return 0;
598 	}
599 
600 	otx_epvf = OTX_EP_DEV(eth_dev);
601 	otx_ep_mbox_send_dev_exit(eth_dev);
602 	otx_epvf->fn_list.disable_io_queues(otx_epvf);
603 	num_queues = otx_epvf->nb_rx_queues;
604 	for (q_no = 0; q_no < num_queues; q_no++) {
605 		if (otx_ep_delete_oqs(otx_epvf, q_no)) {
606 			otx_ep_err("Failed to delete OQ:%d\n", q_no);
607 			return -EINVAL;
608 		}
609 	}
610 	otx_ep_dbg("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);
611 
612 	num_queues = otx_epvf->nb_tx_queues;
613 	for (q_no = 0; q_no < num_queues; q_no++) {
614 		if (otx_ep_delete_iqs(otx_epvf, q_no)) {
615 			otx_ep_err("Failed to delete IQ:%d\n", q_no);
616 			return -EINVAL;
617 		}
618 	}
619 	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
620 
621 	if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
622 		otx_ep_err("Failed to delete ISM buffer\n");
623 		return -EINVAL;
624 	}
625 
626 	return 0;
627 }
628 
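/* Fetch the MAC address assigned to this VF from the PF over the mailbox. */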
629 static int
630 otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
631 			struct rte_ether_addr *mac_addr)
632 {
633 	int ret;
634 
635 	ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
636 	if (ret)
637 		return -EINVAL;
638 	otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
639 		    RTE_ETHER_ADDR_BYTES(mac_addr));
640 	return 0;
641 }
642 
643 /* Ethernet device operations */
644 static const struct eth_dev_ops otx_ep_eth_dev_ops = {
645 	.dev_configure		= otx_ep_dev_configure,
646 	.dev_start		= otx_ep_dev_start,
647 	.dev_stop		= otx_ep_dev_stop,
648 	.rx_queue_setup	        = otx_ep_rx_queue_setup,
649 	.rx_queue_release	= otx_ep_rx_queue_release,
650 	.tx_queue_setup	        = otx_ep_tx_queue_setup,
651 	.tx_queue_release	= otx_ep_tx_queue_release,
652 	.dev_infos_get		= otx_ep_dev_info_get,
653 	.stats_get		= otx_ep_dev_stats_get,
654 	.stats_reset		= otx_ep_dev_stats_reset,
655 	.link_update		= otx_ep_dev_link_update,
656 	.dev_close		= otx_ep_dev_close,
657 	.mtu_set		= otx_ep_dev_mtu_set,
658 	.mac_addr_set           = otx_ep_dev_set_default_mac_addr,
659 };
660 
661 static int
662 otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
663 {
664 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
665 		eth_dev->dev_ops = NULL;
666 		eth_dev->rx_pkt_burst = NULL;
667 		eth_dev->tx_pkt_burst = NULL;
668 		return 0;
669 	}
670 
671 	eth_dev->dev_ops = NULL;
672 	eth_dev->rx_pkt_burst = NULL;
673 	eth_dev->tx_pkt_burst = NULL;
674 
675 	return 0;
676 }
677 
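/* Query the VF MAC address from the PF; if the PF does not provide a valid
 * assigned address, generate a random one and program it back.
 */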
678 static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
679 					   struct rte_ether_addr *mac_addr)
680 {
681 	int ret_val;
682 
683 	memset(mac_addr, 0, sizeof(struct rte_ether_addr));
684 	ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
685 	if (!ret_val) {
686 		if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
687 			otx_ep_dbg("PF doesn't have valid VF MAC addr " RTE_ETHER_ADDR_PRT_FMT "\n",
688 				    RTE_ETHER_ADDR_BYTES(mac_addr));
689 			rte_eth_random_addr(mac_addr->addr_bytes);
690 			otx_ep_dbg("Setting random MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
691 				    RTE_ETHER_ADDR_BYTES(mac_addr));
692 			ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
693 			if (ret_val) {
694 				otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT " fails\n",
695 					    RTE_ETHER_ADDR_BYTES(mac_addr));
696 				return ret_val;
697 			}
698 		}
699 		otx_ep_dbg("Received valid MAC addr from PF " RTE_ETHER_ADDR_PRT_FMT "\n",
700 			    RTE_ETHER_ADDR_BYTES(mac_addr));
701 	} else {
702 		otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
703 			    ret_val);
704 		return ret_val;
705 	}
706 	return 0;
707 }
708 
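/* Per-device initialization: set up ethdev ops, perform chip-specific
 * setup, negotiate the mailbox version and establish the VF MAC address.
 */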
709 static int
710 otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
711 {
712 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
713 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
714 	struct rte_ether_addr vf_mac_addr;
715 
716 	/* Single process support */
717 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
718 		eth_dev->dev_ops = &otx_ep_eth_dev_ops;
719 		otx_ep_set_tx_func(eth_dev);
720 		otx_ep_set_rx_func(eth_dev);
721 		return 0;
722 	}
723 
724 	rte_eth_copy_pci_info(eth_dev, pdev);
725 	otx_epvf->eth_dev = eth_dev;
726 	otx_epvf->port_id = eth_dev->data->port_id;
727 	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
728 	rte_spinlock_init(&otx_epvf->mbox_lock);
729 
730 	/*
731 	 * Initialize negotiated Mbox version to base version of VF Mbox
732 	 * This will address working legacy PF with latest VF.
733 	 */
734 	otx_epvf->mbox_neg_ver = OTX_EP_MBOX_VERSION_V1;
735 	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
736 	if (eth_dev->data->mac_addrs == NULL) {
737 		otx_ep_err("MAC addresses memory allocation failed\n");
738 		eth_dev->dev_ops = NULL;
739 		return -ENOMEM;
740 	}
741 	rte_eth_random_addr(vf_mac_addr.addr_bytes);
742 	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
743 	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
744 	otx_epvf->pdev = pdev;
745 
746 	if (otx_epdev_init(otx_epvf))
747 		return -ENOMEM;
748 	if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
749 	    otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
750 	    otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
751 	    otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
752 	    otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
753 	    otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
754 	    otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
755 	    otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
756 		otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
757 		otx_ep_info("Using pkind %d\n", otx_epvf->pkind);
758 	} else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
759 		otx_epvf->pkind = SDP_PKIND;
760 		otx_ep_info("Using pkind %d\n", otx_epvf->pkind);
761 	} else {
762 		otx_ep_err("Invalid chip id\n");
763 		return -EINVAL;
764 	}
765 
766 	if (otx_ep_mbox_version_check(eth_dev))
767 		return -EINVAL;
768 
769 	if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
770 					    &vf_mac_addr)) {
771 		otx_ep_err("Failed to set VF MAC address\n");
772 		return -ENODEV;
773 	}
774 	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
775 
776 	return 0;
777 }
778 
779 static int
780 otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
781 		      struct rte_pci_device *pci_dev)
782 {
783 	return rte_eth_dev_pci_generic_probe(pci_dev,
784 					     sizeof(struct otx_ep_device),
785 					     otx_ep_eth_dev_init);
786 }
787 
788 static int
789 otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
790 {
791 	return rte_eth_dev_pci_generic_remove(pci_dev,
792 					      otx_ep_eth_dev_uninit);
793 }
794 
795 /* Set of PCI devices this driver supports */
796 static const struct rte_pci_id pci_id_otx_ep_map[] = {
797 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
798 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
799 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
800 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
801 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
802 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KA_EP_NET_VF) },
803 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_EP_NET_VF) },
804 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KA_EP_NET_VF) },
805 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KB_EP_NET_VF) },
806 	{ .vendor_id = 0, /* sentinel */ }
807 };
808 
809 static struct rte_pci_driver rte_otx_ep_pmd = {
810 	.id_table	= pci_id_otx_ep_map,
811 	.drv_flags      = RTE_PCI_DRV_NEED_MAPPING,
812 	.probe		= otx_ep_eth_dev_pci_probe,
813 	.remove		= otx_ep_eth_dev_pci_remove,
814 };
815 
816 RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
817 RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
818 RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
819 RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);
820