1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include <inttypes.h>
6 #include <ethdev_pci.h>
7 
8 #include "otx_ep_common.h"
9 #include "otx_ep_vf.h"
10 #include "otx2_ep_vf.h"
11 #include "cnxk_ep_vf.h"
12 #include "otx_ep_rxtx.h"
13 #include "otx_ep_mbox.h"
14 
15 #define OTX_EP_DEV(_eth_dev) \
16 	((struct otx_ep_device *)(_eth_dev)->data->dev_private)
17 
18 static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
19 	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
20 	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
21 	.nb_align	= OTX_EP_RXD_ALIGN,
22 };
23 
24 static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
25 	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
26 	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
27 	.nb_align	= OTX_EP_TXD_ALIGN,
28 };
29 
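/*
 * Select the Tx burst handler for this port: CN9XX/CN10XX use the common
 * cnxk path, with the multi-segment variant when RTE_ETH_TX_OFFLOAD_MULTI_SEGS
 * is enabled, while older OCTEON TX parts fall back to otx_ep_xmit_pkts.
 * If the port is already started, the ethdev fast-path ops table is patched
 * as well so the new handler takes effect immediately.
 */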
30 static void
31 otx_ep_set_tx_func(struct rte_eth_dev *eth_dev)
32 {
33 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
34 
35 	if (otx_epvf->chip_gen == OTX_EP_CN10XX || otx_epvf->chip_gen == OTX_EP_CN9XX) {
36 		eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts;
37 		if (otx_epvf->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
38 			eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts_mseg;
39 	} else {
40 		eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
41 	}
42 
43 	if (eth_dev->data->dev_started)
44 		rte_eth_fp_ops[eth_dev->data->port_id].tx_pkt_burst =
45 			eth_dev->tx_pkt_burst;
46 }
47 
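/*
 * Select the Rx burst handler for this port: scalar by default, SSE/AVX2
 * variants on x86 (AVX2 only when the CPU and the EAL max SIMD bitwidth
 * allow it), a NEON variant on ARM64, and the multi-segment handler when
 * RTE_ETH_RX_OFFLOAD_SCATTER is enabled.  Older OCTEON TX parts use
 * otx_ep_recv_pkts.
 */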
48 static void
49 otx_ep_set_rx_func(struct rte_eth_dev *eth_dev)
50 {
51 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
52 
53 	if (otx_epvf->chip_gen == OTX_EP_CN10XX) {
54 		eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts;
55 #ifdef RTE_ARCH_X86
56 		eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts_sse;
57 #ifdef CC_AVX2_SUPPORT
58 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
59 		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1)
60 			eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts_avx;
61 #endif
62 #elif defined(RTE_ARCH_ARM64)
63 		eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts_neon;
64 #endif
65 		if (otx_epvf->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
66 			eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts_mseg;
67 	} else if (otx_epvf->chip_gen == OTX_EP_CN9XX) {
68 		eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts;
69 #ifdef RTE_ARCH_X86
70 		eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts_sse;
71 #ifdef CC_AVX2_SUPPORT
72 		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
73 		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1)
74 			eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts_avx;
75 #endif
76 #elif defined(RTE_ARCH_ARM64)
77 		eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts_neon;
78 #endif
79 		if (otx_epvf->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
80 			eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts_mseg;
81 	} else {
82 		eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
83 	}
84 
85 	if (eth_dev->data->dev_started)
86 		rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst =
87 			eth_dev->rx_pkt_burst;
88 }
89 
90 static int
91 otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
92 		    struct rte_eth_dev_info *devinfo)
93 {
94 	struct otx_ep_device *otx_epvf;
95 	int max_rx_pktlen;
96 
97 	otx_epvf = OTX_EP_DEV(eth_dev);
98 
99 	max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
100 	if (!max_rx_pktlen) {
101 		otx_ep_err("Failed to get Max Rx packet length");
102 		return -EINVAL;
103 	}
104 
105 	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
106 	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
107 	devinfo->max_tx_queues = otx_epvf->max_tx_queues;
108 
109 	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
110 	devinfo->max_rx_pktlen = max_rx_pktlen;
111 	devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
112 	devinfo->min_mtu = RTE_ETHER_MIN_LEN;
113 	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
114 	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
115 
116 	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
117 
118 	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
119 	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
120 
121 	devinfo->default_rxportconf.ring_size = OTX_EP_MIN_OQ_DESCRIPTORS;
122 	devinfo->default_txportconf.ring_size = OTX_EP_MIN_IQ_DESCRIPTORS;
123 
124 	return 0;
125 }
126 
127 static int
128 otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
129 {
130 	struct rte_eth_link link;
131 	int ret = 0;
132 
133 	RTE_SET_USED(wait_to_complete);
134 	if (!eth_dev->data->dev_started)
135 		return 0;
136 
137 	memset(&link, 0, sizeof(link));
138 	ret = otx_ep_mbox_get_link_info(eth_dev, &link);
139 	if (ret)
140 		return -EINVAL;
141 	otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
142 		    link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
143 	return rte_eth_linkstatus_set(eth_dev, &link);
144 }
145 
146 static int
147 otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
148 {
149 	struct rte_eth_dev_info devinfo;
150 	int32_t ret = 0;
151 
152 	if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
153 		otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
154 		return -EPERM;
155 	}
156 
157 	/* Check if MTU is within the allowed range */
158 	if (mtu < devinfo.min_mtu) {
159 		otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
160 		return -EINVAL;
161 	}
162 
163 	if (mtu > devinfo.max_mtu) {
164 		otx_ep_err("Invalid MTU %u: higher than maximum MTU %u", mtu, devinfo.max_mtu);
165 		return -EINVAL;
166 	}
167 
168 	ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
169 	if (ret)
170 		return -EINVAL;
171 
172 	otx_ep_dbg("MTU is set to %u", mtu);
173 
174 	return 0;
175 }
176 
177 static int
178 otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
179 				struct rte_ether_addr *mac_addr)
180 {
181 	int ret;
182 
183 	ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
184 	if (ret)
185 		return -EINVAL;
186 	otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
187 		    RTE_ETHER_ADDR_BYTES(mac_addr));
188 	rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
189 	return 0;
190 }
191 
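/*
 * Start the port: enable the IQ/OQ hardware, credit each Rx ring with its
 * full descriptor count, refresh link status and burst handlers, and mark
 * all queues as started.
 */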
192 static int
193 otx_ep_dev_start(struct rte_eth_dev *eth_dev)
194 {
195 	struct otx_ep_device *otx_epvf;
196 	unsigned int q;
197 	int ret;
198 
199 	otx_epvf = OTX_EP_DEV(eth_dev);
200 	/* Enable IQ/OQ for this device */
201 	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
202 	if (ret) {
203 		otx_ep_err("IOQ enable failed\n");
204 		return ret;
205 	}
206 
207 	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
208 		rte_write32(otx_epvf->droq[q]->nb_desc,
209 			    otx_epvf->droq[q]->pkts_credit_reg);
210 
211 		rte_wmb();
212 		otx_ep_info("OQ[%d] dbells [%d]\n", q,
213 			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
214 	}
215 
216 	otx_ep_dev_link_update(eth_dev, 0);
217 
218 	otx_ep_set_tx_func(eth_dev);
219 	otx_ep_set_rx_func(eth_dev);
220 
221 	otx_ep_info("dev started\n");
222 
223 	for (q = 0; q < eth_dev->data->nb_rx_queues; q++)
224 		eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED;
225 	for (q = 0; q < eth_dev->data->nb_tx_queues; q++)
226 		eth_dev->data->tx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED;
227 
228 	return 0;
229 }
230 
231 /* Stop device and disable input/output functions */
232 static int
233 otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
234 {
235 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
236 	uint16_t i;
237 
238 	otx_epvf->fn_list.disable_io_queues(otx_epvf);
239 
240 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
241 		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
242 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
243 		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
244 
245 	return 0;
246 }
247 
248 /*
249  * We only need 2 uint32_t locations per IOQ, but separate these so
250  * each IOQ has the variables on its own cache line.
251  */
252 #define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
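/*
 * Illustrative sketch only (not used by the driver as-is): with the layout
 * above, queue q's ISM words would sit at a cache-line stride from the
 * buffer base, e.g. something like
 *
 *	uint32_t *ism_word = RTE_PTR_ADD(otx_epvf->ism_buffer_mz->addr,
 *					 q_no * RTE_CACHE_LINE_SIZE);
 *
 * so that no two IOQs poll the same cache line.
 */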
253 static int
254 otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
255 {
256 	otx_epvf->ism_buffer_mz =
257 		rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
258 					 0, OTX_EP_ISM_BUFFER_SIZE,
259 					 OTX_EP_PCI_RING_ALIGN, 0);
260 
261 	if (otx_epvf->ism_buffer_mz == NULL) {
262 		otx_ep_err("Failed to allocate ISM buffer\n");
263 		return -1;
264 	}
265 	/* Same DMA buffer is shared by OQ and IQ, clear it at start */
266 	memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
267 	otx_ep_dbg("ISM: virt: %p, dma: 0x%" PRIX64,
268 		    (void *)otx_epvf->ism_buffer_mz->addr,
269 		    otx_epvf->ism_buffer_mz->iova);
270 
271 	return 0;
272 }
273 
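/*
 * Per-silicon bring-up: select the fn_list/register layer for the CN8XX,
 * CN9XX or CN10XX family from the PCI device id, quiesce the IO queues,
 * and allocate the ISM status buffer on CN9K/CN10K parts.
 */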
274 static int
275 otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
276 {
277 	struct rte_pci_device *pdev = otx_epvf->pdev;
278 	uint32_t dev_id = pdev->id.device_id;
279 	int ret = 0;
280 
281 	switch (dev_id) {
282 	case PCI_DEVID_OCTEONTX_EP_VF:
283 		otx_epvf->chip_id = dev_id;
284 		ret = otx_ep_vf_setup_device(otx_epvf);
285 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
286 		break;
287 	case PCI_DEVID_CN9K_EP_NET_VF:
288 	case PCI_DEVID_CN98XX_EP_NET_VF:
289 	case PCI_DEVID_CNF95N_EP_NET_VF:
290 	case PCI_DEVID_CNF95O_EP_NET_VF:
291 		otx_epvf->chip_id = dev_id;
292 		ret = otx2_ep_vf_setup_device(otx_epvf);
293 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
294 		if (otx_ep_ism_setup(otx_epvf))
295 			ret = -EINVAL;
296 		break;
297 	case PCI_DEVID_CN10KA_EP_NET_VF:
298 	case PCI_DEVID_CN10KB_EP_NET_VF:
299 	case PCI_DEVID_CNF10KA_EP_NET_VF:
300 	case PCI_DEVID_CNF10KB_EP_NET_VF:
301 		otx_epvf->chip_id = dev_id;
302 		ret = cnxk_ep_vf_setup_device(otx_epvf);
303 		otx_epvf->fn_list.disable_io_queues(otx_epvf);
304 		if (otx_ep_ism_setup(otx_epvf))
305 			ret = -EINVAL;
306 		break;
307 	default:
308 		otx_ep_err("Unsupported device\n");
309 		ret = -EINVAL;
310 	}
311 
312 	if (!ret)
313 		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);
314 
315 	return ret;
316 }
317 
318 /* OTX_EP VF device initialization */
319 static int
320 otx_epdev_init(struct otx_ep_device *otx_epvf)
321 {
322 	uint32_t ethdev_queues;
323 	int ret = 0;
324 
325 	ret = otx_ep_chip_specific_setup(otx_epvf);
326 	if (ret) {
327 		otx_ep_err("Chip specific setup failed\n");
328 		goto setup_fail;
329 	}
330 
331 	otx_epvf->fn_list.setup_device_regs(otx_epvf);
332 
333 	otx_epvf->eth_dev->tx_pkt_burst = &cnxk_ep_xmit_pkts;
334 	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
335 	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
336 		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
337 		otx_epvf->chip_gen = OTX_EP_CN8XX;
338 	} else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
339 		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
340 		 otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
341 		 otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF) {
342 		otx_epvf->eth_dev->rx_pkt_burst = &cn9k_ep_recv_pkts;
343 		otx_epvf->chip_gen = OTX_EP_CN9XX;
344 	} else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
345 		   otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
346 		   otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
347 		   otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
348 		otx_epvf->eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts;
349 		otx_epvf->chip_gen = OTX_EP_CN10XX;
350 	} else {
351 		otx_ep_err("Invalid chip_id\n");
352 		ret = -EINVAL;
353 		goto setup_fail;
354 	}
355 	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
356 	otx_epvf->max_rx_queues = ethdev_queues;
357 	otx_epvf->max_tx_queues = ethdev_queues;
358 
359 	otx_ep_info("OTX_EP Device is Ready\n");
360 
361 setup_fail:
362 	return ret;
363 }
364 
365 static int
366 otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
367 {
368 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
369 	struct rte_eth_dev_data *data = eth_dev->data;
370 	struct rte_eth_rxmode *rxmode;
371 	struct rte_eth_txmode *txmode;
372 	struct rte_eth_conf *conf;
373 
374 	conf = &data->dev_conf;
375 	rxmode = &conf->rxmode;
376 	txmode = &conf->txmode;
377 	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
378 	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
379 		otx_ep_err("invalid num queues\n");
380 		return -EINVAL;
381 	}
382 	otx_ep_info("OTX_EP Device is configured with num_rxq %d num_txq %d\n",
383 		    eth_dev->data->nb_rx_queues, eth_dev->data->nb_tx_queues);
384 
385 	otx_epvf->rx_offloads = rxmode->offloads;
386 	otx_epvf->tx_offloads = txmode->offloads;
387 
388 	return 0;
389 }
390 
391 /**
392  * Setup our receive queue/ringbuffer. This is the
393  * queue the Octeon uses to send us packets and
394  * responses. We are given a memory pool for our
395  * packet buffers that are used to populate the receive
396  * queue.
397  *
398  * @param eth_dev
399  *    Pointer to the structure rte_eth_dev
400  * @param q_no
401  *    Queue number
402  * @param num_rx_descs
403  *    Number of entries in the queue
404  * @param socket_id
405  *    Where to allocate memory
406  * @param rx_conf
407  *    Pointer to the structure rte_eth_rxconf
408  * @param mp
409  *    Pointer to the packet pool
410  *
411  * @return
412  *    - On success, return 0
413  *    - On failure, return -1
414  */
415 static int
416 otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
417 		       uint16_t num_rx_descs, unsigned int socket_id,
418 		       const struct rte_eth_rxconf *rx_conf __rte_unused,
419 		       struct rte_mempool *mp)
420 {
421 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
422 	struct rte_pktmbuf_pool_private *mbp_priv;
423 	uint16_t buf_size;
424 
425 	if (q_no >= otx_epvf->max_rx_queues) {
426 		otx_ep_err("Invalid rx queue number %u\n", q_no);
427 		return -EINVAL;
428 	}
429 
430 	if (num_rx_descs & (num_rx_descs - 1)) {
431 		otx_ep_err("Invalid rx desc number %u: must be a power of 2\n",
432 			   num_rx_descs);
433 		return -EINVAL;
434 	}
435 	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
436 		otx_ep_err("Invalid rx desc number %u: must be at least 8 * wmark (%u)\n",
437 			   num_rx_descs, (SDP_GBL_WMARK * 8));
438 		return -EINVAL;
439 	}
440 
441 	otx_ep_dbg("setting up rx queue %u\n", q_no);
442 
443 	mbp_priv = rte_mempool_get_priv(mp);
444 	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
445 
446 	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
447 			     socket_id)) {
448 		otx_ep_err("droq allocation failed\n");
449 		return -1;
450 	}
451 
452 	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];
453 
454 	return 0;
455 }
456 
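/*
 * Example (hypothetical application-side call): given the checks above, an
 * Rx queue would be created with a power-of-two descriptor count that is not
 * below 8 * SDP_GBL_WMARK and within otx_ep_rx_desc_lim, e.g.
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *				     NULL, mbuf_pool);
 *
 * where nb_rxd is chosen accordingly.
 */
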
457 /**
458  * Release the receive queue/ringbuffer. Called by
459  * the upper layers.
460  *
461  * @param dev
462  *   Pointer to Ethernet device structure.
463  * @param q_no
464  *   Receive queue index.
465  *
466  * @return
467  *    - nothing
468  */
469 static void
470 otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
471 {
472 	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
473 	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
474 	int q_id = rq->q_no;
475 
476 	if (otx_ep_delete_oqs(otx_epvf, q_id))
477 		otx_ep_err("Failed to delete OQ:%d\n", q_id);
478 }
479 
480 /**
481  * Allocate and initialize SW ring. Initialize associated HW registers.
482  *
483  * @param eth_dev
484  *   Pointer to structure rte_eth_dev
485  *
486  * @param q_no
487  *   Queue number
488  *
489  * @param num_tx_descs
490  *   Number of ringbuffer descriptors
491  *
492  * @param socket_id
493  *   NUMA socket id, used for memory allocations
494  *
495  * @param tx_conf
496  *   Pointer to the structure rte_eth_txconf
497  *
498  * @return
499  *   - On success, return 0
500  *   - On failure, return -errno value
501  */
502 static int
503 otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
504 		       uint16_t num_tx_descs, unsigned int socket_id,
505 		       const struct rte_eth_txconf *tx_conf __rte_unused)
506 {
507 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
508 	int retval;
509 
510 	if (q_no >= otx_epvf->max_tx_queues) {
511 		otx_ep_err("Invalid tx queue number %u\n", q_no);
512 		return -EINVAL;
513 	}
514 	if (num_tx_descs & (num_tx_descs - 1)) {
515 		otx_ep_err("Invalid tx desc number %u: must be a power of 2\n",
516 			   num_tx_descs);
517 		return -EINVAL;
518 	}
519 	if (num_tx_descs < (SDP_GBL_WMARK * 8)) {
520 		otx_ep_err("Invalid tx desc number %u: must be at least 8 * wmark (%u)\n",
521 			   num_tx_descs, (SDP_GBL_WMARK * 8));
522 		return -EINVAL;
523 	}
524 
525 	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
526 
527 	if (retval) {
528 		otx_ep_err("IQ(TxQ) creation failed.\n");
529 		return retval;
530 	}
531 
532 	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
533 	otx_ep_dbg("tx queue[%d] setup\n", q_no);
534 	return 0;
535 }
536 
537 /**
538  * Release the transmit queue/ringbuffer. Called by
539  * the upper layers.
540  *
541  * @param dev
542  *    Pointer to Ethernet device structure.
543  * @param q_no
544  *    Transmit queue index.
545  *
546  * @return
547  *    - nothing
548  */
549 static void
550 otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
551 {
552 	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];
553 
554 	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
555 }
556 
557 static int
558 otx_ep_dev_stats_reset(struct rte_eth_dev *dev)
559 {
560 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
561 	uint32_t i;
562 
563 	for (i = 0; i < otx_epvf->nb_tx_queues; i++)
564 		memset(&otx_epvf->instr_queue[i]->stats, 0,
565 		       sizeof(struct otx_ep_iq_stats));
566 
567 	for (i = 0; i < otx_epvf->nb_rx_queues; i++)
568 		memset(&otx_epvf->droq[i]->stats, 0,
569 		       sizeof(struct otx_ep_droq_stats));
570 
571 	return 0;
572 }
573 
574 static int
575 otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
576 				struct rte_eth_stats *stats)
577 {
578 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
579 	struct otx_ep_iq_stats *ostats;
580 	struct otx_ep_droq_stats *istats;
581 	uint32_t i;
582 
583 	memset(stats, 0, sizeof(struct rte_eth_stats));
584 
585 	for (i = 0; i < otx_epvf->nb_tx_queues; i++) {
586 		ostats = &otx_epvf->instr_queue[i]->stats;
587 		stats->q_opackets[i] = ostats->tx_pkts;
588 		stats->q_obytes[i] = ostats->tx_bytes;
589 		stats->opackets += ostats->tx_pkts;
590 		stats->obytes += ostats->tx_bytes;
591 		stats->oerrors += ostats->instr_dropped;
592 	}
593 	for (i = 0; i < otx_epvf->nb_rx_queues; i++) {
594 		istats = &otx_epvf->droq[i]->stats;
595 		stats->q_ipackets[i] = istats->pkts_received;
596 		stats->q_ibytes[i] = istats->bytes_received;
597 		stats->q_errors[i] = istats->rx_err;
598 		stats->ipackets += istats->pkts_received;
599 		stats->ibytes += istats->bytes_received;
600 		stats->imissed += istats->rx_alloc_failure;
601 		stats->ierrors += istats->rx_err;
602 		stats->rx_nombuf += istats->rx_alloc_failure;
603 	}
604 	return 0;
605 }
606 
607 static int
608 otx_ep_dev_close(struct rte_eth_dev *eth_dev)
609 {
610 	struct otx_ep_device *otx_epvf;
611 	uint32_t num_queues, q_no;
612 
613 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
614 		eth_dev->dev_ops = NULL;
615 		eth_dev->rx_pkt_burst = NULL;
616 		eth_dev->tx_pkt_burst = NULL;
617 		return 0;
618 	}
619 
620 	otx_epvf = OTX_EP_DEV(eth_dev);
621 	otx_ep_mbox_send_dev_exit(eth_dev);
622 	otx_epvf->fn_list.disable_io_queues(otx_epvf);
623 	num_queues = otx_epvf->nb_rx_queues;
624 	for (q_no = 0; q_no < num_queues; q_no++) {
625 		if (otx_ep_delete_oqs(otx_epvf, q_no)) {
626 			otx_ep_err("Failed to delete OQ:%d\n", q_no);
627 			return -EINVAL;
628 		}
629 	}
630 	otx_ep_dbg("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);
631 
632 	num_queues = otx_epvf->nb_tx_queues;
633 	for (q_no = 0; q_no < num_queues; q_no++) {
634 		if (otx_ep_delete_iqs(otx_epvf, q_no)) {
635 			otx_ep_err("Failed to delete IQ:%d\n", q_no);
636 			return -EINVAL;
637 		}
638 	}
639 	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
640 
641 	if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
642 		otx_ep_err("Failed to delete ISM buffer\n");
643 		return -EINVAL;
644 	}
645 
646 	return 0;
647 }
648 
649 static int
650 otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
651 			struct rte_ether_addr *mac_addr)
652 {
653 	int ret;
654 
655 	ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
656 	if (ret)
657 		return -EINVAL;
658 	otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
659 		    RTE_ETHER_ADDR_BYTES(mac_addr));
660 	return 0;
661 }
662 
663 /* Define our ethernet definitions */
664 static const struct eth_dev_ops otx_ep_eth_dev_ops = {
665 	.dev_configure		= otx_ep_dev_configure,
666 	.dev_start		= otx_ep_dev_start,
667 	.dev_stop		= otx_ep_dev_stop,
668 	.rx_queue_setup	        = otx_ep_rx_queue_setup,
669 	.rx_queue_release	= otx_ep_rx_queue_release,
670 	.tx_queue_setup	        = otx_ep_tx_queue_setup,
671 	.tx_queue_release	= otx_ep_tx_queue_release,
672 	.dev_infos_get		= otx_ep_dev_info_get,
673 	.stats_get		= otx_ep_dev_stats_get,
674 	.stats_reset		= otx_ep_dev_stats_reset,
675 	.link_update		= otx_ep_dev_link_update,
676 	.dev_close		= otx_ep_dev_close,
677 	.mtu_set		= otx_ep_dev_mtu_set,
678 	.mac_addr_set           = otx_ep_dev_set_default_mac_addr,
679 };
680 
681 static int
682 otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
683 {
684 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
685 		eth_dev->dev_ops = NULL;
686 		eth_dev->rx_pkt_burst = NULL;
687 		eth_dev->tx_pkt_burst = NULL;
688 		return 0;
689 	}
690 
691 	eth_dev->dev_ops = NULL;
692 	eth_dev->rx_pkt_burst = NULL;
693 	eth_dev->tx_pkt_burst = NULL;
694 
695 	return 0;
696 }
697 
698 static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
699 					   struct rte_ether_addr *mac_addr)
700 {
701 	int ret_val;
702 
703 	memset(mac_addr, 0, sizeof(struct rte_ether_addr));
704 	ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
705 	if (!ret_val) {
706 		if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
707 			otx_ep_dbg("PF doesn't have a valid VF MAC addr " RTE_ETHER_ADDR_PRT_FMT "\n",
708 				    RTE_ETHER_ADDR_BYTES(mac_addr));
709 			rte_eth_random_addr(mac_addr->addr_bytes);
710 			otx_ep_dbg("Setting random MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
711 				    RTE_ETHER_ADDR_BYTES(mac_addr));
712 			ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
713 			if (ret_val) {
714 				otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT " failed\n",
715 					    RTE_ETHER_ADDR_BYTES(mac_addr));
716 				return ret_val;
717 			}
718 		}
719 		otx_ep_dbg("Using VF MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
720 			    RTE_ETHER_ADDR_BYTES(mac_addr));
721 	} else {
722 		otx_ep_err("Failed to get MAC address from PF via mbox, ret: %d\n",
723 			    ret_val);
724 		return ret_val;
725 	}
726 	return 0;
727 }
728 
729 static int
730 otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
731 {
732 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
733 	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
734 	struct rte_ether_addr vf_mac_addr;
735 
736 	/* Single process support */
737 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
738 		eth_dev->dev_ops = &otx_ep_eth_dev_ops;
739 		otx_ep_set_tx_func(eth_dev);
740 		otx_ep_set_rx_func(eth_dev);
741 		return 0;
742 	}
743 
744 	rte_eth_copy_pci_info(eth_dev, pdev);
745 	otx_epvf->eth_dev = eth_dev;
746 	otx_epvf->port_id = eth_dev->data->port_id;
747 	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
748 	rte_spinlock_init(&otx_epvf->mbox_lock);
749 
750 	/*
751 	 * Initialize negotiated Mbox version to base version of VF Mbox
752 	 * This will address working legacy PF with latest VF.
753 	 */
754 	otx_epvf->mbox_neg_ver = OTX_EP_MBOX_VERSION_V1;
755 	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
756 	if (eth_dev->data->mac_addrs == NULL) {
757 		otx_ep_err("MAC addresses memory allocation failed\n");
758 		eth_dev->dev_ops = NULL;
759 		return -ENOMEM;
760 	}
761 	rte_eth_random_addr(vf_mac_addr.addr_bytes);
762 	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
763 	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
764 	otx_epvf->pdev = pdev;
765 
766 	if (otx_epdev_init(otx_epvf))
767 		return -ENOMEM;
768 	if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
769 	    otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
770 	    otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
771 	    otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
772 	    otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
773 	    otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
774 	    otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
775 	    otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
776 		otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
777 		otx_ep_info("using pkind %d\n", otx_epvf->pkind);
778 	} else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
779 		otx_epvf->pkind = SDP_PKIND;
780 		otx_ep_info("Using pkind %d.\n", otx_epvf->pkind);
781 	} else {
782 		otx_ep_err("Invalid chip id\n");
783 		return -EINVAL;
784 	}
785 
786 	if (otx_ep_mbox_version_check(eth_dev))
787 		return -EINVAL;
788 
789 	if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
790 				&vf_mac_addr)) {
791 		otx_ep_err("set mac addr failed\n");
792 		return -ENODEV;
793 	}
794 	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
795 
796 	return 0;
797 }
798 
799 static int
800 otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
801 		      struct rte_pci_device *pci_dev)
802 {
803 	return rte_eth_dev_pci_generic_probe(pci_dev,
804 					     sizeof(struct otx_ep_device),
805 					     otx_ep_eth_dev_init);
806 }
807 
808 static int
809 otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
810 {
811 	return rte_eth_dev_pci_generic_remove(pci_dev,
812 					      otx_ep_eth_dev_uninit);
813 }
814 
815 /* Set of PCI devices this driver supports */
816 static const struct rte_pci_id pci_id_otx_ep_map[] = {
817 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
818 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
819 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
820 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
821 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
822 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KA_EP_NET_VF) },
823 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_EP_NET_VF) },
824 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KA_EP_NET_VF) },
825 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KB_EP_NET_VF) },
826 	{ .vendor_id = 0, /* sentinel */ }
827 };
828 
829 static struct rte_pci_driver rte_otx_ep_pmd = {
830 	.id_table	= pci_id_otx_ep_map,
831 	.drv_flags      = RTE_PCI_DRV_NEED_MAPPING,
832 	.probe		= otx_ep_eth_dev_pci_probe,
833 	.remove		= otx_ep_eth_dev_pci_remove,
834 };
835 
836 RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
837 RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
838 RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
839 RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);
840