/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"

#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)

static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align	= OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align	= OTX_EP_TXD_ALIGN,
};

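/* Report device capabilities. Queue limits reflect the per-VF ring
 * allocation discovered at init time; speed, offloads and descriptor
 * limits are fixed for the SDP endpoint.
 */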
static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}

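/* Start the device: enable all IQ/OQ pairs and credit each Rx ring so
 * the hardware can begin delivering packets.
 */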
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	unsigned int q;
	int ret;

	otx_epvf = OTX_EP_DEV(eth_dev);
	/* Enable IQ/OQ for this device */
	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
	if (ret) {
		otx_ep_err("IOQ enable failed\n");
		return ret;
	}

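	/* Seed each OQ's credit register with its full descriptor count,
	 * ensuring the write is issued to hardware before the value is
	 * read back for the debug log below.
	 */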
	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
		rte_write32(otx_epvf->droq[q]->nb_desc,
			    otx_epvf->droq[q]->pkts_credit_reg);

		rte_wmb();
		otx_ep_info("OQ[%d] dbells [%d]\n", q,
			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
	}

	otx_ep_info("dev started\n");

	return 0;
}

/* Stop device and disable input/output functions */
static int
otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	return 0;
}

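/* Dispatch to the chip-specific setup routine based on the PCI device
 * id; each routine also populates the fn_list callbacks used below.
 */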
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;

	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_CN9K_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_CNXK_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = cnxk_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device: 0x%x\n", dev_id);
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);

	return ret;
}

/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret = 0;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		goto setup_fail;
	}

	otx_epvf->fn_list.setup_device_regs(otx_epvf);

	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
	else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
		 otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;

	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;

	otx_ep_info("OTX_EP Device is Ready\n");

setup_fail:
	return ret;
}

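/* Check the requested queue counts against the per-VF limits and latch
 * the configured Rx/Tx offload flags.
 */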
static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;
	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid number of queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_rxq %d num_txq %d\n",
		    eth_dev->data->nb_rx_queues, eth_dev->data->nb_tx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}

/**
 * Set up our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number (%u); must be a power of 2\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number (%u); must be at least 8 * SDP_GBL_WMARK\n",
			   num_rx_descs);
		return -EINVAL;
	}

	otx_ep_dbg("setting up rx queue %u\n", q_no);

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param q_no
 *   Receive queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	int retval;

	if (q_no >= otx_epvf->max_tx_queues) {
		otx_ep_err("Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_tx_descs & (num_tx_descs - 1)) {
		otx_ep_err("Invalid tx desc number (%u); must be a power of 2\n",
			   num_tx_descs);
		return -EINVAL;
	}

	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
	if (retval) {
		otx_ep_err("IQ(TxQ) creation failed.\n");
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
	otx_ep_dbg("tx queue[%d] setup\n", q_no);
	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Transmit queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}

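/* Clear the software statistics kept per IQ (Tx) and per DROQ (Rx). */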
static int
otx_ep_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
	uint32_t i;

	for (i = 0; i < otx_epvf->nb_tx_queues; i++)
		memset(&otx_epvf->instr_queue[i]->stats, 0,
		       sizeof(struct otx_ep_iq_stats));

	for (i = 0; i < otx_epvf->nb_rx_queues; i++)
		memset(&otx_epvf->droq[i]->stats, 0,
		       sizeof(struct otx_ep_droq_stats));

	return 0;
}

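/* Fill rte_eth_stats from the per-queue software counters: IQ stats
 * feed the Tx (opackets) fields, DROQ stats feed the Rx (ipackets)
 * fields; mbuf allocation failures count as both imissed and rx_nombuf.
 */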
static int
otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
		     struct rte_eth_stats *stats)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct otx_ep_iq_stats *ostats;
	struct otx_ep_droq_stats *istats;
	uint32_t i;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	for (i = 0; i < otx_epvf->nb_tx_queues; i++) {
		ostats = &otx_epvf->instr_queue[i]->stats;
		stats->q_opackets[i] = ostats->tx_pkts;
		stats->q_obytes[i] = ostats->tx_bytes;
		stats->opackets += ostats->tx_pkts;
		stats->obytes += ostats->tx_bytes;
		stats->oerrors += ostats->instr_dropped;
	}
	for (i = 0; i < otx_epvf->nb_rx_queues; i++) {
		istats = &otx_epvf->droq[i]->stats;
		stats->q_ipackets[i] = istats->pkts_received;
		stats->q_ibytes[i] = istats->bytes_received;
		stats->q_errors[i] = istats->rx_err;
		stats->ipackets += istats->pkts_received;
		stats->ibytes += istats->bytes_received;
		stats->imissed += istats->rx_alloc_failure;
		stats->ierrors += istats->rx_err;
		stats->rx_nombuf += istats->rx_alloc_failure;
	}
	return 0;
}

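/* No link-state source is available for the endpoint VF, so once the
 * port is started the link is reported as a fixed 10G full duplex UP.
 */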
static int
otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	struct rte_eth_link link;

	RTE_SET_USED(wait_to_complete);

	if (!eth_dev->data->dev_started)
		return 0;

	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_UP;
	link.link_speed  = RTE_ETH_SPEED_NUM_10G;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	return rte_eth_linkstatus_set(eth_dev, &link);
}

/* Ethernet device operations */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure		= otx_ep_dev_configure,
	.dev_start		= otx_ep_dev_start,
	.dev_stop		= otx_ep_dev_stop,
	.rx_queue_setup		= otx_ep_rx_queue_setup,
	.rx_queue_release	= otx_ep_rx_queue_release,
	.tx_queue_setup		= otx_ep_tx_queue_setup,
	.tx_queue_release	= otx_ep_tx_queue_release,
	.dev_infos_get		= otx_ep_dev_info_get,
	.stats_get		= otx_ep_dev_stats_get,
	.stats_reset		= otx_ep_dev_stats_reset,
	.link_update		= otx_ep_dev_link_update,
};

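/* Disable I/O and free every OQ (Rx) and IQ (Tx) created for this
 * device; used on uninit.
 */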
static int
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);

	otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	num_queues = otx_epvf->nb_tx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_iqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete IQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);

	return 0;
}

static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epdev_exit(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

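/* Per-port init (primary process only): assign a random MAC address,
 * record the BAR0 register base, run chip setup and pick the SDP
 * packet kind (pkind) matching the chip.
 */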
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;
	int ret;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	ret = otx_epdev_init(otx_epvf);
	if (ret) {
		otx_ep_err("otx_epdev_init failed\n");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		eth_dev->dev_ops = NULL;
		return ret;
	}

	if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}

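/* Standard rte_eth_dev PCI glue: allocate the per-port private data
 * and dispatch to the init/uninit handlers above.
 */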
static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table	= pci_id_otx_ep_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= otx_ep_eth_dev_pci_probe,
	.remove		= otx_ep_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);