/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

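/* Fetch the driver-private state allocated for this port by the generic
 * PCI probe helper (see otx_ep_eth_dev_pci_probe() below).
 */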
#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)

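/* Descriptor-count bounds advertised via dev_infos_get; the ethdev layer
 * validates application-requested ring sizes against these limits.
 */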
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align	= OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align	= OTX_EP_TXD_ALIGN,
};

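/* Report static capabilities; queue limits were derived from the SR-IOV
 * rings-per-VF value discovered during device init.
 */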
static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}

static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	unsigned int q;
	int ret;

	otx_epvf = OTX_EP_DEV(eth_dev);
	/* Enable IQ/OQ for this device */
	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
	if (ret) {
		otx_ep_err("IOQ enable failed\n");
		return ret;
	}

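	/* Prime each OQ's credit register with the full ring size so the
	 * device can start posting received packets; the read-back below
	 * confirms the doorbell write reached the hardware.
	 */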
	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
		rte_write32(otx_epvf->droq[q]->nb_desc,
			    otx_epvf->droq[q]->pkts_credit_reg);

		rte_wmb();
		otx_ep_info("OQ[%d] dbells [%d]\n", q,
			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
	}

	otx_ep_info("dev started\n");

	return 0;
}

/* Stop device and disable input/output functions */
static int
otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	return 0;
}

static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;

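	/* Fill fn_list with this silicon's register accessors and leave the
	 * IO queues disabled; otx_ep_dev_start() re-enables them.
	 */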
	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_CN9K_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device\n");
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);

	return ret;
}

/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret = 0;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		goto setup_fail;
	}

	otx_epvf->fn_list.setup_device_regs(otx_epvf);
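
	/* The receive burst handler is common, but the transmit handler is
	 * selected per chip: CN9K-family devices use a different IQ command
	 * format than the original OCTEON TX endpoint VF.
	 */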
	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
	else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;

	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;

	otx_ep_info("OTX_EP Device is Ready\n");

setup_fail:
	return ret;
}

static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;

	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid num queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number, should be a power of 2: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number, should be at least 8 * SDP_GBL_WMARK: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}

	otx_ep_dbg("setting up rx queue %u\n", q_no);

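	/* Usable receive buffer size is the pool's data room minus the
	 * standard mbuf headroom; this is the most the device may write
	 * into any single buffer.
	 */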
	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param q_no
 *   Receive queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	int retval;

	if (q_no >= otx_epvf->max_tx_queues) {
		otx_ep_err("Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_tx_descs & (num_tx_descs - 1)) {
		otx_ep_err("Invalid tx desc number, should be a power of 2: %u\n",
			   num_tx_descs);
		return -EINVAL;
	}

	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
	if (retval) {
		otx_ep_err("IQ(TxQ) creation failed.\n");
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
	otx_ep_dbg("tx queue[%d] setup\n", q_no);
	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Transmit queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}

static int
otx_ep_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(dev);
	uint32_t i;

	for (i = 0; i < otx_epvf->nb_tx_queues; i++)
		memset(&otx_epvf->instr_queue[i]->stats, 0,
		       sizeof(struct otx_ep_iq_stats));

	for (i = 0; i < otx_epvf->nb_rx_queues; i++)
		memset(&otx_epvf->droq[i]->stats, 0,
		       sizeof(struct otx_ep_droq_stats));

	return 0;
}

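/* Fold the per-queue software counters into the generic rte_eth_stats
 * layout; totals accumulate across all queues.
 */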
static int
otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
		     struct rte_eth_stats *stats)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct otx_ep_iq_stats *ostats;
	struct otx_ep_droq_stats *istats;
	uint32_t i;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	for (i = 0; i < otx_epvf->nb_tx_queues; i++) {
		ostats = &otx_epvf->instr_queue[i]->stats;
		/* Per-queue arrays only hold RTE_ETHDEV_QUEUE_STAT_CNTRS
		 * entries; guard against writing past them.
		 */
		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = ostats->tx_pkts;
			stats->q_obytes[i] = ostats->tx_bytes;
		}
		stats->opackets += ostats->tx_pkts;
		stats->obytes += ostats->tx_bytes;
		stats->oerrors += ostats->instr_dropped;
	}
	for (i = 0; i < otx_epvf->nb_rx_queues; i++) {
		istats = &otx_epvf->droq[i]->stats;
		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = istats->pkts_received;
			stats->q_ibytes[i] = istats->bytes_received;
			stats->q_errors[i] = istats->rx_err;
		}
		stats->ipackets += istats->pkts_received;
		stats->ibytes += istats->bytes_received;
		stats->imissed += istats->rx_alloc_failure;
		stats->ierrors += istats->rx_err;
		stats->rx_nombuf += istats->rx_alloc_failure;
	}
	return 0;
}

/* Define our ethernet device operations */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure		= otx_ep_dev_configure,
	.dev_start		= otx_ep_dev_start,
	.dev_stop		= otx_ep_dev_stop,
	.rx_queue_setup		= otx_ep_rx_queue_setup,
	.rx_queue_release	= otx_ep_rx_queue_release,
	.tx_queue_setup		= otx_ep_tx_queue_setup,
	.tx_queue_release	= otx_ep_tx_queue_release,
	.dev_infos_get		= otx_ep_dev_info_get,
	.stats_get		= otx_ep_dev_stats_get,
	.stats_reset		= otx_ep_dev_stats_reset,
};
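
/* A minimal application-side sketch (assuming this device is port 0, one
 * queue in each direction, and an existing mbuf pool `mp') of the ethdev
 * calls that land in the callbacks above:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(0, 1, 1, &conf);		 // otx_ep_dev_configure
 *	rte_eth_rx_queue_setup(0, 0, 1024, 0, NULL, mp); // otx_ep_rx_queue_setup
 *	rte_eth_tx_queue_setup(0, 0, 1024, 0, NULL);	 // otx_ep_tx_queue_setup
 *	rte_eth_dev_start(0);				 // otx_ep_dev_start
 */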

static int
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);

	otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	num_queues = otx_epvf->nb_tx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_iqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete IQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);

	return 0;
}

static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epdev_exit(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;
	int ret;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	ret = otx_epdev_init(otx_epvf);
	if (ret) {
		otx_ep_err("Device init failed\n");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		eth_dev->dev_ops = NULL;
		return ret;
	}

	/* CN98XX is a CN9K-family endpoint, so it uses the OTX2 pkind too */
	if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF ||
	    pdev->id.device_id == PCI_DEVID_CN98XX_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}

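/* The generic ethdev PCI helpers allocate/release the rte_eth_dev together
 * with a dev_private area of the requested size, then invoke the supplied
 * init/uninit callback.
 */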
static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table	= pci_id_otx_ep_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= otx_ep_eth_dev_pci_probe,
	.remove		= otx_ep_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);