xref: /dpdk/drivers/net/ring/rte_eth_ring.c (revision ceb1ccd5d50c1a89ba8bdd97cc199e7f07422b98)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	NULL
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

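/* Per-queue state: the rte_ring backing the queue plus soft stats counters.
 * The same structure is used for both rx and tx queues.
 */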
struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned max_rx_queues;
	unsigned max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct ether_addr address;
	enum dev_action action;
};


static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
		.link_speed = 10000,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = 0
};

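/* rx/tx burst handlers: move mbufs between the caller and the backing ring.
 * When the ring was created single-consumer (or single-producer on the tx
 * side), only one thread can legally poll the queue, so the stats counters
 * are updated without atomics; otherwise an atomic add is used.
 */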
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs);
	if (r->rng->flags & RING_F_SP_ENQ) {
		r->tx_pkts.cnt += nb_tx;
		r->err_pkts.cnt += nb_bufs - nb_tx;
	} else {
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
	}
	return nb_tx;
}
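
/* Illustrative sketch (not part of the driver): eth_dev_ring_create() below
 * registers the same ring as both the rx and the tx queue of a port, so
 * anything sent on such a port can be read straight back. The port_id and
 * burst size here are hypothetical; the port is assumed configured and
 * started.
 */
#if 0
	struct rte_mbuf *pkts[32];
	uint16_t sent, got;

	sent = rte_eth_tx_burst(port_id, 0, pkts, 32);	/* enqueue to ring */
	got = rte_eth_rx_burst(port_id, 0, pkts, 32);	/* same mbufs back */
#endif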

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 0;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = 1;
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
				    uint16_t nb_rx_desc __rte_unused,
				    unsigned int socket_id __rte_unused,
				    const struct rte_eth_rxconf *rx_conf __rte_unused,
				    struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
				    uint16_t nb_tx_desc __rte_unused,
				    unsigned int socket_id __rte_unused,
				    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
		tx_total += stats->q_opackets[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
		internal->tx_ring_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static void
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};

static int
do_eth_dev_ring_create(const char *name,
		struct rte_ring *const rx_queues[], const unsigned nb_rx_queues,
		struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
		const unsigned numa_node, enum dev_action action)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	unsigned i;

	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for the eth_dev structure
	 * and the internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->rx_queues = rte_zmalloc_socket(name,
			sizeof(void *) * nb_rx_queues, 0, numa_node);
	if (data->rx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	data->tx_queues = rte_zmalloc_socket(name,
			sizeof(void *) * nb_tx_queues, 0, numa_node);
	if (data->tx_queues == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the rings are local per-process
	 */

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	memmove(data->name, eth_dev->data->name, sizeof(data->name));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;

	eth_dev->data = data;
	eth_dev->driver = NULL;
	eth_dev->dev_ops = &ops;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	return data->port_id;

error:
	if (data) {
		rte_free(data->rx_queues);
		rte_free(data->tx_queues);
	}
	rte_free(data);
	rte_free(internals);

	return -1;
}

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned nb_tx_queues,
		const unsigned numa_node)
{
	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_tx_queues > RTE_PMD_RING_MAX_TX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues,
			tx_queues, nb_tx_queues, numa_node, DEV_ATTACH);
}

int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
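
/* Usage sketch (illustrative, not part of the driver): an application can
 * wrap an existing ring in an ethdev and then drive it through the normal
 * ethdev API. The ring name and size below are hypothetical.
 */
#if 0
	struct rte_ring *r = rte_ring_create("app_ring", 1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	int port_id;

	if (r == NULL)
		rte_exit(EXIT_FAILURE, "ring creation failed\n");
	port_id = rte_eth_from_ring(r);
	if (port_id < 0)
		rte_exit(EXIT_FAILURE, "failed to create ethdev from ring\n");
#endif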

static int
eth_dev_ring_create(const char *name, const unsigned numa_node,
		enum dev_action action)
{
	/* rx and tx are so-called from the point of view of the first port.
	 * They are inverted from the point of view of the second port.
	 * With DEV_CREATE the backing rings are created here; with DEV_ATTACH
	 * rings of the same name, created earlier, are looked up instead.
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
		numa_node, action) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned node;
	enum dev_action action;
};

struct node_action_list {
	unsigned total;
	unsigned count;
	struct node_action_pair *list;
};

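/* parse_kvlist() handles one value of the "nodeaction" devarg, which has the
 * form name:node:action, where action is CREATE or ATTACH; e.g. the
 * hypothetical --vdev=eth_ring0,nodeaction=r1:0:CREATE
 */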
static int parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		RTE_LOG(WARNING, PMD, "could not parse node value from %s\n", name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
		goto out;
	}

	snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

static int
rte_pmd_ring_devinit(const char *name, const char *params)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;

	RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
		if (ret == -1) {
			RTE_LOG(INFO, PMD,
				"Attach to pmd_ring for %s\n", name);
			ret = eth_dev_ring_create(name, rte_socket_id(),
						  DEV_ATTACH);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
					" rings-backed ethernet device\n");
			ret = eth_dev_ring_create(name, rte_socket_id(),
						  DEV_CREATE);
			if (ret == -1) {
				RTE_LOG(INFO, PMD,
					"Attach to pmd_ring for %s\n",
					name);
				ret = eth_dev_ring_create(name, rte_socket_id(),
							  DEV_ATTACH);
			}
			return ret;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info) {
				ret = -ENOMEM;
				goto out_free;
			}

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(name,
							  info->list[info->count].node,
							  info->list[info->count].action);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					RTE_LOG(INFO, PMD,
						"Attach to pmd_ring for %s\n",
						name);
					ret = eth_dev_ring_create(name,
							info->list[info->count].node,
							DEV_ATTACH);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_stop(eth_dev);

	if (eth_dev->data) {
		internals = eth_dev->data->dev_private;
		if (internals->action == DEV_CREATE) {
			/*
			 * it is only necessary to delete the rings in rx_queues
			 * because the same rings are used in tx_queues
			 */
			for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
				r = eth_dev->data->rx_queues[i];
				rte_ring_free(r->rng);
			}
		}

		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->dev_private);
	}

	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_driver pmd_ring_drv = {
	.name = "eth_ring",
	.type = PMD_VDEV,
	.init = rte_pmd_ring_devinit,
	.uninit = rte_pmd_ring_devuninit,
};

PMD_REGISTER_DRIVER(pmd_ring_drv);
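
/* Illustrative usage (names are examples, not mandated by the driver): a
 * ring-backed loopback port can be requested at EAL init time via --vdev,
 * e.g.
 *
 *   testpmd --vdev=eth_ring0 -- -i
 *
 * With no parameters the PMD creates the rings itself (DEV_CREATE), falling
 * back to attaching to existing ones; per-port NUMA placement can be given
 * as --vdev=eth_ring0,nodeaction=name:node:CREATE (or :ATTACH).
 */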
633