/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_ring_logtype, pmd.net.ring, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

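/*
 * Burst RX: dequeue up to nb_bufs mbuf pointers from the queue's
 * backing ring. For single-consumer rings a plain counter update is
 * race-free; otherwise the packet count is updated atomically.
 */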
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

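/*
 * Burst TX: enqueue up to nb_bufs mbuf pointers onto the queue's
 * backing ring. Mirrors eth_ring_rx: a plain counter update for
 * single-producer rings, an atomic one otherwise.
 */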
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}

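/*
 * Most ethdev ops below are trivial: a ring-backed device has no
 * hardware to program, so configure is a no-op and start/stop merely
 * toggle the software link status.
 */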
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

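/*
 * Queue setup only points the ethdev queue slot at the matching
 * ring_queue entry in dev_private; the rings themselves were attached
 * at device creation time, so descriptor counts, mempools and
 * per-queue configuration are ignored.
 */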
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
				    uint16_t nb_rx_desc __rte_unused,
				    unsigned int socket_id __rte_unused,
				    const struct rte_eth_rxconf *rx_conf __rte_unused,
				    struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
				    uint16_t nb_tx_desc __rte_unused,
				    unsigned int socket_id __rte_unused,
				    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

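/*
 * Report per-queue counters for at most RTE_ETHDEV_QUEUE_STAT_CNTRS
 * queues and accumulate the totals from those same counters.
 */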
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct rte_ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

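/*
 * Release the port's resources. Rings are freed only when this port
 * created them (DEV_CREATE); rings attached from elsewhere remain owned
 * by their creator.
 */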
static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to free the rings in rx_queues because
		 * they are the same ones used in tx_queues
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};

static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
					    sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
					    sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}

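/*
 * A minimal usage sketch (names and ring size are illustrative, not
 * part of this driver): create one ring and expose it as a
 * single-queue ethdev, using the same ring for RX and TX.
 *
 *	struct rte_ring *r = rte_ring_create("sketch_r0", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port = rte_eth_from_rings("sketch_eth0", &r, 1, &r, 1,
 *			rte_socket_id());
 *	if (port < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create ring ethdev\n");
 */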
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
		 ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}

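/*
 * Convenience wrapper: expose a single ring as a one-queue ethdev that
 * uses the same ring for both RX and TX, inheriting the ring's NUMA
 * socket when its backing memzone is known.
 */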
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

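/*
 * Create (or look up, for DEV_ATTACH) the default set of backing rings
 * for a vdev: one ring per queue, shared between RX and TX, named
 * ETH_RXTX<i>_<name>.
 */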
static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* RX and TX are so named from the point of view of the first port;
	 * they are inverted from the point of view of the second port.
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
			      "ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
		numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

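/*
 * Parse one "nodeaction" value of the form name:node:action, e.g.
 * "nodeaction=r0:0:CREATE". The copied string is split in place on ':'
 * and each field is validated before being stored in the
 * node_action_list.
 */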
static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "failed to copy ring pmd parameter!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/* validate the action and node fields before storing them */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

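/*
 * Recover the ring_internal_args pointer that rte_eth_from_rings()
 * encoded as "internal=<pointer>"; the structure's self-address is
 * compared against the parsed value as a sanity check.
 */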
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;

	if (sscanf(value, "%p", &args) != 1)
		return -1;

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}

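/*
 * Probe a net_ring vdev. With no devargs, create (or attach to) a
 * default pair of rings on the caller's socket. With "internal=",
 * attach to rings passed in-process by rte_eth_from_rings(). With
 * "nodeaction=", create or attach one device per name:node:action
 * tuple.
 */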
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						  DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
							  DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
				internal_args->rx_queues,
				internal_args->nb_rx_queues,
				internal_args->tx_queues,
				internal_args->nb_tx_queues,
				internal_args->numa_node,
				DEV_ATTACH,
				&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info) {
				ret = -ENOMEM;
				goto out_free;
			}

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  dev,
							  info->list[info->count].node,
							  info->list[info->count].action,
							  &eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

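/*
 * Example (illustrative, not part of this driver): instantiating the
 * PMD from the EAL command line, with and without explicit
 * name:node:action tuples.
 *
 *	./app --vdev=net_ring0
 *	./app --vdev=net_ring0,nodeaction=r0:0:CREATE
 */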
RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");