/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

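/*
 * Device arguments accepted by this virtual PMD. As a usage sketch (the exact
 * EAL --vdev syntax depends on the DPDK version), a null port is typically
 * created from the command line with something like:
 *
 *   --vdev 'eth_null0,size=64,copy=1'
 *
 * "size" sets the length of the packets generated on RX and "copy" selects
 * the memcpy-based RX/TX paths below.
 */
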
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

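/*
 * Per-queue state. The mempool is only used on the RX side (mbufs are
 * allocated from it for every burst); dummy_packet is a zeroed scratch buffer
 * of packet_size bytes that the copy-mode burst functions read from or write
 * to. The counters are updated atomically by the burst functions.
 */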
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

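/*
 * RX burst for the default (no-copy) mode: allocate up to nb_bufs mbufs from
 * the queue's mempool and hand them back with their length set to the
 * configured packet size. No packet data is written, which keeps this path as
 * cheap as possible.
 */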
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

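/*
 * RX burst for copy mode: same as eth_null_rx(), but additionally memcpy the
 * queue's dummy packet into each freshly allocated mbuf so that the cost of
 * touching packet data is included.
 */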
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

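/*
 * TX burst for the default (no-copy) mode: every mbuf handed in is simply
 * freed, i.e. the port behaves as a packet sink.
 */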
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

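/*
 * TX burst for copy mode: copy each packet's data into the queue's dummy
 * buffer before freeing it, so transmit-side data touching is accounted for.
 */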
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

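/*
 * RX queue setup: remember the mempool to allocate from and reserve a zeroed
 * dummy packet of packet_size bytes on the device's NUMA node. The dummy
 * packet is only actually read by the copy-mode RX path.
 */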
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

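/*
 * Gather the per-queue counters into the aggregate rte_eth_stats structure.
 * Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues contribute to the
 * per-queue arrays; the totals are summed over the same set of queues.
 */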
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += stats->q_opackets[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

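/*
 * Software-only RSS support: the redirection table and hash key live in
 * pmd_internals and are protected by rss_lock. reta_size must match the size
 * the device was created with (ETH_RSS_RETA_SIZE_128 entries).
 */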
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

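/*
 * Update the advertised RSS hash functions and, if provided, the fixed-size
 * hash key. When rss_key is non-NULL the caller is expected to supply at
 * least sizeof(internal->rss_key) bytes.
 */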
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

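/*
 * Create a null ethdev: allocate a per-process rte_eth_dev_data and the
 * private pmd_internals on the requested NUMA node, register the port, seed
 * the RSS configuration with a default 40-byte hash key, and select either
 * the plain or the copying burst functions depending on packet_copy.
 */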
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure and internal
	 * (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the null PMD's data is local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key,
			sizeof(internals->rss_key));

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	TAILQ_INIT(&eth_dev->link_intr_cbs);

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

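/*
 * Virtual device init hook: parse the optional "size" and "copy" devargs,
 * defaulting to 64-byte packets with copying disabled, and create the port on
 * the socket of the calling lcore.
 */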
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);