/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <limits.h>	/* UINT_MAX */
#include <stdio.h>	/* snprintf */
#include <stdlib.h>	/* strtoul */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

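/*
 * Per-queue state shared by the Rx and Tx burst handlers. A null_queue
 * serves one direction only; dummy_packet is touched solely by the
 * copy-mode handlers, and err_pkts is never incremented anywhere in this
 * file, only reset and reported.
 */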
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

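/*
 * Device-private data: the devargs-controlled settings, the statically
 * sized queue arrays, and the stored (but otherwise unused) emulated RSS
 * configuration.
 */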
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	unsigned numa_node;

	unsigned nb_rx_queues;
	unsigned nb_tx_queues;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads; the bit offset also denotes the flow type. */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = 10000,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};

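/*
 * Rx burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and return them with data_len/pkt_len set to the configured
 * packet size. The payload bytes are left uninitialized.
 */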
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

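/*
 * Rx burst, copy mode: like eth_null_rx(), but each mbuf is additionally
 * filled from the queue's preallocated dummy packet with rte_memcpy().
 */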
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

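/*
 * Tx burst, no-copy mode: every mbuf is freed immediately and counted as
 * transmitted.
 */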
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

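/*
 * Tx burst, copy mode: copy packet_size bytes out of each mbuf into the
 * dummy packet before freeing it. Note this assumes every mbuf carries
 * at least packet_size bytes of contiguous data.
 */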
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals;

	internals = dev->data->dev_private;
	internals->nb_rx_queues = dev->data->nb_rx_queues;
	internals->nb_tx_queues = dev->data->nb_tx_queues;

	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = 1;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = 0;
}

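/*
 * Rx/Tx queue setup: record the queue in dev->data and preallocate the
 * per-queue dummy packet used by the copy-mode handlers. Descriptor
 * counts and per-queue configuration are ignored, as no hardware is
 * driven.
 */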
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= internals->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= internals->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, internals->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


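/*
 * Report capabilities; the queue limits come from the statically sized
 * arrays in struct pmd_internals.
 */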
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

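/*
 * Aggregate the per-queue atomic counters into the device-level stats.
 * Per-queue figures are exposed only up to RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */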
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(internal->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

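/*
 * The RSS callbacks below merely store and return configuration under
 * rss_lock; no hashing is ever performed. They let applications exercise
 * the ethdev RSS API against a null port.
 */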
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

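/*
 * Create a null ethdev on the given NUMA node. Besides this exported
 * helper, a port is normally created from the EAL command line via the
 * vdev mechanism, e.g. (assuming the usual "eth_null<index>" naming):
 *
 *   testpmd -c 0x3 -n 4 --vdev 'eth_null0,size=64,copy=1' -- -i
 *
 * where "size" sets the emulated packet length in bytes and "copy"
 * enables the rte_memcpy() Rx/Tx paths.
 */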
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for the eth_dev structure and the
	 * internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the null queues are local per-process */

	internals->nb_rx_queues = nb_rx_queues;
	internals->nb_tx_queues = nb_tx_queues;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->numa_node = numa_node;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key,
			sizeof(internals->rss_key));

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	/* snprintf() guarantees NUL termination of data->name */
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	TAILQ_INIT(&eth_dev->link_intr_cbs);

	eth_dev->driver = NULL;
	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = drivername;
	eth_dev->data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

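/*
 * vdev init entry point for "eth_null" devices: parse the optional
 * size/copy devargs and create the port on the caller's NUMA socket.
 */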
static int
rte_pmd_null_devinit(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_devuninit(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_driver pmd_null_drv = {
	.name = "eth_null",
	.type = PMD_VDEV,
	.init = rte_pmd_null_devinit,
	.uninit = rte_pmd_null_devuninit,
};

PMD_REGISTER_DRIVER(pmd_null_drv);