xref: /dpdk/drivers/net/null/rte_eth_null.c (revision c6dab2a873f65c5a4ea9735aa24d9539426adba4)
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

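/* Per-queue state: the mempool used to allocate Rx mbufs, a scratch buffer
 * ("dummy packet") used when packet copy is enabled, and per-queue counters.
 */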
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

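/* Per-device private data: the packet size/copy settings, the Rx/Tx queue
 * arrays and the (software-only) RSS configuration.
 */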
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

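/* Rx burst in no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and return them without writing any payload; only the length,
 * segment and port fields are set.
 */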
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

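/* Rx burst in copy mode: same as eth_null_rx(), but additionally copies the
 * queue's dummy packet into each allocated mbuf.
 */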
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

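/* Tx burst in no-copy mode: every packet is "sent" by simply freeing it. */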
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

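/* Tx burst in copy mode: copy each packet into the queue's dummy buffer
 * before freeing it, so the payload is actually read.
 */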
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

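/* Set up an Rx queue: record the mempool to allocate from and allocate the
 * dummy packet buffer used in copy mode.
 */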
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

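/* Set up a Tx queue: allocate the dummy packet buffer that copy-mode Tx
 * writes into.
 */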
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

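/* Aggregate the per-queue counters into the rte_eth_stats structure. */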
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

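/* The RSS ops below only store and return the redirection table and hash
 * configuration; no hashing is performed, since no real traffic flows.
 */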
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

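/* Allocate and initialize a null ethdev: one Rx and one Tx queue, a zeroed
 * MAC address, and the burst callbacks selected by packet_copy.
 */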
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for the ethdev data, the internal
	 * (private) data and the ethdev entry itself
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the null devices are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

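/* kvargs handlers for the "size" and "copy" device arguments. */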
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

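/* vdev probe: parse the optional "size" and "copy" arguments and create the
 * device on the socket of the calling lcore.
 */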
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

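/* vdev remove: release the ethdev entry and the memory allocated in
 * eth_dev_null_create().
 */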
static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
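
/* Example usage (illustrative only; the vdev instance name "net_null0" and
 * the argument values are arbitrary, not mandated by this driver):
 *   --vdev=net_null0,size=128,copy=1
 */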