xref: /dpdk/drivers/net/null/rte_eth_null.c (revision 117eaa70584b73eebf6f648cf3ee6f2ab03264a0)
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

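/*
 * Null PMD: a virtual ethdev that discards all transmitted packets and
 * synthesizes received ones. On RX, mbufs are allocated from the queue's
 * mempool and reported as packets of the configured size; on TX, mbufs
 * are simply freed. With the "copy" devarg enabled, payloads are
 * additionally copied to/from a per-queue dummy buffer so both paths pay
 * for a real memcpy.
 *
 * Typically instantiated from the EAL command line; for example (the
 * instance name "net_null0" is illustrative):
 *   --vdev=net_null0,size=64,copy=1
 */
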
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads; the bit offset also means flow type. */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG,
};

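/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and present them as received packets of the configured size.
 * The payload is left untouched (whatever the mempool buffer contains).
 */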
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

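/*
 * RX burst, copy mode: same as eth_null_rx(), but additionally copies
 * packet_size bytes from the queue's zeroed dummy packet into each mbuf's
 * data area, so the receive path includes a real memcpy.
 */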
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

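/*
 * TX burst, no-copy mode: free every mbuf handed in and count it as
 * successfully transmitted.
 */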
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

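/*
 * TX burst, copy mode: copy packet_size bytes out of each mbuf into the
 * queue's dummy packet before freeing it, so the transmit path includes
 * a real memcpy.
 */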
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

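/*
 * Per-queue RX setup: record the mempool and allocate the zeroed dummy
 * packet used by the copy-mode burst functions on the device's NUMA node.
 * nb_rx_desc and rx_conf are ignored; there is no real descriptor ring.
 */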
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

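/* TX mirror of eth_rx_queue_setup(), minus the mempool. */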
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

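/*
 * Aggregate the per-queue atomic counters into rte_eth_stats. Per-queue
 * entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS; note that err_pkts
 * is reported here but never incremented by the burst functions above.
 */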
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

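/*
 * RSS redirection table ops: the PMD only stores and returns the table
 * under rss_lock; nothing actually hashes packets, since all RX traffic
 * is synthetic.
 */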
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

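/*
 * RSS hash config ops: accept any subset of the advertised offloads and
 * keep a copy of the 40-byte key; again, purely bookkeeping.
 */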
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

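/*
 * Allocate and wire up one null ethdev instance: a private per-process
 * copy of rte_eth_dev_data, the pmd_internals area, a default RSS
 * configuration, and the copy/no-copy burst function pair selected by
 * the "copy" devarg.
 */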
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for the eth_dev structure and the
	 * internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we replace the data element of the originally allocated
	 * eth_dev, so the null queues are local per-process
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key,
			sizeof(internals->rss_key));

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

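/*
 * kvargs callbacks for the "size" and "copy" devargs; the two handlers
 * below each parse an unsigned integer in any base strtoul() accepts.
 */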
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

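/*
 * vdev probe: parse the optional "size"/"copy" devargs, falling back to
 * the defaults above, then create the ethdev.
 */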
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

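/*
 * vdev remove: look the port up by name, free its private data and the
 * per-process data copy, and release the port.
 */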
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");