/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

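/*
 * Device arguments accepted by the null PMD:
 *   size - length in bytes of the synthetic packets produced on RX
 *   copy - if non-zero, copy packet data on RX/TX instead of only
 *          touching mbuf metadata
 */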
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

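/*
 * Per-queue state: the owning port's internals, the mempool used to
 * allocate RX mbufs, a preallocated dummy packet buffer used by the
 * copy mode, and per-queue packet counters.
 */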
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG,
};

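/*
 * RX burst (no-copy mode): allocate nb_bufs mbufs from the queue's
 * mempool and return them with only the length and port fields set;
 * the payload is whatever the mempool buffers already contain.
 */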
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

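/*
 * RX burst (copy mode): same as eth_null_rx, but additionally copies
 * packet_size bytes from the queue's dummy packet into each mbuf.
 */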
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

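/*
 * TX burst (no-copy mode): drop every packet by freeing the mbufs and
 * count them as transmitted.
 */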
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

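/*
 * TX burst (copy mode): copy packet_size bytes of each packet into the
 * queue's dummy buffer, so the payload is actually read, before freeing
 * the mbuf and counting the packet as transmitted.
 */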
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

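/*
 * Set up an RX queue: record the mempool, point the ethdev queue entry
 * at the internal queue structure and allocate the dummy packet buffer
 * used by the copy RX path.
 */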
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

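/*
 * Set up a TX queue: point the ethdev queue entry at the internal queue
 * structure and allocate the dummy packet buffer used by the copy TX path.
 */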
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

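/*
 * Report device capabilities: one MAC address, an effectively unbounded
 * RX packet length and as many queues as the internal arrays can hold.
 */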
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

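/*
 * Aggregate the per-queue counters into the ethdev stats structure; only
 * the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */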
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

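/*
 * Software-only RSS redirection table: update and query simply copy the
 * entries selected by the mask into or out of the driver's private copy,
 * under the RSS spinlock. No real hashing or redirection takes place.
 */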
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

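/*
 * Store or return the RSS hash configuration: the enabled hash functions
 * (restricted to the supported offload mask) and the 40-byte key, both
 * guarded by the RSS spinlock.
 */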
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static void
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct ether_addr *addr)
{
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

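/*
 * Allocate and initialise a null ethdev: a per-process copy of the ethdev
 * data, the private internals (packet size/copy mode, random MAC address,
 * RSS defaults) and the RX/TX burst functions selected by the copy flag.
 */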
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

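/*
 * kvargs handlers for the "size" and "copy" device arguments: parse the
 * value as an unsigned integer.
 */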
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

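/*
 * Probe entry point: parse the optional "size" and "copy" devargs and
 * create the null ethdev.
 */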
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

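/*
 * Remove entry point: find the ethdev created for this vdev, free its
 * private data and release the port.
 */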
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");