/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

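/*
 * Per-queue state: a pointer back to the device private data, the mempool
 * used to allocate Rx mbufs (Rx queues only), a pre-allocated dummy packet
 * buffer used when packet copying is enabled, and atomic packet counters
 * read by the stats callbacks.
 */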
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

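/*
 * Device private data: the configured packet size and copy mode, the port id,
 * fixed-size arrays of Rx/Tx queues, and the emulated RSS state (RETA,
 * supported hash types and hash key) protected by rss_lock.
 */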
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

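/*
 * Rx burst used when packet copy is disabled: allocate nb_bufs mbufs from the
 * queue's mempool and hand them back with the configured packet size. The
 * payload is whatever the mempool buffer already contains; nothing is copied.
 */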
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

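/*
 * Rx burst used when packet copy is enabled: same as eth_null_rx(), but the
 * queue's dummy packet is copied into every mbuf so the cost of touching the
 * packet data is included.
 */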
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

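/*
 * Tx burst used when packet copy is disabled: every mbuf is freed immediately,
 * i.e. the packets are silently dropped.
 */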
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

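/*
 * Tx burst used when packet copy is enabled: the packet data is copied into
 * the queue's dummy packet buffer before the mbuf is freed, so the copy cost
 * shows up on the transmit side as well.
 */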
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

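/*
 * Queue setup: the queues themselves live in the fixed arrays inside
 * pmd_internals, so setup only wires up the pointers and allocates the
 * dummy packet buffer (packet_size bytes) on the device's NUMA node.
 */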
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

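/*
 * Emulated RSS support: the RETA, hash types and hash key are only stored in
 * the device private data under rss_lock so they can be read back; no actual
 * hashing or packet distribution takes place.
 */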
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

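/*
 * Create a null ethdev with a single Rx and a single Tx queue on the given
 * NUMA node. The ethdev data is allocated locally and replaces the data of
 * the allocated port, and the Rx/Tx burst functions are chosen according to
 * the requested copy mode.
 */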
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for the eth_dev data structure and
	 * the internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev, so the null device's data is local to this process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

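/*
 * kvargs handlers for the "size" and "copy" devargs: both parse an unsigned
 * integer with strtoul().
 */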
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

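/*
 * Probe callback for the null vdev: parse the optional "size" and "copy"
 * devargs (falling back to the defaults above) and create the device on the
 * NUMA socket of the calling core.
 */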
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

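/*
 * Remove callback: free the private data and ethdev data allocated in
 * eth_dev_null_create() and release the port.
 */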
static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
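/*
 * Devargs accepted by the driver. A typical invocation (assuming the usual
 * EAL --vdev syntax) would be: --vdev=net_null0,size=64,copy=1
 */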
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
697