/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

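/*
 * Devargs accepted by this driver (registered via
 * RTE_PMD_REGISTER_PARAM_STRING at the end of this file):
 *   size=<int>  size in bytes of the packets produced/consumed by the port
 *   copy=<int>  non-zero to copy a dummy packet buffer on RX/TX instead of
 *               only allocating/freeing mbufs
 *
 * Illustrative usage (the instance name "net_null0" is arbitrary):
 *   --vdev=net_null0,size=64,copy=1
 */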
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

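/*
 * Per-queue state: back-pointer to the owning device's internals, the
 * mempool used to allocate RX mbufs, a preallocated dummy packet buffer
 * used by the copy paths, and per-queue packet counters.
 */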
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

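/*
 * Per-device private data: configured packet size and copy mode, the
 * RX/TX queue arrays, and the software-only RSS state (RETA and hash key)
 * exposed through the RSS callbacks below.
 */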
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

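/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and report them as received packets of the configured size.
 * The mbuf payload is left untouched.
 */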
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

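/*
 * RX burst, copy mode: like eth_null_rx() but also copies the queue's
 * zero-filled dummy packet into each allocated mbuf.
 */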
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

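/* TX burst, no-copy mode: "transmit" by simply freeing the mbufs. */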
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

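/*
 * TX burst, copy mode: copy packet_size bytes from each mbuf into the
 * queue's dummy buffer before freeing it, so the TX path actually reads
 * the packet data.
 */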
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

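/*
 * RX/TX queue setup: record the queue in the device data and allocate a
 * zero-filled dummy packet of the configured size on the device's NUMA
 * node, used by the copy burst handlers above.
 */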
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

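/*
 * Aggregate the per-queue atomic counters into the rte_eth_stats totals;
 * per-queue figures are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS
 * queues.
 */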
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

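/*
 * RSS emulation: the RETA and hash key are only stored in the device's
 * private data (under rss_lock) so applications can exercise the ethdev
 * RSS API; no actual hashing or packet distribution takes place.
 */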
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

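/*
 * Create a null ethdev with one RX and one TX queue.  The ethdev data is
 * allocated locally on the requested NUMA node so the port stays private
 * to this process, and the burst handlers are selected according to the
 * copy mode.
 */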
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - the per-process ethdev data and the
	 * internal (private) data; the ethdev entry itself is reserved below
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the null PMD's data stays local to this process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

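/*
 * Kvargs handlers: parse the integer value of a devarg into the unsigned
 * variable passed through extra_args.
 */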
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

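/*
 * Probe: parse the optional "size" and "copy" devargs and create the null
 * ethdev on the NUMA socket of the calling core.
 */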
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

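/*
 * Remove: look up the ethdev created for this name, free its private and
 * per-process data and release the port.
 */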
static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

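/*
 * Register the driver as vdev "net_null" (with the legacy alias
 * "eth_null") and advertise the accepted devargs.
 */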
static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");