/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

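/* Per-queue state: the backing mempool, the dummy packet used by the copy
 * datapath and atomic packet counters. */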
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

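/* Per-port private data: datapath parameters, the fixed queue arrays and the
 * stored (but otherwise unused) RSS configuration. */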
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

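/* All null ports share one zeroed MAC address and report this fixed link. */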
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

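/*
 * RX burst, no-copy mode: allocate nb_bufs mbufs from the queue's mempool and
 * hand them to the caller with only the lengths and port set; the payload is
 * left untouched.
 */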
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

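/*
 * RX burst, copy mode: as above, but also copy packet_size bytes from the
 * queue's dummy packet into each mbuf's data area.
 */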
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

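/* TX burst, no-copy mode: drop every mbuf by freeing it back to its pool. */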
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

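/*
 * TX burst, copy mode: read packet_size bytes of each mbuf into the queue's
 * dummy packet before freeing it, so the transmitted data is actually touched.
 */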
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

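/* Trivial device callbacks: configure is a no-op; start/stop only raise and
 * lower the reported link status. */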
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

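/*
 * RX queue setup: remember the mempool, point the ethdev queue at the
 * driver's per-queue state and allocate the buffer used as the dummy packet
 * in copy mode.
 */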
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

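/*
 * TX queue setup: point the ethdev queue at the driver's per-queue state and
 * allocate the buffer that copy-mode TX writes into.
 */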
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

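/* Report device capabilities: one MAC address, effectively unlimited packet
 * length and as many queues as the fixed per-port arrays allow. */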
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

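/*
 * Aggregate the per-queue atomic counters into the generic ethdev stats;
 * per-queue numbers are reported for at most RTE_ETHDEV_QUEUE_STAT_CNTRS
 * queues.
 */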
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

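/* Zero every per-queue counter. */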
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

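/* The only per-queue resource to release is the dummy packet buffer. */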
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

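/*
 * RSS redirection table update/query: the table only lives in the driver's
 * private data, guarded by rss_lock; it has no effect on the datapath.
 */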
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

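/* RSS hash configuration: store the requested hash functions and 40-byte key
 * under rss_lock; again purely bookkeeping for this dummy device. */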
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

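/* ethdev callback table shared by every null port. */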
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

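/*
 * Create the null ethdev: allocate per-process ethdev data plus the private
 * internals, seed the default RSS configuration and select the RX/TX burst
 * functions according to the copy mode.
 */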
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for the eth_dev data structure
	 * and the internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so that the ethdev data stays local to this process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

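/*
 * kvargs handlers for the "size" and "copy" device arguments: both parse an
 * unsigned integer from the argument value.
 */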
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

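/*
 * vdev probe: parse the optional "size" and "copy" device arguments and
 * create the null ethdev.
 */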
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

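/* vdev remove: look up the ethdev created at probe time, free its data and
 * release the port. */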
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
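
/*
 * Example usage (EAL command line), e.g. with testpmd:
 *
 *   testpmd -l 0-3 -n 4 --vdev=net_null0 --vdev=net_null1,size=128,copy=1 -- -i
 *
 * "size" sets the synthetic packet length in bytes (default 64) and a
 * non-zero "copy" enables the memcpy datapath; the legacy "eth_null" device
 * name is still accepted through the alias registered above.
 */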
677