/* xref: /dpdk/drivers/net/null/rte_eth_null.c (revision b79e4c00af0e7cfb8601ab0208659d226b82bd10) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

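/*
 * Device arguments understood by this PMD, e.g.
 *   --vdev 'net_null0,size=64,copy=1'
 * where "size" is the synthetic packet length in bytes and "copy"
 * enables copying packet data on RX/TX.
 */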
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	"driver",
	NULL
};

struct pmd_internals;

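/* Per-queue state: the mempool used for RX allocations, a scratch "dummy"
 * packet buffer used in copy mode, and atomic packet counters.
 */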
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads; the bit offset also means flow type. */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

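/*
 * RX burst, no-copy mode: allocate nb_bufs mbufs from the queue's mempool
 * and return them as "received" packets of the configured size. The mbuf
 * payload is left untouched, so its contents are undefined.
 */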
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

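/*
 * RX burst, copy mode: same as eth_null_rx(), but the queue's dummy packet
 * is copied into each freshly allocated mbuf so the payload is deterministic.
 */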
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

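/*
 * TX burst, no-copy mode: every mbuf is simply freed and counted as
 * transmitted; nothing is ever put on a wire.
 */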
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

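/*
 * TX burst, copy mode: packet data is copied into the queue's dummy packet
 * before the mbuf is freed, emulating the cost of a real transmit.
 */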
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

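/*
 * Queue setup: each queue points back at the shared pmd_internals and gets
 * a zeroed scratch buffer of packet_size bytes for use in copy mode.
 */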
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

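/*
 * Gather per-queue counters into the ethdev stats structure. Per-queue
 * entries are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS; the totals cover the
 * same set of queues.
 */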
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += stats->q_opackets[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

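/*
 * RSS redirection table (RETA) update/query. The table exists only in
 * software; rss_lock serializes access against the hash-configuration ops.
 */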
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

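/*
 * RSS hash configuration: the requested hash functions are masked against
 * what this PMD advertises, and the 40-byte key is stored verbatim.
 */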
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

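/*
 * Create a null ethdev: allocate a per-process copy of the ethdev data,
 * initialize the private state (packet size/copy mode, RSS defaults) and
 * hook up the RX/TX burst functions according to the copy setting.
 */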
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	/* Default 40-byte RSS hash key. */
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for the eth_dev data structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

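/*
 * kvargs handlers: parse the integer value of a single "size"/"copy"
 * devarg into the caller-provided location.
 */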
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

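/*
 * Probe entry point: parse the optional "size" and "copy" devargs (falling
 * back to the defaults above) and create the null ethdev.
 */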
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

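/*
 * Remove entry point: look up the ethdev by vdev name, free the
 * per-process data and private state, and release the port.
 */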
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");