/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

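/*
 * Rx burst for the default (no-copy) mode: allocate nb_bufs mbufs from the
 * queue's mempool, set their length and port, and hand them to the caller
 * without writing any payload.
 */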
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

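/*
 * Rx burst for copy mode: same as eth_null_rx(), but additionally copies
 * the queue's dummy packet into each allocated mbuf's data area.
 */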
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

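/*
 * Tx burst for the default (no-copy) mode: every mbuf handed in is freed
 * immediately and counted as transmitted.
 */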
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

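/*
 * Tx burst for copy mode: the first packet_size bytes of each mbuf are
 * copied into the queue's dummy packet before the mbuf is freed, so the
 * payload is actually read once.
 */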
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

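/*
 * Queue setup: each Rx/Tx queue keeps a pointer back to the device
 * internals plus a zeroed "dummy packet" buffer of packet_size bytes,
 * allocated on the device's NUMA node, which the copy-mode burst
 * functions read from or write to.
 */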
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += internal->tx_null_queues[i].err_pkts.cnt;
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

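/*
 * The RSS callbacks below only emulate the configuration side: the RETA
 * and hash key are stored in the device internals under rss_lock, but no
 * packet distribution ever takes place.
 */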
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

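/*
 * Allocate and initialise one null ethdev: fill in the per-device internals
 * (packet size, copy flag, random MAC address, default RSS state), hook up
 * the dev_ops and the burst functions matching the requested copy mode,
 * then mark probing as finished.
 */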
static struct rte_vdev_driver pmd_null_drv;

static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

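/*
 * kvargs callbacks: the "size" and "copy" device arguments are parsed as
 * unsigned integers (strtoul with auto-detected base).
 */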
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

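/*
 * Probe: parse the optional "size" and "copy" device arguments and create
 * the ethdev. Secondary processes only attach to the port that the primary
 * process already created.
 */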
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because it is part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
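
/*
 * Illustrative usage (not part of the driver): a null port is created via
 * the EAL vdev argument, for example something like
 *
 *   testpmd -l 0-1 --vdev=net_null0,size=64,copy=1 -- -i
 *
 * where "size" sets the length of the dummy packets produced/consumed and
 * "copy" selects the memcpy-based Rx/Tx paths above. The exact application
 * and core options are only an example.
 */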

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}