/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
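
/*
 * Usage sketch (illustrative, not part of the driver): "size" and "copy"
 * above are the device arguments accepted by this PMD. A null port is most
 * often created from the EAL command line, e.g.
 *
 *   ./testpmd -l 0-1 --vdev=net_null0,size=128,copy=1 -- -i
 *
 * The application (testpmd here) and the core list are only examples; any
 * DPDK application can instantiate a null port this way.
 */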

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

/* Rx burst, no copy: hand out freshly allocated mbufs sized to packet_size;
 * the payload is left untouched.
 */
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

/* Rx burst with copy: as above, but also copy the queue's dummy packet into
 * each mbuf's data area.
 */
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

/* Tx burst, no copy: drop every packet by freeing its mbuf. */
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

/* Tx burst with copy: copy packet_size bytes of each packet into the queue's
 * dummy buffer, then free the mbuf.
 */
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
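
/*
 * Illustrative sketch (assumption, application-side code, not part of this
 * driver): once the port is started, the burst handlers above are reached
 * through the usual ethdev calls, e.g.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *
 * With copy=0 the Rx path only allocates and sizes mbufs; with copy=1 it also
 * copies the per-queue dummy packet into each mbuf. The Tx path always
 * consumes the full burst, so nothing is left for the caller to free.
 */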

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
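
/*
 * Minimal bring-up sketch (assumption, application-side code): the setup
 * callbacks above are reached through the standard ethdev configuration
 * sequence, e.g.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 * "mb_pool" is an rte_mempool the application created beforehand; the
 * descriptor counts are ignored by this PMD.
 */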

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
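
/*
 * Illustrative sketch (assumption, application-side code): the RETA callbacks
 * above use the standard 64-entry group layout, where redirection-table entry
 * "idx" lives in reta_conf[idx / RTE_RETA_GROUP_SIZE].reta[idx %
 * RTE_RETA_GROUP_SIZE] and is only touched when the matching mask bit is set,
 * e.g.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	uint16_t idx = 70;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[idx / RTE_RETA_GROUP_SIZE].mask =
 *		1ULL << (idx % RTE_RETA_GROUP_SIZE);
 *	reta_conf[idx / RTE_RETA_GROUP_SIZE].reta[idx % RTE_RETA_GROUP_SIZE] = 0;
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 *
 * The reta_size of 128 matches the ETH_RSS_RETA_SIZE_128 table used by this
 * PMD; any other size is rejected with -EINVAL.
 */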

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data,
	 * - point eth_dev_data to internals,
	 * - and point the eth_dev structure to the new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
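
/*
 * Illustrative sketch (assumption, application-side code): besides the --vdev
 * EAL option, a null port can be created at run time through the vdev bus API
 * and then looked up by name, e.g.
 *
 *	uint16_t port_id;
 *
 *	if (rte_vdev_init("net_null0", "size=64,copy=1") == 0 &&
 *			rte_eth_dev_get_port_by_name("net_null0", &port_id) == 0)
 *		printf("null port %u ready\n", port_id);
 *
 * Calling rte_vdev_uninit("net_null0") later tears the port down through
 * rte_pmd_null_remove() below.
 */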

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}