/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

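/*
 * Per-queue state: a back-pointer to the device internals, the mempool
 * used to allocate mbufs on Rx, a scratch "dummy packet" buffer used by
 * the copy variants of the Rx/Tx handlers, and per-queue packet counters.
 */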
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

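/*
 * Device-private data: configured packet size and copy mode, the port id,
 * per-queue state for all possible Rx/Tx queues, the MAC address and the
 * purely software RSS configuration exposed by this PMD.
 */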
struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

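/*
 * Rx burst handler used when packet copy is disabled: allocate nb_bufs
 * mbufs from the queue's mempool, set their lengths to the configured
 * packet size and return them without touching the payload.
 */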
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

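/*
 * Rx burst handler used when packet copy is enabled: same as eth_null_rx,
 * but additionally copies the queue's dummy packet into each mbuf.
 */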
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

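/*
 * Tx burst handler used when packet copy is disabled: every mbuf handed in
 * is simply freed and counted as transmitted.
 */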
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

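/*
 * Tx burst handler used when packet copy is enabled: copy each packet's
 * data into the queue's dummy buffer before freeing the mbuf.
 */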
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

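/*
 * Set up an Rx queue: remember the mempool, point the ethdev queue slot at
 * the internal queue structure and allocate the per-queue dummy packet.
 */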
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

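/*
 * Set up a Tx queue: point the ethdev queue slot at the internal queue
 * structure and allocate the per-queue dummy packet.
 */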
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

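/* Report device capabilities: queue counts, RETA size and RSS offloads. */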
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

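/*
 * Aggregate the per-queue software counters into the standard ethdev
 * statistics, capped at RTE_ETHDEV_QUEUE_STAT_CNTRS queues per direction.
 */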
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

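/* Clear the per-queue Rx and Tx packet counters. */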
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
}

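/* Release a queue: only the dummy packet buffer needs to be freed. */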
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

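/* Store the new RSS redirection table entries selected by the mask. */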
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

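/* Return the stored RSS redirection table entries selected by the mask. */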
static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

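/* Update the advertised RSS hash functions and, optionally, the hash key. */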
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

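/* Return the current RSS hash functions and, optionally, the hash key. */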
static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

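/*
 * Allocate and initialise an ethdev for the vdev: fill in the private data,
 * default RSS configuration and link state, then select the copy or
 * non-copy Rx/Tx burst handlers.
 */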
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

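/* kvargs handler for the "size" argument: parse the packet size. */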
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

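/* kvargs handler for the "copy" argument: parse the copy flag. */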
static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

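/*
 * Probe entry point: attach to an existing port in secondary processes,
 * otherwise parse the "size" and "copy" device arguments and create the
 * null ethdev.
 */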
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

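/*
 * Remove entry point: look up the ethdev created for this vdev and release
 * the port.
 */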
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone; it is part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}