/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>	/* strtoul() */
#include <limits.h>	/* UINT_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

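/*
 * The null PMD fabricates packets on Rx and discards them on Tx, which
 * makes it useful for benchmarking and testing. A usage sketch (the
 * application binary and vdev names below are only examples):
 *
 *   ./dpdk-app --vdev=net_null0,size=128,copy=1 --vdev=net_null1,no-rx=1
 *
 * "size" sets the synthetic packet length in bytes, "copy" adds a memcpy
 * per packet on both paths, and "no-rx" makes Rx return no packets at all.
 */
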
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

RTE_LOG_REGISTER(eth_null_logtype, pmd.net.null, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

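/*
 * Default Rx burst: bulk-allocate nb_bufs mbufs from the queue's mempool
 * and hand them back as "received" packets. Payload bytes are left as-is;
 * only the lengths and the originating port id are filled in.
 */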
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

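/*
 * Rx burst for copy=1: as above, but also copy packet_size bytes from the
 * queue's dummy packet into each mbuf, so every "received" packet pays
 * the cost of one payload copy.
 */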
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

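/* Rx burst for no-rx=1: never returns any packets. */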
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

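/*
 * Default Tx burst: free every mbuf immediately and count it as
 * transmitted; nothing is ever sent anywhere.
 */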
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

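/*
 * Tx burst for copy=1: copy a fixed packet_size bytes out of each mbuf
 * into the queue's dummy packet before freeing it. Note the copy length
 * is the configured packet size, not the mbuf's actual data length.
 */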
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

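/*
 * Queue setup (Rx below, Tx further down) points the generic queue array
 * at the matching per-port null_queue and allocates the zeroed dummy
 * packet buffer on the device's NUMA node. The dummy packet is only
 * dereferenced by the copy=1 burst functions but is allocated in all
 * modes.
 */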
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

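/*
 * Statistics are kept as one atomic counter per queue, bumped by the
 * burst functions. Per-queue entries in rte_eth_stats are capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS, and the port totals are summed over the
 * same capped set of queues.
 */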
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

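/*
 * The RSS ops below only emulate state: the RETA and hash key are kept
 * in dev_private under rss_lock so that queries return whatever was last
 * configured, but the datapath never consults them.
 */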
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key,
				sizeof(internal->rss_key));

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed on its own because it is part of
	 * dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

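/*
 * Create the ethdev in the primary process: allocate it via
 * rte_eth_vdev_allocate(), seed the private data from the parsed options
 * (including a random MAC address and a fixed default RSS key), and pick
 * the Rx/Tx burst implementations that match the requested mode.
 */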
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on NUMA socket %d",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/*
	 * Now put it all together: rte_eth_vdev_allocate() has already
	 * allocated the ethdev and its per-process data, so only the
	 * driver-private area and the generic ethdev data need to be
	 * populated here.
	 */
	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key,
			sizeof(internals->rss_key));

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

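/*
 * kvargs handlers: each one parses a single "key=value" device argument
 * into the pmd_options field passed through extra_args. A negative
 * return value makes rte_kvargs_process() fail the probe.
 */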
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

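/*
 * Probe entry point. A secondary process only attaches to the ethdev
 * created by the primary and re-resolves the burst function pointers;
 * the primary parses the device arguments and creates the port.
 */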
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"The %s and %s arguments cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

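/* Remove entry point: close and release the port if it still exists. */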
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");