/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

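/*
 * Illustrative usage (not part of this file): the driver is normally
 * instantiated through an EAL --vdev argument using the keys above, e.g.
 *
 *   --vdev=net_null0,size=64,copy=1
 *   --vdev=net_null1,no-rx=1
 *
 * "size" sets the length of the mbufs produced on Rx, "copy" enables the
 * memcpy Rx/Tx paths and "no-rx" makes Rx always return zero packets.
 */
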
static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

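/*
 * Per-queue state shared by the Rx and Tx paths: mb_pool is only set for Rx
 * queues, dummy_packet is a packet_size-byte scratch buffer used by the
 * "copy" burst handlers, and rx_pkts/tx_pkts count packets handled by the
 * burst functions.
 */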
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	uint64_t rx_pkts;
	uint64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

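/*
 * Per-device private data: the devargs ("size", "copy", "no-rx") are copied
 * here at probe time together with a randomly generated MAC address and the
 * software RSS state (redirection table and 40-byte hash key) operated on by
 * the RSS callbacks below.
 */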
struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

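/*
 * Rx burst for the default (non-copy) mode: allocate nb_bufs mbufs from the
 * queue's mempool, set their lengths to the configured packet size and hand
 * them to the application without touching the payload.
 */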
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

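/*
 * Tx burst for the default (non-copy) mode: every mbuf handed in is freed
 * immediately and counted as transmitted.
 */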
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

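/*
 * Tx burst for the "copy" mode: the first packet_size bytes of each mbuf are
 * copied into the queue's dummy buffer before the mbuf is freed, so the
 * packet data is actually read once per packet.
 */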
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

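/*
 * Queue setup: Rx and Tx setup both point the ethdev queue array at the
 * matching null_queue slot and allocate a packet_size-byte dummy buffer on
 * the device's NUMA node for the copy paths; only the Rx side records the
 * application's mempool.
 */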
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

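/*
 * Per-queue counters feed the generic ethdev statistics. Illustrative
 * application-side read (not part of this driver, assumes port_id is a
 * valid port and <inttypes.h> is included):
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *			stats.ipackets, stats.opackets);
 */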
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: review for atomic access */
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: review for atomic access */
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		/* NOTE: review for atomic access */
		internal->rx_null_queues[i].rx_pkts = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		/* NOTE: review for atomic access */
		internal->tx_null_queues[i].tx_pkts = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

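/*
 * Software RSS redirection table: the driver only stores what the
 * application programs through the ethdev API; nothing in the Rx path
 * consumes it. Illustrative application-side update (not part of this
 * driver, assumes port_id is a valid port and queue 0 exists):
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
 *			RTE_ETH_RETA_GROUP_SIZE] = { 0 };
 *	int i;
 *
 *	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] = 0;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, RTE_ETH_RSS_RETA_SIZE_128);
 */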
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

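/*
 * RSS hash configuration is likewise only stored: eth_rss_hash_update()
 * masks the requested hash functions against flow_type_rss_offloads, and
 * eth_rss_hash_conf_get() copies the full 40-byte key back into the
 * caller's buffer whenever rss_conf->rss_key is non-NULL.
 */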
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed separately because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

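/*
 * Create the null ethdev: allocate it over the vdev, copy the parsed devargs
 * into pmd_internals, generate a random MAC address and select the Rx/Tx
 * burst functions from the "copy" and "no-rx" options. The port starts with
 * one Rx and one Tx queue and a fixed 10G full-duplex link in the down state.
 */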
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

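/*
 * Devargs parsing helpers: each handler converts its value with strtoul()
 * and stores the result through extra_args; "no-rx" must additionally be
 * exactly 0 or 1.
 */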
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;

		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Both %s and %s arguments at the same time not supported",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");