/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

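/*
 * Null PMD: a virtual ethdev that drops every transmitted packet and, on
 * receive, hands back freshly allocated mbufs of a fixed size (optionally
 * filled from a dummy buffer, or no packets at all with the no-rx option).
 * Useful for benchmarking and testing the ethdev layer.
 */
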
#include <stdlib.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

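/*
 * Device arguments, given as a comma separated list after the vdev name,
 * e.g. --vdev=net_null0,size=128,copy=1
 */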
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

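/*
 * Per-queue state: the mempool Rx mbufs are allocated from, a dummy packet
 * buffer used only in copy mode, and counters updated by the Rx/Tx burst
 * functions.
 */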
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	RTE_ATOMIC(uint64_t) rx_pkts;
	RTE_ATOMIC(uint64_t) tx_pkts;
};

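/* Options parsed from the devargs list. */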
struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
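
/* Fixed link properties reported by the driver; the link status is toggled
 * by dev_start()/dev_stop().
 */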
static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
#define RTE_LOGTYPE_ETH_NULL eth_null_logtype

#define PMD_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, ETH_NULL, "%s(): ", __func__, __VA_ARGS__)

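/*
 * Rx burst: allocate nb_bufs mbufs from the queue's pool and report them as
 * received packets of the configured size; the payload is not initialized.
 */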
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: seq_cst is stronger than a statistics counter needs;
	 * relaxed ordering would suffice here.
	 */
	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);

	return i;
}

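/*
 * Rx burst used when copy mode is enabled: same as eth_null_rx(), except
 * the queue's dummy packet is copied into each mbuf's data area.
 */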
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: seq_cst is stronger than a statistics counter needs;
	 * relaxed ordering would suffice here.
	 */
	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);

	return i;
}

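/* Rx burst used with the no-rx option: never returns any packets. */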
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

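/* Tx burst: drop every packet by freeing the mbufs back to their pool. */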
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	/* NOTE: seq_cst is stronger than a statistics counter needs;
	 * relaxed ordering would suffice here.
	 */
	rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);

	return i;
}

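/*
 * Tx burst in copy mode: copy each packet's data into the dummy buffer
 * before freeing it, so that transmission actually touches the packet data.
 */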
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	/* NOTE: seq_cst is stronger than a statistics counter needs;
	 * relaxed ordering would suffice here.
	 */
	rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

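/*
 * Rx queue setup: record the mbuf pool and allocate the dummy packet buffer
 * that the copy-mode Rx path copies from.
 */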
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

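/*
 * Tx queue setup mirrors Rx queue setup; the dummy packet buffer allocated
 * here is the destination of the copy done by the copy-mode Tx path.
 */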
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
	dev_info->hash_key_size = sizeof(internals->rss_key);

	return 0;
}

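/*
 * Aggregate the per-queue counters into the generic stats structure; only
 * the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues are accounted.
 */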
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: counter is read directly; a relaxed atomic load
		 * would make the concurrent access explicit.
		 */
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: counter is read directly; a relaxed atomic load
		 * would make the concurrent access explicit.
		 */
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		/* NOTE: direct write; a relaxed atomic store would be more explicit */
		internal->rx_null_queues[i].rx_pkts = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		/* NOTE: direct write; a relaxed atomic store would be more explicit */
		internal->tx_null_queues[i].tx_pkts = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

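/*
 * RSS redirection table and hash configuration are only recorded in
 * dev_private under rss_lock; the null driver never hashes real traffic.
 */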
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed on its own: it points into dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

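/*
 * Allocate the ethdev, fill in dev_private from the parsed options and
 * select the Rx/Tx burst functions according to the copy/no-rx flags.
 */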
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the null queues are local per-process
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

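/*
 * kvargs handlers: each one parses a single "key=value" pair from the
 * devargs string into the corresponding pmd_options field.
 */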
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

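/*
 * Probe: in a secondary process attach to the existing port; in the primary
 * process parse the devargs and create a new null ethdev.
 */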
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Arguments %s and %s cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

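/* Remove: close the port and release the ethdev allocated at probe time. */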
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
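
/*
 * Example usage (illustrative; the port names and core list are arbitrary):
 *
 *   dpdk-testpmd -l 0-1 --vdev=net_null0 --vdev=net_null1,size=128,copy=1 -- -i
 *
 * creates two null ports, the second returning 128-byte packets and copying
 * packet data on both Rx and Tx.
 */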