/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>	/* UINT_MAX, used by the kvargs handlers below */

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	uint64_t rx_pkts;
	uint64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	/* NOTE: review for potential ordering optimization */
	__atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);

	return i;
}
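
/*
 * A minimal sketch of how the burst handlers above are exercised; "port_id"
 * is an assumption for illustration.  The null port behaves like any other
 * ethdev, so packets are pulled and pushed through the generic burst API.
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t nb_rx, nb_tx;
 *
 *   nb_rx = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 *   // eth_null_rx() hands back freshly allocated mbufs of packet_size bytes
 *   nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *   // eth_null_tx() simply frees the mbufs and bumps tx_pkts
 */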

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
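
/*
 * A minimal sketch of the configuration an application runs before the queue
 * setup callbacks above are invoked; "port_id", the pool name and the
 * descriptor counts are illustrative assumptions.  The descriptor counts are
 * ignored by this driver, but a mempool is still required so eth_null_rx()
 * can allocate mbufs.
 *
 *   struct rte_eth_conf port_conf = {0};
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("null_pool", 8192, 256,
 *           0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *   rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *   rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *   rte_eth_dev_start(port_id);
 */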

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: review for atomic access */
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		/* NOTE: review for atomic access */
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}
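
/*
 * A minimal sketch of reading the counters filled in above through the
 * generic stats API; "port_id" is an assumption for illustration.
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(port_id, &stats) == 0)
 *       printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *               stats.ipackets, stats.opackets);
 *
 * Per-queue values are reported in stats.q_ipackets[]/q_opackets[], capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS entries as done in eth_stats_get().
 */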

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		/* NOTE: review for atomic access */
		internal->rx_null_queues[i].rx_pkts = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		/* NOTE: review for atomic access */
		internal->tx_null_queues[i].tx_pkts = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}
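
/*
 * A minimal sketch of driving the RSS callbacks above; "port_id" and
 * "nb_rx_queues" are assumptions for illustration.  reta_size must equal
 * internals->reta_size (128 entries here) or the update is rejected with
 * -EINVAL.
 *
 *   struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_128 /
 *           RTE_ETH_RETA_GROUP_SIZE];
 *   uint16_t i;
 *
 *   memset(reta, 0, sizeof(reta));
 *   for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
 *       reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *               1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *       reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *               i % nb_rx_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta, RTE_ETH_RSS_RETA_SIZE_128);
 */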

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
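
/*
 * A minimal sketch of inspecting the port created above from an application;
 * "port_id" is an assumption for illustration.  The MAC address is the random
 * one generated in eth_dev_null_create() and the link mirrors the static
 * pmd_link definition (reported UP only after rte_eth_dev_start()).
 *
 *   struct rte_ether_addr mac;
 *   struct rte_eth_link link;
 *
 *   rte_eth_macaddr_get(port_id, &mac);
 *   rte_eth_link_get_nowait(port_id, &link);
 */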

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Both %s and %s arguments at the same time not supported",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
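
/*
 * A minimal usage sketch matching the parameters registered above; any DPDK
 * application can stand in for dpdk-testpmd, which is used here only as an
 * example:
 *
 *   dpdk-testpmd --vdev=net_null0,size=256,copy=1 -- -i
 *   dpdk-testpmd --vdev=net_null0,no-rx=1 -- -i
 *
 * "copy" and "no-rx" are mutually exclusive, as enforced in
 * rte_pmd_null_probe(), and the legacy "eth_null" device name is still
 * accepted through the alias registered above.
 */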
779