/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <stdlib.h>
#include <limits.h>	/* UINT_MAX, used by the kvargs handlers below */

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

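/*
 * Rx burst for the default mode: allocate nb_bufs mbufs from the queue's
 * mempool and return them as "received" packets of the configured size.
 * The payload is left untouched (whatever the mempool provides).
 */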
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

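/*
 * Rx burst for copy mode: same as eth_null_rx(), but additionally copies
 * packet_size bytes from the queue's dummy packet into each mbuf data area.
 */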
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

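/*
 * Rx burst for "no-rx" mode: never produces packets, so the port behaves
 * like an always-idle link on the receive side.
 */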
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

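/*
 * Tx burst for the default mode: simply frees every submitted mbuf and
 * counts it as transmitted.
 */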
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

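/*
 * Tx burst for copy mode: copies packet_size bytes from each mbuf into the
 * queue's dummy packet before freeing it, so the packet data is actually read.
 */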
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

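/*
 * Rx queue setup: record the mempool, point the ethdev queue at the
 * statically allocated null_queue and allocate the per-queue dummy packet
 * used by the copy Rx path.
 */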
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

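/*
 * Tx queue setup: same as the Rx side, minus the mempool; the dummy packet
 * is the destination buffer for the copy Tx path.
 */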
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

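/*
 * Statistics: per-queue Rx/Tx packet counters are reported for at most
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues and summed into the port totals.
 */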
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	uint64_t rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

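/*
 * RSS emulation: the RETA, hash key and hash functions are only stored in
 * the device private data (protected by rss_lock) so applications can
 * exercise the RSS API; the Rx/Tx paths never use them.
 */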
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed alone because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

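/*
 * Allocate and initialise the null ethdev: one Rx and one Tx queue, a random
 * MAC address stored in dev_private, a fixed 10G link and a default RSS
 * configuration, then pick the Rx/Tx burst functions from the options.
 */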
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on NUMA socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* Now put it all together:
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data,
	 * - point eth_dev_data to internals,
	 * - and point the eth_dev structure to the new eth_dev_data structure.
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process.
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

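/*
 * kvargs handlers for the "size", "copy" and "no-rx" device arguments.
 */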
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

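/*
 * Probe: in a secondary process attach to the existing port and only rebind
 * the burst functions; in the primary process parse the device arguments and
 * create the port.
 */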
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Arguments %s and %s cannot be used at the same time",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

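/*
 * Remove: close and release the port if it is still registered.
 */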
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	ETH_NULL_PACKET_SIZE_ARG "=<int> "
	ETH_NULL_PACKET_COPY_ARG "=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");
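
/*
 * Example usage (illustrative): create null ports from the EAL command line,
 * e.g.
 *
 *   dpdk-testpmd -l 0-3 --vdev 'net_null0' \
 *       --vdev 'net_null1,size=128,copy=1' -- -i
 */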