/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

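/*
 * Devargs accepted by this driver (see RTE_PMD_REGISTER_PARAM_STRING at the
 * end of this file). For example, a typical invocation might pass
 * "--vdev=net_null0,size=64,copy=1" on the EAL command line.
 */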
#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"
#define ETH_NULL_PACKET_NO_RX_ARG	"no-rx"

static unsigned int default_packet_size = 64;
static unsigned int default_packet_copy;
static unsigned int default_no_rx;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	ETH_NULL_PACKET_NO_RX_ARG,
	NULL
};

struct pmd_internals;

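/*
 * Per-queue state: a back-pointer to the owning device, the mbuf pool used
 * by the Rx paths, a scratch packet buffer used by the copy paths, and a
 * packet counter.
 */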
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

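/* Device options parsed from the devargs at probe time. */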
struct pmd_options {
	unsigned int packet_copy;
	unsigned int packet_size;
	unsigned int no_rx;
};

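/* Per-device private data, stored in rte_eth_dev_data.dev_private. */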
struct pmd_internals {
	unsigned int packet_size;
	unsigned int packet_copy;
	unsigned int no_rx;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
			RTE_ETH_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

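/*
 * Rx burst, default mode: allocate nb_bufs mbufs from the queue's pool and
 * return them with data_len/pkt_len set to the configured packet size. The
 * payload is left untouched.
 */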
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

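/*
 * Rx burst, copy mode: same as eth_null_rx(), but the queue's dummy packet
 * is also copied into each mbuf's data area.
 */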
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

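/* Rx burst, no-rx mode: never returns any packets. */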
static uint16_t
eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_bufs __rte_unused)
{
	return 0;
}

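/* Tx burst, default mode: free (drop) every mbuf and count it as sent. */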
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

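/*
 * Tx burst, copy mode: copy each payload into the queue's dummy buffer
 * before freeing the mbuf, so the packet data is actually read.
 */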
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned int packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return 0;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

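/*
 * Rx queue setup: remember the mempool to allocate from and allocate the
 * per-queue scratch buffer used by the copy Rx path.
 */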
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

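/*
 * Tx queue setup: allocate the per-queue scratch buffer used by the copy
 * Tx path.
 */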
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned int packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

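/*
 * Aggregate the per-queue packet counters into the generic stats; byte and
 * error counters are not maintained by this driver.
 */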
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned int i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_LOG(INFO, "Closing null ethdev on NUMA socket %u",
			rte_socket_id());

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* mac_addrs must not be freed separately because it is part of dev_private */
	dev->data->mac_addrs = NULL;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

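/*
 * Create the ethdev for a probed vdev: allocate the device, fill the
 * private data from the parsed options and pick the Rx/Tx burst handlers
 * that match the requested mode.
 */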
static int
eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
{
	const unsigned int nb_rx_queues = 1;
	const unsigned int nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* Now put it all together:
	 * - fill the per-device internals from the parsed options,
	 * - generate a random MAC address and set the RSS defaults,
	 * - fill the ethdev data (queue counts, link state, MAC address).
	 */
	internals = eth_dev->data->dev_private;
	internals->packet_size = args->packet_size;
	internals->packet_copy = args->packet_copy;
	internals->no_rx = args->no_rx;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (internals->packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else if (internals->no_rx) {
		eth_dev->rx_pkt_burst = eth_null_no_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned int)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

static int
get_packet_no_rx_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned int no_rx;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	no_rx = (unsigned int)strtoul(a, NULL, 0);
	if (no_rx != 0 && no_rx != 1)
		return -1;

	*(unsigned int *)extra_args = no_rx;
	return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct pmd_options args = {
		.packet_copy = default_packet_copy,
		.packet_size = default_packet_size,
		.no_rx = default_no_rx,
	};
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_internals *internals;
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		internals = eth_dev->data->dev_private;
		if (internals->packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else if (internals->no_rx) {
			eth_dev->rx_pkt_burst = eth_null_no_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_SIZE_ARG,
				&get_packet_size_arg, &args.packet_size);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_COPY_ARG,
				&get_packet_copy_arg, &args.packet_copy);
		if (ret < 0)
			goto free_kvlist;

		ret = rte_kvargs_process(kvlist,
				ETH_NULL_PACKET_NO_RX_ARG,
				&get_packet_no_rx_arg, &args.no_rx);
		if (ret < 0)
			goto free_kvlist;

		if (args.no_rx && args.packet_copy) {
			PMD_LOG(ERR,
				"Using both %s and %s arguments at the same time is not supported",
				ETH_NULL_PACKET_COPY_ARG,
				ETH_NULL_PACKET_NO_RX_ARG);
			ret = -EINVAL;
			goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
			"packet copy is %s", args.packet_size,
			args.packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, &args);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int> "
	ETH_NULL_PACKET_NO_RX_ARG "=0|1");