xref: /dpdk/drivers/net/null/rte_eth_null.c (revision 050fe6e9ff970ff92d842912136be8f9f52e171f)
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
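/*
 * Overview: the null PMD is a software-only ethdev used for testing and
 * benchmarking. Receive bursts hand back freshly allocated mbufs of a
 * fixed, configurable size; transmit bursts simply free the mbufs. An
 * optional "copy" mode additionally copies payload bytes to/from a
 * per-queue dummy buffer to emulate the cost of touching packet data.
 */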

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	"driver",
	NULL
};

struct pmd_internals;

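/*
 * Per-queue state. Each RX/TX queue keeps a pointer back to the device
 * internals, the mempool to allocate from (RX only), a dummy packet
 * buffer used by the copy variants, and 64-bit packet counters.
 */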
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

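/*
 * RX burst, no-copy mode: allocate up to nb_bufs mbufs from the queue's
 * mempool and return them as "received" packets of the configured size.
 * The payload is left untouched.
 */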
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

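/*
 * RX burst, copy mode: as above, but also copy packet_size bytes from the
 * queue's dummy packet buffer into each mbuf before handing it back.
 */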
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

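/*
 * TX burst, no-copy mode: every packet is accepted and immediately freed;
 * only the per-queue transmit counter is updated.
 */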
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

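/*
 * TX burst, copy mode: read packet_size bytes out of each mbuf into the
 * dummy buffer before freeing it, to emulate the cost of touching the
 * payload on transmit.
 */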
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

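/*
 * RX/TX queue setup: point the ethdev queue at the matching slot in
 * pmd_internals and allocate the per-queue dummy packet buffer on the
 * device's NUMA node.
 */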
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

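/*
 * RSS callbacks: no hashing is actually performed. The PMD only keeps a
 * software copy of the RETA and the 40-byte hash key, protected by
 * rss_lock, so that applications exercising the rte_eth RSS APIs see
 * consistent state.
 */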
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

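/* Minimal set of eth_dev_ops implemented by this PMD. */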
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

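/*
 * Allocate and populate an ethdev for one null port: a per-process copy of
 * the eth_dev_data, the private pmd_internals, the default RSS key, and
 * the RX/TX burst functions selected by the "copy" option.
 */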
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);

	/* now do all data allocation - for the eth_dev structure and the
	 * internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

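/*
 * Probe a "net_null" vdev: parse the optional "size" and "copy" devargs
 * and create the port.
 */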
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
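/*
 * Example usage (illustrative; "net_null0" is just the usual vdev instance
 * naming convention, not something defined in this file):
 *
 *   testpmd -l 0-3 -n 4 --vdev=net_null0,size=128,copy=1 -- -i
 *
 * "eth_null" is also accepted as a legacy prefix via the alias registered
 * above.
 */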