/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) IGEL Co.,Ltd.
 *  All rights reserved.
 */

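/*
 * Null PMD: a virtual ethdev typically used for testing and benchmarking.
 * Received "packets" are mbufs allocated from the Rx queue's mempool with
 * no real payload, and transmitted packets are simply freed.  The optional
 * "copy" devarg adds a memcpy of "size" bytes per packet on both paths to
 * mimic the cost of touching packet data.
 */
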
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

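/* Per-queue state: owning device, mempool used for Rx allocation, a scratch
 * buffer used by the "copy" datapaths, and packet counters.
 */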
struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct rte_ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};
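
/* Link state reported to applications: a fixed 10G full-duplex link that is
 * initially down and brought up by eth_dev_start().
 */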
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

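/* Rx burst ("copy" mode): same as eth_null_rx, but also copies the queue's
 * dummy packet into each mbuf's data area.
 */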
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

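/* Tx burst (no copy): free every mbuf and count it as transmitted. */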
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

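/* Tx burst ("copy" mode): copy each packet's data into the queue's dummy
 * buffer before freeing it, so the payload is actually read.
 */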
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

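/* Set up an Rx queue: remember the mempool to allocate from and create the
 * per-queue dummy packet buffer on the device's NUMA node.
 */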
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

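/* Set up a Tx queue: allocate the per-queue dummy buffer used by the
 * "copy" Tx path.
 */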
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	return 0;
}

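/* Aggregate the per-queue packet counters into the standard ethdev stats,
 * capped at RTE_ETHDEV_QUEUE_STAT_CNTRS entries per direction.
 */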
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return -EINVAL;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
		internal->tx_null_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

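/* The RSS ops below only store and return the configuration under rss_lock;
 * the driver performs no real hashing or packet steering.
 */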
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

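/* Allocate and initialise one null ethdev: fill in the private data, the
 * default RSS state and a random MAC address, then plug in the ops table
 * and the Rx/Tx burst functions selected by the "copy" devarg.
 */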
static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

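/* rte_kvargs_process() callbacks parsing the "size" and "copy" devargs. */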
static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

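/* vdev probe: parse the optional "size" and "copy" arguments and create the
 * device.  Secondary processes only attach to the ethdev created by the
 * primary and set up local function pointers.
 */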
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		if (packet_copy) {
			eth_dev->rx_pkt_burst = eth_null_copy_rx;
			eth_dev->tx_pkt_burst = eth_null_copy_tx;
		} else {
			eth_dev->rx_pkt_burst = eth_null_rx;
			eth_dev->tx_pkt_burst = eth_null_tx;
		}
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

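/* vdev remove: look up the ethdev by name and release the port. */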
static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}