xref: /dpdk/drivers/net/null/rte_eth_null.c (revision e16adf08e54d5b1ff3b1116c372bbca279fced9d)
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};
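
/*
 * Illustrative usage (a sketch, not taken from this file): the "size" and
 * "copy" kvargs above are supplied when the virtual device is created on
 * the EAL command line, e.g.
 *
 *   --vdev=net_null0,size=64,copy=1
 *
 * "net_null0" is a placeholder instance name; the accepted parameters are
 * the ones listed in RTE_PMD_REGISTER_PARAM_STRING at the end of this file.
 */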

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint16_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	struct ether_addr eth_addr;
	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_null_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

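/*
 * Rx burst handlers. eth_null_rx() bulk-allocates nb_bufs mbufs from the
 * queue's mempool and returns them with only data_len, pkt_len and port
 * filled in; the payload is left untouched. eth_null_copy_rx() additionally
 * copies packet_size bytes from the queue's dummy_packet into each mbuf,
 * and is selected when the "copy" devarg is non-zero.
 */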
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
		return 0;

	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

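/*
 * Tx burst handlers. eth_null_tx() frees every mbuf it is given, so all
 * transmitted packets are silently dropped. eth_null_copy_tx() first copies
 * packet_size bytes of each frame into the queue's dummy_packet buffer
 * before freeing it, matching the "copy" devarg path on the Rx side.
 */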
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

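/*
 * Queue setup. Each Rx/Tx queue maps onto a null_queue entry inside
 * pmd_internals and gets a zeroed packet_size-byte dummy packet allocated
 * on the device's NUMA node; the Rx queue also records the mempool used by
 * the burst handlers above. The dummy buffers are freed in
 * eth_queue_release().
 */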
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return -EINVAL;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

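/*
 * RSS emulation. The redirection table and 40-byte hash key live entirely
 * in pmd_internals, protected by rss_lock; no packet is actually hashed or
 * steered. reta_size and the default key are initialised in
 * eth_dev_null_create() below.
 */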
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
		dev->device.numa_node);

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev)
		return -ENOMEM;

	/*
	 * Now put it all together:
	 * - store the configuration in internals (the per-port dev_private),
	 * - fill in the ethdev data (queue counts, link state, MAC address),
	 * - and hook up the dev_ops and rx/tx burst functions below.
	 */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;
	eth_random_addr(internals->eth_addr.addr_bytes);

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data = eth_dev->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;

	eth_dev->dev_ops = &ops;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

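/*
 * vdev probe/remove entry points. rte_pmd_null_probe() parses the "size"
 * and "copy" kvargs and creates the ethdev; in a secondary process it only
 * attaches to the port already created by the primary. rte_pmd_null_remove()
 * looks the port up by name and releases it.
 */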
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	struct rte_eth_dev *eth_dev;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	PMD_LOG(INFO, "Initializing pmd_null for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
			"packet copy is %s", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port_secondary(eth_dev);

	/* mac_addrs must not be freed alone because it is part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");

RTE_INIT(eth_null_init_log)
{
	eth_null_logtype = rte_log_register("pmd.net.null");
	if (eth_null_logtype >= 0)
		rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}