/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
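
/*
 * Virtual PMD: a mock ethdev used by the unit tests to exercise the
 * rte_ethdev API without real hardware.  Most device operations come in a
 * "success" and a "fail" variant, and the virtual_ethdev_*_fn_set_success()
 * helpers switch between them at runtime so a test can force any stage of
 * device configuration, start-up or I/O to fail on demand.
 */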

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct rte_eth_stats eth_stats;

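	/* mbufs staged by virtual_ethdev_add_mbufs_to_rx_queue() and handed
	 * back by the next successful RX burst */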
	struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST];
	int rx_pkt_burst_len;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

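/* Queue setup (success variants): allocate a small per-queue record so the
 * burst functions can map a queue pointer back to its port.  The matching
 * release hooks below are no-ops, so these records simply live for the
 * duration of the test. */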
static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);
	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);
	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = 0;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	dev_private->rx_pkt_burst_len = 0;

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

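/* Default device operations: every op starts out pointing at its "success"
 * variant; tests flip individual ops to the "fail" variants through the
 * setters below. */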
static struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

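/* Per-op switches: repoint a live device's ops table at the success or
 * fail implementation of each operation. */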
void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_success;
	else
		vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->dev_configure =
				virtual_ethdev_configure_success;
	else
		vrtl_eth_dev->dev_ops->dev_configure =
				virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->rx_queue_setup =
				virtual_ethdev_rx_queue_setup_success;
	else
		vrtl_eth_dev->dev_ops->rx_queue_setup =
				virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->tx_queue_setup =
				virtual_ethdev_tx_queue_setup_success;
	else
		vrtl_eth_dev->dev_ops->tx_queue_setup =
				virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->link_update =
				virtual_ethdev_link_update_success;
	else
		vrtl_eth_dev->dev_ops->link_update =
				virtual_ethdev_link_update_fail;
}

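/*
 * RX burst (success variant): deliver whatever mbufs the test staged with
 * virtual_ethdev_add_mbufs_to_rx_queue(), then clear the staging buffer so
 * each staged burst is consumed at most once.
 */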
static uint16_t
virtual_ethdev_rx_burst_success(void *queue __rte_unused,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	uint16_t rx_count = 0;
	int i;

	pq_map = (struct virtual_ethdev_queue *)queue;

	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->rx_pkt_burst_len > 0) {
		if (dev_private->rx_pkt_burst_len < nb_pkts) {
			for (i = 0; i < dev_private->rx_pkt_burst_len; i++) {
				bufs[i] = dev_private->rx_pkt_burst[i];
				dev_private->rx_pkt_burst[i] = NULL;
			}

			rx_count = dev_private->rx_pkt_burst_len;
			dev_private->eth_stats.ipackets += rx_count;
		}
		/* reset private burst values */
		dev_private->rx_pkt_burst_len = 0;
	}

	/* return the number of mbufs actually delivered in this burst,
	 * not the cumulative ipackets counter */
	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *tx_q;
	struct virtual_ethdev_private *dev_private;
	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];

	if (vrtl_eth_dev->data->dev_link.link_status) {
		dev_private = vrtl_eth_dev->data->dev_private;
		dev_private->eth_stats.opackets += nb_pkts;

		return nb_pkts;
	}

	/* link is down: free the burst and report nothing transmitted */
	for (i = 0; i < nb_pkts; i++) {
		if (bufs[i] != NULL)
			rte_pktmbuf_free(bufs[i]);

		bufs[i] = NULL;
	}

	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused, uint16_t nb_pkts __rte_unused)
{
	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;
}

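/* Flip the link state of a virtual port and run the registered callbacks,
 * mimicking what a real PMD does on a link status change interrupt. */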
void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC);
}

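/* Stage a burst of mbufs to be returned by the next RX burst on this port.
 * Note the staging array is not bounds-checked: callers must keep
 * burst_length <= MAX_PKT_BURST. */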
void
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	int i;

	dev_private = vrtl_eth_dev->data->dev_private;

	for (i = 0; i < burst_length; i++)
		dev_private->rx_pkt_burst[i] = pkt_burst[i];

	dev_private->rx_pkt_burst_len = burst_length;
}

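/* Number of sockets = highest socket_id present in the physical memory
 * layout, plus one. */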
static uint8_t
get_number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}
	return ++sockets;
}

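/*
 * Create a virtual ethdev bound to the given NUMA socket and return its
 * port id, or -1 on failure.  A minimal usage sketch (the MAC value and
 * port name here are illustrative, not fixed by this API):
 *
 *	struct ether_addr mac = { .addr_bytes = {0, 0, 0, 0, 0, 1} };
 *	int port = virtual_ethdev_create("virt_eth0", &mac,
 *			rte_socket_id());
 *
 *	virtual_ethdev_start_fn_set_success(port, 0);
 *	// rte_eth_dev_start(port) would now fail
 */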
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct eth_dev_ops *dev_ops = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	dev_ops = rte_zmalloc_socket(name, sizeof(*dev_ops), 0, socket_id);
	if (dev_ops == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->numa_node = socket_id;
	pci_drv->name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;
	pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = *pci_drv;
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->callbacks));

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_link.link_speed = ETH_LINK_SPEED_10000;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));
	eth_dev->data->mac_addrs->addr_bytes[5] = eth_dev->data->port_id;

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	memset(dev_private, 0, sizeof(*dev_private));
	eth_dev->data->dev_private = dev_private;

	eth_dev->dev_ops = dev_ops;

	/* Copy default device operation functions */
	memcpy(eth_dev->dev_ops, &virtual_ethdev_default_dev_ops,
			sizeof(*eth_dev->dev_ops));

	eth_dev->pci_dev = pci_dev;
	eth_dev->pci_dev->driver = &eth_drv->pci_drv;

	eth_dev->pci_dev->driver->id_table->device_id = 0xBEEF;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	/* rte_free() ignores NULL, so unallocated pointers are safe here */
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(eth_drv);
	rte_free(dev_ops);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}