/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};
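
/*
 * Note: all RX queues of a port share the single rx_queue ring and all TX
 * queues share the single tx_queue ring; the per-queue structure above only
 * records which port a queue belongs to, so the burst functions can locate
 * the device's private data.
 */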

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static int
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* drain and free any mbufs still held in the RX and TX rings */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	return 0;
}

static int
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonding_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonding_eth_dev->data->dev_started)
		bonding_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonding_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

static int
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* Free any packets still queued for transmission. rte_ring_dequeue()
	 * returns 0 on success and -ENOENT when the ring is empty, so loop
	 * on success until the ring is drained.
	 */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) == 0)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));

	return 0;
}

static int
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}

static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.link_update = virtual_ethdev_link_update_success,
	.mac_addr_set = virtual_ethdev_mac_address_set,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};
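
/*
 * Each device created by virtual_ethdev_create() below gets its own copy of
 * this table in its private data, so the helpers that follow can swap
 * individual callbacks between the *_success and *_fail variants on one port
 * without affecting any other port.
 */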

void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}
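
/*
 * Typical use from a test case (a minimal sketch; port_id and port_conf are
 * assumed to come from the surrounding test):
 *
 *	virtual_ethdev_configure_fn_set_success(port_id, 0);
 *	TEST_ASSERT(rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0,
 *			"configure was expected to fail");
 *
 *	virtual_ethdev_configure_fn_set_success(port_id, 1);
 *	TEST_ASSERT_SUCCESS(rte_eth_dev_configure(port_id, 1, 1, &port_conf),
 *			"configure was expected to succeed");
 */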

static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* while the link is down nothing is "sent"; the caller keeps
	 * ownership of any mbufs that were not enqueued
	 */
	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* simulate a partial failure: the last tx_burst_fail_count packets
	 * of the burst are reported as not transmitted
	 */
	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the "transmitted" packets in the burst */
		for (i = 0; i < successfully_txd; i++) {
			rte_pktmbuf_free(bufs[i]);
			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	rte_eth_dev_stop(port_id);

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;

	rte_eth_dev_start(port_id);
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	rte_eth_dev_stop(port_id);
	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	/* changing the burst function also resets the fail count */
	dev_private->tx_burst_fail_count = 0;
	rte_eth_dev_start(port_id);
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}
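
/*
 * Worked example (a sketch; port_id and pkts[] are assumed to come from the
 * test): with a fail count of 3, a burst of 10 packets reports 7 sent and
 * frees only those 7; pkts[7]..pkts[9] remain owned by the caller.
 *
 *	virtual_ethdev_tx_burst_fn_set_success(port_id, 0);
 *	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(port_id, 3);
 *	sent = rte_eth_tx_burst(port_id, 0, pkts, 10);
 *	TEST_ASSERT_EQUAL(sent, 7, "expected 7 of 10 packets to be sent");
 */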

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
			NULL);
}
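
/*
 * A test can observe the simulated interrupt by registering an LSC callback
 * first (a sketch; test_lsc_callback is a hypothetical handler matching
 * rte_eth_dev_cb_fn):
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			test_lsc_callback, NULL);
 *	virtual_ethdev_simulate_link_status_interrupt(port_id, 0);
 */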

int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}
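
/*
 * Together these two helpers give a test a packet path into and out of the
 * device (a sketch; pkts[], rx_pkts[] and tx_pkts[] hold mbufs allocated by
 * the test):
 *
 *	virtual_ethdev_add_mbufs_to_rx_queue(port_id, pkts, nb_pkts);
 *	nb_rx = rte_eth_rx_burst(port_id, 0, rx_pkts, MAX_PKT_BURST);
 *
 *	nb_tx = rte_eth_tx_burst(port_id, 0, rx_pkts, nb_rx);
 *	nb_drained = virtual_ethdev_get_mbufs_from_tx_queue(port_id,
 *			tx_pkts, MAX_PKT_BURST);
 *
 * Note the backing rings are created with only MAX_PKT_BURST entries, so a
 * single enqueue larger than that cannot fully succeed.
 */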

int
virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_dev->device = &pci_dev->device;
	eth_dev->device->driver = &pci_drv->driver;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy the default device operation functions so that individual
	 * callbacks can be overridden per port by the helpers above
	 */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	rte_eth_dev_probing_finish(eth_dev);

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	/* rte_ring_free() is a no-op on NULL, so any rings created before
	 * the failure are released here as well
	 */
	if (dev_private != NULL) {
		rte_ring_free(dev_private->rx_queue);
		rte_ring_free(dev_private->tx_queue);
	}
	rte_free(dev_private);

	return -1;
}
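
/*
 * Example of creating and bringing up a virtual device from a test (a
 * sketch; default_pmd_conf and mbuf_pool are assumed to be set up by the
 * caller):
 *
 *	struct rte_ether_addr mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 1} };
 *	int port_id = virtual_ethdev_create("net_virt0", &mac,
 *			rte_socket_id(), 1);
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &default_pmd_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL,
 *			mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *	virtual_ethdev_set_link_status(port_id, 1);
 */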