1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <errno.h>
10 #include <unistd.h>
11
12 #include <ethdev_driver.h>
13 #include <rte_memcpy.h>
14 #include <rte_string_fns.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_ether.h>
19 #include <rte_ip.h>
20 #include <rte_arp.h>
21 #include <rte_common.h>
22 #include <rte_errno.h>
23 #include <rte_cpuflags.h>
24 #include <rte_vect.h>
25 #include <rte_memory.h>
26 #include <rte_eal_paging.h>
27 #include <rte_eal.h>
28 #include <dev_driver.h>
29 #include <rte_cycles.h>
30 #include <rte_kvargs.h>
31
32 #include "virtio_ethdev.h"
33 #include "virtio.h"
34 #include "virtio_logs.h"
35 #include "virtqueue.h"
36 #include "virtio_cvq.h"
37 #include "virtio_rxtx.h"
38 #include "virtio_rxtx_simple.h"
39 #include "virtio_user/virtio_user_dev.h"
40
41 static int virtio_dev_configure(struct rte_eth_dev *dev);
42 static int virtio_dev_start(struct rte_eth_dev *dev);
43 static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
44 static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
45 static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
46 static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
47 static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
48 static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
49 uint32_t *speed,
50 int *vectorized);
51 static int virtio_dev_info_get(struct rte_eth_dev *dev,
52 struct rte_eth_dev_info *dev_info);
53 static int virtio_dev_link_update(struct rte_eth_dev *dev,
54 int wait_to_complete);
55 static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
56 static int virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
57 struct rte_eth_rss_conf *rss_conf);
58 static int virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
59 struct rte_eth_rss_conf *rss_conf);
60 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
61 struct rte_eth_rss_reta_entry64 *reta_conf,
62 uint16_t reta_size);
63 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
64 struct rte_eth_rss_reta_entry64 *reta_conf,
65 uint16_t reta_size);
66
67 static void virtio_set_hwaddr(struct virtio_hw *hw);
68 static void virtio_get_hwaddr(struct virtio_hw *hw);
69
70 static int virtio_dev_stats_get(struct rte_eth_dev *dev,
71 struct rte_eth_stats *stats);
72 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
73 struct rte_eth_xstat *xstats, unsigned n);
74 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
75 struct rte_eth_xstat_name *xstats_names,
76 unsigned limit);
77 static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
78 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
79 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
80 uint16_t vlan_id, int on);
81 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
82 struct rte_ether_addr *mac_addr,
83 uint32_t index, uint32_t vmdq);
84 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
85 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
86 struct rte_ether_addr *mac_addr);
87
88 static int virtio_intr_disable(struct rte_eth_dev *dev);
89 static int virtio_get_monitor_addr(void *rx_queue,
90 struct rte_power_monitor_cond *pmc);
91
92 static int virtio_dev_queue_stats_mapping_set(
93 struct rte_eth_dev *eth_dev,
94 uint16_t queue_id,
95 uint8_t stat_idx,
96 uint8_t is_rx);
97
98 static void virtio_notify_peers(struct rte_eth_dev *dev);
99 static void virtio_ack_link_announce(struct rte_eth_dev *dev);
100
101 struct rte_virtio_xstats_name_off {
102 char name[RTE_ETH_XSTATS_NAME_SIZE];
103 unsigned offset;
104 };
105
106 /* [rt]x_qX_ is prepended to the name string here */
107 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
108 {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
109 {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
110 {"errors", offsetof(struct virtnet_rx, stats.errors)},
111 {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
112 {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
113 {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
114 {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
115 {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
116 {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
117 {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
118 {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
119 {"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
120 {"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
121 };
122
123 /* [rt]x_qX_ is prepended to the name string here */
124 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
125 {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
126 {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
127 {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
128 {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
129 {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
130 {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
131 {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
132 {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
133 {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
134 {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
135 {"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
136 {"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
137 };
138
139 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
140 sizeof(rte_virtio_rxq_stat_strings[0]))
141 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
142 sizeof(rte_virtio_txq_stat_strings[0]))
143
144 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
145
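/*
 * Build a VIRTIO_NET_CTRL_MQ_RSS_CONFIG control message from the RSS state
 * cached in virtio_hw (hash types, indirection table and key) and send it
 * on the control virtqueue to (re)program the device RSS configuration.
 */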
146 static int
147 virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
148 {
149 struct virtio_hw *hw = dev->data->dev_private;
150 struct virtio_pmd_ctrl ctrl;
151 struct virtio_net_ctrl_rss rss;
152 int dlen, ret;
153
154 rss.hash_types = hw->rss_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
155 RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(VIRTIO_NET_RSS_RETA_SIZE));
156 rss.indirection_table_mask = VIRTIO_NET_RSS_RETA_SIZE - 1;
157 rss.unclassified_queue = 0;
158 memcpy(rss.indirection_table, hw->rss_reta, VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t));
159 rss.max_tx_vq = nb_queues;
160 rss.hash_key_length = VIRTIO_NET_RSS_KEY_SIZE;
161 memcpy(rss.hash_key_data, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
162
163 ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
164 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_RSS_CONFIG;
165 memcpy(ctrl.data, &rss, sizeof(rss));
166
167 dlen = sizeof(rss);
168
169 ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
170 if (ret) {
171 PMD_INIT_LOG(ERR, "RSS multiqueue configured but send command failed");
172 return -EINVAL;
173 }
174
175 return 0;
176 }
177
178 static int
179 virtio_set_multiple_queues_auto(struct rte_eth_dev *dev, uint16_t nb_queues)
180 {
181 struct virtio_hw *hw = dev->data->dev_private;
182 struct virtio_pmd_ctrl ctrl;
183 int dlen;
184 int ret;
185
186 ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
187 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
188 memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
189
190 dlen = sizeof(uint16_t);
191
192 ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
193 if (ret) {
194 PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
195 "failed, this is too late now...");
196 return -EINVAL;
197 }
198
199 return 0;
200 }
201
202 static int
203 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
204 {
205 struct virtio_hw *hw = dev->data->dev_private;
206
207 if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
208 return virtio_set_multiple_queues_rss(dev, nb_queues);
209 else
210 return virtio_set_multiple_queues_auto(dev, nb_queues);
211 }
212
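/*
 * Total number of virtqueues: two per queue pair (Rx and Tx), plus one
 * control queue when VIRTIO_NET_F_CTRL_VQ has been negotiated.
 */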
213 static uint16_t
214 virtio_get_nr_vq(struct virtio_hw *hw)
215 {
216 uint16_t nr_vq = hw->max_queue_pairs * 2;
217
218 if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
219 nr_vq += 1;
220
221 return nr_vq;
222 }
223
224 static void
225 virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
226 {
227 virtqueue_notify(vq);
228 }
229
230 static int
231 virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
232 {
233 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
234 unsigned int vq_size;
235 struct virtio_hw *hw = dev->data->dev_private;
236 struct virtqueue *vq;
237 int queue_type = virtio_get_queue_type(hw, queue_idx);
238 int ret;
239 int numa_node = dev->device->numa_node;
240
241 PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
242 queue_idx, numa_node);
243
244 /*
245 * Read the virtqueue size from the Queue Size field
246 * Always a power of 2; if 0, the virtqueue does not exist.
247 */
248 vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
249 PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
250 if (vq_size == 0) {
251 PMD_INIT_LOG(ERR, "virtqueue does not exist");
252 return -EINVAL;
253 }
254
255 if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
256 PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
257 return -EINVAL;
258 }
259
260 snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
261
262 vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
263 if (!vq) {
264 PMD_INIT_LOG(ERR, "virtqueue init failed");
265 return -ENOMEM;
266 }
267
268 hw->vqs[queue_idx] = vq;
269
270 if (queue_type == VTNET_CQ) {
271 hw->cvq = &vq->cq;
272 vq->cq.notify_queue = &virtio_control_queue_notify;
273 }
274
275 if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
276 PMD_INIT_LOG(ERR, "setup_queue failed");
277 ret = -EINVAL;
278 goto clean_vq;
279 }
280
281 return 0;
282
283 clean_vq:
284 if (queue_type == VTNET_CQ)
285 hw->cvq = NULL;
286 virtqueue_free(vq);
287 hw->vqs[queue_idx] = NULL;
288
289 return ret;
290 }
291
292 static void
293 virtio_free_queues(struct virtio_hw *hw)
294 {
295 uint16_t nr_vq = virtio_get_nr_vq(hw);
296 struct virtqueue *vq;
297 uint16_t i;
298
299 if (hw->vqs == NULL)
300 return;
301
302 for (i = 0; i < nr_vq; i++) {
303 vq = hw->vqs[i];
304 if (!vq)
305 continue;
306 virtqueue_free(vq);
307 hw->vqs[i] = NULL;
308 }
309
310 rte_free(hw->vqs);
311 hw->vqs = NULL;
312 }
313
314 static int
315 virtio_alloc_queues(struct rte_eth_dev *dev)
316 {
317 struct virtio_hw *hw = dev->data->dev_private;
318 uint16_t nr_vq = virtio_get_nr_vq(hw);
319 uint16_t i;
320 int ret;
321
322 hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
323 if (!hw->vqs) {
324 PMD_INIT_LOG(ERR, "failed to allocate vqs");
325 return -ENOMEM;
326 }
327
328 for (i = 0; i < nr_vq; i++) {
329 ret = virtio_init_queue(dev, i);
330 if (ret < 0) {
331 virtio_free_queues(hw);
332 return ret;
333 }
334 }
335
336 return 0;
337 }
338
339 static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
340
341 static void
342 virtio_free_rss(struct virtio_hw *hw)
343 {
344 rte_free(hw->rss_key);
345 hw->rss_key = NULL;
346
347 rte_free(hw->rss_reta);
348 hw->rss_reta = NULL;
349 }
350
351 int
352 virtio_dev_close(struct rte_eth_dev *dev)
353 {
354 struct virtio_hw *hw = dev->data->dev_private;
355 struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
356
357 PMD_INIT_LOG(DEBUG, "virtio_dev_close");
358 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
359 return 0;
360
361 if (!hw->opened)
362 return 0;
363 hw->opened = 0;
364
365 /* reset the NIC */
366 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
367 VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
368 if (intr_conf->rxq)
369 virtio_queues_unbind_intr(dev);
370
371 if (intr_conf->lsc || intr_conf->rxq) {
372 virtio_intr_disable(dev);
373 rte_intr_efd_disable(dev->intr_handle);
374 rte_intr_vec_list_free(dev->intr_handle);
375 }
376
377 virtio_reset(hw);
378 virtio_dev_free_mbufs(dev);
379 virtio_free_queues(hw);
380 virtio_free_rss(hw);
381
382 return VIRTIO_OPS(hw)->dev_close(hw);
383 }
384
385 static int
386 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
387 {
388 struct virtio_hw *hw = dev->data->dev_private;
389 struct virtio_pmd_ctrl ctrl;
390 int dlen[1];
391 int ret;
392
393 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
394 PMD_INIT_LOG(INFO, "host does not support rx control");
395 return -ENOTSUP;
396 }
397
398 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
399 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
400 ctrl.data[0] = 1;
401 dlen[0] = 1;
402
403 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
404 if (ret) {
405 PMD_INIT_LOG(ERR, "Failed to enable promisc");
406 return -EAGAIN;
407 }
408
409 return 0;
410 }
411
412 static int
413 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
414 {
415 struct virtio_hw *hw = dev->data->dev_private;
416 struct virtio_pmd_ctrl ctrl;
417 int dlen[1];
418 int ret;
419
420 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
421 PMD_INIT_LOG(INFO, "host does not support rx control");
422 return -ENOTSUP;
423 }
424
425 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
426 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
427 ctrl.data[0] = 0;
428 dlen[0] = 1;
429
430 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
431 if (ret) {
432 PMD_INIT_LOG(ERR, "Failed to disable promisc");
433 return -EAGAIN;
434 }
435
436 return 0;
437 }
438
439 static int
440 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
441 {
442 struct virtio_hw *hw = dev->data->dev_private;
443 struct virtio_pmd_ctrl ctrl;
444 int dlen[1];
445 int ret;
446
447 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
448 PMD_INIT_LOG(INFO, "host does not support rx control");
449 return -ENOTSUP;
450 }
451
452 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
453 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
454 ctrl.data[0] = 1;
455 dlen[0] = 1;
456
457 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
458 if (ret) {
459 PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
460 return -EAGAIN;
461 }
462
463 return 0;
464 }
465
466 static int
467 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
468 {
469 struct virtio_hw *hw = dev->data->dev_private;
470 struct virtio_pmd_ctrl ctrl;
471 int dlen[1];
472 int ret;
473
474 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
475 PMD_INIT_LOG(INFO, "host does not support rx control");
476 return -ENOTSUP;
477 }
478
479 ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
480 ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
481 ctrl.data[0] = 0;
482 dlen[0] = 1;
483
484 ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
485 if (ret) {
486 PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
487 return -EAGAIN;
488 }
489
490 return 0;
491 }
492
493 uint16_t
494 virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
495 {
496 return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
497 }
498
499 bool
500 virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
501 bool rx_scatter_enabled, const char **error)
502 {
503 if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
504 *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
505 return false;
506 }
507
508 return true;
509 }
510
511 static bool
512 virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
513 uint16_t frame_size)
514 {
515 struct virtio_hw *hw = dev->data->dev_private;
516 struct virtnet_rx *rxvq;
517 struct virtqueue *vq;
518 unsigned int qidx;
519 uint16_t buf_size;
520 const char *error;
521
522 if (hw->vqs == NULL)
523 return true;
524
525 for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
526 vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
527 if (vq == NULL)
528 continue;
529
530 rxvq = &vq->rxq;
531 if (rxvq->mpool == NULL)
532 continue;
533 buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
534
535 if (!virtio_rx_check_scatter(frame_size, buf_size,
536 hw->rx_ol_scatter, &error)) {
537 PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
538 qidx, error);
539 return false;
540 }
541 }
542
543 return true;
544 }
545
546 #define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
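/*
 * Validate the requested MTU against the device maximum and the Rx
 * scatter/buffer-size constraints, then record the resulting maximum
 * Rx frame length.
 */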
547 static int
548 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
549 {
550 struct virtio_hw *hw = dev->data->dev_private;
551 uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
552 hw->vtnet_hdr_size;
553 uint32_t frame_size = mtu + ether_hdr_len;
554 uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
555
556 max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
557
558 if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
559 PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
560 RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
561 return -EINVAL;
562 }
563
564 if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
565 PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
566 return -EINVAL;
567 }
568
569 hw->max_rx_pkt_len = frame_size;
570
571 return 0;
572 }
573
574 static int
575 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
576 {
577 struct virtio_hw *hw = dev->data->dev_private;
578 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
579 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
580
581 virtqueue_enable_intr(vq);
582 virtio_mb(hw->weak_barriers);
583 return 0;
584 }
585
586 static int
587 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
588 {
589 struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
590 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
591
592 virtqueue_disable_intr(vq);
593 return 0;
594 }
595
596 static int
597 virtio_dev_priv_dump(struct rte_eth_dev *dev, FILE *f)
598 {
599 struct virtio_hw *hw = dev->data->dev_private;
600
601 fprintf(f, "guest_features: 0x%" PRIx64 "\n", hw->guest_features);
602 fprintf(f, "vtnet_hdr_size: %u\n", hw->vtnet_hdr_size);
603 fprintf(f, "use_vec: rx-%u tx-%u\n", hw->use_vec_rx, hw->use_vec_tx);
604 fprintf(f, "use_inorder: rx-%u tx-%u\n", hw->use_inorder_rx, hw->use_inorder_tx);
605 fprintf(f, "intr_lsc: %u\n", hw->intr_lsc);
606 fprintf(f, "max_mtu: %u\n", hw->max_mtu);
607 fprintf(f, "max_rx_pkt_len: %zu\n", hw->max_rx_pkt_len);
608 fprintf(f, "max_queue_pairs: %u\n", hw->max_queue_pairs);
609 fprintf(f, "req_guest_features: 0x%" PRIx64 "\n", hw->req_guest_features);
610
611 return 0;
612 }
613
614 /*
615 * dev_ops for virtio, bare necessities for basic operation
616 */
617 static const struct eth_dev_ops virtio_eth_dev_ops = {
618 .dev_configure = virtio_dev_configure,
619 .dev_start = virtio_dev_start,
620 .dev_stop = virtio_dev_stop,
621 .dev_close = virtio_dev_close,
622 .promiscuous_enable = virtio_dev_promiscuous_enable,
623 .promiscuous_disable = virtio_dev_promiscuous_disable,
624 .allmulticast_enable = virtio_dev_allmulticast_enable,
625 .allmulticast_disable = virtio_dev_allmulticast_disable,
626 .mtu_set = virtio_mtu_set,
627 .dev_infos_get = virtio_dev_info_get,
628 .stats_get = virtio_dev_stats_get,
629 .xstats_get = virtio_dev_xstats_get,
630 .xstats_get_names = virtio_dev_xstats_get_names,
631 .stats_reset = virtio_dev_stats_reset,
632 .xstats_reset = virtio_dev_stats_reset,
633 .link_update = virtio_dev_link_update,
634 .vlan_offload_set = virtio_dev_vlan_offload_set,
635 .rx_queue_setup = virtio_dev_rx_queue_setup,
636 .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
637 .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
638 .tx_queue_setup = virtio_dev_tx_queue_setup,
639 .rss_hash_update = virtio_dev_rss_hash_update,
640 .rss_hash_conf_get = virtio_dev_rss_hash_conf_get,
641 .reta_update = virtio_dev_rss_reta_update,
642 .reta_query = virtio_dev_rss_reta_query,
643 /* collect stats per queue */
644 .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
645 .vlan_filter_set = virtio_vlan_filter_set,
646 .mac_addr_add = virtio_mac_addr_add,
647 .mac_addr_remove = virtio_mac_addr_remove,
648 .mac_addr_set = virtio_mac_addr_set,
649 .get_monitor_addr = virtio_get_monitor_addr,
650 .eth_dev_priv_dump = virtio_dev_priv_dump,
651 };
652
653 /*
654 * dev_ops for virtio-user in secondary processes; only limited
655 * functionality is supported currently.
656 */
657 const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
658 .dev_infos_get = virtio_dev_info_get,
659 .stats_get = virtio_dev_stats_get,
660 .xstats_get = virtio_dev_xstats_get,
661 .xstats_get_names = virtio_dev_xstats_get_names,
662 .stats_reset = virtio_dev_stats_reset,
663 .xstats_reset = virtio_dev_stats_reset,
664 /* collect stats per queue */
665 .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
666 };
667
668 static void
669 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
670 {
671 unsigned i;
672
673 for (i = 0; i < dev->data->nb_tx_queues; i++) {
674 const struct virtnet_tx *txvq = dev->data->tx_queues[i];
675 if (txvq == NULL)
676 continue;
677
678 stats->opackets += txvq->stats.packets;
679 stats->obytes += txvq->stats.bytes;
680
681 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
682 stats->q_opackets[i] = txvq->stats.packets;
683 stats->q_obytes[i] = txvq->stats.bytes;
684 }
685 }
686
687 for (i = 0; i < dev->data->nb_rx_queues; i++) {
688 const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
689 if (rxvq == NULL)
690 continue;
691
692 stats->ipackets += rxvq->stats.packets;
693 stats->ibytes += rxvq->stats.bytes;
694 stats->ierrors += rxvq->stats.errors;
695
696 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
697 stats->q_ipackets[i] = rxvq->stats.packets;
698 stats->q_ibytes[i] = rxvq->stats.bytes;
699 }
700 }
701
702 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
703 }
704
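/*
 * Extended stats are reported per queue; names are built as
 * rx_qN_<stat> and tx_qN_<stat> from the string tables defined above.
 */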
705 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
706 struct rte_eth_xstat_name *xstats_names,
707 __rte_unused unsigned limit)
708 {
709 unsigned i;
710 unsigned count = 0;
711 unsigned t;
712
713 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
714 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
715
716 if (xstats_names != NULL) {
717 /* Note: limit checked in rte_eth_xstats_names() */
718
719 for (i = 0; i < dev->data->nb_rx_queues; i++) {
720 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
721 if (rxvq == NULL)
722 continue;
723 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
724 snprintf(xstats_names[count].name,
725 sizeof(xstats_names[count].name),
726 "rx_q%u_%s", i,
727 rte_virtio_rxq_stat_strings[t].name);
728 count++;
729 }
730 }
731
732 for (i = 0; i < dev->data->nb_tx_queues; i++) {
733 struct virtnet_tx *txvq = dev->data->tx_queues[i];
734 if (txvq == NULL)
735 continue;
736 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
737 snprintf(xstats_names[count].name,
738 sizeof(xstats_names[count].name),
739 "tx_q%u_%s", i,
740 rte_virtio_txq_stat_strings[t].name);
741 count++;
742 }
743 }
744 return count;
745 }
746 return nstats;
747 }
748
749 static int
750 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
751 unsigned n)
752 {
753 unsigned i;
754 unsigned count = 0;
755
756 unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
757 dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
758
759 if (n < nstats)
760 return nstats;
761
762 for (i = 0; i < dev->data->nb_rx_queues; i++) {
763 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
764
765 if (rxvq == NULL)
766 continue;
767
768 unsigned t;
769
770 for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
771 xstats[count].value = *(uint64_t *)(((char *)rxvq) +
772 rte_virtio_rxq_stat_strings[t].offset);
773 xstats[count].id = count;
774 count++;
775 }
776 }
777
778 for (i = 0; i < dev->data->nb_tx_queues; i++) {
779 struct virtnet_tx *txvq = dev->data->tx_queues[i];
780
781 if (txvq == NULL)
782 continue;
783
784 unsigned t;
785
786 for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
787 xstats[count].value = *(uint64_t *)(((char *)txvq) +
788 rte_virtio_txq_stat_strings[t].offset);
789 xstats[count].id = count;
790 count++;
791 }
792 }
793
794 return count;
795 }
796
797 static int
798 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
799 {
800 virtio_update_stats(dev, stats);
801
802 return 0;
803 }
804
805 static int
806 virtio_dev_stats_reset(struct rte_eth_dev *dev)
807 {
808 unsigned int i;
809
810 for (i = 0; i < dev->data->nb_tx_queues; i++) {
811 struct virtnet_tx *txvq = dev->data->tx_queues[i];
812 if (txvq == NULL)
813 continue;
814
815 txvq->stats.packets = 0;
816 txvq->stats.bytes = 0;
817 txvq->stats.multicast = 0;
818 txvq->stats.broadcast = 0;
819 memset(txvq->stats.size_bins, 0,
820 sizeof(txvq->stats.size_bins[0]) * 8);
821 }
822
823 for (i = 0; i < dev->data->nb_rx_queues; i++) {
824 struct virtnet_rx *rxvq = dev->data->rx_queues[i];
825 if (rxvq == NULL)
826 continue;
827
828 rxvq->stats.packets = 0;
829 rxvq->stats.bytes = 0;
830 rxvq->stats.errors = 0;
831 rxvq->stats.multicast = 0;
832 rxvq->stats.broadcast = 0;
833 memset(rxvq->stats.size_bins, 0,
834 sizeof(rxvq->stats.size_bins[0]) * 8);
835 }
836
837 return 0;
838 }
839
840 static void
841 virtio_set_hwaddr(struct virtio_hw *hw)
842 {
843 virtio_write_dev_config(hw,
844 offsetof(struct virtio_net_config, mac),
845 &hw->mac_addr, RTE_ETHER_ADDR_LEN);
846 }
847
848 static void
849 virtio_get_hwaddr(struct virtio_hw *hw)
850 {
851 if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
852 virtio_read_dev_config(hw,
853 offsetof(struct virtio_net_config, mac),
854 &hw->mac_addr, RTE_ETHER_ADDR_LEN);
855 } else {
856 rte_eth_random_addr(&hw->mac_addr[0]);
857 virtio_set_hwaddr(hw);
858 }
859 }
860
861 static int
862 virtio_mac_table_set(struct virtio_hw *hw,
863 const struct virtio_net_ctrl_mac *uc,
864 const struct virtio_net_ctrl_mac *mc)
865 {
866 struct virtio_pmd_ctrl ctrl;
867 int err, len[2];
868
869 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
870 PMD_DRV_LOG(INFO, "host does not support mac table");
871 return -1;
872 }
873
874 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
875 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
876
877 len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
878 memcpy(ctrl.data, uc, len[0]);
879
880 len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
881 memcpy(ctrl.data + len[0], mc, len[1]);
882
883 err = virtio_send_command(hw->cvq, &ctrl, len, 2);
884 if (err != 0)
885 PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
886 return err;
887 }
888
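/*
 * Rebuild the unicast and multicast MAC filter tables from the port's
 * address array, substituting the new address at 'index', and push both
 * tables to the device in a single control command.
 */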
889 static int
890 virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
891 uint32_t index, uint32_t vmdq __rte_unused)
892 {
893 struct virtio_hw *hw = dev->data->dev_private;
894 const struct rte_ether_addr *addrs = dev->data->mac_addrs;
895 unsigned int i;
896 struct virtio_net_ctrl_mac *uc, *mc;
897
898 if (index >= VIRTIO_MAX_MAC_ADDRS) {
899 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
900 return -EINVAL;
901 }
902
903 uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
904 sizeof(uc->entries));
905 uc->entries = 0;
906 mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
907 sizeof(mc->entries));
908 mc->entries = 0;
909
910 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
911 const struct rte_ether_addr *addr
912 = (i == index) ? mac_addr : addrs + i;
913 struct virtio_net_ctrl_mac *tbl
914 = rte_is_multicast_ether_addr(addr) ? mc : uc;
915
916 if (rte_is_zero_ether_addr(addr))
917 break;
918 memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
919 }
920
921 return virtio_mac_table_set(hw, uc, mc);
922 }
923
924 static void
925 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
926 {
927 struct virtio_hw *hw = dev->data->dev_private;
928 struct rte_ether_addr *addrs = dev->data->mac_addrs;
929 struct virtio_net_ctrl_mac *uc, *mc;
930 unsigned int i;
931
932 if (index >= VIRTIO_MAX_MAC_ADDRS) {
933 PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
934 return;
935 }
936
937 uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
938 sizeof(uc->entries));
939 uc->entries = 0;
940 mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
941 sizeof(mc->entries));
942 mc->entries = 0;
943
944 for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
945 struct virtio_net_ctrl_mac *tbl;
946
947 if (i == index || rte_is_zero_ether_addr(addrs + i))
948 continue;
949
950 tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
951 memcpy(&tbl->macs[tbl->entries++], addrs + i,
952 RTE_ETHER_ADDR_LEN);
953 }
954
955 virtio_mac_table_set(hw, uc, mc);
956 }
957
958 static int
959 virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
960 {
961 struct virtio_hw *hw = dev->data->dev_private;
962
963 memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
964
965 /* Use atomic update if available */
966 if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
967 struct virtio_pmd_ctrl ctrl;
968 int len = RTE_ETHER_ADDR_LEN;
969
970 ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
971 ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
972
973 memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
974 return virtio_send_command(hw->cvq, &ctrl, &len, 1);
975 }
976
977 if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
978 return -ENOTSUP;
979
980 virtio_set_hwaddr(hw);
981 return 0;
982 }
983
984 #define CLB_VAL_IDX 0
985 #define CLB_MSK_IDX 1
986 #define CLB_MATCH_IDX 2
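/*
 * Power-monitor callback: mask the monitored word and compare it with the
 * expected value. The third opaque word selects whether equality (packed
 * ring: descriptor flags show AVAIL/USED) or inequality (split ring: used
 * index moved past the consumer index) indicates a new buffer is available.
 */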
987 static int
988 virtio_monitor_callback(const uint64_t value,
989 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
990 {
991 const uint64_t m = opaque[CLB_MSK_IDX];
992 const uint64_t v = opaque[CLB_VAL_IDX];
993 const uint64_t c = opaque[CLB_MATCH_IDX];
994
995 if (c)
996 return (value & m) == v ? -1 : 0;
997 else
998 return (value & m) == v ? 0 : -1;
999 }
1000
1001 static int
1002 virtio_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
1003 {
1004 struct virtnet_rx *rxvq = rx_queue;
1005 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1006 struct virtio_hw *hw;
1007
1008 if (vq == NULL)
1009 return -EINVAL;
1010
1011 hw = vq->hw;
1012 if (virtio_with_packed_queue(hw)) {
1013 struct vring_packed_desc *desc;
1014 desc = vq->vq_packed.ring.desc;
1015 pmc->addr = &desc[vq->vq_used_cons_idx].flags;
1016 if (vq->vq_packed.used_wrap_counter)
1017 pmc->opaque[CLB_VAL_IDX] =
1018 VRING_PACKED_DESC_F_AVAIL_USED;
1019 else
1020 pmc->opaque[CLB_VAL_IDX] = 0;
1021 pmc->opaque[CLB_MSK_IDX] = VRING_PACKED_DESC_F_AVAIL_USED;
1022 pmc->opaque[CLB_MATCH_IDX] = 1;
1023 pmc->size = sizeof(desc[vq->vq_used_cons_idx].flags);
1024 } else {
1025 pmc->addr = &vq->vq_split.ring.used->idx;
1026 pmc->opaque[CLB_VAL_IDX] = vq->vq_used_cons_idx
1027 & (vq->vq_nentries - 1);
1028 pmc->opaque[CLB_MSK_IDX] = vq->vq_nentries - 1;
1029 pmc->opaque[CLB_MATCH_IDX] = 0;
1030 pmc->size = sizeof(vq->vq_split.ring.used->idx);
1031 }
1032 pmc->fn = virtio_monitor_callback;
1033
1034 return 0;
1035 }
1036
1037 static int
1038 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1039 {
1040 struct virtio_hw *hw = dev->data->dev_private;
1041 struct virtio_pmd_ctrl ctrl;
1042 int len;
1043
1044 if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1045 return -ENOTSUP;
1046
1047 ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1048 ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1049 memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1050 len = sizeof(vlan_id);
1051
1052 return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1053 }
1054
1055 static int
1056 virtio_intr_unmask(struct rte_eth_dev *dev)
1057 {
1058 struct virtio_hw *hw = dev->data->dev_private;
1059
1060 if (rte_intr_ack(dev->intr_handle) < 0)
1061 return -1;
1062
1063 if (VIRTIO_OPS(hw)->intr_detect)
1064 VIRTIO_OPS(hw)->intr_detect(hw);
1065
1066 return 0;
1067 }
1068
1069 static int
1070 virtio_intr_enable(struct rte_eth_dev *dev)
1071 {
1072 struct virtio_hw *hw = dev->data->dev_private;
1073
1074 if (rte_intr_enable(dev->intr_handle) < 0)
1075 return -1;
1076
1077 if (VIRTIO_OPS(hw)->intr_detect)
1078 VIRTIO_OPS(hw)->intr_detect(hw);
1079
1080 return 0;
1081 }
1082
1083 static int
1084 virtio_intr_disable(struct rte_eth_dev *dev)
1085 {
1086 struct virtio_hw *hw = dev->data->dev_private;
1087
1088 if (rte_intr_disable(dev->intr_handle) < 0)
1089 return -1;
1090
1091 if (VIRTIO_OPS(hw)->intr_detect)
1092 VIRTIO_OPS(hw)->intr_detect(hw);
1093
1094 return 0;
1095 }
1096
1097 static int
1098 virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1099 {
1100 uint64_t host_features;
1101
1102 /* Prepare guest_features: feature that driver wants to support */
1103 PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1104 req_features);
1105
1106 /* Read device(host) feature bits */
1107 host_features = VIRTIO_OPS(hw)->get_features(hw);
1108 PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1109 host_features);
1110
1111 /* If supported, ensure MTU value is valid before acknowledging it. */
1112 if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
1113 struct virtio_net_config config;
1114
1115 virtio_read_dev_config(hw,
1116 offsetof(struct virtio_net_config, mtu),
1117 &config.mtu, sizeof(config.mtu));
1118
1119 if (config.mtu < RTE_ETHER_MIN_MTU)
1120 req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
1121 }
1122
1123 /*
1124 * Negotiate features: the subset of device feature bits accepted by the
1125 * driver is written back as the guest feature bits.
1126 */
1127 hw->guest_features = req_features;
1128 hw->guest_features = virtio_negotiate_features(hw, host_features);
1129 PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1130 hw->guest_features);
1131
1132 if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
1133 return -1;
1134
1135 if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
1136 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1137
1138 if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1139 PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
1140 return -1;
1141 }
1142 }
1143
1144 hw->req_guest_features = req_features;
1145
1146 return 0;
1147 }
1148
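/*
 * Inject a gratuitous RARP packet carrying the port MAC address on Tx
 * queue 0 so peers can relearn the port location; called when the device
 * status reports an ANNOUNCE (see virtio_interrupt_handler()).
 */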
1149 static void
1150 virtio_notify_peers(struct rte_eth_dev *dev)
1151 {
1152 struct virtio_hw *hw = dev->data->dev_private;
1153 struct virtnet_rx *rxvq;
1154 struct rte_mbuf *rarp_mbuf;
1155
1156 if (!dev->data->rx_queues)
1157 return;
1158
1159 rxvq = dev->data->rx_queues[0];
1160 if (!rxvq)
1161 return;
1162
1163 rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
1164 (struct rte_ether_addr *)hw->mac_addr);
1165 if (rarp_mbuf == NULL) {
1166 PMD_DRV_LOG(ERR, "failed to make RARP packet.");
1167 return;
1168 }
1169
1170 rte_spinlock_lock(&hw->state_lock);
1171 if (hw->started == 0) {
1172 /* If virtio port just stopped, no need to send RARP */
1173 rte_pktmbuf_free(rarp_mbuf);
1174 goto out;
1175 }
1176 hw->started = 0;
1177
1178 /*
1179 * Prevent the worker threads from touching the queues to avoid contention;
1180 * 1 ms should be enough for any ongoing Tx function to finish.
1181 */
1182 rte_delay_ms(1);
1183
1184 hw->inject_pkts = &rarp_mbuf;
1185 dev->tx_pkt_burst(dev->data->tx_queues[0], &rarp_mbuf, 1);
1186 hw->inject_pkts = NULL;
1187
1188 hw->started = 1;
1189
1190 out:
1191 rte_spinlock_unlock(&hw->state_lock);
1192 }
1193
1194 static void
1195 virtio_ack_link_announce(struct rte_eth_dev *dev)
1196 {
1197 struct virtio_hw *hw = dev->data->dev_private;
1198 struct virtio_pmd_ctrl ctrl;
1199
1200 ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
1201 ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
1202
1203 virtio_send_command(hw->cvq, &ctrl, NULL, 0);
1204 }
1205
1206 /*
1207 * Process virtio config changed interrupt. Call the callback
1208 * if link state changed, generate gratuitous RARP packet if
1209 * the status indicates an ANNOUNCE.
1210 */
1211 void
1212 virtio_interrupt_handler(void *param)
1213 {
1214 struct rte_eth_dev *dev = param;
1215 struct virtio_hw *hw = dev->data->dev_private;
1216 uint8_t isr;
1217 uint16_t status;
1218
1219 /* Read interrupt status which clears interrupt */
1220 isr = virtio_get_isr(hw);
1221 PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
1222
1223 if (virtio_intr_unmask(dev) < 0)
1224 PMD_DRV_LOG(ERR, "interrupt enable failed");
1225
1226 if (isr & VIRTIO_ISR_CONFIG) {
1227 if (virtio_dev_link_update(dev, 0) == 0)
1228 rte_eth_dev_callback_process(dev,
1229 RTE_ETH_EVENT_INTR_LSC,
1230 NULL);
1231
1232 if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1233 virtio_read_dev_config(hw,
1234 offsetof(struct virtio_net_config, status),
1235 &status, sizeof(status));
1236 if (status & VIRTIO_NET_S_ANNOUNCE) {
1237 virtio_notify_peers(dev);
1238 if (hw->cvq)
1239 virtio_ack_link_announce(dev);
1240 }
1241 }
1242 }
1243 }
1244
1245 /* set rx and tx handlers according to what is supported */
1246 static void
1247 set_rxtx_funcs(struct rte_eth_dev *eth_dev)
1248 {
1249 struct virtio_hw *hw = eth_dev->data->dev_private;
1250
1251 eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
1252 if (virtio_with_packed_queue(hw)) {
1253 PMD_INIT_LOG(INFO,
1254 "virtio: using packed ring %s Tx path on port %u",
1255 hw->use_vec_tx ? "vectorized" : "standard",
1256 eth_dev->data->port_id);
1257 if (hw->use_vec_tx)
1258 eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
1259 else
1260 eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
1261 } else {
1262 if (hw->use_inorder_tx) {
1263 PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
1264 eth_dev->data->port_id);
1265 eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
1266 } else {
1267 PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1268 eth_dev->data->port_id);
1269 eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1270 }
1271 }
1272
1273 if (virtio_with_packed_queue(hw)) {
1274 if (hw->use_vec_rx) {
1275 PMD_INIT_LOG(INFO,
1276 "virtio: using packed ring vectorized Rx path on port %u",
1277 eth_dev->data->port_id);
1278 eth_dev->rx_pkt_burst =
1279 &virtio_recv_pkts_packed_vec;
1280 } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1281 PMD_INIT_LOG(INFO,
1282 "virtio: using packed ring mergeable buffer Rx path on port %u",
1283 eth_dev->data->port_id);
1284 eth_dev->rx_pkt_burst =
1285 &virtio_recv_mergeable_pkts_packed;
1286 } else {
1287 PMD_INIT_LOG(INFO,
1288 "virtio: using packed ring standard Rx path on port %u",
1289 eth_dev->data->port_id);
1290 eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
1291 }
1292 } else {
1293 if (hw->use_vec_rx) {
1294 PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
1295 eth_dev->data->port_id);
1296 eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
1297 } else if (hw->use_inorder_rx) {
1298 PMD_INIT_LOG(INFO,
1299 "virtio: using inorder Rx path on port %u",
1300 eth_dev->data->port_id);
1301 eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
1302 } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1303 PMD_INIT_LOG(INFO,
1304 "virtio: using mergeable buffer Rx path on port %u",
1305 eth_dev->data->port_id);
1306 eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1307 } else {
1308 PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
1309 eth_dev->data->port_id);
1310 eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1311 }
1312 }
1313
1314 }
1315
1316 /* Only support 1:1 queue/interrupt mapping so far.
1317 * TODO: support n:1 queue/interrupt mapping when there is a limited number
1318 * of interrupt vectors (<N+1).
1319 */
1320 static int
1321 virtio_queues_bind_intr(struct rte_eth_dev *dev)
1322 {
1323 uint32_t i;
1324 struct virtio_hw *hw = dev->data->dev_private;
1325
1326 PMD_INIT_LOG(INFO, "queue/interrupt binding");
1327 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1328 if (rte_intr_vec_list_index_set(dev->intr_handle, i,
1329 i + 1))
1330 return -rte_errno;
1331 if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
1332 VIRTIO_MSI_NO_VECTOR) {
1333 PMD_DRV_LOG(ERR, "failed to set queue vector");
1334 return -EBUSY;
1335 }
1336 }
1337
1338 return 0;
1339 }
1340
1341 static void
1342 virtio_queues_unbind_intr(struct rte_eth_dev *dev)
1343 {
1344 uint32_t i;
1345 struct virtio_hw *hw = dev->data->dev_private;
1346
1347 PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
1348 for (i = 0; i < dev->data->nb_rx_queues; ++i)
1349 VIRTIO_OPS(hw)->set_queue_irq(hw,
1350 hw->vqs[i * VTNET_CQ],
1351 VIRTIO_MSI_NO_VECTOR);
1352 }
1353
1354 static int
1355 virtio_configure_intr(struct rte_eth_dev *dev)
1356 {
1357 struct virtio_hw *hw = dev->data->dev_private;
1358 int ret;
1359
1360 if (!rte_intr_cap_multiple(dev->intr_handle)) {
1361 PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
1362 return -ENOTSUP;
1363 }
1364
1365 ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues);
1366 if (ret < 0) {
1367 PMD_INIT_LOG(ERR, "Fail to create eventfd");
1368 return ret;
1369 }
1370
1371 ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
1372 hw->max_queue_pairs);
1373 if (ret < 0) {
1374 PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
1375 hw->max_queue_pairs);
1376 return ret;
1377 }
1378
1379 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1380 /* Re-register callback to update max_intr */
1381 rte_intr_callback_unregister(dev->intr_handle,
1382 virtio_interrupt_handler,
1383 dev);
1384 rte_intr_callback_register(dev->intr_handle,
1385 virtio_interrupt_handler,
1386 dev);
1387 }
1388
1389 /* DO NOT try to remove this! This function will enable msix, or QEMU
1390 * will encounter SIGSEGV when DRIVER_OK is sent.
1391 * And for legacy devices, this should be done before queue/vec binding
1392 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
1393 * (22) will be ignored.
1394 */
1395 if (virtio_intr_enable(dev) < 0) {
1396 PMD_DRV_LOG(ERR, "interrupt enable failed");
1397 return -EINVAL;
1398 }
1399
1400 ret = virtio_queues_bind_intr(dev);
1401 if (ret < 0) {
1402 PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
1403 return ret;
1404 }
1405
1406 return 0;
1407 }
1408
1409 static void
1410 virtio_get_speed_duplex(struct rte_eth_dev *eth_dev,
1411 struct rte_eth_link *link)
1412 {
1413 struct virtio_hw *hw = eth_dev->data->dev_private;
1414 struct virtio_net_config *config;
1415 struct virtio_net_config local_config;
1416
1417 config = &local_config;
1418 virtio_read_dev_config(hw,
1419 offsetof(struct virtio_net_config, speed),
1420 &config->speed, sizeof(config->speed));
1421 virtio_read_dev_config(hw,
1422 offsetof(struct virtio_net_config, duplex),
1423 &config->duplex, sizeof(config->duplex));
1424 hw->speed = config->speed;
1425 hw->duplex = config->duplex;
1426 if (link != NULL) {
1427 link->link_duplex = hw->duplex;
1428 link->link_speed = hw->speed;
1429 }
1430 PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1431 hw->speed, hw->duplex);
1432 }
1433
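/* Translate ethdev RSS hash flags (RTE_ETH_RSS_*) to virtio hash type bits. */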
1434 static uint64_t
1435 ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)
1436 {
1437 uint64_t virtio_hash_types = 0;
1438
1439 if (ethdev_hash_types & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1440 RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
1441 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV4;
1442
1443 if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1444 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV4;
1445
1446 if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1447 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV4;
1448
1449 if (ethdev_hash_types & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1450 RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
1451 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV6;
1452
1453 if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1454 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV6;
1455
1456 if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1457 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV6;
1458
1459 if (ethdev_hash_types & RTE_ETH_RSS_IPV6_EX)
1460 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IP_EX;
1461
1462 if (ethdev_hash_types & RTE_ETH_RSS_IPV6_TCP_EX)
1463 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCP_EX;
1464
1465 if (ethdev_hash_types & RTE_ETH_RSS_IPV6_UDP_EX)
1466 virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDP_EX;
1467
1468 return virtio_hash_types;
1469 }
1470
1471 static uint64_t
1472 virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)
1473 {
1474 uint64_t rss_offloads = 0;
1475
1476 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV4)
1477 rss_offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1478 RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
1479
1480 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV4)
1481 rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1482
1483 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV4)
1484 rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1485
1486 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV6)
1487 rss_offloads |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1488 RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
1489
1490 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV6)
1491 rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1492
1493 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV6)
1494 rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1495
1496 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IP_EX)
1497 rss_offloads |= RTE_ETH_RSS_IPV6_EX;
1498
1499 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCP_EX)
1500 rss_offloads |= RTE_ETH_RSS_IPV6_TCP_EX;
1501
1502 if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDP_EX)
1503 rss_offloads |= RTE_ETH_RSS_IPV6_UDP_EX;
1504
1505 return rss_offloads;
1506 }
1507
1508 static int
1509 virtio_dev_get_rss_config(struct virtio_hw *hw, uint32_t *rss_hash_types)
1510 {
1511 struct virtio_net_config local_config;
1512 struct virtio_net_config *config = &local_config;
1513
1514 virtio_read_dev_config(hw,
1515 offsetof(struct virtio_net_config, rss_max_key_size),
1516 &config->rss_max_key_size,
1517 sizeof(config->rss_max_key_size));
1518 if (config->rss_max_key_size < VIRTIO_NET_RSS_KEY_SIZE) {
1519 PMD_INIT_LOG(ERR, "Invalid device RSS max key size (%u)",
1520 config->rss_max_key_size);
1521 return -EINVAL;
1522 }
1523
1524 virtio_read_dev_config(hw,
1525 offsetof(struct virtio_net_config,
1526 rss_max_indirection_table_length),
1527 &config->rss_max_indirection_table_length,
1528 sizeof(config->rss_max_indirection_table_length));
1529 if (config->rss_max_indirection_table_length < VIRTIO_NET_RSS_RETA_SIZE) {
1530 PMD_INIT_LOG(ERR, "Invalid device RSS max reta size (%u)",
1531 config->rss_max_indirection_table_length);
1532 return -EINVAL;
1533 }
1534
1535 virtio_read_dev_config(hw,
1536 offsetof(struct virtio_net_config, supported_hash_types),
1537 &config->supported_hash_types,
1538 sizeof(config->supported_hash_types));
1539 if ((config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK) == 0) {
1540 PMD_INIT_LOG(ERR, "Invalid device RSS hash types (0x%x)",
1541 config->supported_hash_types);
1542 return -EINVAL;
1543 }
1544
1545 *rss_hash_types = config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
1546
1547 PMD_INIT_LOG(DEBUG, "Device RSS config:");
1548 PMD_INIT_LOG(DEBUG, "\t-Max key size: %u", config->rss_max_key_size);
1549 PMD_INIT_LOG(DEBUG, "\t-Max reta size: %u", config->rss_max_indirection_table_length);
1550 PMD_INIT_LOG(DEBUG, "\t-Supported hash types: 0x%x", *rss_hash_types);
1551
1552 return 0;
1553 }
1554
1555 static int
1556 virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
1557 struct rte_eth_rss_conf *rss_conf)
1558 {
1559 struct virtio_hw *hw = dev->data->dev_private;
1560 char old_rss_key[VIRTIO_NET_RSS_KEY_SIZE];
1561 uint32_t old_hash_types;
1562 uint16_t nb_queues;
1563 int ret;
1564
1565 if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1566 return -ENOTSUP;
1567
1568 if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(VIRTIO_NET_HASH_TYPE_MASK))
1569 return -EINVAL;
1570
1571 old_hash_types = hw->rss_hash_types;
1572 hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1573
1574 if (rss_conf->rss_key && rss_conf->rss_key_len) {
1575 if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1576 PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1577 VIRTIO_NET_RSS_KEY_SIZE);
1578 ret = -EINVAL;
1579 goto restore_types;
1580 }
1581 memcpy(old_rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1582 memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1583 }
1584
1585 nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1586 ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1587 if (ret < 0) {
1588 PMD_INIT_LOG(ERR, "Failed to apply new RSS config to the device");
1589 goto restore_key;
1590 }
1591
1592 return 0;
1593 restore_key:
1594 if (rss_conf->rss_key && rss_conf->rss_key_len)
1595 memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1596 restore_types:
1597 hw->rss_hash_types = old_hash_types;
1598
1599 return ret;
1600 }
1601
1602 static int
1603 virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1604 struct rte_eth_rss_conf *rss_conf)
1605 {
1606 struct virtio_hw *hw = dev->data->dev_private;
1607
1608 if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1609 return -ENOTSUP;
1610
1611 if (rss_conf->rss_key && rss_conf->rss_key_len >= VIRTIO_NET_RSS_KEY_SIZE)
1612 memcpy(rss_conf->rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1613 rss_conf->rss_key_len = VIRTIO_NET_RSS_KEY_SIZE;
1614 rss_conf->rss_hf = virtio_to_ethdev_rss_offloads(hw->rss_hash_types);
1615
1616 return 0;
1617 }
1618
1619 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
1620 struct rte_eth_rss_reta_entry64 *reta_conf,
1621 uint16_t reta_size)
1622 {
1623 struct virtio_hw *hw = dev->data->dev_private;
1624 uint16_t nb_queues;
1625 uint16_t old_reta[VIRTIO_NET_RSS_RETA_SIZE];
1626 int idx, pos, i, ret;
1627
1628 if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1629 return -ENOTSUP;
1630
1631 if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1632 return -EINVAL;
1633
1634 memcpy(old_reta, hw->rss_reta, sizeof(old_reta));
1635
1636 for (i = 0; i < reta_size; i++) {
1637 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1638 pos = i % RTE_ETH_RETA_GROUP_SIZE;
1639
1640 if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
1641 continue;
1642
1643 hw->rss_reta[i] = reta_conf[idx].reta[pos];
1644 }
1645
1646 nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1647 ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1648 if (ret < 0) {
1649 PMD_INIT_LOG(ERR, "Failed to apply new RETA to the device");
1650 memcpy(hw->rss_reta, old_reta, sizeof(old_reta));
1651 }
1652
1653 hw->rss_rx_queues = dev->data->nb_rx_queues;
1654
1655 return ret;
1656 }
1657
1658 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
1659 struct rte_eth_rss_reta_entry64 *reta_conf,
1660 uint16_t reta_size)
1661 {
1662 struct virtio_hw *hw = dev->data->dev_private;
1663 int idx, i;
1664
1665 if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1666 return -ENOTSUP;
1667
1668 if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1669 return -EINVAL;
1670
1671 for (i = 0; i < reta_size; i++) {
1672 idx = i / RTE_ETH_RETA_GROUP_SIZE;
1673 reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i];
1674 }
1675
1676 return 0;
1677 }
1678
1679 /*
1680 * The default RSS hash key reuses the well-known default key of the
1681 * Intel IXGBE devices. The application can replace it with any
1682 * 40-byte key value.
1683 */
1684 static uint8_t rss_intel_key[VIRTIO_NET_RSS_KEY_SIZE] = {
1685 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1686 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1687 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1688 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1689 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1690 };
1691
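/*
 * Initialize RSS at configure time: read the device RSS limits, select the
 * hash types, and allocate/seed the hash key and indirection table if they
 * have not been set up yet.
 */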
1692 static int
1693 virtio_dev_rss_init(struct rte_eth_dev *eth_dev)
1694 {
1695 struct virtio_hw *hw = eth_dev->data->dev_private;
1696 uint16_t nb_rx_queues = eth_dev->data->nb_rx_queues;
1697 struct rte_eth_rss_conf *rss_conf;
1698 int ret, i;
1699
1700 if (!nb_rx_queues) {
1701 PMD_INIT_LOG(ERR, "Cannot init RSS if no Rx queues");
1702 return -EINVAL;
1703 }
1704
1705 rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1706
1707 ret = virtio_dev_get_rss_config(hw, &hw->rss_hash_types);
1708 if (ret)
1709 return ret;
1710
1711 if (rss_conf->rss_hf) {
1712 /* Ensure requested hash types are supported by the device */
1713 if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(hw->rss_hash_types))
1714 return -EINVAL;
1715
1716 hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1717 }
1718
1719 if (!hw->rss_key) {
1720 /* Setup default RSS key if not already setup by the user */
1721 hw->rss_key = rte_malloc_socket("rss_key",
1722 VIRTIO_NET_RSS_KEY_SIZE, 0,
1723 eth_dev->device->numa_node);
1724 if (!hw->rss_key) {
1725 PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
1726 return -ENOMEM;
1727 }
1728 }
1729
1730 if (rss_conf->rss_key && rss_conf->rss_key_len) {
1731 if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1732 PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1733 VIRTIO_NET_RSS_KEY_SIZE);
1734 return -EINVAL;
1735 }
1736 memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1737 } else {
1738 memcpy(hw->rss_key, rss_intel_key, VIRTIO_NET_RSS_KEY_SIZE);
1739 }
1740
1741 if (!hw->rss_reta) {
1742 /* Set up a default RSS reta if one was not already provided by the user */
1743 hw->rss_reta = rte_zmalloc_socket("rss_reta",
1744 VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t), 0,
1745 eth_dev->device->numa_node);
1746 if (!hw->rss_reta) {
1747 PMD_INIT_LOG(ERR, "Failed to allocate RSS reta");
1748 return -ENOMEM;
1749 }
1750
1751 hw->rss_rx_queues = 0;
1752 }
1753
1754 /* Re-initialize the RSS reta if the number of RX queues has changed */
1755 if (hw->rss_rx_queues != nb_rx_queues) {
1756 for (i = 0; i < VIRTIO_NET_RSS_RETA_SIZE; i++)
1757 hw->rss_reta[i] = i % nb_rx_queues;
1758 hw->rss_rx_queues = nb_rx_queues;
1759 }
1760
1761 return 0;
1762 }
1763
1764 #define DUPLEX_UNKNOWN 0xff
1765 /* reset device and renegotiate features if needed */
1766 static int
1767 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1768 {
1769 struct virtio_hw *hw = eth_dev->data->dev_private;
1770 struct virtio_net_config *config;
1771 struct virtio_net_config local_config;
1772 int ret;
1773
1774 /* Reset the device although not necessary at startup */
1775 virtio_reset(hw);
1776
1777 if (hw->vqs) {
1778 virtio_dev_free_mbufs(eth_dev);
1779 virtio_free_queues(hw);
1780 }
1781
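/*
 * Standard virtio initialization handshake: acknowledge the device,
 * declare a driver, then negotiate features. The remaining status bits
 * are set by virtio_reinit_complete() at the end of this function.
 */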
1782 /* Tell the host we've noticed this device. */
1783 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1784
1785 /* Tell the host we know how to drive the device. */
1786 virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1787 if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
1788 return -EINVAL;
1789
1790 hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
1791
1792 /* LSC needs both the device status feature and MSI-X support; disable it otherwise */
1793 if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
1794 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1795 else
1796 eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1797
1798 /* Setting up rx_header size for the device */
1799 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1800 virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
1801 virtio_with_packed_queue(hw))
1802 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1803 else
1804 hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1805
1806 /* Copy the permanent MAC address into virtio_hw */
1807 virtio_get_hwaddr(hw);
1808 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
1809 &eth_dev->data->mac_addrs[0]);
1810 PMD_INIT_LOG(DEBUG,
1811 "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1812 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1813 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
1814
1815 hw->get_speed_via_feat = hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN &&
1816 virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX);
1817 if (hw->get_speed_via_feat)
1818 virtio_get_speed_duplex(eth_dev, NULL);
1819 if (hw->duplex == DUPLEX_UNKNOWN)
1820 hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
1821 PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1822 hw->speed, hw->duplex);
1823 if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1824 config = &local_config;
1825
1826 virtio_read_dev_config(hw,
1827 offsetof(struct virtio_net_config, mac),
1828 &config->mac, sizeof(config->mac));
1829
1830 if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1831 virtio_read_dev_config(hw,
1832 offsetof(struct virtio_net_config, status),
1833 &config->status, sizeof(config->status));
1834 } else {
1835 PMD_INIT_LOG(DEBUG,
1836 "VIRTIO_NET_F_STATUS is not supported");
1837 config->status = 0;
1838 }
1839
1840 if (virtio_with_feature(hw, VIRTIO_NET_F_MQ) ||
1841 virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
1842 virtio_read_dev_config(hw,
1843 offsetof(struct virtio_net_config, max_virtqueue_pairs),
1844 &config->max_virtqueue_pairs,
1845 sizeof(config->max_virtqueue_pairs));
1846 } else {
1847 PMD_INIT_LOG(DEBUG,
1848 "Neither VIRTIO_NET_F_MQ nor VIRTIO_NET_F_RSS are supported");
1849 config->max_virtqueue_pairs = 1;
1850 }
1851
1852 hw->max_queue_pairs = config->max_virtqueue_pairs;
1853
1854 if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
1855 virtio_read_dev_config(hw,
1856 offsetof(struct virtio_net_config, mtu),
1857 &config->mtu,
1858 sizeof(config->mtu));
1859
1860 /*
1861 * MTU value has already been checked at negotiation
1862 * time, but check again in case it has changed since
1863 * then, which should not happen.
1864 */
1865 if (config->mtu < RTE_ETHER_MIN_MTU) {
1866 PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
1867 config->mtu);
1868 return -EINVAL;
1869 }
1870
1871 hw->max_mtu = config->mtu;
1872 /* Set initial MTU to maximum one supported by vhost */
1873 eth_dev->data->mtu = config->mtu;
1874
1875 } else {
1876 hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1877 VLAN_TAG_LEN - hw->vtnet_hdr_size;
1878 }
1879
1880 hw->rss_hash_types = 0;
1881 if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
1882 ret = virtio_dev_rss_init(eth_dev);
1883 if (ret < 0)
1884 return ret;
1885 }
1886
1887 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1888 config->max_virtqueue_pairs);
1889 PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1890 PMD_INIT_LOG(DEBUG,
1891 "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1892 config->mac[0], config->mac[1],
1893 config->mac[2], config->mac[3],
1894 config->mac[4], config->mac[5]);
1895 } else {
1896 PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1897 hw->max_queue_pairs = 1;
1898 hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1899 VLAN_TAG_LEN - hw->vtnet_hdr_size;
1900 }
1901
1902 ret = virtio_alloc_queues(eth_dev);
1903 if (ret < 0)
1904 return ret;
1905
1906 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1907 ret = virtio_configure_intr(eth_dev);
1908 if (ret < 0) {
1909 PMD_INIT_LOG(ERR, "failed to configure interrupt");
1910 virtio_free_queues(hw);
1911 return ret;
1912 }
1913 }
1914
1915 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1916 /* Enable vector (0) for Link State Interrupt */
1917 if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
1918 VIRTIO_MSI_NO_VECTOR) {
1919 PMD_DRV_LOG(ERR, "failed to set config vector");
1920 return -EBUSY;
1921 }
1922
1923 virtio_reinit_complete(hw);
1924
1925 return 0;
1926 }
1927
1928 /*
1929 * This function is based on the probe() function in virtio_pci.c.
1930 * It returns 0 on success.
1931 */
1932 int
1933 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1934 {
1935 struct virtio_hw *hw = eth_dev->data->dev_private;
1936 uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1937 int vectorized = 0;
1938 int ret;
1939
1940 if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
1941 PMD_INIT_LOG(ERR,
1942 "Not sufficient headroom required = %d, avail = %d",
1943 (int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
1944 RTE_PKTMBUF_HEADROOM);
1945
1946 return -1;
1947 }
1948
1949 eth_dev->dev_ops = &virtio_eth_dev_ops;
1950
1951 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1952 set_rxtx_funcs(eth_dev);
1953 return 0;
1954 }
1955
1956 ret = virtio_dev_devargs_parse(eth_dev->device->devargs, &speed, &vectorized);
1957 if (ret < 0)
1958 return ret;
1959 hw->speed = speed;
1960 hw->duplex = DUPLEX_UNKNOWN;
1961
1962 /* Allocate memory for storing MAC addresses */
1963 eth_dev->data->mac_addrs = rte_zmalloc("virtio",
1964 VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
1965 if (eth_dev->data->mac_addrs == NULL) {
1966 PMD_INIT_LOG(ERR,
1967 "Failed to allocate %d bytes needed to store MAC addresses",
1968 VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
1969 return -ENOMEM;
1970 }
1971
1972 rte_spinlock_init(&hw->state_lock);
1973
1974 if (vectorized) {
1975 hw->use_vec_rx = 1;
1976 hw->use_vec_tx = 1;
1977 }
1978
1979 /* reset device and negotiate default features */
1980 ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1981 if (ret < 0)
1982 goto err_virtio_init;
1983
1984 if (vectorized) {
1985 if (!virtio_with_packed_queue(hw)) {
1986 hw->use_vec_tx = 0;
1987 } else {
1988 #if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
1989 hw->use_vec_rx = 0;
1990 hw->use_vec_tx = 0;
1991 PMD_DRV_LOG(INFO,
1992 "building environment do not support packed ring vectorized");
1993 #endif
1994 }
1995 }
1996
1997 hw->opened = 1;
1998
1999 return 0;
2000
2001 err_virtio_init:
2002 rte_free(eth_dev->data->mac_addrs);
2003 eth_dev->data->mac_addrs = NULL;
2004 return ret;
2005 }
2006
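/*
 * Map a numeric link speed (in Mbps) to the corresponding
 * RTE_ETH_LINK_SPEED_* capability flag. Unknown speeds map to 0, which
 * link_speed_handler() below relies on to reject invalid devargs.
 */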
2007 static uint32_t
2008 virtio_dev_speed_capa_get(uint32_t speed)
2009 {
2010 switch (speed) {
2011 case RTE_ETH_SPEED_NUM_10G:
2012 return RTE_ETH_LINK_SPEED_10G;
2013 case RTE_ETH_SPEED_NUM_20G:
2014 return RTE_ETH_LINK_SPEED_20G;
2015 case RTE_ETH_SPEED_NUM_25G:
2016 return RTE_ETH_LINK_SPEED_25G;
2017 case RTE_ETH_SPEED_NUM_40G:
2018 return RTE_ETH_LINK_SPEED_40G;
2019 case RTE_ETH_SPEED_NUM_50G:
2020 return RTE_ETH_LINK_SPEED_50G;
2021 case RTE_ETH_SPEED_NUM_56G:
2022 return RTE_ETH_LINK_SPEED_56G;
2023 case RTE_ETH_SPEED_NUM_100G:
2024 return RTE_ETH_LINK_SPEED_100G;
2025 case RTE_ETH_SPEED_NUM_200G:
2026 return RTE_ETH_LINK_SPEED_200G;
2027 case RTE_ETH_SPEED_NUM_400G:
2028 return RTE_ETH_LINK_SPEED_400G;
2029 default:
2030 return 0;
2031 }
2032 }
2033
2034 static int vectorized_check_handler(__rte_unused const char *key,
2035 const char *value, void *ret_val)
2036 {
2037 if (value == NULL || ret_val == NULL)
2038 return -EINVAL;
2039
2040 if (strcmp(value, "1") == 0)
2041 *(int *)ret_val = 1;
2042 else
2043 *(int *)ret_val = 0;
2044
2045 return 0;
2046 }
2047
2048 #define VIRTIO_ARG_SPEED "speed"
2049 #define VIRTIO_ARG_VECTORIZED "vectorized"
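/*
 * These keys can be passed through the device's devargs string, for
 * example (illustrative PCI allow-list syntax):
 *   -a 0000:00:04.0,speed=10000,vectorized=1
 * "speed" must be a value accepted by virtio_dev_speed_capa_get() and
 * "vectorized" set to "1" requests the vectorized datapath.
 */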
2050
2051 static int
2052 link_speed_handler(const char *key __rte_unused,
2053 const char *value, void *ret_val)
2054 {
2055 uint32_t val;
2056 if (!value || !ret_val)
2057 return -EINVAL;
2058 val = strtoul(value, NULL, 0);
2059 /* validate input */
2060 if (virtio_dev_speed_capa_get(val) == 0)
2061 return -EINVAL;
2062 *(uint32_t *)ret_val = val;
2063
2064 return 0;
2065 }
2066
2067
2068 static int
2069 virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vectorized)
2070 {
2071 struct rte_kvargs *kvlist;
2072 int ret = 0;
2073
2074 if (devargs == NULL)
2075 return 0;
2076
2077 kvlist = rte_kvargs_parse(devargs->args, NULL);
2078 if (kvlist == NULL) {
2079 PMD_INIT_LOG(ERR, "error when parsing param");
2080 return 0;
2081 }
2082
2083 if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
2084 ret = rte_kvargs_process(kvlist,
2085 VIRTIO_ARG_SPEED,
2086 link_speed_handler, speed);
2087 if (ret < 0) {
2088 PMD_INIT_LOG(ERR, "Failed to parse %s",
2089 VIRTIO_ARG_SPEED);
2090 goto exit;
2091 }
2092 }
2093
2094 if (vectorized &&
2095 rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
2096 ret = rte_kvargs_process(kvlist,
2097 VIRTIO_ARG_VECTORIZED,
2098 vectorized_check_handler, vectorized);
2099 if (ret < 0) {
2100 PMD_INIT_LOG(ERR, "Failed to parse %s",
2101 VIRTIO_ARG_VECTORIZED);
2102 goto exit;
2103 }
2104 }
2105
2106 exit:
2107 rte_kvargs_free(kvlist);
2108 return ret;
2109 }
2110
2111 static uint8_t
2112 rx_offload_enabled(struct virtio_hw *hw)
2113 {
2114 return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
2115 virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2116 virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
2117 }
2118
2119 static uint8_t
2120 tx_offload_enabled(struct virtio_hw *hw)
2121 {
2122 return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2123 virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2124 virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2125 }
2126
2127 /*
2128 * Configure virtio device
2129 * It returns 0 on success.
2130 */
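/*
 * The configure step below validates the requested Rx/Tx multi-queue
 * modes, translates the enabled Rx/Tx offloads into virtio feature bits,
 * renegotiates features with the device when they changed, and finally
 * decides whether the vectorized or in-order datapaths can be used for
 * this configuration.
 */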
2131 static int
2132 virtio_dev_configure(struct rte_eth_dev *dev)
2133 {
2134 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2135 const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
2136 struct virtio_hw *hw = dev->data->dev_private;
2137 uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
2138 hw->vtnet_hdr_size;
2139 uint64_t rx_offloads = rxmode->offloads;
2140 uint64_t tx_offloads = txmode->offloads;
2141 uint64_t req_features;
2142 int ret;
2143
2144 PMD_INIT_LOG(DEBUG, "configure");
2145 req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
2146
2147 if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
2148 PMD_DRV_LOG(ERR,
2149 "Unsupported Rx multi queue mode %d",
2150 rxmode->mq_mode);
2151 return -EINVAL;
2152 }
2153
2154 if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
2155 PMD_DRV_LOG(ERR,
2156 "Unsupported Tx multi queue mode %d",
2157 txmode->mq_mode);
2158 return -EINVAL;
2159 }
2160
2161 if (dev->data->dev_conf.intr_conf.rxq) {
2162 ret = virtio_init_device(dev, hw->req_guest_features);
2163 if (ret < 0)
2164 return ret;
2165 }
2166
2167 if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)
2168 req_features |= (1ULL << VIRTIO_NET_F_RSS);
2169
2170 if (rxmode->mtu > hw->max_mtu)
2171 req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
2172
2173 hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
2174
2175 if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2176 RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
2177 req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
2178
2179 if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
2180 req_features |=
2181 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2182 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
2183
2184 if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2185 RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
2186 req_features |= (1ULL << VIRTIO_NET_F_CSUM);
2187
2188 if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
2189 req_features |=
2190 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2191 (1ULL << VIRTIO_NET_F_HOST_TSO6);
2192
2193 /* if the requested features changed, reinit the device */
2194 if (req_features != hw->req_guest_features) {
2195 ret = virtio_init_device(dev, req_features);
2196 if (ret < 0)
2197 return ret;
2198 }
2199
2200 /* if queues are not allocated, reinit the device */
2201 if (hw->vqs == NULL) {
2202 ret = virtio_init_device(dev, hw->req_guest_features);
2203 if (ret < 0)
2204 return ret;
2205 }
2206
2207 if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
2208 !virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
2209 PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device");
2210 return -ENOTSUP;
2211 }
2212
2213 if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2214 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
2215 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
2216 PMD_DRV_LOG(ERR,
2217 "rx checksum not available on this host");
2218 return -ENOTSUP;
2219 }
2220
2221 if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
2222 (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2223 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
2224 PMD_DRV_LOG(ERR,
2225 "Large Receive Offload not available on this host");
2226 return -ENOTSUP;
2227 }
2228
2229 /* start control queue */
2230 if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
2231 virtio_dev_cq_start(dev);
2232
2233 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2234 hw->vlan_strip = 1;
2235
2236 hw->rx_ol_scatter = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
2237
2238 if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2239 !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2240 PMD_DRV_LOG(ERR,
2241 "vlan filtering not available on this host");
2242 return -ENOTSUP;
2243 }
2244
2245 hw->has_tx_offload = tx_offload_enabled(hw);
2246 hw->has_rx_offload = rx_offload_enabled(hw);
2247
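/*
 * Vectorized datapath selection for packed rings: besides a capable SIMD
 * unit (AVX512 on x86_64, NEON on Arm), VIRTIO_F_IN_ORDER and
 * VIRTIO_F_VERSION_1 must have been negotiated; otherwise fall back to
 * the scalar paths.
 */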
2248 if (virtio_with_packed_queue(hw)) {
2249 #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
2250 if ((hw->use_vec_rx || hw->use_vec_tx) &&
2251 (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
2252 !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2253 !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2254 rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
2255 PMD_DRV_LOG(INFO,
2256 "disabled packed ring vectorized path for requirements not met");
2257 hw->use_vec_rx = 0;
2258 hw->use_vec_tx = 0;
2259 }
2260 #elif defined(RTE_ARCH_ARM)
2261 if ((hw->use_vec_rx || hw->use_vec_tx) &&
2262 (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
2263 !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2264 !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2265 rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
2266 PMD_DRV_LOG(INFO,
2267 "disabled packed ring vectorized path for requirements not met");
2268 hw->use_vec_rx = 0;
2269 hw->use_vec_tx = 0;
2270 }
2271 #else
2272 hw->use_vec_rx = 0;
2273 hw->use_vec_tx = 0;
2274 #endif
2275
2276 if (hw->use_vec_rx) {
2277 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2278 PMD_DRV_LOG(INFO,
2279 "disabled packed ring vectorized rx for mrg_rxbuf enabled");
2280 hw->use_vec_rx = 0;
2281 }
2282
2283 if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2284 PMD_DRV_LOG(INFO,
2285 "disabled packed ring vectorized rx for TCP_LRO enabled");
2286 hw->use_vec_rx = 0;
2287 }
2288 }
2289 } else {
2290 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
2291 hw->use_inorder_tx = 1;
2292 hw->use_inorder_rx = 1;
2293 hw->use_vec_rx = 0;
2294 }
2295
2296 if (hw->use_vec_rx) {
2297 #if defined RTE_ARCH_ARM
2298 if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
2299 PMD_DRV_LOG(INFO,
2300 "disabled split ring vectorized path for requirement not met");
2301 hw->use_vec_rx = 0;
2302 }
2303 #endif
2304 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2305 PMD_DRV_LOG(INFO,
2306 "disabled split ring vectorized rx for mrg_rxbuf enabled");
2307 hw->use_vec_rx = 0;
2308 }
2309
2310 if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2311 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2312 RTE_ETH_RX_OFFLOAD_TCP_LRO |
2313 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
2314 PMD_DRV_LOG(INFO,
2315 "disabled split ring vectorized rx for offloading enabled");
2316 hw->use_vec_rx = 0;
2317 }
2318
2319 if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
2320 PMD_DRV_LOG(INFO,
2321 "disabled split ring vectorized rx, max SIMD bitwidth too low");
2322 hw->use_vec_rx = 0;
2323 }
2324 }
2325 }
2326
2327 return 0;
2328 }
2329
2330
2331 static int
2332 virtio_dev_start(struct rte_eth_dev *dev)
2333 {
2334 uint16_t nb_queues, i;
2335 struct virtqueue *vq;
2336 struct virtio_hw *hw = dev->data->dev_private;
2337 int ret;
2338
2339 /* Finish the initialization of the queues */
2340 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2341 ret = virtio_dev_rx_queue_setup_finish(dev, i);
2342 if (ret < 0)
2343 return ret;
2344 }
2345 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2346 ret = virtio_dev_tx_queue_setup_finish(dev, i);
2347 if (ret < 0)
2348 return ret;
2349 }
2350
2351 /* check if lsc interrupt feature is enabled */
2352 if (dev->data->dev_conf.intr_conf.lsc) {
2353 if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
2354 PMD_DRV_LOG(ERR, "link status not supported by host");
2355 return -ENOTSUP;
2356 }
2357 }
2358
2359 /* Enable uio/vfio intr/eventfd mapping: we already did this in
2360 * device configure, but it may have been unmapped while the device
2361 * was stopped.
2362 */
2363 if (dev->data->dev_conf.intr_conf.lsc ||
2364 dev->data->dev_conf.intr_conf.rxq) {
2365 virtio_intr_disable(dev);
2366
2367 /* Setup interrupt callback */
2368 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2369 rte_intr_callback_register(dev->intr_handle,
2370 virtio_interrupt_handler,
2371 dev);
2372
2373 if (virtio_intr_enable(dev) < 0) {
2374 PMD_DRV_LOG(ERR, "interrupt enable failed");
2375 return -EIO;
2376 }
2377 }
2378
2379 /* Notify the backend.
2380 * Otherwise the tap backend might already have stopped its queue due to fullness
2381 * and the vhost backend would have no chance to be woken up.
2382 */
2383 nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2384 if (hw->max_queue_pairs > 1) {
2385 if (virtio_set_multiple_queues(dev, nb_queues) != 0)
2386 return -EINVAL;
2387 }
2388
2389 PMD_INIT_LOG(DEBUG, "nb_queues=%u (port=%u)", nb_queues,
2390 dev->data->port_id);
2391
2392 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2393 vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2394 /* Flush the old packets */
2395 virtqueue_rxvq_flush(vq);
2396 virtqueue_notify(vq);
2397 }
2398
2399 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2400 vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2401 virtqueue_notify(vq);
2402 }
2403
2404 PMD_INIT_LOG(DEBUG, "Notified backend at initialization (port=%u)",
2405 dev->data->port_id);
2406
2407 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2408 vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2409 VIRTQUEUE_DUMP(vq);
2410 }
2411
2412 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2413 vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2414 VIRTQUEUE_DUMP(vq);
2415 }
2416
2417 set_rxtx_funcs(dev);
2418 hw->started = 1;
2419
2420 for (i = 0; i < dev->data->nb_rx_queues; i++)
2421 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2422 for (i = 0; i < dev->data->nb_tx_queues; i++)
2423 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2424
2425 /* Initialize Link state */
2426 virtio_dev_link_update(dev, 0);
2427
2428 return 0;
2429 }
2430
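/*
 * Detach and free any mbufs still held by the Rx/Tx virtqueues, e.g.
 * before the queues are freed or the device is reinitialized.
 */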
2431 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
2432 {
2433 struct virtio_hw *hw = dev->data->dev_private;
2434 uint16_t nr_vq = virtio_get_nr_vq(hw);
2435 const char *type __rte_unused;
2436 unsigned int i, mbuf_num = 0;
2437 struct virtqueue *vq;
2438 struct rte_mbuf *buf;
2439 int queue_type;
2440
2441 if (hw->vqs == NULL)
2442 return;
2443
2444 for (i = 0; i < nr_vq; i++) {
2445 vq = hw->vqs[i];
2446 if (!vq)
2447 continue;
2448
2449 queue_type = virtio_get_queue_type(hw, i);
2450 if (queue_type == VTNET_RQ)
2451 type = "rxq";
2452 else if (queue_type == VTNET_TQ)
2453 type = "txq";
2454 else
2455 continue;
2456
2457 PMD_INIT_LOG(DEBUG,
2458 "Before freeing %s[%d] used and unused buf",
2459 type, i);
2460 VIRTQUEUE_DUMP(vq);
2461
2462 while ((buf = virtqueue_detach_unused(vq)) != NULL) {
2463 rte_pktmbuf_free(buf);
2464 mbuf_num++;
2465 }
2466
2467 PMD_INIT_LOG(DEBUG,
2468 "After freeing %s[%d] used and unused buf",
2469 type, i);
2470 VIRTQUEUE_DUMP(vq);
2471 }
2472
2473 PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
2474 }
2475
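/*
 * On stop, drain completed Tx descriptors using the cleanup routine
 * matching the negotiated ring layout (packed vs. split, in-order or not).
 */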
2476 static void
2477 virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
2478 {
2479 struct virtio_hw *hw = dev->data->dev_private;
2480 struct virtqueue *vq;
2481 int qidx;
2482 void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
2483
2484 if (virtio_with_packed_queue(hw)) {
2485 if (hw->use_vec_tx)
2486 xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2487 else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
2488 xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2489 else
2490 xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
2491 } else {
2492 if (hw->use_inorder_tx)
2493 xmit_cleanup = &virtio_xmit_cleanup_inorder;
2494 else
2495 xmit_cleanup = &virtio_xmit_cleanup;
2496 }
2497
2498 for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
2499 vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
2500 if (vq != NULL)
2501 xmit_cleanup(vq, virtqueue_nused(vq));
2502 }
2503 }
2504
2505 /*
2506 * Stop device: disable interrupt and mark link down
2507 */
2508 int
2509 virtio_dev_stop(struct rte_eth_dev *dev)
2510 {
2511 struct virtio_hw *hw = dev->data->dev_private;
2512 struct rte_eth_link link;
2513 struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
2514 uint16_t i;
2515
2516 PMD_INIT_LOG(DEBUG, "stop");
2517 dev->data->dev_started = 0;
2518
2519 rte_spinlock_lock(&hw->state_lock);
2520 if (!hw->started)
2521 goto out_unlock;
2522 hw->started = 0;
2523
2524 virtio_tx_completed_cleanup(dev);
2525
2526 if (intr_conf->lsc || intr_conf->rxq) {
2527 virtio_intr_disable(dev);
2528
2529 /* Reset interrupt callback */
2530 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
2531 rte_intr_callback_unregister(dev->intr_handle,
2532 virtio_interrupt_handler,
2533 dev);
2534 }
2535 }
2536
2537 memset(&link, 0, sizeof(link));
2538 rte_eth_linkstatus_set(dev, &link);
2539 out_unlock:
2540 rte_spinlock_unlock(&hw->state_lock);
2541
2542 for (i = 0; i < dev->data->nb_rx_queues; i++)
2543 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2544 for (i = 0; i < dev->data->nb_tx_queues; i++)
2545 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2546
2547 return 0;
2548 }
2549
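/*
 * Report link status: when VIRTIO_NET_F_STATUS was negotiated the status
 * is read from the device config space, otherwise the link is considered
 * up whenever the port has been started.
 */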
2550 static int
2551 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2552 {
2553 struct rte_eth_link link;
2554 uint16_t status;
2555 struct virtio_hw *hw = dev->data->dev_private;
2556
2557 memset(&link, 0, sizeof(link));
2558 link.link_duplex = hw->duplex;
2559 link.link_speed = hw->speed;
2560 link.link_autoneg = RTE_ETH_LINK_AUTONEG;
2561
2562 if (!hw->started) {
2563 link.link_status = RTE_ETH_LINK_DOWN;
2564 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2565 } else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
2566 PMD_INIT_LOG(DEBUG, "Get link status from hw");
2567 virtio_read_dev_config(hw,
2568 offsetof(struct virtio_net_config, status),
2569 &status, sizeof(status));
2570 if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
2571 link.link_status = RTE_ETH_LINK_DOWN;
2572 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2573 PMD_INIT_LOG(DEBUG, "Port %d is down",
2574 dev->data->port_id);
2575 } else {
2576 link.link_status = RTE_ETH_LINK_UP;
2577 if (hw->get_speed_via_feat)
2578 virtio_get_speed_duplex(dev, &link);
2579 PMD_INIT_LOG(DEBUG, "Port %d is up",
2580 dev->data->port_id);
2581 }
2582 } else {
2583 link.link_status = RTE_ETH_LINK_UP;
2584 if (hw->get_speed_via_feat)
2585 virtio_get_speed_duplex(dev, &link);
2586 }
2587
2588 return rte_eth_linkstatus_set(dev, &link);
2589 }
2590
2591 static int
2592 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2593 {
2594 const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2595 struct virtio_hw *hw = dev->data->dev_private;
2596 uint64_t offloads = rxmode->offloads;
2597
2598 if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2599 if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2600 !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2601
2602 PMD_DRV_LOG(NOTICE,
2603 "vlan filtering not available on this host");
2604
2605 return -ENOTSUP;
2606 }
2607 }
2608
2609 if (mask & RTE_ETH_VLAN_STRIP_MASK)
2610 hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
2611
2612 return 0;
2613 }
2614
2615 static int
2616 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2617 {
2618 uint64_t tso_mask, host_features;
2619 uint32_t rss_hash_types = 0;
2620 struct virtio_hw *hw = dev->data->dev_private;
2621 dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
2622
2623 dev_info->max_rx_queues =
2624 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
2625 dev_info->max_tx_queues =
2626 RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
2627 dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
2628 dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
2629 dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
2630 dev_info->max_mtu = hw->max_mtu;
2631
2632 host_features = VIRTIO_OPS(hw)->get_features(hw);
2633 dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2634 if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
2635 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
2636 if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2637 dev_info->rx_offload_capa |=
2638 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2639 RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
2640 }
2641 if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
2642 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
2643 tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2644 (1ULL << VIRTIO_NET_F_GUEST_TSO6);
2645 if ((host_features & tso_mask) == tso_mask)
2646 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
2647
2648 dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
2649 RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
2650 if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2651 dev_info->tx_offload_capa |=
2652 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2653 RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
2654 }
2655 tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2656 (1ULL << VIRTIO_NET_F_HOST_TSO6);
2657 if ((host_features & tso_mask) == tso_mask)
2658 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2659
2660 if (host_features & (1ULL << VIRTIO_NET_F_RSS)) {
2661 virtio_dev_get_rss_config(hw, &rss_hash_types);
2662 dev_info->hash_key_size = VIRTIO_NET_RSS_KEY_SIZE;
2663 dev_info->reta_size = VIRTIO_NET_RSS_RETA_SIZE;
2664 dev_info->flow_type_rss_offloads =
2665 virtio_to_ethdev_rss_offloads(rss_hash_types);
2666 } else {
2667 dev_info->hash_key_size = 0;
2668 dev_info->reta_size = 0;
2669 dev_info->flow_type_rss_offloads = 0;
2670 }
2671
2672 if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
2673 /*
2674 * According to 2.7 Packed Virtqueues,
2675 * 2.7.10.1 Structure Size and Alignment:
2676 * The Queue Size value does not have to be a power of 2.
2677 */
2678 dev_info->rx_desc_lim.nb_max = UINT16_MAX;
2679 dev_info->tx_desc_lim.nb_max = UINT16_MAX;
2680 } else {
2681 /*
2682 * According to 2.6 Split Virtqueues:
2683 * Queue Size value is always a power of 2. The maximum Queue
2684 * Size value is 32768.
2685 */
2686 dev_info->rx_desc_lim.nb_max = 32768;
2687 dev_info->tx_desc_lim.nb_max = 32768;
2688 }
2689 /*
2690 * The actual minimum differs between virtqueue kinds, but to avoid
2691 * tangling the code with separate branches, rely on the default
2692 * thresholds, since the descriptor count must be at least their size.
2693 */
2694 dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
2695 RTE_VIRTIO_VPMD_RX_REARM_THRESH);
2696 dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
2697 dev_info->rx_desc_lim.nb_align = 1;
2698 dev_info->tx_desc_lim.nb_align = 1;
2699
2700 return 0;
2701 }
2702
2703 /*
2704 * It enables testpmd to collect per queue stats.
2705 */
2706 static int
2707 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2708 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2709 __rte_unused uint8_t is_rx)
2710 {
2711 return 0;
2712 }
2713
2714 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
2715 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);
2716