/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_arp.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static int virtio_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);

static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);

static int virtio_dev_queue_stats_mapping_set(
	struct rte_eth_dev *eth_dev,
	uint16_t queue_id,
	uint8_t stat_idx,
	uint8_t is_rx);

int virtio_logtype_init;
int virtio_logtype_driver;

static void virtio_notify_peers(struct rte_eth_dev *dev);
static void virtio_ack_link_announce(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};

struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
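/*
 * Each entry in the tables below maps an xstats name to the byte offset
 * of a uint64_t counter inside struct virtnet_rx/virtnet_tx.
 * virtio_dev_xstats_get() reads the value by dereferencing
 * (queue pointer + offset), so the tables stay in sync with the stats
 * layout automatically.
 */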
/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_rx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
	{"errors", offsetof(struct virtnet_rx, stats.errors)},
	{"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
	{"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_tx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
	{"errors", offsetof(struct virtnet_tx, stats.errors)},
	{"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
	{"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
			      sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
			      sizeof(rte_virtio_txq_stat_strings[0]))

struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];

static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
		int *dlen, int pkt_num)
{
	uint32_t head, i;
	int k, sum = 0;
	virtio_net_ctrl_ack status = ~0;
	struct virtio_pmd_ctrl *result;
	struct virtqueue *vq;

	ctrl->status = status;

	if (!cvq || !cvq->vq) {
		PMD_INIT_LOG(ERR, "Control queue is not supported.");
		return -1;
	}

	rte_spinlock_lock(&cvq->lock);
	vq = cvq->vq;
	head = vq->vq_desc_head_idx;

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		"vq->hw->cvq = %p vq = %p",
		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
		rte_spinlock_unlock(&cvq->lock);
		return -1;
	}

	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
		sizeof(struct virtio_pmd_ctrl));

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
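	/*
	 * The chain built below therefore looks like this (all addresses
	 * point into the virtio_net_hdr memzone the command was copied to):
	 *
	 *   desc[head]  ctrl header, len = sizeof(virtio_net_ctrl_hdr)  NEXT
	 *   desc[...]   pkt_num data blocks of dlen[k] bytes each       NEXT
	 *   desc[last]  1-byte status, written back by the device       WRITE
	 */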
	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_ring.desc[head].next;

	for (k = 0; k < pkt_num; k++) {
		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_ring.desc[i].next;
	}

	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	result = cvq->virtio_net_hdr_mz->addr;

	rte_spinlock_unlock(&cvq->lock);
	return result->status;
}

static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

	dlen[0] = sizeof(uint16_t);

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
			  "failed, this is too late now...");
		return -EINVAL;
	}

	return 0;
}

static void
virtio_dev_queue_release(void *queue __rte_unused)
{
	/* do nothing */
}

static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
	uint16_t nr_vq = hw->max_queue_pairs * 2;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		nr_vq += 1;

	return nr_vq;
}
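/*
 * Virtqueue index layout, fixed by the virtio-net spec: queue pair i uses
 * vq 2*i for Rx and 2*i + 1 for Tx; the control queue, when negotiated,
 * is the last one (index max_queue_pairs * 2).
 */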
static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);
}

static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
	unsigned int vq_size, size;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq = NULL;
	struct virtnet_tx *txvq = NULL;
	struct virtnet_ctl *cvq = NULL;
	struct virtqueue *vq;
	size_t sz_hdr_mz = 0;
	void *sw_ring = NULL;
	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
	int ret;

	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; a size of 0 means the
	 * virtqueue does not exist.
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "virtqueue does not exist");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "virtqueue size is not power of 2");
		return -EINVAL;
	}

	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
		 dev->data->port_id, vtpci_queue_idx);

	size = RTE_ALIGN_CEIL(sizeof(*vq) +
				vq_size * sizeof(struct vq_desc_extra),
				RTE_CACHE_LINE_SIZE);
	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 * and indirect ring elements
		 */
		sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		sz_hdr_mz = PAGE_SIZE;
	}

	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
				SOCKET_ID_ANY);
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "can not allocate vq");
		return -ENOMEM;
	}
	hw->vqs[vtpci_queue_idx] = vq;

	vq->hw = hw;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
		     size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
			VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			ret = -ENOMEM;
			goto fail_q_alloc;
		}
	}

	memset(mz->addr, 0, mz->len);

	vq->vq_ring_mem = mz->iova;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
		     (uint64_t)mz->iova);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
		     (uint64_t)(uintptr_t)mz->addr);

	virtio_init_vring(vq);
	if (sz_hdr_mz) {
		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
			 dev->data->port_id, vtpci_queue_idx);
		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
				SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);
		if (hdr_mz == NULL) {
			if (rte_errno == EEXIST)
				hdr_mz = rte_memzone_lookup(vq_hdr_name);
			if (hdr_mz == NULL) {
				ret = -ENOMEM;
				goto fail_q_alloc;
			}
		}
	}

	if (queue_type == VTNET_RQ) {
		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
			       sizeof(vq->sw_ring[0]);

		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sw_ring) {
			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
			ret = -ENOMEM;
			goto fail_q_alloc;
		}

		vq->sw_ring = sw_ring;
		rxvq = &vq->rxq;
		rxvq->vq = vq;
		rxvq->port_id = dev->data->port_id;
		rxvq->mz = mz;
	} else if (queue_type == VTNET_TQ) {
		txvq = &vq->txq;
		txvq->vq = vq;
		txvq->port_id = dev->data->port_id;
		txvq->mz = mz;
		txvq->virtio_net_hdr_mz = hdr_mz;
		txvq->virtio_net_hdr_mem = hdr_mz->iova;
	} else if (queue_type == VTNET_CQ) {
		cvq = &vq->cq;
		cvq->vq = vq;
		cvq->mz = mz;
		cvq->virtio_net_hdr_mz = hdr_mz;
		cvq->virtio_net_hdr_mem = hdr_mz->iova;
		memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);

		hw->cvq = cvq;
	}

	/* For the virtio_user case (i.e. when hw->virtio_user_dev is set),
	 * we use the virtual address, and _offset_ must be set accordingly;
	 * see VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
	 */
	if (!hw->virtio_user_dev)
		vq->offset = offsetof(struct rte_mbuf, buf_iova);
	else {
		vq->vq_ring_mem = (uintptr_t)mz->addr;
		vq->offset = offsetof(struct rte_mbuf, buf_addr);
		if (queue_type == VTNET_TQ)
			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
		else if (queue_type == VTNET_CQ)
			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
	}

	if (queue_type == VTNET_TQ) {
		struct virtio_tx_region *txr;
		unsigned int i;

		txr = hdr_mz->addr;
		memset(txr, 0, vq_size * sizeof(*txr));
		for (i = 0; i < vq_size; i++) {
			struct vring_desc *start_dp = txr[i].tx_indir;

			vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));

			/* first indirect descriptor is always the tx header */
			start_dp->addr = txvq->virtio_net_hdr_mem
				+ i * sizeof(*txr)
				+ offsetof(struct virtio_tx_region, tx_hdr);

			start_dp->len = hw->vtnet_hdr_size;
			start_dp->flags = VRING_DESC_F_NEXT;
		}
	}

	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		PMD_INIT_LOG(ERR, "setup_queue failed");
		return -EINVAL;
	}

	return 0;

fail_q_alloc:
	rte_free(sw_ring);
	rte_memzone_free(hdr_mz);
	rte_memzone_free(mz);
	rte_free(vq);

	return ret;
}
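/*
 * Tear-down mirrors virtio_init_queue(): the per-queue resources (Rx SW
 * ring, vring memzone, header memzone) are released according to queue
 * type, then the virtqueue itself; hw->vqs is cleared so that a later
 * reinit starts from a clean state.
 */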
static void
virtio_free_queues(struct virtio_hw *hw)
{
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	struct virtqueue *vq;
	int queue_type;
	uint16_t i;

	if (hw->vqs == NULL)
		return;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		if (!vq)
			continue;

		queue_type = virtio_get_queue_type(hw, i);
		if (queue_type == VTNET_RQ) {
			rte_free(vq->sw_ring);
			rte_memzone_free(vq->rxq.mz);
		} else if (queue_type == VTNET_TQ) {
			rte_memzone_free(vq->txq.mz);
			rte_memzone_free(vq->txq.virtio_net_hdr_mz);
		} else {
			rte_memzone_free(vq->cq.mz);
			rte_memzone_free(vq->cq.virtio_net_hdr_mz);
		}

		rte_free(vq);
		hw->vqs[i] = NULL;
	}

	rte_free(hw->vqs);
	hw->vqs = NULL;
}

static int
virtio_alloc_queues(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	uint16_t i;
	int ret;

	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!hw->vqs) {
		PMD_INIT_LOG(ERR, "failed to allocate vqs");
		return -ENOMEM;
	}

	for (i = 0; i < nr_vq; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(hw);
			return ret;
		}
	}

	return 0;
}

static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
	if (intr_conf->rxq)
		virtio_queues_unbind_intr(dev);

	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);
		rte_intr_efd_disable(dev->intr_handle);
		rte_free(dev->intr_handle->intr_vec);
		dev->intr_handle->intr_vec = NULL;
	}

	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(hw);
}

static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable promisc");
}

static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable promisc");
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}
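/*
 * Worked example for the MTU check below: with the mergeable-rxbuf header
 * (12 bytes), ether_hdr_len = 14 (Ethernet) + 4 (VLAN) + 12 = 30 bytes,
 * so a 1500-byte MTU needs a 1530-byte frame. The upper bound is further
 * clamped to VIRTIO_MAX_RX_PKTLEN.
 */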
#define VLAN_TAG_LEN	4    /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
				 hw->vtnet_hdr_size;
	uint32_t frame_size = mtu + ether_hdr_len;
	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;

	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);

	if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
			ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
		return -EINVAL;
	}
	return 0;
}

static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
	struct virtqueue *vq = rxvq->vq;

	virtqueue_enable_intr(vq);
	return 0;
}

static int
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
	struct virtqueue *vq = rxvq->vq;

	virtqueue_disable_intr(vq);
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure           = virtio_dev_configure,
	.dev_start               = virtio_dev_start,
	.dev_stop                = virtio_dev_stop,
	.dev_close               = virtio_dev_close,
	.promiscuous_enable      = virtio_dev_promiscuous_enable,
	.promiscuous_disable     = virtio_dev_promiscuous_disable,
	.allmulticast_enable     = virtio_dev_allmulticast_enable,
	.allmulticast_disable    = virtio_dev_allmulticast_disable,
	.mtu_set                 = virtio_mtu_set,
	.dev_infos_get           = virtio_dev_info_get,
	.stats_get               = virtio_dev_stats_get,
	.xstats_get              = virtio_dev_xstats_get,
	.xstats_get_names        = virtio_dev_xstats_get_names,
	.stats_reset             = virtio_dev_stats_reset,
	.xstats_reset            = virtio_dev_stats_reset,
	.link_update             = virtio_dev_link_update,
	.vlan_offload_set        = virtio_dev_vlan_offload_set,
	.rx_queue_setup          = virtio_dev_rx_queue_setup,
	.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
	.rx_queue_release        = virtio_dev_queue_release,
	.rx_descriptor_done      = virtio_dev_rx_queue_done,
	.tx_queue_setup          = virtio_dev_tx_queue_setup,
	.tx_queue_release        = virtio_dev_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set         = virtio_vlan_filter_set,
	.mac_addr_add            = virtio_mac_addr_add,
	.mac_addr_remove         = virtio_mac_addr_remove,
	.mac_addr_set            = virtio_mac_addr_set,
};
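/*
 * Aggregate counters below cover all queues, but the per-queue
 * q_[io]packets/q_[io]bytes arrays only exist for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; the rest are folded into the
 * totals only.
 */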
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		stats->opackets += txvq->stats.packets;
		stats->obytes += txvq->stats.bytes;
		stats->oerrors += txvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txvq->stats.packets;
			stats->q_obytes[i] = txvq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		stats->ipackets += rxvq->stats.packets;
		stats->ibytes += rxvq->stats.bytes;
		stats->ierrors += rxvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxvq->stats.packets;
			stats->q_ibytes[i] = rxvq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       __rte_unused unsigned limit)
{
	unsigned i;
	unsigned count = 0;
	unsigned t;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (xstats_names != NULL) {
		/* Note: limit checked in rte_eth_xstats_names() */

		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
			if (rxvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"rx_q%u_%s", i,
					rte_virtio_rxq_stat_strings[t].name);
				count++;
			}
		}

		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			struct virtnet_tx *txvq = dev->data->tx_queues[i];
			if (txvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"tx_q%u_%s", i,
					rte_virtio_txq_stat_strings[t].name);
				count++;
			}
		}
		return count;
	}
	return nstats;
}

static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}

static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);

	return 0;
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		txvq->stats.packets = 0;
		txvq->stats.bytes = 0;
		txvq->stats.errors = 0;
		txvq->stats.multicast = 0;
		txvq->stats.broadcast = 0;
		memset(txvq->stats.size_bins, 0,
		       sizeof(txvq->stats.size_bins[0]) * 8);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		rxvq->stats.packets = 0;
		rxvq->stats.bytes = 0;
		rxvq->stats.errors = 0;
		rxvq->stats.multicast = 0;
		rxvq->stats.broadcast = 0;
		memset(rxvq->stats.size_bins, 0,
		       sizeof(rxvq->stats.size_bins[0]) * 8);
	}
}
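/*
 * MAC address programming uses one of three paths, tried in this order
 * by virtio_mac_addr_set() further below: an atomic
 * VIRTIO_NET_CTRL_MAC_ADDR_SET control command, a byte-wise config-space
 * write (VIRTIO_NET_F_MAC), or, when neither is available, no update
 * can be persisted to the device.
 */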
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}
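/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET payload: two virtio_net_ctrl_mac tables
 * back to back (unicast first, then multicast), each an entry count
 * followed by that many 6-byte addresses. This matches the two len[]
 * segments assembled in virtio_mac_table_set() below.
 */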
static int
virtio_mac_table_set(struct virtio_hw *hw,
		     const struct virtio_net_ctrl_mac *uc,
		     const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return -1;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
	return err;
}

static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return -EINVAL;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		const struct ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
	}

	return virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct ether_addr *addrs = dev->data->mac_addrs;
	struct virtio_net_ctrl_mac *uc, *mc;
	unsigned int i;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		struct virtio_net_ctrl_mac *tbl;

		if (i == index || is_zero_ether_addr(addrs + i))
			continue;

		tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
		memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
		virtio_send_command(hw->cvq, &ctrl, &len, 1);
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		virtio_set_hwaddr(hw);
}

static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int len;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
		return -ENOTSUP;

	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
	len = sizeof(vlan_id);

	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
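/*
 * Note: hw->use_msix is re-detected after each enable/disable because the
 * device's MSI-X state may change when the interrupt is remapped (e.g.
 * by VFIO); stale state would break config/queue vector programming.
 */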
static int
virtio_intr_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (rte_intr_enable(dev->intr_handle) < 0)
		return -1;

	if (!hw->virtio_user_dev)
		hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

	return 0;
}

static int
virtio_intr_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (rte_intr_disable(dev->intr_handle) < 0)
		return -1;

	if (!hw->virtio_user_dev)
		hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

	return 0;
}

static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	/* Prepare guest_features: features that the driver wants to support */
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

	/* If supported, ensure MTU value is valid before acknowledging it. */
	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
		struct virtio_net_config config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mtu),
			&config.mtu, sizeof(config.mtu));

		if (config.mtu < ETHER_MIN_MTU)
			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
	}

	/*
	 * Negotiate features: a subset of the device's feature bits is
	 * written back as the guest's feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}

int
virtio_dev_pause(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	rte_spinlock_lock(&hw->state_lock);

	if (hw->started == 0) {
		/* Device is just stopped. */
		rte_spinlock_unlock(&hw->state_lock);
		return -1;
	}
	hw->started = 0;
	/*
	 * Prevent the worker threads from touching queues to avoid contention;
	 * 1 ms should be enough for the ongoing Tx function to finish.
	 */
	rte_delay_ms(1);
	return 0;
}

/*
 * Recover hw state to let the worker threads continue.
 */
void
virtio_dev_resume(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}

/*
 * Should be called only after device is paused.
 */
int
virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
		int nb_pkts)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_tx *txvq = dev->data->tx_queues[0];
	int ret;

	hw->inject_pkts = tx_pkts;
	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
	hw->inject_pkts = NULL;

	return ret;
}

static void
virtio_notify_peers(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct rte_mbuf *rarp_mbuf;

	if (!dev->data->rx_queues)
		return;

	rxvq = dev->data->rx_queues[0];
	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
			(struct ether_addr *)hw->mac_addr);
	if (rarp_mbuf == NULL) {
		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
		return;
	}

	/* If virtio port just stopped, no need to send RARP */
	if (virtio_dev_pause(dev) < 0) {
		rte_pktmbuf_free(rarp_mbuf);
		return;
	}

	virtio_inject_pkts(dev, &rarp_mbuf, 1);
	virtio_dev_resume(dev);
}

static void
virtio_ack_link_announce(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;

	ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;

	virtio_send_command(hw->cvq, &ctrl, NULL, 0);
}
/*
 * Process virtio config changed interrupt. Call the callback
 * if link state changed, generate gratuitous RARP packet if
 * the status indicates an ANNOUNCE.
 */
void
virtio_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	if (virtio_intr_enable(dev) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
	}

	if (isr & VIRTIO_NET_S_ANNOUNCE) {
		virtio_notify_peers(dev);
		if (hw->cvq)
			virtio_ack_link_announce(dev);
	}
}

/* set rx and tx handlers according to what is supported */
static void
set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	if (hw->use_simple_rx) {
		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		PMD_INIT_LOG(INFO,
			"virtio: using mergeable buffer Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
	} else {
		PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
	}

	if (hw->use_simple_tx) {
		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
	} else {
		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts;
	}
}
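/*
 * Rx path chosen above, in order of precedence:
 *   hw->use_simple_rx        -> virtio_recv_pkts_vec (vectorized)
 *   VIRTIO_NET_F_MRG_RXBUF   -> virtio_recv_mergeable_pkts
 *   otherwise                -> virtio_recv_pkts
 * Tx is virtio_xmit_pkts_simple when hw->use_simple_tx, else
 * virtio_xmit_pkts. See virtio_dev_configure() for how the simple
 * flags get cleared.
 */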
/* Only support 1:1 queue/interrupt mapping so far.
 * TODO: support n:1 queue/interrupt mapping when there are limited number of
 * interrupt vectors (<N+1).
 */
static int
virtio_queues_bind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt binding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		dev->intr_handle->intr_vec[i] = i + 1;
		if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
						 VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set queue vector");
			return -EBUSY;
		}
	}

	return 0;
}

static void
virtio_queues_unbind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i)
		VTPCI_OPS(hw)->set_queue_irq(hw,
					     hw->vqs[i * VTNET_CQ],
					     VIRTIO_MSI_NO_VECTOR);
}

static int
virtio_configure_intr(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
		return -ENOTSUP;
	}

	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "Failed to create eventfd");
		return -1;
	}

	if (!dev->intr_handle->intr_vec) {
		dev->intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->max_queue_pairs * sizeof(int), 0);
		if (!dev->intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
				     hw->max_queue_pairs);
			return -ENOMEM;
		}
	}

	/* Re-register callback to update max_intr */
	rte_intr_callback_unregister(dev->intr_handle,
				     virtio_interrupt_handler,
				     dev);
	rte_intr_callback_register(dev->intr_handle,
				   virtio_interrupt_handler,
				   dev);

	/* DO NOT try to remove this! This function will enable msix, or QEMU
	 * will encounter SIGSEGV when DRIVER_OK is sent.
	 * And for legacy devices, this should be done before queue/vec binding
	 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
	 * (22) will be ignored.
	 */
	if (virtio_intr_enable(dev) < 0) {
		PMD_DRV_LOG(ERR, "interrupt enable failed");
		return -1;
	}

	if (virtio_queues_bind_intr(dev) < 0) {
		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
		return -1;
	}

	return 0;
}
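/*
 * Resulting MSI-X vector layout: vector 0 carries config-space/LSC
 * interrupts (assigned in virtio_dev_configure()), and vector i + 1
 * carries Rx queue i (assigned in virtio_queues_bind_intr() above).
 */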
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev = NULL;
	int ret;

	/* Reset the device although not necessary at startup */
	vtpci_reset(hw);

	if (hw->vqs) {
		virtio_dev_free_mbufs(eth_dev);
		virtio_free_queues(hw);
	}

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	if (!hw->virtio_user_dev) {
		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
		rte_eth_copy_pci_info(eth_dev, pci_dev);
	}

	/* Enable LSC only if the host supports both the status feature
	 * and MSI-X; otherwise disable it.
	 */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
	    hw->use_msix != VIRTIO_MSIX_NONE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	/* Setting up rx_header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, mtu),
				&config->mtu,
				sizeof(config->mtu));

			/*
			 * MTU value has already been checked at negotiation
			 * time, but check again in case it has changed since
			 * then, which should not happen.
			 */
			if (config->mtu < ETHER_MIN_MTU) {
				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
					     config->mtu);
				return -1;
			}

			hw->max_mtu = config->mtu;
			/* Set initial MTU to maximum one supported by vhost */
			eth_dev->data->mtu = config->mtu;

		} else {
			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
				VLAN_TAG_LEN - hw->vtnet_hdr_size;
		}

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
	}

	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;

	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
			return -1;
		}
	}

	vtpci_reinit_complete(hw);

	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

/*
 * Remap the PCI device again (IO port map for legacy device and
 * memory map for modern device), so that the secondary process
 * can have the PCI device initialized correctly.
 */
static int
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	if (hw->modern) {
		/*
		 * We don't have to re-parse the PCI config space, since
		 * rte_pci_map_device() makes sure the mapped address
		 * in the secondary process equals the one mapped in
		 * the primary process: an error is returned if that
		 * requirement is not met.
		 *
		 * That said, we could simply reuse all cap pointers
		 * (such as dev_cfg, common_cfg, etc.) parsed from the
		 * primary process, which is stored in shared memory.
		 */
		if (rte_pci_map_device(pci_dev)) {
			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
			return -1;
		}
	} else {
		if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
			return -1;
	}

	return 0;
}

static void
virtio_set_vtpci_ops(struct virtio_hw *hw)
{
#ifdef RTE_VIRTIO_USER
	if (hw->virtio_user_dev)
		VTPCI_OPS(hw) = &virtio_user_ops;
	else
#endif
	if (hw->modern)
		VTPCI_OPS(hw) = &modern_ops;
	else
		VTPCI_OPS(hw) = &legacy_ops;
}
/*
 * This function is based on probe() function in virtio_pci.c
 * It returns 0 on success.
 */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	int ret;

	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));

	eth_dev->dev_ops = &virtio_eth_dev_ops;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (!hw->virtio_user_dev) {
			ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
			if (ret)
				return ret;
		}

		virtio_set_vtpci_ops(hw);
		set_rxtx_funcs(eth_dev);

		return 0;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
				VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	hw->port_id = eth_dev->data->port_id;
	/* For virtio_user case the hw->virtio_user_dev is populated by
	 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
	 */
	if (!hw->virtio_user_dev) {
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret)
			goto out;
	}

	/* reset device and negotiate default features */
	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	if (ret < 0)
		goto out;

	/* Setup interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_register(eth_dev->intr_handle,
			virtio_interrupt_handler, eth_dev);

	return 0;

out:
	rte_free(eth_dev->data->mac_addrs);
	return ret;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* reset interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_unregister(eth_dev->intr_handle,
						virtio_interrupt_handler,
						eth_dev);
	if (eth_dev->device)
		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return 0;
}

static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
		eth_virtio_dev_init);
}

static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
}

static struct rte_pci_driver rte_virtio_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};

RTE_INIT(rte_virtio_pmd_init);
static void
rte_virtio_pmd_init(void)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return;
	}

	rte_pci_register(&rte_virtio_pmd);
}
/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint64_t rx_offloads = rxmode->offloads;
	uint64_t req_features;
	int ret;

	PMD_INIT_LOG(DEBUG, "configure");
	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;

	if (dev->data->dev_conf.intr_conf.rxq) {
		ret = virtio_init_device(dev, hw->req_guest_features);
		if (ret < 0)
			return ret;
	}

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		req_features |=
			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	/* if requested features changed, reinit the device */
	if (req_features != hw->req_guest_features) {
		ret = virtio_init_device(dev, req_features);
		if (ret < 0)
			return ret;
	}

	if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			    DEV_RX_OFFLOAD_TCP_CKSUM)) &&
		!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
		PMD_DRV_LOG(ERR,
			"rx checksum not available on this host");
		return -ENOTSUP;
	}

	if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
		(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
		PMD_DRV_LOG(ERR,
			"Large Receive Offload not available on this host");
		return -ENOTSUP;
	}

	/* start control queue */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_start(dev);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		hw->vlan_strip = 1;

	if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(ERR,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		/* Enable vector (0) for Link State Interrupt */
		if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
				VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	rte_spinlock_init(&hw->state_lock);

	hw->use_simple_rx = 1;
	hw->use_simple_tx = 1;

#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
		hw->use_simple_rx = 0;
		hw->use_simple_tx = 0;
	}
#endif
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		hw->use_simple_rx = 0;
		hw->use_simple_tx = 0;
	}

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_CKSUM))
		hw->use_simple_rx = 0;

	return 0;
}
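/*
 * Summary of the simple (vectorized) path selection above: both simple
 * paths are disabled on ARM without NEON and whenever mergeable Rx
 * buffers were negotiated; requesting Rx checksum offloads additionally
 * disables the simple Rx path (that path does no offload processing).
 */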
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq __rte_unused;
	struct virtio_hw *hw = dev->data->dev_private;
	int ret;

	/* Finish the initialization of the queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = virtio_dev_rx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = virtio_dev_tx_queue_setup_finish(dev, i);
		if (ret < 0)
			return ret;
	}

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}
	}

	/* Enable uio/vfio intr/eventfd mapping: although we already did that
	 * in device configure, it could have been unmapped while the device
	 * was stopped.
	 */
	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rxq) {
		virtio_intr_disable(dev);

		if (virtio_intr_enable(dev) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Notify the backend.
	 * Otherwise the tap backend might already have stopped its queue due
	 * to fullness, and the vhost backend would have no chance to be
	 * woken up.
	 */
	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (hw->max_queue_pairs > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		/* Flush the old packets */
		virtqueue_rxvq_flush(rxvq->vq);
		virtqueue_notify(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		virtqueue_notify(txvq->vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		VIRTQUEUE_DUMP(txvq->vq);
	}

	set_rxtx_funcs(dev);
	hw->started = 1;

	/* Initialize Link state */
	virtio_dev_link_update(dev, 0);

	return 0;
}

static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	const char *type __rte_unused;
	unsigned int i, mbuf_num = 0;
	struct virtqueue *vq;
	struct rte_mbuf *buf;
	int queue_type;

	if (hw->vqs == NULL)
		return;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		if (!vq)
			continue;

		queue_type = virtio_get_queue_type(hw, i);
		if (queue_type == VTNET_RQ)
			type = "rxq";
		else if (queue_type == VTNET_TQ)
			type = "txq";
		else
			continue;

		PMD_INIT_LOG(DEBUG,
			"Before freeing %s[%d] used and unused buf",
			type, i);
		VIRTQUEUE_DUMP(vq);

		while ((buf = virtqueue_detach_unused(vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG,
			"After freeing %s[%d] used and unused buf",
			type, i);
		VIRTQUEUE_DUMP(vq);
	}

	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
}

/*
 * Stop device: disable interrupt and mark link down
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "stop");

	rte_spinlock_lock(&hw->state_lock);
	if (intr_conf->lsc || intr_conf->rxq)
		virtio_intr_disable(dev);

	hw->started = 0;
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
	rte_spinlock_unlock(&hw->state_lock);
}
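/*
 * Virtio has no physical link: the speed/duplex reported below are fixed,
 * fake values (see also the speed_capa comment in virtio_dev_info_get()).
 * The status either comes from the VIRTIO_NET_F_STATUS config field or,
 * without that feature, is simply reported as up while the port is started.
 */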
static int
virtio_dev_link_update(struct rte_eth_dev *dev,
		__rte_unused int wait_to_complete)
{
	struct rte_eth_link link;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;

	if (hw->started == 0) {
		link.link_status = ETH_LINK_DOWN;
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = ETH_LINK_DOWN;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = ETH_LINK_UP;
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		link.link_status = ETH_LINK_UP;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint64_t offloads = rxmode->offloads;

	if (mask & ETH_VLAN_FILTER_MASK) {
		if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
				!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {

			PMD_DRV_LOG(NOTICE,
				"vlan filtering not available on this host");

			return -ENOTSUP;
		}
	}

	if (mask & ETH_VLAN_STRIP_MASK)
		hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	return 0;
}

static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	uint64_t tso_mask, host_features;
	struct virtio_hw *hw = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */

	dev_info->max_rx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
	dev_info->max_tx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
	};

	host_features = VTPCI_OPS(hw)->get_features(hw);
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		dev_info->rx_offload_capa |=
			DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM;
	}
	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
	if ((host_features & tso_mask) == tso_mask)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;

	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
				    DEV_TX_OFFLOAD_VLAN_INSERT;
	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;
	}
	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6);
	if ((hw->guest_features & tso_mask) == tso_mask)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
/*
 * It enables testpmd to collect per queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
	return 0;
}

RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(virtio_init_log);
static void
virtio_init_log(void)
{
	virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
	if (virtio_logtype_init >= 0)
		rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
	virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
	if (virtio_logtype_driver >= 0)
		rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
}