/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
				  __rte_unused int wait_to_complete);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static void virtio_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				  uint16_t vlan_id, int on);
static void virtio_mac_addr_add(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);

static int virtio_dev_queue_stats_mapping_set(
	__rte_unused struct rte_eth_dev *eth_dev,
	__rte_unused uint16_t queue_id,
	__rte_unused uint8_t stat_idx,
	__rte_unused uint8_t is_rx);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};

struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
	{"errors",                 offsetof(struct virtnet_tx, stats.errors)},
	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
			      sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
			      sizeof(rte_virtio_txq_stat_strings[0]))

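/*
 * Control-queue command helper. A rough sketch of the descriptor chain
 * that virtio_send_command() builds (the layout is enforced by qemu):
 *
 *   desc[head] -> ctrl header                  (device-readable)
 *   desc[...]  -> one descriptor per dlen[k]   (device-readable data)
 *   desc[last] -> one-byte ack/status          (device-writable)
 *
 * The command and its payload are staged in the control queue's header
 * memzone so the host can reach them by physical address; the function
 * then busy-waits (with usleep) until the device consumes the chain.
 */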
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
		    int *dlen, int pkt_num)
{
	uint32_t head, i;
	int k, sum = 0;
	virtio_net_ctrl_ack status = ~0;
	struct virtio_pmd_ctrl result;
	struct virtqueue *vq;

	ctrl->status = status;

	if (!cvq || !cvq->vq) {
		PMD_INIT_LOG(ERR, "Control queue is not supported.");
		return -1;
	}
	vq = cvq->vq;
	head = vq->vq_desc_head_idx;

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		     "vq->hw->cvq = %p vq = %p",
		     vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
		return -1;

	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
	       sizeof(struct virtio_pmd_ctrl));

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_ring.desc[head].next;

	for (k = 0; k < pkt_num; k++) {
		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_ring.desc[i].next;
	}

	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
		+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				      & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
		     vq->vq_free_cnt, vq->vq_desc_head_idx);

	memcpy(&result, cvq->virtio_net_hdr_mz->addr,
	       sizeof(struct virtio_pmd_ctrl));

	return result.status;
}

static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

	dlen[0] = sizeof(uint16_t);

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
			     "failed, this is too late now...");
		return -EINVAL;
	}

	return 0;
}

static void
virtio_dev_queue_release(void *queue __rte_unused)
{
	/* do nothing */
}

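/*
 * Virtqueue index layout assumed throughout this file: queue pair N is
 * vq 2*N (RX) and 2*N + 1 (TX); when VIRTIO_NET_F_CTRL_VQ is negotiated,
 * the control queue takes the last slot, index max_queue_pairs * 2.
 */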
static int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
{
	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
		return VTNET_CQ;
	else if (vtpci_queue_idx % 2 == 0)
		return VTNET_RQ;
	else
		return VTNET_TQ;
}

static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
	uint16_t nr_vq = hw->max_queue_pairs * 2;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		nr_vq += 1;

	return nr_vq;
}

static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);
}

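/*
 * virtio_init_queue() below performs, in order: read and validate the
 * queue size advertised by the device, allocate the virtqueue structure,
 * reserve a memzone for the vring itself, optionally reserve a header
 * memzone (per-packet tx headers and indirect descriptors for TX queues,
 * one page for the control queue), allocate the RX sw_ring, and finally
 * hand the queue to the transport via setup_queue().
 */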
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
	unsigned int vq_size, size;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq = NULL;
	struct virtnet_tx *txvq = NULL;
	struct virtnet_ctl *cvq = NULL;
	struct virtqueue *vq;
	size_t sz_hdr_mz = 0;
	void *sw_ring = NULL;
	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
	int ret;

	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * Always a power of 2; if 0 the virtqueue does not exist.
	 */
	vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "virtqueue does not exist");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "virtqueue size is not power of 2");
		return -EINVAL;
	}

	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
		 dev->data->port_id, vtpci_queue_idx);

	size = RTE_ALIGN_CEIL(sizeof(*vq) +
			      vq_size * sizeof(struct vq_desc_extra),
			      RTE_CACHE_LINE_SIZE);
	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 * and indirect ring elements
		 */
		sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		sz_hdr_mz = PAGE_SIZE;
	}

	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
				SOCKET_ID_ANY);
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "can not allocate vq");
		return -ENOMEM;
	}
	hw->vqs[vtpci_queue_idx] = vq;

	vq->hw = hw;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
		     size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
					 SOCKET_ID_ANY,
					 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			ret = -ENOMEM;
			goto fail_q_alloc;
		}
	}

	memset(mz->addr, 0, mz->len);

	vq->vq_ring_mem = mz->phys_addr;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
		     (uint64_t)mz->phys_addr);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
		     (uint64_t)(uintptr_t)mz->addr);

	virtio_init_vring(vq);

	if (sz_hdr_mz) {
		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
			 dev->data->port_id, vtpci_queue_idx);
		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
						     SOCKET_ID_ANY, 0,
						     RTE_CACHE_LINE_SIZE);
		if (hdr_mz == NULL) {
			if (rte_errno == EEXIST)
				hdr_mz = rte_memzone_lookup(vq_hdr_name);
			if (hdr_mz == NULL) {
				ret = -ENOMEM;
				goto fail_q_alloc;
			}
		}
	}

	if (queue_type == VTNET_RQ) {
		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
			       sizeof(vq->sw_ring[0]);

		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
					     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sw_ring) {
			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
			ret = -ENOMEM;
			goto fail_q_alloc;
		}

		vq->sw_ring = sw_ring;
		rxvq = &vq->rxq;
		rxvq->vq = vq;
		rxvq->port_id = dev->data->port_id;
		rxvq->mz = mz;
	} else if (queue_type == VTNET_TQ) {
		txvq = &vq->txq;
		txvq->vq = vq;
		txvq->port_id = dev->data->port_id;
		txvq->mz = mz;
		txvq->virtio_net_hdr_mz = hdr_mz;
		txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
	} else if (queue_type == VTNET_CQ) {
		cvq = &vq->cq;
		cvq->vq = vq;
		cvq->mz = mz;
		cvq->virtio_net_hdr_mz = hdr_mz;
		cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
		memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);

		hw->cvq = cvq;
	}

	/* For the virtio_user case (that is, when dev->pci_dev is NULL), we
	 * use the virtual address, and we need to properly set _offset_; see
	 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
	 */
	if (dev->pci_dev)
		vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
	else {
		vq->vq_ring_mem = (uintptr_t)mz->addr;
		vq->offset = offsetof(struct rte_mbuf, buf_addr);
		if (queue_type == VTNET_TQ)
			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
		else if (queue_type == VTNET_CQ)
			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
	}

	if (queue_type == VTNET_TQ) {
		struct virtio_tx_region *txr;
		unsigned int i;

		txr = hdr_mz->addr;
		memset(txr, 0, vq_size * sizeof(*txr));
		for (i = 0; i < vq_size; i++) {
			struct vring_desc *start_dp = txr[i].tx_indir;

			vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));

			/* first indirect descriptor is always the tx header */
			start_dp->addr = txvq->virtio_net_hdr_mem
				+ i * sizeof(*txr)
				+ offsetof(struct virtio_tx_region, tx_hdr);

			start_dp->len = hw->vtnet_hdr_size;
			start_dp->flags = VRING_DESC_F_NEXT;
		}
	}

	if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
		PMD_INIT_LOG(ERR, "setup_queue failed");
		return -EINVAL;
	}

	return 0;

fail_q_alloc:
	rte_free(sw_ring);
	rte_memzone_free(hdr_mz);
	rte_memzone_free(mz);
	rte_free(vq);

	return ret;
}

static void
virtio_free_queues(struct virtio_hw *hw)
{
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	struct virtqueue *vq;
	int queue_type;
	uint16_t i;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		if (!vq)
			continue;

		queue_type = virtio_get_queue_type(hw, i);
		if (queue_type == VTNET_RQ) {
			rte_free(vq->sw_ring);
			rte_memzone_free(vq->rxq.mz);
		} else if (queue_type == VTNET_TQ) {
			rte_memzone_free(vq->txq.mz);
			rte_memzone_free(vq->txq.virtio_net_hdr_mz);
		} else {
			rte_memzone_free(vq->cq.mz);
			rte_memzone_free(vq->cq.virtio_net_hdr_mz);
		}

		rte_free(vq);
	}

	rte_free(hw->vqs);
}

static int
virtio_alloc_queues(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	uint16_t i;
	int ret;

	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!hw->vqs) {
		PMD_INIT_LOG(ERR, "failed to allocate vqs");
		return -ENOMEM;
	}

	for (i = 0; i < nr_vq; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(hw);
			return ret;
		}
	}

	return 0;
}

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(hw);
}

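/*
 * The four RX-mode toggles below are thin wrappers around the
 * VIRTIO_NET_CTRL_RX control command class: one byte of payload,
 * 1 to enable or 0 to disable, pushed through virtio_send_command().
 * They quietly do nothing (beyond a log) when VIRTIO_NET_F_CTRL_RX
 * was not negotiated.
 */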
static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable promisc");
}

static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable promisc");
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}

#define VLAN_TAG_LEN	4    /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
				 hw->vtnet_hdr_size;
	uint32_t frame_size = mtu + ether_hdr_len;

	if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
			     ETHER_MIN_MTU,
			     VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
		return -EINVAL;
	}
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure           = virtio_dev_configure,
	.dev_start               = virtio_dev_start,
	.dev_stop                = virtio_dev_stop,
	.dev_close               = virtio_dev_close,
	.promiscuous_enable      = virtio_dev_promiscuous_enable,
	.promiscuous_disable     = virtio_dev_promiscuous_disable,
	.allmulticast_enable     = virtio_dev_allmulticast_enable,
	.allmulticast_disable    = virtio_dev_allmulticast_disable,
	.mtu_set                 = virtio_mtu_set,
	.dev_infos_get           = virtio_dev_info_get,
	.stats_get               = virtio_dev_stats_get,
	.xstats_get              = virtio_dev_xstats_get,
	.xstats_get_names        = virtio_dev_xstats_get_names,
	.stats_reset             = virtio_dev_stats_reset,
	.xstats_reset            = virtio_dev_stats_reset,
	.link_update             = virtio_dev_link_update,
	.rx_queue_setup          = virtio_dev_rx_queue_setup,
	.rx_queue_release        = virtio_dev_queue_release,
	.tx_queue_setup          = virtio_dev_tx_queue_setup,
	.tx_queue_release        = virtio_dev_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set         = virtio_vlan_filter_set,
	.mac_addr_add            = virtio_mac_addr_add,
	.mac_addr_remove         = virtio_mac_addr_remove,
	.mac_addr_set            = virtio_mac_addr_set,
};

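/*
 * The two helpers below update the link status with a 64-bit
 * compare-and-set; the casts assume struct rte_eth_link fits in
 * 64 bits. This keeps readers on the control path coherent with
 * the writer in the LSC interrupt handler.
 */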
static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				   struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		stats->opackets += txvq->stats.packets;
		stats->obytes += txvq->stats.bytes;
		stats->oerrors += txvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txvq->stats.packets;
			stats->q_obytes[i] = txvq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		stats->ipackets += rxvq->stats.packets;
		stats->ibytes += rxvq->stats.bytes;
		stats->ierrors += rxvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxvq->stats.packets;
			stats->q_ibytes[i] = rxvq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       __rte_unused unsigned limit)
{
	unsigned i;
	unsigned count = 0;
	unsigned t;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (xstats_names != NULL) {
		/* Note: limit checked in rte_eth_xstats_names() */

		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct virtqueue *rxvq = dev->data->rx_queues[i];
			if (rxvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "rx_q%u_%s", i,
					 rte_virtio_rxq_stat_strings[t].name);
				count++;
			}
		}

		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			struct virtqueue *txvq = dev->data->tx_queues[i];
			if (txvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "tx_q%u_%s", i,
					 rte_virtio_txq_stat_strings[t].name);
				count++;
			}
		}
		return count;
	}
	return nstats;
}

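/*
 * Extended stats retrieval relies on the rte_virtio_[rt]xq_stat_strings
 * tables: each entry pairs an xstat name with the byte offset of its
 * counter inside struct virtnet_rx/virtnet_tx, so virtio_dev_xstats_get()
 * can read every value with plain pointer arithmetic instead of one
 * case per statistic.
 */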
static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			count++;
		}
	}

	return count;
}

static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		txvq->stats.packets = 0;
		txvq->stats.bytes = 0;
		txvq->stats.errors = 0;
		txvq->stats.multicast = 0;
		txvq->stats.broadcast = 0;
		memset(txvq->stats.size_bins, 0,
		       sizeof(txvq->stats.size_bins[0]) * 8);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		rxvq->stats.packets = 0;
		rxvq->stats.bytes = 0;
		rxvq->stats.errors = 0;
		rxvq->stats.multicast = 0;
		rxvq->stats.broadcast = 0;
		memset(rxvq->stats.size_bins, 0,
		       sizeof(rxvq->stats.size_bins[0]) * 8);
	}
}

static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
			       offsetof(struct virtio_net_config, mac),
			       &hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
				      offsetof(struct virtio_net_config, mac),
				      &hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}

static void
virtio_mac_table_set(struct virtio_hw *hw,
		     const struct virtio_net_ctrl_mac *uc,
		     const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
}

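/*
 * The device has no "add/remove a single MAC" primitive; filtering is
 * programmed wholesale with VIRTIO_NET_CTRL_MAC_TABLE_SET. The helpers
 * below therefore rebuild the full unicast and multicast tables from
 * dev->data->mac_addrs on every change and push both at once through
 * virtio_mac_table_set().
 */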
static void
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		const struct ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct ether_addr *addrs = dev->data->mac_addrs;
	struct virtio_net_ctrl_mac *uc, *mc;
	unsigned int i;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		struct virtio_net_ctrl_mac *tbl;

		if (i == index || is_zero_ether_addr(addrs + i))
			continue;

		tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
		memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
		virtio_send_command(hw->cvq, &ctrl, &len, 1);
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		virtio_set_hwaddr(hw);
}

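/*
 * VLAN filtering follows the same control-queue pattern: a single
 * VIRTIO_NET_CTRL_VLAN_ADD/DEL command carrying the 16-bit VLAN id,
 * available only when VIRTIO_NET_F_CTRL_VLAN was negotiated.
 */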
static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int len;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
		return -ENOTSUP;

	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
	len = sizeof(vlan_id);

	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}

static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	/* Prepare guest_features: the features the driver wants to support */
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		     req_features);

	/* Read device(host) feature bits */
	host_features = hw->vtpci_ops->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		     host_features);

	/*
	 * Negotiate features: a subset of the device feature bits is
	 * written back as the guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		     hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}

/*
 * Process virtio config changed interrupt and call the callback
 * if link state changed.
 */
static void
virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			 void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
	}
}

static void
rx_func_get(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
	else
		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}

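/*
 * Device bring-up below follows the status-bit sequence from the
 * virtio spec: reset, ACKNOWLEDGE, DRIVER, feature negotiation (with
 * FEATURES_OK verified for modern devices in virtio_negotiate_features()),
 * config-space reads, queue allocation, and finally DRIVER_OK via
 * vtpci_reinit_complete().
 */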
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev = eth_dev->pci_dev;
	int ret;

	/* Reset the device, although not strictly necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	/* If host does not support status then disable LSC */
	if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	rx_func_get(eth_dev);

	/* Setting up rx_header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
			     config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
			     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
			     config->mac[0], config->mac[1],
			     config->mac[2], config->mac[3],
			     config->mac[4], config->mac[5]);
	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
	}

	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;
	vtpci_reinit_complete(hw);

	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			     eth_dev->data->port_id, pci_dev->id.vendor_id,
			     pci_dev->id.device_id);

	return 0;
}

/*
 * This function is based on probe() function in virtio_pci.c
 * It returns 0 on success.
 */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
	int ret;

	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM <
			 sizeof(struct virtio_net_hdr_mrg_rxbuf));

	eth_dev->dev_ops = &virtio_eth_dev_ops;
	eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		rx_func_get(eth_dev);
		return 0;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	pci_dev = eth_dev->pci_dev;

	if (pci_dev) {
		ret = vtpci_init(pci_dev, hw, &dev_flags);
		if (ret)
			return ret;
	}

	eth_dev->data->dev_flags = dev_flags;

	/* reset device and negotiate default features */
	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	if (ret < 0)
		return ret;

	/* Setup interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_register(&pci_dev->intr_handle,
					   virtio_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);
	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* reset interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     virtio_interrupt_handler,
					     eth_dev);
	rte_eal_pci_unmap_device(pci_dev);

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return 0;
}

static struct eth_driver rte_virtio_pmd = {
	.pci_drv = {
		.driver = {
			.name = "net_virtio",
		},
		.id_table = pci_id_virtio_map,
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_virtio_dev_init,
	.eth_dev_uninit = eth_virtio_dev_uninit,
	.dev_private_size = sizeof(struct virtio_hw),
};

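/*
 * Driver registration: RTE_INIT() runs rte_virtio_pmd_init() as a
 * constructor before main(), registering the PCI driver with the EAL;
 * matching devices are then probed during rte_eal_init(). The iopl()
 * setup is done up front because legacy virtio devices are driven
 * through port I/O, which is unavailable without it.
 */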
RTE_INIT(rte_virtio_pmd_init);
static void
rte_virtio_pmd_init(void)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return;
	}

	rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
}

/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint64_t req_features;
	int ret;

	PMD_INIT_LOG(DEBUG, "configure");
	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
	if (rxmode->hw_ip_checksum)
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
	if (rxmode->enable_lro)
		req_features |=
			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	/* if requested features changed, reinit the device */
	if (req_features != hw->req_guest_features) {
		ret = virtio_init_device(dev, req_features);
		if (ret < 0)
			return ret;
	}

	if (rxmode->hw_ip_checksum &&
	    !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
		PMD_DRV_LOG(NOTICE,
			    "rx ip checksum not available on this host");
		return -ENOTSUP;
	}

	if (rxmode->enable_lro &&
	    (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
	     !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
		PMD_DRV_LOG(NOTICE,
			    "lro not available on this host");
		return -ENOTSUP;
	}

	/* start control queue */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_start(dev);

	hw->vlan_strip = rxmode->hw_vlan_strip;

	if (rxmode->hw_vlan_filter
	    && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(NOTICE,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	return 0;
}

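/*
 * virtio_dev_start() below: validate/enable the LSC interrupt if
 * requested, publish the initial link state, program the number of
 * active queue pairs on multiqueue devices, and kick every RX queue
 * once so a backend that filled up while the port was down resumes
 * processing.
 */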
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq __rte_unused;
	struct virtio_hw *hw = dev->data->dev_private;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}

		if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Initialize Link state */
	virtio_dev_link_update(dev, 0);

	/*
	 * Notify the backend. Otherwise the tap backend might already have
	 * stopped its queue due to fullness, and the vhost backend would
	 * then have no chance to be woken up.
	 */
	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (hw->max_queue_pairs > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_notify(rxvq->vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		VIRTQUEUE_DUMP(txvq->vq);
	}

	return 0;
}

static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct rte_mbuf *buf;
	int i, mbuf_num = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		PMD_INIT_LOG(DEBUG,
			     "Before freeing rxq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP(rxvq->vq);

		PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
		while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing rxq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		PMD_INIT_LOG(DEBUG,
			     "Before freeing txq[%d] used and unused bufs",
			     i);
		VIRTQUEUE_DUMP(txvq->vq);

		mbuf_num = 0;
		while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing txq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP(txvq->vq);
	}
}

/*
 * Stop device: disable interrupt and mark link down
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	PMD_INIT_LOG(DEBUG, "stop");

	if (dev->data->dev_conf.intr_conf.lsc)
		rte_intr_disable(&dev->pci_dev->intr_handle);

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_write_link_status(dev, &link);
}

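/*
 * Link state reporting: the link is considered always up unless the
 * device offers VIRTIO_NET_F_STATUS, in which case the status field in
 * config space is consulted. Per the ethdev convention, the function
 * returns 0 when the link state changed and -1 when it did not.
 */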
static int
virtio_dev_link_update(struct rte_eth_dev *dev,
		       __rte_unused int wait_to_complete)
{
	struct rte_eth_link link, old;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_read_link_status(dev, &link);
	old = link;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = SPEED_10G;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = ETH_LINK_DOWN;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = ETH_LINK_UP;
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		link.link_status = ETH_LINK_UP;
	}
	virtio_dev_atomic_write_link_status(dev, &link);

	return (old.link_status == link.link_status) ? -1 : 0;
}

static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	uint64_t tso_mask;
	struct virtio_hw *hw = dev->data->dev_private;

	if (dev->pci_dev)
		dev_info->driver_name = dev->driver->pci_drv.driver.name;
	else
		dev_info->driver_name = "virtio_user PMD";
	dev_info->max_rx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
	dev_info->max_tx_queues =
		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
	};
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO;
	dev_info->tx_offload_capa = 0;

	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
		dev_info->tx_offload_capa |=
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;
	}

	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6);
	if ((hw->guest_features & tso_mask) == tso_mask)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}

/*
 * It enables testpmd to collect per queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
		__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
		__rte_unused uint8_t is_rx)
{
	return 0;
}

RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);