/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"

#include "rte_vhost_async.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY ((uint32_t)1 << 1)
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
/* Used to indicate that the feature negotiation failed */
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define MAX_PKT_BURST 32

#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			   sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif

/**
 * Structure that contains the buffer address, length and descriptor index
 * from the vring, used to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * Structure that contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};
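
/*
 * Illustrative note (based on how vhost.c consumes this cache): each entry
 * is assumed to cache one word of the guest's dirty-page log used during
 * live migration, with 'offset' naming the word within the log bitmap and
 * 'val' accumulating the dirty bits for that word. Up to VHOST_LOG_CACHE_NR
 * entries are gathered before __vhost_log_cache_sync() flushes them to the
 * shared log.
 */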

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};

/**
 * Structure that contains the variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint16_t size;

	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	/* Last used index we notified the frontend of. */
	uint16_t signalled_used;
	bool signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	bool enabled;
	bool access_ok;
	bool ready;

	rte_spinlock_t access_lock;


	union {
		struct vring_used_elem *shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t shadow_used_idx;
	/* Record the latest cache-aligned desc index for packed ring enqueue */
	uint16_t shadow_aligned_idx;
	/* Record the first dequeued desc index of the packed ring */
	uint16_t shadow_last_used_idx;

	uint16_t batch_copy_nb_elems;
	struct batch_copy_elem *batch_copy_elems;
	int numa_node;
	bool used_wrap_counter;
	bool avail_wrap_counter;

	/* Physical address of used ring, for logging */
	uint16_t log_cache_nb_elem;
	uint64_t log_guest_addr;
	struct log_cache_entry *log_cache;

	rte_rwlock_t iotlb_lock;
	rte_rwlock_t iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
	int iotlb_cache_nr;

	/* Used to notify the guest (trigger interrupt) */
	int callfd;
	/* Currently unused as polling mode is enabled */
	int kickfd;

	/* inflight shared memory info */
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t global_counter;

	/* operation callbacks for async DMA */
	struct rte_vhost_async_channel_ops async_ops;

	struct rte_vhost_iov_iter *it_pool;
	struct iovec *vec_pool;

	/* async data transfer status */
	struct async_inflight_info *async_pkts_info;
	uint16_t async_pkts_idx;
	uint16_t async_pkts_inflight_n;
	uint16_t async_last_pkts_n;
	union {
		struct vring_used_elem *async_descs_split;
		struct vring_used_elem_packed *async_buffers_packed;
	};
	union {
		uint16_t async_desc_idx_split;
		uint16_t async_buffer_idx_packed;
	};
	union {
		uint16_t last_async_desc_idx_split;
		uint16_t last_async_buffer_idx_packed;
	};

	/* vq async features */
	bool async_registered;

	int notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF	(-1)

	struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;

/* Virtio device status as per Virtio specification */
#define VIRTIO_DEVICE_STATUS_RESET		0x00
#define VIRTIO_DEVICE_STATUS_ACK		0x01
#define VIRTIO_DEVICE_STATUS_DRIVER		0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK		0x04
#define VIRTIO_DEVICE_STATUS_FEATURES_OK	0x08
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
#define VIRTIO_DEVICE_STATUS_FAILED		0x80
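
/*
 * Per the Virtio specification these bits accumulate: a well-behaved driver
 * walks the status through ACK -> DRIVER -> FEATURES_OK -> DRIVER_OK, while
 * DEV_NEED_RESET and FAILED flag error states. For example, a fully
 * initialised device is expected to report
 * (ACK | DRIVER | FEATURES_OK | DRIVER_OK) == 0x0f.
 */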

#define VHOST_MAX_VRING			0x100
#define VHOST_MAX_QUEUE_PAIRS		0x80

/* Declare IOMMU-related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))


struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};
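
/*
 * Illustrative note: the guest_pages array in struct virtio_net below is
 * expected to be kept sorted by guest_phys_addr; that is what allows
 * gpa_to_first_hpa() further down to switch from a linear scan to
 * bsearch() with guest_page_addrcmp() once the number of pages reaches
 * VHOST_BINARY_SEARCH_THRESH.
 */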

struct inflight_mem_info {
	int fd;
	void *addr;
	uint64_t size;
};

/**
 * Device structure that contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t features;
	uint64_t protocol_features;
	int vid;
	uint32_t flags;
	uint16_t vhost_hlen;
	/* to tell if we need to broadcast a rarp packet */
	int16_t broadcast_rarp;
	uint32_t nr_vring;
	int async_copy;

	/* Record the DMA map status for each region. */
	bool *async_map_status;

	int extbuf;
	int linearbuf;
	struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char ifname[IF_NAME_SZ];
	uint64_t log_size;
	uint64_t log_base;
	uint64_t log_addr;
	struct rte_ether_addr mac;
	uint16_t mtu;
	uint8_t status;

	struct vhost_device_ops const *notify_ops;

	uint32_t nr_guest_pages;
	uint32_t max_guest_pages;
	struct guest_page *guest_pages;

	int slave_req_fd;
	rte_spinlock_t slave_req_lock;

	int postcopy_ufd;
	int postcopy_listening;

	struct rte_vdpa_device *vdpa_dev;

	/* context data for the external message handlers */
	void *extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}
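
/*
 * Worked example of the packed ring wrap counters (illustrative): with
 * vq->size == 256 and last_used_idx == 250, vq_inc_last_used_packed(vq, 10)
 * leaves last_used_idx == 4 and flips used_wrap_counter. desc_is_avail()
 * then reports a descriptor as available only when its VRING_DESC_F_AVAIL
 * bit matches the current wrap counter while VRING_DESC_F_USED does not,
 * which is how the ring tells fresh entries apart from stale ones left
 * over from the previous lap.
 */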

void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}
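
/*
 * Typical usage of the helpers above (a sketch, mirroring the data path in
 * virtio_net.c): call vhost_log_cache_write()/vhost_log_cache_used_vring()
 * for every guest page dirtied while processing a burst, then
 * vhost_log_cache_sync() once before the guest is notified. The *_iova
 * variants are intended for addresses that still need IOTLB translation
 * when VIRTIO_F_IOMMU_PLATFORM has been negotiated. All of them reduce to
 * a single predictable branch when VHOST_F_LOG_ALL is not negotiated,
 * i.e. outside of live migration.
 */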

extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
		"VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...)			\
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
	 rte_log(RTE_LOG_ ## level, vhost_data_log_level,	\
		"VHOST_DATA : " fmt, ##args) :			\
	 0)

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
						const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}

static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
	uint64_t gpa_size, uint64_t *hpa_size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	*hpa_size = gpa_size;
	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			       sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			} else if (gpa < page->guest_phys_addr +
						page->size) {
				*hpa_size = page->guest_phys_addr +
					page->size - gpa;
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			}
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr) {
				if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				} else if (gpa < page->guest_phys_addr +
							page->size) {
					*hpa_size = page->guest_phys_addr +
						page->size - gpa;
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				}
			}
		}
	}

	*hpa_size = 0;
	return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	rte_iova_t hpa;
	uint64_t hpa_size;

	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
	return hpa_size == size ? hpa : 0;
}
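
/*
 * Illustrative note: gpa_to_hpa() only succeeds when the whole
 * [gpa, gpa + size) range is backed by a single contiguous host-physical
 * region; otherwise it returns 0. A caller that can handle split ranges
 * would instead loop on gpa_to_first_hpa(), roughly:
 *
 *	while (remaining != 0) {
 *		hpa = gpa_to_first_hpa(dev, gpa, remaining, &chunk);
 *		if (chunk == 0)
 *			break;	(unmapped hole)
 *		use [hpa, hpa + chunk), then gpa += chunk; remaining -= chunk;
 *	}
 */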

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len < r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}
	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have only one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented the index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
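
/*
 * Worked example (illustrative): with old = 10, new_idx = 12 and
 * event_idx = 11, (uint16_t)(12 - 11 - 1) == 0 is below
 * (uint16_t)(12 - 10) == 2, so the index the guest asked to be woken at
 * was just crossed and an interrupt is due. With event_idx = 12 the
 * left-hand side wraps to 0xffff and the test fails, so no event is
 * sent. The unsigned 16-bit arithmetic keeps the check correct across
 * index wrap-around.
 */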

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* Don't kick the guest if we don't reach the index specified by the guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */

		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

#endif /* _VHOST_NET_CDEV_H_ */