1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <stdbool.h> 7 #include <stdlib.h> 8 #include <linux/virtio_net.h> 9 10 #include <rte_mbuf.h> 11 #include <rte_memcpy.h> 12 #include <rte_vhost.h> 13 14 #include "main.h" 15 16 /* 17 * A very simple vhost-user net driver implementation, without 18 * any extra features being enabled, such as TSO and mrg-Rx. 19 */ 20 21 void 22 vs_vhost_net_setup(struct vhost_dev *dev) 23 { 24 uint16_t i; 25 int vid = dev->vid; 26 struct vhost_queue *queue; 27 int ret; 28 29 RTE_LOG(INFO, VHOST_CONFIG, 30 "setting builtin vhost-user net driver\n"); 31 32 rte_vhost_get_negotiated_features(vid, &dev->features); 33 if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) 34 dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); 35 else 36 dev->hdr_len = sizeof(struct virtio_net_hdr); 37 38 ret = rte_vhost_get_mem_table(vid, &dev->mem); 39 if (ret < 0) { 40 RTE_LOG(ERR, VHOST_CONFIG, "Failed to get " 41 "VM memory layout for device(%d)\n", vid); 42 return; 43 } 44 45 dev->nr_vrings = rte_vhost_get_vring_num(vid); 46 for (i = 0; i < dev->nr_vrings; i++) { 47 queue = &dev->queues[i]; 48 49 queue->last_used_idx = 0; 50 queue->last_avail_idx = 0; 51 rte_vhost_get_vhost_vring(vid, i, &queue->vr); 52 } 53 } 54 55 void 56 vs_vhost_net_remove(struct vhost_dev *dev) 57 { 58 free(dev->mem); 59 } 60 61 static __rte_always_inline int 62 enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr, 63 struct rte_mbuf *m, uint16_t desc_idx) 64 { 65 uint32_t desc_avail, desc_offset; 66 uint64_t desc_chunck_len; 67 uint32_t mbuf_avail, mbuf_offset; 68 uint32_t cpy_len; 69 struct vring_desc *desc; 70 uint64_t desc_addr, desc_gaddr; 71 struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0}; 72 /* A counter to avoid desc dead loop chain */ 73 uint16_t nr_desc = 1; 74 75 desc = &vr->desc[desc_idx]; 76 desc_chunck_len = desc->len; 77 desc_gaddr = desc->addr; 78 desc_addr = rte_vhost_va_from_guest_pa( 79 dev->mem, desc_gaddr, &desc_chunck_len); 80 /* 81 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid 82 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which 83 * otherwise stores offset on the stack instead of in a register. 84 */ 85 if (unlikely(desc->len < dev->hdr_len) || !desc_addr) 86 return -1; 87 88 rte_prefetch0((void *)(uintptr_t)desc_addr); 89 90 /* write virtio-net header */ 91 if (likely(desc_chunck_len >= dev->hdr_len)) { 92 *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr; 93 desc_offset = dev->hdr_len; 94 } else { 95 uint64_t len; 96 uint64_t remain = dev->hdr_len; 97 uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst; 98 uint64_t guest_addr = desc_gaddr; 99 100 while (remain) { 101 len = remain; 102 dst = rte_vhost_va_from_guest_pa(dev->mem, 103 guest_addr, &len); 104 if (unlikely(!dst || !len)) 105 return -1; 106 107 rte_memcpy((void *)(uintptr_t)dst, 108 (void *)(uintptr_t)src, 109 len); 110 111 remain -= len; 112 guest_addr += len; 113 src += len; 114 } 115 116 desc_chunck_len = desc->len - dev->hdr_len; 117 desc_gaddr += dev->hdr_len; 118 desc_addr = rte_vhost_va_from_guest_pa( 119 dev->mem, desc_gaddr, 120 &desc_chunck_len); 121 if (unlikely(!desc_addr)) 122 return -1; 123 124 desc_offset = 0; 125 } 126 127 desc_avail = desc->len - dev->hdr_len; 128 129 mbuf_avail = rte_pktmbuf_data_len(m); 130 mbuf_offset = 0; 131 while (mbuf_avail != 0 || m->next != NULL) { 132 /* done with current mbuf, fetch next */ 133 if (mbuf_avail == 0) { 134 m = m->next; 135 136 mbuf_offset = 0; 137 mbuf_avail = rte_pktmbuf_data_len(m); 138 } 139 140 /* done with current desc buf, fetch next */ 141 if (desc_avail == 0) { 142 if ((desc->flags & VRING_DESC_F_NEXT) == 0) { 143 /* Room in vring buffer is not enough */ 144 return -1; 145 } 146 if (unlikely(desc->next >= vr->size || 147 ++nr_desc > vr->size)) 148 return -1; 149 150 desc = &vr->desc[desc->next]; 151 desc_chunck_len = desc->len; 152 desc_gaddr = desc->addr; 153 desc_addr = rte_vhost_va_from_guest_pa( 154 dev->mem, desc_gaddr, &desc_chunck_len); 155 if (unlikely(!desc_addr)) 156 return -1; 157 158 desc_offset = 0; 159 desc_avail = desc->len; 160 } else if (unlikely(desc_chunck_len == 0)) { 161 desc_chunck_len = desc_avail; 162 desc_gaddr += desc_offset; 163 desc_addr = rte_vhost_va_from_guest_pa(dev->mem, 164 desc_gaddr, 165 &desc_chunck_len); 166 if (unlikely(!desc_addr)) 167 return -1; 168 169 desc_offset = 0; 170 } 171 172 cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail); 173 rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)), 174 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), 175 cpy_len); 176 177 mbuf_avail -= cpy_len; 178 mbuf_offset += cpy_len; 179 desc_avail -= cpy_len; 180 desc_offset += cpy_len; 181 desc_chunck_len -= cpy_len; 182 } 183 184 return 0; 185 } 186 187 uint16_t 188 vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id, 189 struct rte_mbuf **pkts, uint32_t count) 190 { 191 struct vhost_queue *queue; 192 struct rte_vhost_vring *vr; 193 uint16_t avail_idx, free_entries, start_idx; 194 uint16_t desc_indexes[MAX_PKT_BURST]; 195 uint16_t used_idx; 196 uint32_t i; 197 198 queue = &dev->queues[queue_id]; 199 vr = &queue->vr; 200 201 avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE); 202 start_idx = queue->last_used_idx; 203 free_entries = avail_idx - start_idx; 204 count = RTE_MIN(count, free_entries); 205 count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST); 206 if (count == 0) 207 return 0; 208 209 /* Retrieve all of the desc indexes first to avoid caching issues. */ 210 rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]); 211 for (i = 0; i < count; i++) { 212 used_idx = (start_idx + i) & (vr->size - 1); 213 desc_indexes[i] = vr->avail->ring[used_idx]; 214 vr->used->ring[used_idx].id = desc_indexes[i]; 215 vr->used->ring[used_idx].len = pkts[i]->pkt_len + 216 dev->hdr_len; 217 } 218 219 rte_prefetch0(&vr->desc[desc_indexes[0]]); 220 for (i = 0; i < count; i++) { 221 uint16_t desc_idx = desc_indexes[i]; 222 int err; 223 224 err = enqueue_pkt(dev, vr, pkts[i], desc_idx); 225 if (unlikely(err)) { 226 used_idx = (start_idx + i) & (vr->size - 1); 227 vr->used->ring[used_idx].len = dev->hdr_len; 228 } 229 230 if (i + 1 < count) 231 rte_prefetch0(&vr->desc[desc_indexes[i+1]]); 232 } 233 234 __atomic_add_fetch(&vr->used->idx, count, __ATOMIC_RELEASE); 235 queue->last_used_idx += count; 236 237 rte_vhost_vring_call(dev->vid, queue_id); 238 239 return count; 240 } 241 242 uint16_t 243 builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id, 244 struct rte_mbuf **pkts, uint32_t count) 245 { 246 return vs_enqueue_pkts(dev, queue_id, pkts, count); 247 } 248 249 static __rte_always_inline int 250 dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr, 251 struct rte_mbuf *m, uint16_t desc_idx, 252 struct rte_mempool *mbuf_pool) 253 { 254 struct vring_desc *desc; 255 uint64_t desc_addr, desc_gaddr; 256 uint32_t desc_avail, desc_offset; 257 uint64_t desc_chunck_len; 258 uint32_t mbuf_avail, mbuf_offset; 259 uint32_t cpy_len; 260 struct rte_mbuf *cur = m, *prev = m; 261 /* A counter to avoid desc dead loop chain */ 262 uint32_t nr_desc = 1; 263 264 desc = &vr->desc[desc_idx]; 265 if (unlikely((desc->len < dev->hdr_len)) || 266 (desc->flags & VRING_DESC_F_INDIRECT)) 267 return -1; 268 269 desc_chunck_len = desc->len; 270 desc_gaddr = desc->addr; 271 desc_addr = rte_vhost_va_from_guest_pa( 272 dev->mem, desc_gaddr, &desc_chunck_len); 273 if (unlikely(!desc_addr)) 274 return -1; 275 276 /* 277 * We don't support ANY_LAYOUT, neither VERSION_1, meaning 278 * a Tx packet from guest must have 2 desc buffers at least: 279 * the first for storing the header and the others for 280 * storing the data. 281 * 282 * And since we don't support TSO, we could simply skip the 283 * header. 284 */ 285 desc = &vr->desc[desc->next]; 286 desc_chunck_len = desc->len; 287 desc_gaddr = desc->addr; 288 desc_addr = rte_vhost_va_from_guest_pa( 289 dev->mem, desc_gaddr, &desc_chunck_len); 290 if (unlikely(!desc_addr)) 291 return -1; 292 rte_prefetch0((void *)(uintptr_t)desc_addr); 293 294 desc_offset = 0; 295 desc_avail = desc->len; 296 nr_desc += 1; 297 298 mbuf_offset = 0; 299 mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; 300 while (1) { 301 cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail); 302 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, 303 mbuf_offset), 304 (void *)((uintptr_t)(desc_addr + desc_offset)), 305 cpy_len); 306 307 mbuf_avail -= cpy_len; 308 mbuf_offset += cpy_len; 309 desc_avail -= cpy_len; 310 desc_offset += cpy_len; 311 desc_chunck_len -= cpy_len; 312 313 /* This desc reaches to its end, get the next one */ 314 if (desc_avail == 0) { 315 if ((desc->flags & VRING_DESC_F_NEXT) == 0) 316 break; 317 318 if (unlikely(desc->next >= vr->size || 319 ++nr_desc > vr->size)) 320 return -1; 321 desc = &vr->desc[desc->next]; 322 323 desc_chunck_len = desc->len; 324 desc_gaddr = desc->addr; 325 desc_addr = rte_vhost_va_from_guest_pa( 326 dev->mem, desc_gaddr, &desc_chunck_len); 327 if (unlikely(!desc_addr)) 328 return -1; 329 rte_prefetch0((void *)(uintptr_t)desc_addr); 330 331 desc_offset = 0; 332 desc_avail = desc->len; 333 } else if (unlikely(desc_chunck_len == 0)) { 334 desc_chunck_len = desc_avail; 335 desc_gaddr += desc_offset; 336 desc_addr = rte_vhost_va_from_guest_pa(dev->mem, 337 desc_gaddr, 338 &desc_chunck_len); 339 if (unlikely(!desc_addr)) 340 return -1; 341 342 desc_offset = 0; 343 } 344 345 /* 346 * This mbuf reaches to its end, get a new one 347 * to hold more data. 348 */ 349 if (mbuf_avail == 0) { 350 cur = rte_pktmbuf_alloc(mbuf_pool); 351 if (unlikely(cur == NULL)) { 352 RTE_LOG(ERR, VHOST_DATA, "Failed to " 353 "allocate memory for mbuf.\n"); 354 return -1; 355 } 356 357 prev->next = cur; 358 prev->data_len = mbuf_offset; 359 m->nb_segs += 1; 360 m->pkt_len += mbuf_offset; 361 prev = cur; 362 363 mbuf_offset = 0; 364 mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM; 365 } 366 } 367 368 prev->data_len = mbuf_offset; 369 m->pkt_len += mbuf_offset; 370 371 return 0; 372 } 373 374 static uint16_t 375 vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id, 376 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) 377 { 378 struct vhost_queue *queue; 379 struct rte_vhost_vring *vr; 380 uint32_t desc_indexes[MAX_PKT_BURST]; 381 uint32_t used_idx; 382 uint32_t i = 0; 383 uint16_t free_entries; 384 uint16_t avail_idx; 385 386 queue = &dev->queues[queue_id]; 387 vr = &queue->vr; 388 389 free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) - 390 queue->last_avail_idx; 391 if (free_entries == 0) 392 return 0; 393 394 /* Prefetch available and used ring */ 395 avail_idx = queue->last_avail_idx & (vr->size - 1); 396 used_idx = queue->last_used_idx & (vr->size - 1); 397 rte_prefetch0(&vr->avail->ring[avail_idx]); 398 rte_prefetch0(&vr->used->ring[used_idx]); 399 400 count = RTE_MIN(count, MAX_PKT_BURST); 401 count = RTE_MIN(count, free_entries); 402 403 if (unlikely(count == 0)) 404 return 0; 405 406 /* 407 * Retrieve all of the head indexes first and pre-update used entries 408 * to avoid caching issues. 409 */ 410 for (i = 0; i < count; i++) { 411 avail_idx = (queue->last_avail_idx + i) & (vr->size - 1); 412 used_idx = (queue->last_used_idx + i) & (vr->size - 1); 413 desc_indexes[i] = vr->avail->ring[avail_idx]; 414 415 vr->used->ring[used_idx].id = desc_indexes[i]; 416 vr->used->ring[used_idx].len = 0; 417 } 418 419 /* Prefetch descriptor index. */ 420 rte_prefetch0(&vr->desc[desc_indexes[0]]); 421 for (i = 0; i < count; i++) { 422 int err; 423 424 if (likely(i + 1 < count)) 425 rte_prefetch0(&vr->desc[desc_indexes[i + 1]]); 426 427 pkts[i] = rte_pktmbuf_alloc(mbuf_pool); 428 if (unlikely(pkts[i] == NULL)) { 429 RTE_LOG(ERR, VHOST_DATA, 430 "Failed to allocate memory for mbuf.\n"); 431 break; 432 } 433 434 err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool); 435 if (unlikely(err)) { 436 rte_pktmbuf_free(pkts[i]); 437 break; 438 } 439 440 } 441 442 queue->last_avail_idx += i; 443 queue->last_used_idx += i; 444 445 __atomic_add_fetch(&vr->used->idx, i, __ATOMIC_ACQ_REL); 446 447 rte_vhost_vring_call(dev->vid, queue_id); 448 449 return i; 450 } 451 452 uint16_t 453 builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id, 454 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) 455 { 456 return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count); 457 } 458