/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"

/*
 * A very simple vhost-user net driver implementation, without
 * any extra features being enabled, such as TSO and mrg-Rx.
 */

void
vs_vhost_net_setup(struct vhost_dev *dev)
{
	uint16_t i;
	int vid = dev->vid;
	struct vhost_queue *queue;

	RTE_LOG(INFO, VHOST_CONFIG,
		"setting builtin vhost-user net driver\n");

	rte_vhost_get_negotiated_features(vid, &dev->features);
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
		dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		dev->hdr_len = sizeof(struct virtio_net_hdr);

	rte_vhost_get_mem_table(vid, &dev->mem);

	dev->nr_vrings = rte_vhost_get_vring_num(vid);
	for (i = 0; i < dev->nr_vrings; i++) {
		queue = &dev->queues[i];

		queue->last_used_idx = 0;
		queue->last_avail_idx = 0;
		rte_vhost_get_vhost_vring(vid, i, &queue->vr);
	}
}

void
vs_vhost_net_remove(struct vhost_dev *dev)
{
	free(dev->mem);
}
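/*
 * Copy one host mbuf chain into the guest descriptor chain that starts at
 * desc_idx, writing a zeroed virtio-net header first. Returns 0 on success,
 * -1 if the descriptor chain is malformed or too small for the packet.
 */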
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx)
{
	uint32_t desc_avail, desc_offset;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr;
	struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
	/* A counter to avoid a dead loop in the desc chain */
	uint16_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
	/*
	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
	 * otherwise stores offset on the stack instead of in a register.
	 */
	if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	/* write virtio-net header */
	*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;

	desc_offset = dev->hdr_len;
	desc_avail = desc->len - dev->hdr_len;

	mbuf_avail = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Not enough room in the vring buffer */
				return -1;
			}
			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;

			desc = &vr->desc[desc->next];
			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
			desc_avail = desc->len;
		}

		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
			cpy_len);

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail -= cpy_len;
		desc_offset += cpy_len;
	}

	return 0;
}
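/*
 * Enqueue a burst of mbufs into the Rx vring of the given queue and kick
 * the guest if it asked for interrupts. Returns the number of entries
 * added to the used ring.
 */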
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	uint16_t used_idx;
	uint32_t i;

	queue = &dev->queues[queue_id];
	vr = &queue->vr;

	avail_idx = *((volatile uint16_t *)&vr->avail->idx);
	start_idx = queue->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[used_idx];
		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = pkts[i]->pkt_len +
			dev->hdr_len;
	}

	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
		if (unlikely(err)) {
			used_idx = (start_idx + i) & (vr->size - 1);
			vr->used->ring[used_idx].len = dev->hdr_len;
		}

		if (i + 1 < count)
			rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
	}

	rte_smp_wmb();

	*(volatile uint16_t *)&vr->used->idx += count;
	queue->last_used_idx += count;

	/* flush used->idx update before we read avail->flags. */
	rte_mb();

	/* Kick the guest if necessary. */
	if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vr->callfd >= 0))
		eventfd_write(vr->callfd, (eventfd_t)1);

	return count;
}
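/*
 * Copy one packet from the guest descriptor chain starting at desc_idx into
 * the mbuf chain headed by 'm', skipping the virtio-net header and
 * allocating extra mbufs from 'mbuf_pool' as needed. Returns 0 on success,
 * -1 on a malformed chain or mbuf allocation failure.
 */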
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx,
	    struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr;
	uint32_t desc_avail, desc_offset;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	/* A counter to avoid a dead loop in the desc chain */
	uint32_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	if (unlikely((desc->len < dev->hdr_len)) ||
			(desc->flags & VRING_DESC_F_INDIRECT))
		return -1;

	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
	if (unlikely(!desc_addr))
		return -1;

	/*
	 * We don't support ANY_LAYOUT, nor VERSION_1, meaning a Tx packet
	 * from the guest must have at least 2 desc buffers: the first for
	 * storing the header and the others for storing the data.
	 *
	 * And since we don't support TSO, we can simply skip the header.
	 */
	desc = &vr->desc[desc->next];
	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
	if (unlikely(!desc_addr))
		return -1;
	rte_prefetch0((void *)(uintptr_t)desc_addr);

	desc_offset = 0;
	desc_avail = desc->len;
	nr_desc += 1;

	mbuf_offset = 0;
	mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						   mbuf_offset),
			(void *)((uintptr_t)(desc_addr + desc_offset)),
			cpy_len);

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail -= cpy_len;
		desc_offset += cpy_len;

		/* This desc reaches its end, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;
			desc = &vr->desc[desc->next];

			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
			if (unlikely(!desc_addr))
				return -1;
			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail = desc->len;
		}

		/*
		 * This mbuf reaches its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				return -1;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len += mbuf_offset;

	return 0;
}

uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	queue = &dev->queues[queue_id];
	vr = &queue->vr;

	free_entries = *((volatile uint16_t *)&vr->avail->idx) -
			queue->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/* Prefetch available and used ring */
	avail_idx = queue->last_avail_idx & (vr->size - 1);
	used_idx = queue->last_used_idx & (vr->size - 1);
	rte_prefetch0(&vr->avail->ring[avail_idx]);
	rte_prefetch0(&vr->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);

	if (unlikely(count == 0))
		return 0;

	/*
	 * Retrieve all of the head indexes first and pre-update used entries
	 * to avoid caching issues.
	 */
	for (i = 0; i < count; i++) {
		avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
		used_idx = (queue->last_used_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[avail_idx];

		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = 0;
	}

	/* Prefetch descriptor index. */
	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
	}

	queue->last_avail_idx += i;
	queue->last_used_idx += i;
	rte_smp_wmb();
	rte_smp_rmb();

	vr->used->idx += i;

	if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vr->callfd >= 0))
		eventfd_write(vr->callfd, (eventfd_t)1);

	return i;
}