/*	$OpenBSD: virtio.c,v 1.1 2017/01/21 11:23:01 reyk Exp $	*/
/*	$NetBSD: virtio.c,v 1.3 2011/11/02 23:05:52 njoly Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/malloc.h>

#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>

#define MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */

#if VIRTIO_DEBUG
#define VIRTIO_ASSERT(x)	KASSERT(x)
#else
#define VIRTIO_ASSERT(x)
#endif

void		virtio_init_vq(struct virtio_softc *, struct virtqueue *, int);
void		vq_free_entry(struct virtqueue *, struct vq_entry *);
struct vq_entry	*vq_alloc_entry(struct virtqueue *);

struct cfdriver virtio_cd = {
	NULL, "virtio", DV_DULL
};

#define virtio_set_status(sc, s)	(sc)->sc_ops->set_status(sc, s)
#define virtio_device_reset(sc)		virtio_set_status((sc), 0)

static const char * const virtio_device_name[] = {
	"Unknown (0)",		/* 0 */
	"Network",		/* 1 */
	"Block",		/* 2 */
	"Console",		/* 3 */
	"Entropy",		/* 4 */
	"Memory Balloon",	/* 5 */
	"IO Memory",		/* 6 */
	"Rpmsg",		/* 7 */
	"SCSI host",		/* 8 */
	"9P Transport",		/* 9 */
	"mac80211 wlan"		/* 10 */
};
#define NDEVNAMES	(sizeof(virtio_device_name) / sizeof(char *))

static const struct virtio_feature_name transport_feature_names[] = {
	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty" },
	{ VIRTIO_F_RING_INDIRECT_DESC,	"RingIndirectDesc" },
	{ VIRTIO_F_RING_EVENT_IDX,	"RingEventIdx" },
	{ VIRTIO_F_BAD_FEATURE,		"BadFeature" },
	{ 0,				NULL }
};

const char *
virtio_device_string(int id)
{
	return id < NDEVNAMES ? virtio_device_name[id] : "Unknown";
}
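
/*
 * Sketch: a transport front end would typically use virtio_device_string()
 * when announcing the probed child device at attach time.  The exact printf
 * and softc member shown here are illustrative only, not part of this file:
 *
 *	printf(": %s\n", virtio_device_string(vsc->sc_childdevid));
 */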

void
virtio_log_features(uint32_t host, uint32_t neg,
    const struct virtio_feature_name *guest_feature_names)
{
	const struct virtio_feature_name *namep;
	int i;
	char c;
	uint32_t bit;

	for (i = 0; i < 32; i++) {
		if (i == 30) {
			/*
			 * VIRTIO_F_BAD_FEATURE is only used for
			 * checking correct negotiation
			 */
			continue;
		}
		bit = 1 << i;
		if ((host & bit) == 0)
			continue;
		namep = (i < 24) ? guest_feature_names :
		    transport_feature_names;
		while (namep->bit && namep->bit != bit)
			namep++;
		c = (neg & bit) ? '+' : '-';
		if (namep->name)
			printf(" %c%s", c, namep->name);
		else
			printf(" %cUnknown(%d)", c, i);
	}
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = virtio_read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    sc->sc_dev.dv_xname, vq->vq_index);
		}
		virtio_init_vq(sc, vq, 1);
		virtio_setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}
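
/*
 * Sketch of the sequence described above, as a hypothetical child driver
 * "foo" might run it when resetting its device (the foo_* helpers and the
 * saved feature word are assumptions, not part of this file):
 *
 *	virtio_reset(vsc);
 *	foo_drain_queues(sc);		// virtio_dequeue() is still allowed
 *	virtio_reinit_start(vsc);	// dequeue now prohibited
 *	virtio_negotiate_features(vsc, sc->sc_saved_features);
 *	foo_init_config(sc);		// device specific re-initialization
 *	virtio_reinit_end(vsc);		// enqueue allowed again
 */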

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc) * vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_availoffset,
	    offsetof(struct vring_avail, ring) + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, vq->vq_usedoffset,
	    offsetof(struct vring_used, ring) + vq->vq_num *
	    sizeof(struct vring_used_elem), ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset +
	    sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, offset,
	    sizeof(struct vring_desc) * vq->vq_maxnsegs, ops);
}

/*
 * Scan the vqs, bus_dmamap_sync the rings (not the payload), and call
 * (*vq_done)() if some entries have been consumed.
 * For use in transport specific irq handlers.
 */
int
virtio_check_vqs(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	/* going backwards is better for if_vio */
	for (i = sc->sc_nvqs - 1; i >= 0; i--) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Initialize vq structure.
 */
void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, int reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs - 1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, &vq->vq_entries[i],
		    qe_list);
		vq->vq_entries[i].qe_index = i;
	}

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued = 1;
}
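
/*
 * Sketch: a transport specific interrupt handler would typically just call
 * virtio_check_vqs() after acknowledging the interrupt; each queue's
 * (*vq_done)() callback then drains its used ring (see virtio_dequeue()
 * below).  "foo_isr_ack" is a hypothetical transport helper:
 *
 *	int
 *	foo_intr(void *arg)
 *	{
 *		struct virtio_softc *vsc = arg;
 *
 *		if (!foo_isr_ack(vsc))		// hypothetical: not ours
 *			return 0;
 *		return virtio_check_vqs(vsc);
 *	}
 */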

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n) + (VIRTIO_PAGE_SIZE - 1)) &	\
				    ~(VIRTIO_PAGE_SIZE - 1))

	memset(vq, 0, sizeof(*vq));

	vq_size = virtio_read_queue_size(sc, index);
	if (vq_size == 0) {
		printf("virtqueue does not exist, index %d for %s\n", index,
		    name);
		goto err;
	}
	if (((vq_size - 1) & vq_size) != 0)
		panic("vq_size not power of two: %d", vq_size);

	hdrlen = (sc->sc_features & VIRTIO_F_RING_EVENT_IDX) ? 3 : 2;

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size
	    + sizeof(uint16_t) * (hdrlen + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
	    + sizeof(struct vring_used_elem) * vq_size);
	/* allocsize3: indirect table */
	/*
	 * XXX: This is rather inefficient.  In practice only a fraction of
	 * XXX: this memory will be used.
	 */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("virtqueue %d for %s allocation failed, error %d\n",
		    index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    (caddr_t *)&vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("virtqueue %d for %s map failed, error %d\n", index,
		    name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		printf("virtqueue %d for %s dmamap creation failed, "
		    "error %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap, vq->vq_vaddr,
	    allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("virtqueue %d for %s dmamap load failed, error %d\n",
		    index, name, r);
		goto err;
	}

	virtio_setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_mask = vq_size - 1;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc) * vq_size;
	vq->vq_avail = (struct vring_avail *)(((char *)vq->vq_desc) +
	    vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (struct vring_used *)(((char *)vq->vq_desc) +
	    vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void *)(((char *)vq->vq_desc) +
		    vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = mallocarray(vq_size, sizeof(struct vq_entry),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, 0);

#if VIRTIO_DEBUG
	printf("\nallocated %u bytes for virtqueue %d for %s, size %d\n",
	    allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		printf("using %d bytes (%d entries) of indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);
#endif
	return 0;

err:
	virtio_setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
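
/*
 * Sketch: how a child driver would typically set up a queue at attach time.
 * The foo_* names, queue index 0 and the segment limit are made up for
 * illustration:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS, FOO_MAX_SEGS,
 *	    "foo requests") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = foo_vq_done;    // called from virtio_check_vqs()
 *	virtio_start_vq_intr(vsc, &sc->sc_vq);
 */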

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    sc->sc_dev.dv_xname, vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	virtio_setup_queue(sc, vq->vq_index, 0);

	free(vq->vq_entries, M_DEVBUF, 0);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return 0;
}

/*
 * Free descriptor management.
 */
struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	if (SIMPLEQ_EMPTY(&vq->vq_freelist))
		return NULL;
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);

	return qe;
}

void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> number of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)					// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs + 1);
 *						// ^ +1 for command
 *	if (r) {				// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;			// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot], ... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot], ...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], 0);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, 1);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	VIRTIO_ASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 * Calls virtio_enqueue_abort() on failure.
 */
int
virtio_enqueue_reserve(struct virtqueue *vq, int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	VIRTIO_ASSERT(qe1->qe_next == -1);
	VIRTIO_ASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) && (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr +
		    vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc) * vq->vq_maxnsegs *
		    qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs - 1; i++)
			vd[i].flags = VRING_DESC_F_NEXT;
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtqueue *vq, int slot, bus_dmamap_t dmamap, int write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	VIRTIO_ASSERT(s >= 0);
	VIRTIO_ASSERT(dmamap->dm_nsegs > 0);
	if (dmamap->dm_nsegs > vq->vq_maxnsegs) {
#if VIRTIO_DEBUG
		for (i = 0; i < dmamap->dm_nsegs; i++) {
			printf(" %d (%d): %p %lx\n", i, write,
			    (void *)dmamap->dm_segs[i].ds_addr,
			    dmamap->dm_segs[i].ds_len);
		}
#endif
		panic("dmamap->dm_nsegs %d > vq->vq_maxnsegs %d\n",
		    dmamap->dm_nsegs, vq->vq_maxnsegs);
	}

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtqueue *vq, int slot, bus_dmamap_t dmamap,
    bus_addr_t start, bus_size_t len, int write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	VIRTIO_ASSERT(s >= 0);
	/* XXX todo: handle more segments */
	VIRTIO_ASSERT(dmamap->dm_nsegs == 1);
	VIRTIO_ASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

static void
publish_avail_idx(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);

	virtio_membar_producer();
	vq->vq_avail->idx = vq->vq_avail_idx;
	vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	vq->vq_queued = 1;
}

/*
 * enqueue_commit: add it to the avail ring.
 */
void
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0)
		goto notify;
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	vq->vq_avail->ring[(vq->vq_avail_idx++) & vq->vq_mask] = slot;

notify:
	if (notifynow) {
		if (vq->vq_owner->sc_features & VIRTIO_F_RING_EVENT_IDX) {
			uint16_t o = vq->vq_avail->idx;
			uint16_t n = vq->vq_avail_idx;
			uint16_t t;

			publish_avail_idx(sc, vq);

			virtio_membar_sync();
			t = VQ_AVAIL_EVENT(vq) + 1;
			if ((uint16_t)(n - t) < (uint16_t)(n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			publish_avail_idx(sc, vq);

			virtio_membar_sync();
			if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
	}
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from the used ring; the dmamap_sync for the
 * uring has already been done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	usedidx = vq->vq_used_idx++;
	usedidx &= vq->vq_mask;

	virtio_membar_consumer();
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete the dequeue; the slot is recycled for future use.
 * If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
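
/*
 * Sketch: a (*vq_done)() callback usually drains the used ring with
 * virtio_dequeue()/virtio_dequeue_commit() in a loop.  The foo_* names and
 * the shape of the driver softc are hypothetical:
 *
 *	int
 *	foo_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		struct foo_softc *sc = (struct foo_softc *)vsc->sc_child;
 *		int slot, len, r = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			bus_dmamap_sync(vsc->sc_dmat, sc->sc_payload[slot],
 *			    0, len, BUS_DMASYNC_POSTREAD);
 *			foo_complete(sc, slot, len);	// hypothetical
 *			virtio_dequeue_commit(vq, slot);
 *			r = 1;
 *		}
 *		return r;
 *	}
 */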

/*
 * Increase the event index in order to delay interrupts.
 * Returns 0 on success; returns 1 if the used ring has already advanced
 * too far, and the caller must process the queue again (otherwise, no
 * more interrupts will happen).
 */
int
virtio_postpone_intr(struct virtqueue *vq, uint16_t nslots)
{
	uint16_t idx;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	VQ_USED_EVENT(vq) = idx;
	virtio_membar_sync();

	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	if (nslots < virtio_nused(vq))
		return 1;

	return 0;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)(vq->vq_avail->idx - vq->vq_used_idx);

	return virtio_postpone_intr(vq, nslots);
}

/*
 * Start/stop vq interrupt. No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half
		 * the possible value. This won't happen soon and
		 * is far enough in the past to not trigger a spurious
		 * interrupt.
		 */
		VQ_USED_EVENT(vq) = vq->vq_used_idx + 0x8000;
	} else {
		vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	/*
	 * If the event index feature is negotiated, enabling interrupts
	 * is done by setting the latest consumed index in the used_event
	 * field.
	 */
	if (sc->sc_features & VIRTIO_F_RING_EVENT_IDX)
		VQ_USED_EVENT(vq) = vq->vq_used_idx;
	else
		vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	virtio_membar_sync();

	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	if (vq->vq_used_idx != vq->vq_used->idx)
		return 1;

	return 0;
}

/*
 * Returns the number of slots in the used ring available to
 * be supplied to the avail ring.
 */
int
virtio_nused(struct virtqueue *vq)
{
	uint16_t n;

	n = (uint16_t)(vq->vq_used->idx - vq->vq_used_idx);
	VIRTIO_ASSERT(n <= vq->vq_num);

	return n;
}

#if VIRTIO_DEBUG
void
virtio_vq_dump(struct virtqueue *vq)
{
	/* Common fields */
	printf(" + vq num: %d\n", vq->vq_num);
	printf(" + vq mask: 0x%X\n", vq->vq_mask);
	printf(" + vq index: %d\n", vq->vq_index);
	printf(" + vq used idx: %d\n", vq->vq_used_idx);
	printf(" + vq avail idx: %d\n", vq->vq_avail_idx);
	printf(" + vq queued: %d\n", vq->vq_queued);
	/* Avail ring fields */
	printf(" + avail flags: 0x%X\n", vq->vq_avail->flags);
	printf(" + avail idx: %d\n", vq->vq_avail->idx);
	printf(" + avail event: %d\n", VQ_AVAIL_EVENT(vq));
	/* Used ring fields */
	printf(" + used flags: 0x%X\n", vq->vq_used->flags);
	printf(" + used idx: %d\n", vq->vq_used->idx);
	printf(" + used event: %d\n", VQ_USED_EVENT(vq));
	printf(" +++++++++++++++++++++++++++\n");
}
#endif
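
/*
 * Sketch: a driver that wants fewer completion interrupts can, from its
 * (*vq_done)() handler, drain the used ring and then postpone the next
 * interrupt instead of leaving it enabled.  A return value of 1 means the
 * device has already advanced past the new event index, so the ring must be
 * drained again (foo_drain() is hypothetical):
 *
 *	do {
 *		foo_drain(sc, vq);	// virtio_dequeue()/_commit() loop
 *	} while (virtio_postpone_intr_smart(vq));
 */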