1 /* $NetBSD: ld_virtio.c,v 1.35 2024/06/12 16:51:53 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2010 Minoura Makoto. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.35 2024/06/12 16:51:53 riastradh Exp $"); 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/buf.h> 35 #include <sys/bufq.h> 36 #include <sys/bus.h> 37 #include <sys/device.h> 38 #include <sys/disk.h> 39 #include <sys/mutex.h> 40 #include <sys/module.h> 41 42 #include <dev/ldvar.h> 43 #include <dev/pci/virtioreg.h> 44 #include <dev/pci/virtiovar.h> 45 46 #include "ioconf.h" 47 48 /* 49 * ld_virtioreg: 50 */ 51 /* Configuration registers */ 52 #define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */ 53 #define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */ 54 #define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */ 55 #define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */ 56 #define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */ 57 #define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */ 58 #define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */ 59 #define VIRTIO_BLK_CONFIG_WRITEBACK 32 /* 8bit */ 60 61 /* Feature bits */ 62 #define VIRTIO_BLK_F_BARRIER (1<<0) 63 #define VIRTIO_BLK_F_SIZE_MAX (1<<1) 64 #define VIRTIO_BLK_F_SEG_MAX (1<<2) 65 #define VIRTIO_BLK_F_GEOMETRY (1<<4) 66 #define VIRTIO_BLK_F_RO (1<<5) 67 #define VIRTIO_BLK_F_BLK_SIZE (1<<6) 68 #define VIRTIO_BLK_F_SCSI (1<<7) 69 #define VIRTIO_BLK_F_FLUSH (1<<9) 70 #define VIRTIO_BLK_F_TOPOLOGY (1<<10) 71 #define VIRTIO_BLK_F_CONFIG_WCE (1<<11) 72 73 /* 74 * Each block request uses at least two segments - one for the header 75 * and one for the status. 76 */ 77 #define VIRTIO_BLK_CTRL_SEGMENTS 2 78 79 #define VIRTIO_BLK_FLAG_BITS \ 80 VIRTIO_COMMON_FLAG_BITS \ 81 "b\x0b" "CONFIG_WCE\0" \ 82 "b\x0a" "TOPOLOGY\0" \ 83 "b\x09" "FLUSH\0" \ 84 "b\x07" "SCSI\0" \ 85 "b\x06" "BLK_SIZE\0" \ 86 "b\x05" "RO\0" \ 87 "b\x04" "GEOMETRY\0" \ 88 "b\x02" "SEG_MAX\0" \ 89 "b\x01" "SIZE_MAX\0" \ 90 "b\x00" "BARRIER\0" 91 92 /* Command */ 93 #define VIRTIO_BLK_T_IN 0 94 #define VIRTIO_BLK_T_OUT 1 95 #define VIRTIO_BLK_T_FLUSH 4 96 #define VIRTIO_BLK_T_BARRIER 0x80000000 97 98 /* Sector */ 99 #define VIRTIO_BLK_BSIZE 512 100 101 /* Status */ 102 #define VIRTIO_BLK_S_OK 0 103 #define VIRTIO_BLK_S_IOERR 1 104 #define VIRTIO_BLK_S_UNSUPP 2 105 106 /* Request header structure */ 107 struct virtio_blk_req_hdr { 108 uint32_t type; /* VIRTIO_BLK_T_* */ 109 uint32_t ioprio; 110 uint64_t sector; 111 } __packed; 112 /* payload and 1 byte status follows */ 113 114 115 /* 116 * ld_virtiovar: 117 */ 118 struct virtio_blk_req { 119 struct virtio_blk_req_hdr vr_hdr; 120 uint8_t vr_status; 121 struct buf *vr_bp; 122 #define DUMMY_VR_BP ((void *)1) 123 bus_dmamap_t vr_cmdsts; 124 bus_dmamap_t vr_payload; 125 }; 126 127 struct ld_virtio_softc { 128 struct ld_softc sc_ld; 129 device_t sc_dev; 130 131 uint32_t sc_seg_max; /* max number of segs in xfer */ 132 uint32_t sc_size_max; /* max size of single seg */ 133 134 struct virtio_softc *sc_virtio; 135 struct virtqueue sc_vq; 136 137 struct virtio_blk_req *sc_reqs; 138 bus_dma_segment_t sc_reqs_seg; 139 140 int sc_readonly; 141 142 enum { 143 SYNC_FREE, SYNC_BUSY, SYNC_DONE 144 } sc_sync_use; 145 kcondvar_t sc_sync_wait; 146 kmutex_t sc_sync_wait_lock; 147 uint8_t sc_sync_status; 148 }; 149 150 static int ld_virtio_match(device_t, cfdata_t, void *); 151 static void ld_virtio_attach(device_t, device_t, void *); 152 static int ld_virtio_detach(device_t, int); 153 154 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc), 155 ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL); 156 157 static int 158 ld_virtio_match(device_t parent, cfdata_t match, void *aux) 159 { 160 struct virtio_attach_args *va = aux; 161 162 if (va->sc_childdevid == VIRTIO_DEVICE_ID_BLOCK) 163 return 1; 164 165 return 0; 166 } 167 168 static int ld_virtio_vq_done(struct virtqueue *); 169 static int ld_virtio_dump(struct ld_softc *, void *, int, int); 170 static int ld_virtio_start(struct ld_softc *, struct buf *); 171 static int ld_virtio_ioctl(struct ld_softc *, u_long, void *, int32_t, bool); 172 173 static int 174 ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize) 175 { 176 int allocsize, r, rsegs, i; 177 struct ld_softc *ld = &sc->sc_ld; 178 void *vaddr; 179 180 allocsize = sizeof(struct virtio_blk_req) * qsize; 181 r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0, 182 &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_WAITOK); 183 if (r != 0) { 184 aprint_error_dev(sc->sc_dev, 185 "DMA memory allocation failed, size %d, " 186 "error code %d\n", allocsize, r); 187 goto err_none; 188 } 189 r = bus_dmamem_map(virtio_dmat(sc->sc_virtio), 190 &sc->sc_reqs_seg, 1, allocsize, 191 &vaddr, BUS_DMA_WAITOK); 192 if (r != 0) { 193 aprint_error_dev(sc->sc_dev, 194 "DMA memory map failed, " 195 "error code %d\n", r); 196 goto err_dmamem_alloc; 197 } 198 sc->sc_reqs = vaddr; 199 memset(vaddr, 0, allocsize); 200 for (i = 0; i < qsize; i++) { 201 struct virtio_blk_req *vr = &sc->sc_reqs[i]; 202 r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), 203 offsetof(struct virtio_blk_req, vr_bp), 204 1, 205 offsetof(struct virtio_blk_req, vr_bp), 206 0, 207 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, 208 &vr->vr_cmdsts); 209 if (r != 0) { 210 aprint_error_dev(sc->sc_dev, 211 "command dmamap creation failed, " 212 "error code %d\n", r); 213 goto err_reqs; 214 } 215 r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts, 216 &vr->vr_hdr, 217 offsetof(struct virtio_blk_req, vr_bp), 218 NULL, BUS_DMA_WAITOK); 219 if (r != 0) { 220 aprint_error_dev(sc->sc_dev, 221 "command dmamap load failed, " 222 "error code %d\n", r); 223 goto err_reqs; 224 } 225 r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), 226 /*size*/ld->sc_maxxfer, 227 /*nseg*/sc->sc_seg_max, 228 /*maxsegsz*/sc->sc_size_max, 229 /*boundary*/0, 230 BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW, 231 &vr->vr_payload); 232 if (r != 0) { 233 aprint_error_dev(sc->sc_dev, 234 "payload dmamap creation failed, " 235 "error code %d\n", r); 236 goto err_reqs; 237 } 238 } 239 return 0; 240 241 err_reqs: 242 for (i = 0; i < qsize; i++) { 243 struct virtio_blk_req *vr = &sc->sc_reqs[i]; 244 if (vr->vr_cmdsts) { 245 bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), 246 vr->vr_cmdsts); 247 vr->vr_cmdsts = 0; 248 } 249 if (vr->vr_payload) { 250 bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), 251 vr->vr_payload); 252 vr->vr_payload = 0; 253 } 254 } 255 bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize); 256 err_dmamem_alloc: 257 bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1); 258 err_none: 259 return -1; 260 } 261 262 static void 263 ld_virtio_attach(device_t parent, device_t self, void *aux) 264 { 265 struct ld_virtio_softc *sc = device_private(self); 266 struct ld_softc *ld = &sc->sc_ld; 267 struct virtio_softc *vsc = device_private(parent); 268 uint64_t features; 269 int qsize; 270 271 if (virtio_child(vsc) != NULL) { 272 aprint_normal(": child already attached for %s; " 273 "something wrong...\n", device_xname(parent)); 274 return; 275 } 276 277 sc->sc_dev = self; 278 sc->sc_virtio = vsc; 279 280 virtio_child_attach_start(vsc, self, IPL_BIO, 281 (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | 282 VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE | 283 VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_CONFIG_WCE), 284 VIRTIO_BLK_FLAG_BITS); 285 286 features = virtio_features(vsc); 287 if (features == 0) 288 goto err; 289 290 if (features & VIRTIO_BLK_F_RO) 291 sc->sc_readonly = 1; 292 else 293 sc->sc_readonly = 0; 294 295 if (features & VIRTIO_BLK_F_BLK_SIZE) { 296 ld->sc_secsize = virtio_read_device_config_4(vsc, 297 VIRTIO_BLK_CONFIG_BLK_SIZE); 298 } else 299 ld->sc_secsize = VIRTIO_BLK_BSIZE; 300 301 if (features & VIRTIO_BLK_F_SEG_MAX) { 302 sc->sc_seg_max = virtio_read_device_config_4(vsc, 303 VIRTIO_BLK_CONFIG_SEG_MAX); 304 if (sc->sc_seg_max == 0) { 305 aprint_error_dev(sc->sc_dev, 306 "Invalid SEG_MAX %d\n", sc->sc_seg_max); 307 goto err; 308 } 309 } else { 310 sc->sc_seg_max = 1; 311 aprint_verbose_dev(sc->sc_dev, 312 "Unknown SEG_MAX, assuming %"PRIu32"\n", sc->sc_seg_max); 313 } 314 315 /* At least genfs_io assumes size_max*seg_max >= MAXPHYS. */ 316 if (features & VIRTIO_BLK_F_SIZE_MAX) { 317 sc->sc_size_max = virtio_read_device_config_4(vsc, 318 VIRTIO_BLK_CONFIG_SIZE_MAX); 319 if (sc->sc_size_max < MAXPHYS/sc->sc_seg_max) { 320 aprint_error_dev(sc->sc_dev, 321 "Too small SIZE_MAX %d minimum is %d\n", 322 sc->sc_size_max, MAXPHYS/sc->sc_seg_max); 323 // goto err; 324 sc->sc_size_max = MAXPHYS/sc->sc_seg_max; 325 } else if (sc->sc_size_max > MAXPHYS) { 326 aprint_verbose_dev(sc->sc_dev, 327 "Clip SIZE_MAX from %d to %d\n", 328 sc->sc_size_max, MAXPHYS); 329 sc->sc_size_max = MAXPHYS; 330 } 331 } else { 332 sc->sc_size_max = MAXPHYS; 333 aprint_verbose_dev(sc->sc_dev, 334 "Unknown SIZE_MAX, assuming %"PRIu32"\n", 335 sc->sc_size_max); 336 } 337 338 aprint_normal_dev(sc->sc_dev, "max %"PRIu32" segs" 339 " of max %"PRIu32" bytes\n", 340 sc->sc_seg_max, sc->sc_size_max); 341 342 virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0, 343 ld_virtio_vq_done); 344 345 if (virtio_alloc_vq(vsc, &sc->sc_vq, sc->sc_size_max, 346 sc->sc_seg_max + VIRTIO_BLK_CTRL_SEGMENTS, "I/O request") != 0) 347 goto err; 348 qsize = sc->sc_vq.vq_num; 349 350 if (virtio_child_attach_finish(vsc, &sc->sc_vq, 1, 351 NULL, VIRTIO_F_INTR_MSIX) != 0) 352 goto err; 353 354 ld->sc_dv = self; 355 ld->sc_secperunit = virtio_read_device_config_8(vsc, 356 VIRTIO_BLK_CONFIG_CAPACITY) / (ld->sc_secsize / VIRTIO_BLK_BSIZE); 357 358 /* 359 * Clamp ld->sc_maxxfer to MAXPHYS before ld_virtio_alloc_reqs 360 * allocates DMA maps of at most ld->sc_maxxfer bytes. 361 * ldattach will also clamp to MAXPHYS, but not until after 362 * ld_virtio_alloc_reqs is done, so that doesn't help. 363 */ 364 ld->sc_maxxfer = MIN(MAXPHYS, sc->sc_size_max * sc->sc_seg_max); 365 366 if (features & VIRTIO_BLK_F_GEOMETRY) { 367 ld->sc_ncylinders = virtio_read_device_config_2(vsc, 368 VIRTIO_BLK_CONFIG_GEOMETRY_C); 369 ld->sc_nheads = virtio_read_device_config_1(vsc, 370 VIRTIO_BLK_CONFIG_GEOMETRY_H); 371 ld->sc_nsectors = virtio_read_device_config_1(vsc, 372 VIRTIO_BLK_CONFIG_GEOMETRY_S); 373 } 374 ld->sc_maxqueuecnt = qsize - 1; /* reserve slot for dumps, flushes */ 375 376 if (ld_virtio_alloc_reqs(sc, qsize) < 0) 377 goto err; 378 379 cv_init(&sc->sc_sync_wait, "vblksync"); 380 mutex_init(&sc->sc_sync_wait_lock, MUTEX_DEFAULT, IPL_BIO); 381 sc->sc_sync_use = SYNC_FREE; 382 383 ld->sc_dump = ld_virtio_dump; 384 ld->sc_start = ld_virtio_start; 385 ld->sc_ioctl = ld_virtio_ioctl; 386 387 ld->sc_flags = LDF_ENABLED | LDF_MPSAFE; 388 ldattach(ld, BUFQ_DISK_DEFAULT_STRAT); 389 390 return; 391 392 err: 393 virtio_child_attach_failed(vsc); 394 return; 395 } 396 397 static int 398 ld_virtio_start(struct ld_softc *ld, struct buf *bp) 399 { 400 /* splbio */ 401 struct ld_virtio_softc *sc = device_private(ld->sc_dv); 402 struct virtio_softc *vsc = sc->sc_virtio; 403 struct virtqueue *vq = &sc->sc_vq; 404 struct virtio_blk_req *vr; 405 int r; 406 int isread = (bp->b_flags & B_READ); 407 int slot; 408 409 if (sc->sc_readonly && !isread) 410 return EIO; 411 412 r = virtio_enqueue_prep(vsc, vq, &slot); 413 if (r != 0) 414 return r; 415 416 vr = &sc->sc_reqs[slot]; 417 KASSERT(vr->vr_bp == NULL); 418 419 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload, 420 bp->b_data, bp->b_bcount, NULL, 421 ((isread?BUS_DMA_READ:BUS_DMA_WRITE) 422 |BUS_DMA_NOWAIT)); 423 if (r != 0) { 424 aprint_error_dev(sc->sc_dev, 425 "payload dmamap failed, error code %d\n", r); 426 virtio_enqueue_abort(vsc, vq, slot); 427 return r; 428 } 429 430 KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max); 431 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 432 VIRTIO_BLK_CTRL_SEGMENTS); 433 if (r != 0) { 434 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload); 435 return r; 436 } 437 438 vr->vr_bp = bp; 439 vr->vr_hdr.type = virtio_rw32(vsc, 440 isread ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT); 441 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0); 442 vr->vr_hdr.sector = virtio_rw64(vsc, 443 bp->b_rawblkno * sc->sc_ld.sc_secsize / 444 VIRTIO_BLK_BSIZE); 445 446 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 447 0, sizeof(struct virtio_blk_req_hdr), 448 BUS_DMASYNC_PREWRITE); 449 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload, 450 0, bp->b_bcount, 451 isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE); 452 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 453 offsetof(struct virtio_blk_req, vr_status), 454 sizeof(uint8_t), 455 BUS_DMASYNC_PREREAD); 456 457 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 458 0, sizeof(struct virtio_blk_req_hdr), 459 true); 460 virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread); 461 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 462 offsetof(struct virtio_blk_req, vr_status), 463 sizeof(uint8_t), 464 false); 465 virtio_enqueue_commit(vsc, vq, slot, true); 466 467 return 0; 468 } 469 470 static void 471 ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc, 472 struct virtqueue *vq, int slot) 473 { 474 struct virtio_blk_req *vr = &sc->sc_reqs[slot]; 475 struct buf *bp = vr->vr_bp; 476 477 vr->vr_bp = NULL; 478 479 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 480 0, sizeof(struct virtio_blk_req_hdr), 481 BUS_DMASYNC_POSTWRITE); 482 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 483 sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t), 484 BUS_DMASYNC_POSTREAD); 485 if (bp == DUMMY_VR_BP) { 486 mutex_enter(&sc->sc_sync_wait_lock); 487 sc->sc_sync_status = vr->vr_status; 488 sc->sc_sync_use = SYNC_DONE; 489 cv_broadcast(&sc->sc_sync_wait); 490 mutex_exit(&sc->sc_sync_wait_lock); 491 virtio_dequeue_commit(vsc, vq, slot); 492 return; 493 } 494 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload, 495 0, bp->b_bcount, 496 (bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD 497 :BUS_DMASYNC_POSTWRITE); 498 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload); 499 500 if (vr->vr_status != VIRTIO_BLK_S_OK) { 501 bp->b_error = EIO; 502 bp->b_resid = bp->b_bcount; 503 } else { 504 bp->b_error = 0; 505 bp->b_resid = 0; 506 } 507 508 virtio_dequeue_commit(vsc, vq, slot); 509 510 lddone(&sc->sc_ld, bp); 511 } 512 513 static int 514 ld_virtio_vq_done(struct virtqueue *vq) 515 { 516 struct virtio_softc *vsc = vq->vq_owner; 517 struct ld_virtio_softc *sc = device_private(virtio_child(vsc)); 518 int r = 0; 519 int slot; 520 521 again: 522 if (virtio_dequeue(vsc, vq, &slot, NULL)) 523 return r; 524 r = 1; 525 526 ld_virtio_vq_done1(sc, vsc, vq, slot); 527 goto again; 528 } 529 530 static int 531 ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt) 532 { 533 struct ld_virtio_softc *sc = device_private(ld->sc_dv); 534 struct virtio_softc *vsc = sc->sc_virtio; 535 struct virtqueue *vq = &sc->sc_vq; 536 struct virtio_blk_req *vr; 537 int slot, r; 538 539 if (sc->sc_readonly) 540 return EIO; 541 542 r = virtio_enqueue_prep(vsc, vq, &slot); 543 if (r != 0) { 544 if (r == EAGAIN) { /* no free slot; dequeue first */ 545 delay(100); 546 ld_virtio_vq_done(vq); 547 r = virtio_enqueue_prep(vsc, vq, &slot); 548 if (r != 0) 549 return r; 550 } 551 return r; 552 } 553 vr = &sc->sc_reqs[slot]; 554 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload, 555 data, blkcnt*ld->sc_secsize, NULL, 556 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 557 if (r != 0) 558 return r; 559 560 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 561 VIRTIO_BLK_CTRL_SEGMENTS); 562 if (r != 0) { 563 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload); 564 return r; 565 } 566 567 vr->vr_bp = (void*)0xdeadbeef; 568 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_OUT); 569 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0); 570 vr->vr_hdr.sector = virtio_rw64(vsc, 571 (daddr_t) blkno * ld->sc_secsize / 572 VIRTIO_BLK_BSIZE); 573 574 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 575 0, sizeof(struct virtio_blk_req_hdr), 576 BUS_DMASYNC_PREWRITE); 577 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload, 578 0, blkcnt*ld->sc_secsize, 579 BUS_DMASYNC_PREWRITE); 580 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 581 offsetof(struct virtio_blk_req, vr_status), 582 sizeof(uint8_t), 583 BUS_DMASYNC_PREREAD); 584 585 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 586 0, sizeof(struct virtio_blk_req_hdr), 587 true); 588 virtio_enqueue(vsc, vq, slot, vr->vr_payload, true); 589 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 590 offsetof(struct virtio_blk_req, vr_status), 591 sizeof(uint8_t), 592 false); 593 virtio_enqueue_commit(vsc, vq, slot, true); 594 595 for ( ; ; ) { 596 int dslot; 597 598 r = virtio_dequeue(vsc, vq, &dslot, NULL); 599 if (r != 0) 600 continue; 601 if (dslot != slot) { 602 ld_virtio_vq_done1(sc, vsc, vq, dslot); 603 continue; 604 } else 605 break; 606 } 607 608 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 609 0, sizeof(struct virtio_blk_req_hdr), 610 BUS_DMASYNC_POSTWRITE); 611 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload, 612 0, blkcnt*ld->sc_secsize, 613 BUS_DMASYNC_POSTWRITE); 614 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 615 offsetof(struct virtio_blk_req, vr_status), 616 sizeof(uint8_t), 617 BUS_DMASYNC_POSTREAD); 618 if (vr->vr_status == VIRTIO_BLK_S_OK) 619 r = 0; 620 else 621 r = EIO; 622 virtio_dequeue_commit(vsc, vq, slot); 623 624 return r; 625 } 626 627 static int 628 ld_virtio_detach(device_t self, int flags) 629 { 630 struct ld_virtio_softc *sc = device_private(self); 631 struct ld_softc *ld = &sc->sc_ld; 632 bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio); 633 int r, i, qsize; 634 635 qsize = sc->sc_vq.vq_num; 636 r = ldbegindetach(ld, flags); 637 if (r != 0) 638 return r; 639 virtio_reset(sc->sc_virtio); 640 virtio_free_vq(sc->sc_virtio, &sc->sc_vq); 641 642 for (i = 0; i < qsize; i++) { 643 bus_dmamap_destroy(dmat, 644 sc->sc_reqs[i].vr_cmdsts); 645 bus_dmamap_destroy(dmat, 646 sc->sc_reqs[i].vr_payload); 647 } 648 bus_dmamem_unmap(dmat, sc->sc_reqs, 649 sizeof(struct virtio_blk_req) * qsize); 650 bus_dmamem_free(dmat, &sc->sc_reqs_seg, 1); 651 652 ldenddetach(ld); 653 654 cv_destroy(&sc->sc_sync_wait); 655 mutex_destroy(&sc->sc_sync_wait_lock); 656 657 virtio_child_detach(sc->sc_virtio); 658 659 return 0; 660 } 661 662 static int 663 ld_virtio_flush(struct ld_softc *ld, bool poll) 664 { 665 struct ld_virtio_softc * const sc = device_private(ld->sc_dv); 666 struct virtio_softc * const vsc = sc->sc_virtio; 667 const uint64_t features = virtio_features(vsc); 668 struct virtqueue *vq = &sc->sc_vq; 669 struct virtio_blk_req *vr; 670 int slot; 671 int r; 672 673 if ((features & VIRTIO_BLK_F_FLUSH) == 0) 674 return 0; 675 676 mutex_enter(&sc->sc_sync_wait_lock); 677 while (sc->sc_sync_use != SYNC_FREE) { 678 if (poll) { 679 mutex_exit(&sc->sc_sync_wait_lock); 680 ld_virtio_vq_done(vq); 681 mutex_enter(&sc->sc_sync_wait_lock); 682 continue; 683 } 684 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock); 685 } 686 sc->sc_sync_use = SYNC_BUSY; 687 mutex_exit(&sc->sc_sync_wait_lock); 688 689 r = virtio_enqueue_prep(vsc, vq, &slot); 690 if (r != 0) { 691 return r; 692 } 693 694 vr = &sc->sc_reqs[slot]; 695 KASSERT(vr->vr_bp == NULL); 696 697 r = virtio_enqueue_reserve(vsc, vq, slot, VIRTIO_BLK_CTRL_SEGMENTS); 698 if (r != 0) { 699 return r; 700 } 701 702 vr->vr_bp = DUMMY_VR_BP; 703 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_FLUSH); 704 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0); 705 vr->vr_hdr.sector = virtio_rw64(vsc, 0); 706 707 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 708 0, sizeof(struct virtio_blk_req_hdr), 709 BUS_DMASYNC_PREWRITE); 710 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts, 711 offsetof(struct virtio_blk_req, vr_status), 712 sizeof(uint8_t), 713 BUS_DMASYNC_PREREAD); 714 715 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 716 0, sizeof(struct virtio_blk_req_hdr), 717 true); 718 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts, 719 offsetof(struct virtio_blk_req, vr_status), 720 sizeof(uint8_t), 721 false); 722 virtio_enqueue_commit(vsc, vq, slot, true); 723 724 mutex_enter(&sc->sc_sync_wait_lock); 725 while (sc->sc_sync_use != SYNC_DONE) { 726 if (poll) { 727 mutex_exit(&sc->sc_sync_wait_lock); 728 ld_virtio_vq_done(vq); 729 mutex_enter(&sc->sc_sync_wait_lock); 730 continue; 731 } 732 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock); 733 } 734 735 if (sc->sc_sync_status == VIRTIO_BLK_S_OK) 736 r = 0; 737 else 738 r = EIO; 739 740 sc->sc_sync_use = SYNC_FREE; 741 cv_broadcast(&sc->sc_sync_wait); 742 mutex_exit(&sc->sc_sync_wait_lock); 743 744 return r; 745 } 746 747 static int 748 ld_virtio_getcache(struct ld_softc *ld, int *bitsp) 749 { 750 struct ld_virtio_softc * const sc = device_private(ld->sc_dv); 751 struct virtio_softc * const vsc = sc->sc_virtio; 752 const uint64_t features = virtio_features(vsc); 753 754 *bitsp = DKCACHE_READ; 755 if ((features & VIRTIO_BLK_F_CONFIG_WCE) != 0) 756 *bitsp |= DKCACHE_WCHANGE; 757 if (virtio_read_device_config_1(vsc, 758 VIRTIO_BLK_CONFIG_WRITEBACK) != 0x00) 759 *bitsp |= DKCACHE_WRITE; 760 761 return 0; 762 } 763 764 static int 765 ld_virtio_setcache(struct ld_softc *ld, int bits) 766 { 767 struct ld_virtio_softc * const sc = device_private(ld->sc_dv); 768 struct virtio_softc * const vsc = sc->sc_virtio; 769 const uint8_t wce = (bits & DKCACHE_WRITE) ? 0x01 : 0x00; 770 771 virtio_write_device_config_1(vsc, 772 VIRTIO_BLK_CONFIG_WRITEBACK, wce); 773 if (virtio_read_device_config_1(vsc, 774 VIRTIO_BLK_CONFIG_WRITEBACK) != wce) 775 return EIO; 776 777 return 0; 778 } 779 780 static int 781 ld_virtio_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll) 782 { 783 int error; 784 785 switch (cmd) { 786 case DIOCCACHESYNC: 787 error = ld_virtio_flush(ld, poll); 788 break; 789 790 case DIOCGCACHE: 791 error = ld_virtio_getcache(ld, (int *)addr); 792 break; 793 794 case DIOCSCACHE: 795 error = ld_virtio_setcache(ld, *(int *)addr); 796 break; 797 798 default: 799 error = EPASSTHROUGH; 800 break; 801 } 802 803 return error; 804 } 805 806 MODULE(MODULE_CLASS_DRIVER, ld_virtio, "ld,virtio"); 807 808 #ifdef _MODULE 809 /* 810 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd" 811 * XXX it will be defined in the common-code module 812 */ 813 #undef CFDRIVER_DECL 814 #define CFDRIVER_DECL(name, class, attr) 815 #include "ioconf.c" 816 #endif 817 818 static int 819 ld_virtio_modcmd(modcmd_t cmd, void *opaque) 820 { 821 #ifdef _MODULE 822 /* 823 * We ignore the cfdriver_vec[] that ioconf provides, since 824 * the cfdrivers are attached already. 825 */ 826 static struct cfdriver * const no_cfdriver_vec[] = { NULL }; 827 #endif 828 int error = 0; 829 830 #ifdef _MODULE 831 switch (cmd) { 832 case MODULE_CMD_INIT: 833 error = config_init_component(no_cfdriver_vec, 834 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio); 835 break; 836 case MODULE_CMD_FINI: 837 error = config_fini_component(no_cfdriver_vec, 838 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio); 839 break; 840 default: 841 error = ENOTTY; 842 break; 843 } 844 #endif 845 846 return error; 847 } 848