1 /* $NetBSD: vdsk.c,v 1.11 2023/12/12 21:34:34 andvar Exp $ */ 2 /* $OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $ */ 3 /* 4 * Copyright (c) 2009, 2011 Mark Kettenis 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/kmem.h> 20 #include <sys/param.h> 21 #include <sys/buf.h> 22 #include <sys/device.h> 23 #include <sys/systm.h> 24 25 #include <machine/autoconf.h> 26 #include <machine/hypervisor.h> 27 28 #include <uvm/uvm_extern.h> 29 30 #include <dev/scsipi/scsi_all.h> 31 #include <dev/scsipi/scsipi_disk.h> 32 #include <dev/scsipi/scsipi_cd.h> 33 #include <dev/scsipi/scsiconf.h> 34 35 #include <dev/scsipi/scsi_disk.h> 36 #include <dev/scsipi/scsipi_all.h> 37 #include <dev/scsipi/scsiconf.h> 38 #include <dev/scsipi/scsi_message.h> 39 40 #include <sparc64/dev/cbusvar.h> 41 #include <sparc64/dev/ldcvar.h> 42 #include <sparc64/dev/viovar.h> 43 44 #ifdef VDSK_DEBUG 45 #define DPRINTF(x) printf x 46 #else 47 #define DPRINTF(x) 48 #endif 49 50 #define VDSK_TX_ENTRIES 32 51 #define VDSK_RX_ENTRIES 32 52 53 struct vd_attr_info { 54 struct vio_msg_tag tag; 55 uint8_t xfer_mode; 56 uint8_t vd_type; 57 uint8_t vd_mtype; 58 uint8_t _reserved1; 59 uint32_t vdisk_block_size; 60 uint64_t operations; 61 uint64_t vdisk_size; 62 uint64_t max_xfer_sz; 63 uint64_t _reserved2[2]; 64 }; 65 66 #define VD_DISK_TYPE_SLICE 0x01 67 #define VD_DISK_TYPE_DISK 0x02 68 69 #define VD_MEDIA_TYPE_FIXED 0x01 70 #define VD_MEDIA_TYPE_CD 0x02 71 #define VD_MEDIA_TYPE_DVD 0x03 72 73 /* vDisk version 1.0. */ 74 #define VD_OP_BREAD 0x01 75 #define VD_OP_BWRITE 0x02 76 #define VD_OP_FLUSH 0x03 77 #define VD_OP_GET_WCE 0x04 78 #define VD_OP_SET_WCE 0x05 79 #define VD_OP_GET_VTOC 0x06 80 #define VD_OP_SET_VTOC 0x07 81 #define VD_OP_GET_DISKGEOM 0x08 82 #define VD_OP_SET_DISKGEOM 0x09 83 #define VD_OP_GET_DEVID 0x0b 84 #define VD_OP_GET_EFI 0x0c 85 #define VD_OP_SET_EFI 0x0d 86 87 /* vDisk version 1.1 */ 88 #define VD_OP_SCSICMD 0x0a 89 #define VD_OP_RESET 0x0e 90 #define VD_OP_GET_ACCESS 0x0f 91 #define VD_OP_SET_ACCESS 0x10 92 #define VD_OP_GET_CAPACITY 0x11 93 94 struct vd_desc { 95 struct vio_dring_hdr hdr; 96 uint64_t req_id; 97 uint8_t operation; 98 uint8_t slice; 99 uint16_t _reserved1; 100 uint32_t status; 101 uint64_t offset; 102 uint64_t size; 103 uint32_t ncookies; 104 uint32_t _reserved2; 105 struct ldc_cookie cookie[MAXPHYS / PAGE_SIZE]; 106 }; 107 108 #define VD_SLICE_NONE 0xff 109 110 struct vdsk_dring { 111 bus_dmamap_t vd_map; 112 bus_dma_segment_t vd_seg; 113 struct vd_desc *vd_desc; 114 int vd_nentries; 115 }; 116 117 #if OPENBSD_BUSDMA 118 struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int); 119 void vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *); 120 #else 121 struct vdsk_dring *vdsk_dring_alloc(int); 122 void vdsk_dring_free(struct vdsk_dring *); 123 #endif 124 125 /* 126 * We support vDisk 1.0 and 1.1. 127 */ 128 #define VDSK_MAJOR 1 129 #define VDSK_MINOR 1 130 131 struct vdsk_soft_desc { 132 int vsd_map_idx[MAXPHYS / PAGE_SIZE]; 133 struct scsipi_xfer *vsd_xs; 134 int vsd_ncookies; 135 }; 136 137 struct vdsk_softc { 138 device_t sc_dv; 139 140 struct scsipi_adapter sc_adapter; 141 struct scsipi_channel sc_channel; 142 143 bus_space_tag_t sc_bustag; 144 bus_dma_tag_t sc_dmatag; 145 146 void *sc_tx_ih; 147 void *sc_rx_ih; 148 149 struct ldc_conn sc_lc; 150 151 uint16_t sc_vio_state; 152 #define VIO_SND_VER_INFO 0x0001 153 #define VIO_ACK_VER_INFO 0x0002 154 #define VIO_SND_ATTR_INFO 0x0004 155 #define VIO_ACK_ATTR_INFO 0x0008 156 #define VIO_SND_DRING_REG 0x0010 157 #define VIO_ACK_DRING_REG 0x0020 158 #define VIO_SND_RDX 0x0040 159 #define VIO_ACK_RDX 0x0080 160 #define VIO_ESTABLISHED 0x00ff 161 162 uint16_t sc_major; 163 uint16_t sc_minor; 164 165 uint32_t sc_local_sid; 166 uint64_t sc_dring_ident; 167 uint64_t sc_seq_no; 168 169 int sc_tx_cnt; 170 int sc_tx_prod; 171 int sc_tx_cons; 172 173 struct ldc_map *sc_lm; 174 struct vdsk_dring *sc_vd; 175 struct vdsk_soft_desc *sc_vsd; 176 177 uint32_t sc_vdisk_block_size; 178 uint64_t sc_vdisk_size; 179 uint8_t sc_vd_mtype; 180 }; 181 182 int vdsk_match(device_t, cfdata_t, void *); 183 void vdsk_attach(device_t, device_t, void *); 184 void vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 185 void *); 186 187 CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc), 188 vdsk_match, vdsk_attach, NULL, NULL); 189 190 int vdsk_tx_intr(void *); 191 int vdsk_rx_intr(void *); 192 193 void vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *); 194 void vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *); 195 void vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *); 196 void vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *); 197 void vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *); 198 void vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *); 199 void vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *); 200 void vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *); 201 202 void vdsk_ldc_reset(struct ldc_conn *); 203 void vdsk_ldc_start(struct ldc_conn *); 204 205 void vdsk_sendmsg(struct vdsk_softc *, void *, size_t); 206 void vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t); 207 void vdsk_send_attr_info(struct vdsk_softc *); 208 void vdsk_send_dring_reg(struct vdsk_softc *); 209 void vdsk_send_rdx(struct vdsk_softc *); 210 211 void *vdsk_io_get(void *); 212 void vdsk_io_put(void *, void *); 213 214 void vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *); 215 int vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *); 216 void vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int); 217 void vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *); 218 void vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *); 219 void vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *); 220 void vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *); 221 void vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *); 222 void vdsk_scsi_done(struct scsipi_xfer *, int); 223 224 int 225 vdsk_match(device_t parent, cfdata_t match, void *aux) 226 { 227 struct cbus_attach_args *ca = aux; 228 229 if (strcmp(ca->ca_name, "disk") == 0) 230 return (1); 231 232 return (0); 233 } 234 235 void 236 vdsk_attach(device_t parent, device_t self, void *aux) 237 { 238 struct vdsk_softc *sc = device_private(self); 239 struct cbus_attach_args *ca = aux; 240 struct ldc_conn *lc; 241 int err, s; 242 int timeout; 243 vaddr_t va; 244 paddr_t pa; 245 246 sc->sc_bustag = ca->ca_bustag; 247 sc->sc_dmatag = ca->ca_dmatag; 248 249 printf(": ivec 0x%llx, 0x%llx", 250 (long long unsigned int)ca->ca_tx_ino, 251 (long long unsigned int)ca->ca_rx_ino); 252 253 /* 254 * Un-configure queues before registering interrupt handlers, 255 * such that we don't get any stale LDC packets or events. 256 */ 257 hv_ldc_tx_qconf(ca->ca_id, 0, 0); 258 hv_ldc_rx_qconf(ca->ca_id, 0, 0); 259 260 sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino, 261 IPL_BIO, vdsk_tx_intr, sc); 262 sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino, 263 IPL_BIO, vdsk_rx_intr, sc); 264 if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) { 265 printf(", can't establish interrupt\n"); 266 return; 267 } 268 269 lc = &sc->sc_lc; 270 lc->lc_id = ca->ca_id; 271 lc->lc_sc = sc; 272 lc->lc_reset = vdsk_ldc_reset; 273 lc->lc_start = vdsk_ldc_start; 274 lc->lc_rx_data = vdsk_rx_data; 275 276 #if OPENBSD_BUSDMA 277 lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES); 278 #else 279 lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES); 280 #endif 281 #if OPENBSD_BUSDMA 282 lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES); 283 #else 284 lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES); 285 #endif 286 #if OPENBSD_BUSDMA 287 sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048); 288 #else 289 sc->sc_lm = ldc_map_alloc(2048); 290 #endif 291 #if OPENBSD_BUSDMA 292 err = hv_ldc_set_map_table(lc->lc_id, 293 sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries); 294 #else 295 va = (vaddr_t)sc->sc_lm->lm_slot; 296 pa = 0; 297 if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) 298 panic("pmap_extract failed %lx\n", va); 299 err = hv_ldc_set_map_table(lc->lc_id, pa, 2048); 300 #endif 301 if (err != H_EOK) { 302 printf("hv_ldc_set_map_table %d\n", err); 303 goto free_map; 304 } 305 #if OPENBSD_BUSDMA 306 sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32); 307 #else 308 sc->sc_vd = vdsk_dring_alloc(32); 309 #endif 310 sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_SLEEP); 311 312 #if OPENBSD_BUSDMA 313 sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr; 314 #else 315 va = (vaddr_t)sc->sc_vd->vd_desc; 316 pa = 0; 317 if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) 318 panic("pmap_extract failed %lx\n", va); 319 320 sc->sc_lm->lm_slot[0].entry = pa; 321 #endif 322 sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK; 323 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW; 324 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W; 325 sc->sc_lm->lm_next = 1; 326 sc->sc_lm->lm_count = 1; 327 va = lc->lc_txq->lq_va; 328 pa = 0; 329 if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) 330 panic("pmap_extract failed %lx\n", va); 331 #if OPENBSD_BUSDMA 332 err = hv_ldc_tx_qconf(lc->lc_id, 333 lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries); 334 #else 335 err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries); 336 #endif 337 if (err != H_EOK) 338 printf("hv_ldc_tx_qconf %d\n", err); 339 va = (vaddr_t)lc->lc_rxq->lq_va; 340 pa = 0; 341 if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) 342 panic("pmap_extract failed %lx\n", va); 343 #if OPENBSD_BUSDMA 344 err = hv_ldc_rx_qconf(lc->lc_id, 345 lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries); 346 #else 347 err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries); 348 #endif 349 if (err != H_EOK) 350 printf("hv_ldc_rx_qconf %d\n", err); 351 352 cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED); 353 cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED); 354 355 ldc_send_vers(lc); 356 357 printf("\n"); 358 359 /* 360 * Interrupts aren't enabled during autoconf, so poll for VIO 361 * peer-to-peer handshake completion. 362 */ 363 s = splbio(); 364 timeout = 10 * 1000; 365 do { 366 if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED) 367 break; 368 369 delay(1000); 370 } while(--timeout > 0); 371 splx(s); 372 373 if (sc->sc_vio_state != VIO_ESTABLISHED) { 374 printf("vio not established: %d\n", sc->sc_vio_state); 375 return; 376 } 377 378 sc->sc_dv = self; 379 380 sc->sc_adapter.adapt_dev = sc->sc_dv; 381 sc->sc_adapter.adapt_nchannels = 1; 382 sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1; 383 sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1; 384 385 sc->sc_adapter.adapt_minphys = minphys; 386 sc->sc_adapter.adapt_request = vdsk_scsipi_request; 387 388 sc->sc_channel.chan_adapter = &sc->sc_adapter; 389 sc->sc_channel.chan_bustype = &scsi_bustype; 390 sc->sc_channel.chan_channel = 0; 391 sc->sc_channel.chan_ntargets = 2; /* XXX why not 1? */ 392 sc->sc_channel.chan_nluns = 1; /* XXX slices should be presented as luns? */ 393 sc->sc_channel.chan_id = 0; 394 sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE; 395 396 config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE); 397 398 return; 399 400 free_map: 401 hv_ldc_set_map_table(lc->lc_id, 0, 0); 402 #if OPENBSD_BUSDMA 403 ldc_map_free(sc->sc_dmatag, sc->sc_lm); 404 #else 405 ldc_map_free(sc->sc_lm); 406 #endif 407 } 408 409 void 410 vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, 411 void *arg) 412 { 413 414 struct vdsk_softc *sc; 415 struct scsipi_xfer *xs; 416 417 sc = device_private(chan->chan_adapter->adapt_dev); 418 419 xs = arg; 420 421 switch (req) { 422 case ADAPTER_REQ_RUN_XFER: 423 vdsk_scsi_cmd(sc, xs); 424 break; 425 case ADAPTER_REQ_GROW_RESOURCES: 426 case ADAPTER_REQ_SET_XFER_MODE: 427 /* Ignored */ 428 break; 429 default: 430 panic("req unhandled: %x", req); 431 } 432 433 } 434 435 int 436 vdsk_tx_intr(void *arg) 437 { 438 panic("%s: not verified yet", __FUNCTION__); 439 440 struct vdsk_softc *sc = arg; 441 struct ldc_conn *lc = &sc->sc_lc; 442 uint64_t tx_head, tx_tail, tx_state; 443 444 hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); 445 if (tx_state != lc->lc_tx_state) { 446 switch (tx_state) { 447 case LDC_CHANNEL_DOWN: 448 DPRINTF(("Tx link down\n")); 449 break; 450 case LDC_CHANNEL_UP: 451 DPRINTF(("Tx link up\n")); 452 break; 453 case LDC_CHANNEL_RESET: 454 DPRINTF(("Tx link reset\n")); 455 break; 456 } 457 lc->lc_tx_state = tx_state; 458 } 459 460 return (1); 461 } 462 463 int 464 vdsk_rx_intr(void *arg) 465 { 466 struct vdsk_softc *sc = arg; 467 struct ldc_conn *lc = &sc->sc_lc; 468 uint64_t rx_head, rx_tail, rx_state; 469 struct ldc_pkt *lp; 470 int err; 471 472 err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state); 473 if (err == H_EINVAL) { 474 printf("hv_ldc_rx_get_state H_EINVAL\n"); 475 return (0); 476 } 477 if (err != H_EOK) { 478 printf("hv_ldc_rx_get_state %d\n", err); 479 return (0); 480 } 481 482 if (rx_state != lc->lc_rx_state) { 483 sc->sc_vio_state = 0; 484 lc->lc_tx_seqid = 0; 485 lc->lc_state = 0; 486 switch (rx_state) { 487 case LDC_CHANNEL_DOWN: 488 DPRINTF(("Rx link down\n")); 489 break; 490 case LDC_CHANNEL_UP: 491 DPRINTF(("Rx link up\n")); 492 ldc_send_vers(lc); 493 break; 494 case LDC_CHANNEL_RESET: 495 DPRINTF(("Rx link reset\n")); 496 ldc_send_vers(lc); 497 break; 498 } 499 lc->lc_rx_state = rx_state; 500 hv_ldc_rx_set_qhead(lc->lc_id, rx_tail); 501 return (1); 502 } 503 504 if (rx_head == rx_tail) 505 return (0); 506 507 lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head); 508 switch (lp->type) { 509 case LDC_CTRL: 510 ldc_rx_ctrl(lc, lp); 511 break; 512 513 case LDC_DATA: 514 ldc_rx_data(lc, lp); 515 break; 516 517 default: 518 DPRINTF(("%0x02/%0x02/%0x02\n", lp->type, lp->stype, 519 lp->ctrl)); 520 ldc_reset(lc); 521 break; 522 } 523 524 if (lc->lc_state == 0) 525 return (1); 526 527 rx_head += sizeof(*lp); 528 rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1); 529 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head); 530 if (err != H_EOK) 531 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err); 532 533 return (1); 534 } 535 536 void 537 vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp) 538 { 539 struct vio_msg *vm = (struct vio_msg *)lp; 540 541 switch (vm->type) { 542 case VIO_TYPE_CTRL: 543 if ((lp->env & LDC_FRAG_START) == 0 && 544 (lp->env & LDC_FRAG_STOP) == 0) 545 return; 546 vdsk_rx_vio_ctrl(lc->lc_sc, vm); 547 break; 548 549 case VIO_TYPE_DATA: 550 if((lp->env & LDC_FRAG_START) == 0) 551 return; 552 vdsk_rx_vio_data(lc->lc_sc, vm); 553 break; 554 555 default: 556 DPRINTF(("Unhandled packet type 0x%02x\n", vm->type)); 557 ldc_reset(lc); 558 break; 559 } 560 } 561 562 void 563 vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm) 564 { 565 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 566 567 switch (tag->stype_env) { 568 case VIO_VER_INFO: 569 vdsk_rx_vio_ver_info(sc, tag); 570 break; 571 case VIO_ATTR_INFO: 572 vdsk_rx_vio_attr_info(sc, tag); 573 break; 574 case VIO_DRING_REG: 575 vdsk_rx_vio_dring_reg(sc, tag); 576 break; 577 case VIO_RDX: 578 vdsk_rx_vio_rdx(sc, tag); 579 break; 580 default: 581 DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 582 break; 583 } 584 } 585 586 void 587 vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag) 588 { 589 struct vio_ver_info *vi = (struct vio_ver_info *)tag; 590 591 switch (vi->tag.stype) { 592 case VIO_SUBTYPE_INFO: 593 DPRINTF(("CTRL/INFO/VER_INFO\n")); 594 break; 595 596 case VIO_SUBTYPE_ACK: 597 DPRINTF(("CTRL/ACK/VER_INFO\n")); 598 if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) { 599 ldc_reset(&sc->sc_lc); 600 break; 601 } 602 sc->sc_major = vi->major; 603 sc->sc_minor = vi->minor; 604 sc->sc_vio_state |= VIO_ACK_VER_INFO; 605 break; 606 607 default: 608 DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype)); 609 break; 610 } 611 612 if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO)) 613 vdsk_send_attr_info(sc); 614 } 615 616 void 617 vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag) 618 { 619 struct vd_attr_info *ai = (struct vd_attr_info *)tag; 620 621 switch (ai->tag.stype) { 622 case VIO_SUBTYPE_INFO: 623 DPRINTF(("CTRL/INFO/ATTR_INFO\n")); 624 break; 625 626 case VIO_SUBTYPE_ACK: 627 DPRINTF(("CTRL/ACK/ATTR_INFO\n")); 628 if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) { 629 ldc_reset(&sc->sc_lc); 630 break; 631 } 632 633 sc->sc_vdisk_block_size = ai->vdisk_block_size; 634 sc->sc_vdisk_size = ai->vdisk_size; 635 if (sc->sc_major > 1 || sc->sc_minor >= 1) 636 sc->sc_vd_mtype = ai->vd_mtype; 637 else 638 sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED; 639 640 sc->sc_vio_state |= VIO_ACK_ATTR_INFO; 641 break; 642 643 default: 644 DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype)); 645 break; 646 } 647 648 if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO)) 649 vdsk_send_dring_reg(sc); 650 651 } 652 653 void 654 vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag) 655 { 656 struct vio_dring_reg *dr = (struct vio_dring_reg *)tag; 657 658 switch (dr->tag.stype) { 659 case VIO_SUBTYPE_INFO: 660 DPRINTF(("CTRL/INFO/DRING_REG\n")); 661 break; 662 663 case VIO_SUBTYPE_ACK: 664 DPRINTF(("CTRL/ACK/DRING_REG\n")); 665 if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) { 666 ldc_reset(&sc->sc_lc); 667 break; 668 } 669 670 sc->sc_dring_ident = dr->dring_ident; 671 sc->sc_seq_no = 1; 672 673 sc->sc_vio_state |= VIO_ACK_DRING_REG; 674 break; 675 676 default: 677 DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype)); 678 break; 679 } 680 681 if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG)) 682 vdsk_send_rdx(sc); 683 } 684 685 void 686 vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag) 687 { 688 switch(tag->stype) { 689 case VIO_SUBTYPE_INFO: 690 DPRINTF(("CTRL/INFO/RDX\n")); 691 break; 692 693 case VIO_SUBTYPE_ACK: 694 { 695 int prod; 696 697 DPRINTF(("CTRL/ACK/RDX\n")); 698 if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) { 699 ldc_reset(&sc->sc_lc); 700 break; 701 } 702 sc->sc_vio_state |= VIO_ACK_RDX; 703 704 /* 705 * If this ACK is the result of a reconnect, we may 706 * have pending I/O that we need to resubmit. We need 707 * to rebuild the ring descriptors though since the 708 * vDisk server on the other side may have touched 709 * them already. So we just clean up the ring and the 710 * LDC map and resubmit the SCSI commands based on our 711 * soft descriptors. 712 */ 713 prod = sc->sc_tx_prod; 714 sc->sc_tx_prod = sc->sc_tx_cons; 715 sc->sc_tx_cnt = 0; 716 sc->sc_lm->lm_next = 1; 717 sc->sc_lm->lm_count = 1; 718 while (sc->sc_tx_prod != prod) 719 vdsk_submit_cmd(sc, sc->sc_vsd[sc->sc_tx_prod].vsd_xs); 720 break; 721 } 722 723 default: 724 DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype)); 725 break; 726 } 727 } 728 729 void 730 vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm) 731 { 732 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 733 734 if (sc->sc_vio_state != VIO_ESTABLISHED) { 735 DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype, 736 tag->stype_env)); 737 return; 738 } 739 740 switch(tag->stype_env) { 741 case VIO_DRING_DATA: 742 vdsk_rx_vio_dring_data(sc, tag); 743 break; 744 745 default: 746 DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 747 break; 748 } 749 } 750 751 void 752 vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag) 753 { 754 switch(tag->stype) { 755 case VIO_SUBTYPE_INFO: 756 DPRINTF(("DATA/INFO/DRING_DATA\n")); 757 break; 758 759 case VIO_SUBTYPE_ACK: 760 { 761 struct scsipi_xfer *xs; 762 int cons; 763 764 cons = sc->sc_tx_cons; 765 while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) { 766 xs = sc->sc_vsd[cons].vsd_xs; 767 if (ISSET(xs->xs_control, XS_CTL_POLL) == 0) 768 vdsk_complete_cmd(sc, xs, cons); 769 cons++; 770 cons &= (sc->sc_vd->vd_nentries - 1); 771 } 772 sc->sc_tx_cons = cons; 773 break; 774 } 775 776 case VIO_SUBTYPE_NACK: 777 DPRINTF(("DATA/NACK/DRING_DATA\n")); 778 struct ldc_conn *lc = &sc->sc_lc; 779 ldc_send_vers(lc); 780 break; 781 782 default: 783 DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype)); 784 break; 785 } 786 } 787 788 void 789 vdsk_ldc_reset(struct ldc_conn *lc) 790 { 791 792 struct vdsk_softc *sc = lc->lc_sc; 793 794 sc->sc_vio_state = 0; 795 } 796 797 void 798 vdsk_ldc_start(struct ldc_conn *lc) 799 { 800 801 struct vdsk_softc *sc = lc->lc_sc; 802 803 vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR); 804 } 805 806 void 807 vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len) 808 { 809 810 struct ldc_conn *lc = &sc->sc_lc; 811 int err; 812 813 err = ldc_send_unreliable(lc, msg, len); 814 if (err) 815 printf("%s: ldc_send_unreliable: %d\n", __func__, err); 816 } 817 818 void 819 vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor) 820 { 821 822 struct vio_ver_info vi; 823 824 /* Allocate new session ID. */ 825 sc->sc_local_sid = gettick(); 826 827 bzero(&vi, sizeof(vi)); 828 vi.tag.type = VIO_TYPE_CTRL; 829 vi.tag.stype = VIO_SUBTYPE_INFO; 830 vi.tag.stype_env = VIO_VER_INFO; 831 vi.tag.sid = sc->sc_local_sid; 832 vi.major = major; 833 vi.minor = minor; 834 vi.dev_class = VDEV_DISK; 835 vdsk_sendmsg(sc, &vi, sizeof(vi)); 836 837 sc->sc_vio_state |= VIO_SND_VER_INFO; 838 } 839 840 void 841 vdsk_send_attr_info(struct vdsk_softc *sc) 842 { 843 struct vd_attr_info ai; 844 845 bzero(&ai, sizeof(ai)); 846 ai.tag.type = VIO_TYPE_CTRL; 847 ai.tag.stype = VIO_SUBTYPE_INFO; 848 ai.tag.stype_env = VIO_ATTR_INFO; 849 ai.tag.sid = sc->sc_local_sid; 850 ai.xfer_mode = VIO_DRING_MODE; 851 ai.vdisk_block_size = DEV_BSIZE; 852 ai.max_xfer_sz = MAXPHYS / DEV_BSIZE; 853 vdsk_sendmsg(sc, &ai, sizeof(ai)); 854 855 sc->sc_vio_state |= VIO_SND_ATTR_INFO; 856 } 857 858 void 859 vdsk_send_dring_reg(struct vdsk_softc *sc) 860 { 861 struct vio_dring_reg dr; 862 863 bzero(&dr, sizeof(dr)); 864 dr.tag.type = VIO_TYPE_CTRL; 865 dr.tag.stype = VIO_SUBTYPE_INFO; 866 dr.tag.stype_env = VIO_DRING_REG; 867 dr.tag.sid = sc->sc_local_sid; 868 dr.dring_ident = 0; 869 dr.num_descriptors = sc->sc_vd->vd_nentries; 870 dr.descriptor_size = sizeof(struct vd_desc); 871 dr.options = VIO_TX_RING | VIO_RX_RING; 872 dr.ncookies = 1; 873 dr.cookie[0].addr = 0; 874 dr.cookie[0].size = PAGE_SIZE; 875 vdsk_sendmsg(sc, &dr, sizeof(dr)); 876 877 sc->sc_vio_state |= VIO_SND_DRING_REG; 878 }; 879 880 void 881 vdsk_send_rdx(struct vdsk_softc *sc) 882 { 883 struct vio_rdx rdx; 884 885 bzero(&rdx, sizeof(rdx)); 886 rdx.tag.type = VIO_TYPE_CTRL; 887 rdx.tag.stype = VIO_SUBTYPE_INFO; 888 rdx.tag.stype_env = VIO_RDX; 889 rdx.tag.sid = sc->sc_local_sid; 890 vdsk_sendmsg(sc, &rdx, sizeof(rdx)); 891 892 sc->sc_vio_state |= VIO_SND_RDX; 893 } 894 895 #if OPENBSD_BUSDMA 896 struct vdsk_dring * 897 vdsk_dring_alloc(bus_dma_tag_t t, int nentries) 898 #else 899 struct vdsk_dring * 900 vdsk_dring_alloc(int nentries) 901 #endif 902 { 903 904 struct vdsk_dring *vd; 905 bus_size_t size; 906 vaddr_t va; 907 #if OPENBSD_BUSDMA 908 int nsegs; 909 #endif 910 int i; 911 912 vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_SLEEP); 913 914 size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE); 915 916 #if OPENBSD_BUSDMA 917 if (bus_dmamap_create(t, size, 1, size, 0, 918 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0) 919 return (NULL); 920 921 if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1, 922 &nsegs, BUS_DMA_NOWAIT) != 0) 923 goto destroy; 924 925 if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void*)&va, 926 BUS_DMA_NOWAIT) != 0) 927 goto free; 928 929 if (bus_dmamap_load(t, vd->vd_map, (void*)va, size, NULL, 930 BUS_DMA_NOWAIT) != 0) 931 goto unmap; 932 #else 933 va = (vaddr_t)kmem_zalloc(size, KM_SLEEP); 934 #endif 935 vd->vd_desc = (struct vd_desc *)va; 936 vd->vd_nentries = nentries; 937 bzero(vd->vd_desc, nentries * sizeof(struct vd_desc)); 938 for (i = 0; i < vd->vd_nentries; i++) 939 vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE; 940 return (vd); 941 942 #if OPENBSD_BUSDMA 943 unmap: 944 bus_dmamem_unmap(t, (void*)va, size); 945 free: 946 bus_dmamem_free(t, &vd->vd_seg, 1); 947 destroy: 948 bus_dmamap_destroy(t, vd->vd_map); 949 #endif 950 return (NULL); 951 } 952 953 #if OPENBSD_BUSDMA 954 void 955 vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd) 956 #else 957 void 958 vdsk_dring_free(struct vdsk_dring *vd) 959 #endif 960 { 961 962 bus_size_t size; 963 964 size = vd->vd_nentries * sizeof(struct vd_desc); 965 size = roundup(size, PAGE_SIZE); 966 967 #if OPENBSD_BUSDMA 968 bus_dmamap_unload(t, vd->vd_map); 969 970 bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size); 971 bus_dmamem_free(t, &vd->vd_seg, 1); 972 bus_dmamap_destroy(t, vd->vd_map); 973 #else 974 kmem_free(vd->vd_desc, size); 975 #endif 976 kmem_free(vd, size); 977 } 978 979 void * 980 vdsk_io_get(void *xsc) 981 { 982 983 panic("%s: not verified yet", __FUNCTION__); 984 985 struct vdsk_softc *sc = xsc; 986 void *rv = sc; /* just has to be !NULL */ 987 int s; 988 989 s = splbio(); 990 if (sc->sc_vio_state != VIO_ESTABLISHED || 991 sc->sc_tx_cnt >= sc->sc_vd->vd_nentries) 992 rv = NULL; 993 else 994 sc->sc_tx_cnt++; 995 splx(s); 996 997 return (rv); 998 } 999 1000 void 1001 vdsk_io_put(void *xsc, void *io) 1002 { 1003 1004 panic("%s: not verified yet", __FUNCTION__); 1005 1006 struct vdsk_softc *sc = xsc; 1007 int s; 1008 1009 #ifdef DIAGNOSTIC 1010 if (sc != io) 1011 panic("vsdk_io_put: unexpected io"); 1012 #endif 1013 1014 s = splbio(); 1015 sc->sc_tx_cnt--; 1016 splx(s); 1017 } 1018 1019 void 1020 vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1021 { 1022 int timeout, s; 1023 int desc; 1024 1025 DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode)); 1026 1027 switch (xs->cmd->opcode) { 1028 1029 case SCSI_READ_6_COMMAND: 1030 case READ_10: 1031 case READ_12: 1032 case READ_16: 1033 case SCSI_WRITE_6_COMMAND: 1034 case WRITE_10: 1035 case WRITE_12: 1036 case WRITE_16: 1037 case SCSI_SYNCHRONIZE_CACHE_10: 1038 break; 1039 1040 case INQUIRY: 1041 vdsk_scsi_inq(sc, xs); 1042 return; 1043 1044 case READ_CAPACITY_10: 1045 vdsk_scsi_capacity(sc, xs); 1046 return; 1047 1048 case READ_CAPACITY_16: 1049 vdsk_scsi_capacity16(sc, xs); 1050 return; 1051 1052 case SCSI_REPORT_LUNS: 1053 vdsk_scsi_report_luns(sc, xs); 1054 return; 1055 1056 case SCSI_TEST_UNIT_READY: 1057 case START_STOP: 1058 case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL: 1059 case SCSI_MODE_SENSE_6: 1060 case SCSI_MAINTENANCE_IN: 1061 vdsk_scsi_done(xs, XS_NOERROR); 1062 return; 1063 1064 case SCSI_MODE_SENSE_10: 1065 case READ_TOC: 1066 vdsk_scsi_done(xs, XS_DRIVER_STUFFUP); 1067 return; 1068 1069 default: 1070 panic("%s unhandled cmd 0x%02x\n", 1071 __func__, xs->cmd->opcode); 1072 } 1073 1074 s = splbio(); 1075 desc = vdsk_submit_cmd(sc, xs); 1076 1077 if (!ISSET(xs->xs_control, XS_CTL_POLL)) { 1078 splx(s); 1079 return; 1080 } 1081 timeout = 1000; 1082 do { 1083 if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) 1084 break; 1085 1086 delay(1000); 1087 } while(--timeout > 0); 1088 if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) { 1089 vdsk_complete_cmd(sc, xs, desc); 1090 } else { 1091 ldc_reset(&sc->sc_lc); 1092 vdsk_scsi_done(xs, XS_TIMEOUT); 1093 } 1094 splx(s); 1095 } 1096 1097 int 1098 vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1099 { 1100 struct ldc_map *map = sc->sc_lm; 1101 struct vio_dring_msg dm; 1102 struct scsi_rw_6 *rw6; 1103 struct scsipi_rw_10 *rw10; 1104 struct scsipi_rw_12 *rw12; 1105 struct scsipi_rw_16 *rw16; 1106 u_int64_t lba = 0; 1107 uint8_t operation; 1108 vaddr_t va; 1109 paddr_t pa; 1110 psize_t nbytes; 1111 int len, ncookies; 1112 int desc; 1113 1114 switch (xs->cmd->opcode) { 1115 1116 case SCSI_READ_6_COMMAND: 1117 case READ_10: 1118 case READ_12: 1119 case READ_16: 1120 operation = VD_OP_BREAD; 1121 break; 1122 1123 case SCSI_WRITE_6_COMMAND: 1124 case WRITE_10: 1125 case WRITE_12: 1126 case WRITE_16: 1127 operation = VD_OP_BWRITE; 1128 break; 1129 1130 case SCSI_SYNCHRONIZE_CACHE_10: 1131 operation = VD_OP_FLUSH; 1132 break; 1133 1134 default: 1135 panic("%s unhandled cmd opcode 0x%x", 1136 __func__, xs->cmd->opcode); 1137 } 1138 1139 /* 1140 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same 1141 * layout as 10-byte READ/WRITE commands. 1142 */ 1143 if (xs->cmdlen == 6) { 1144 rw6 = (struct scsi_rw_6 *)xs->cmd; 1145 lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff); 1146 } else if (xs->cmdlen == 10) { 1147 rw10 = (struct scsipi_rw_10 *)xs->cmd; 1148 lba = _4btol(rw10->addr); 1149 } else if (xs->cmdlen == 12) { 1150 rw12 = (struct scsipi_rw_12 *)xs->cmd; 1151 lba = _4btol(rw12->addr); 1152 } else if (xs->cmdlen == 16) { 1153 rw16 = (struct scsipi_rw_16 *)xs->cmd; 1154 lba = _8btol(rw16->addr); 1155 } 1156 1157 DPRINTF(("lba = %lu\n", lba)); 1158 1159 desc = sc->sc_tx_prod; 1160 ncookies = 0; 1161 len = xs->datalen; 1162 va = (vaddr_t)xs->data; 1163 while (len > 0) { 1164 DPRINTF(("len = %u\n", len)); 1165 KASSERT(ncookies < MAXPHYS / PAGE_SIZE); 1166 pa = 0; 1167 pmap_extract(pmap_kernel(), va, &pa); 1168 while (map->lm_slot[map->lm_next].entry != 0) { 1169 map->lm_next++; 1170 map->lm_next &= (map->lm_nentries - 1); 1171 } 1172 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK); 1173 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW; 1174 map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW; 1175 map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W; 1176 map->lm_count++; 1177 1178 nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK)); 1179 1180 sc->sc_vd->vd_desc[desc].cookie[ncookies].addr = 1181 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK); 1182 sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes; 1183 1184 sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next; 1185 va += nbytes; 1186 len -= nbytes; 1187 ncookies++; 1188 } 1189 if (ISSET(xs->xs_control, XS_CTL_POLL) == 0) 1190 sc->sc_vd->vd_desc[desc].hdr.ack = 1; 1191 else 1192 sc->sc_vd->vd_desc[desc].hdr.ack = 0; 1193 sc->sc_vd->vd_desc[desc].operation = operation; 1194 sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE; 1195 sc->sc_vd->vd_desc[desc].status = 0xffffffff; 1196 sc->sc_vd->vd_desc[desc].offset = lba; 1197 sc->sc_vd->vd_desc[desc].size = xs->datalen; 1198 sc->sc_vd->vd_desc[desc].ncookies = ncookies; 1199 1200 membar_Sync(); 1201 1202 sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY; 1203 1204 sc->sc_vsd[desc].vsd_xs = xs; 1205 sc->sc_vsd[desc].vsd_ncookies = ncookies; 1206 1207 sc->sc_tx_prod++; 1208 sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1); 1209 1210 bzero(&dm, sizeof(dm)); 1211 dm.tag.type = VIO_TYPE_DATA; 1212 dm.tag.stype = VIO_SUBTYPE_INFO; 1213 dm.tag.stype_env = VIO_DRING_DATA; 1214 dm.tag.sid = sc->sc_local_sid; 1215 dm.seq_no = sc->sc_seq_no++; 1216 dm.dring_ident = sc->sc_dring_ident; 1217 dm.start_idx = dm.end_idx = desc; 1218 vdsk_sendmsg(sc, &dm, sizeof(dm)); 1219 1220 return desc; 1221 } 1222 1223 void 1224 vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc) 1225 { 1226 struct ldc_map *map = sc->sc_lm; 1227 int cookie, idx; 1228 int error; 1229 1230 cookie = 0; 1231 while (cookie < sc->sc_vsd[desc].vsd_ncookies) { 1232 idx = sc->sc_vsd[desc].vsd_map_idx[cookie++]; 1233 map->lm_slot[idx].entry = 0; 1234 map->lm_count--; 1235 } 1236 1237 error = XS_NOERROR; 1238 if (sc->sc_vd->vd_desc[desc].status != 0) 1239 error = XS_DRIVER_STUFFUP; 1240 xs->resid = xs->datalen - 1241 sc->sc_vd->vd_desc[desc].size; 1242 1243 /* 1244 * scsi_done() called by vdsk_scsi_done() requires 1245 * the kernel to be locked 1246 */ 1247 KERNEL_LOCK(1, curlwp); 1248 vdsk_scsi_done(xs, error); 1249 KERNEL_UNLOCK_ONE(curlwp); 1250 1251 sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE; 1252 1253 } 1254 1255 void 1256 vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1257 { 1258 1259 vdsk_scsi_inquiry(sc, xs); 1260 } 1261 1262 void 1263 vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1264 { 1265 1266 struct scsipi_inquiry_data inq; 1267 char buf[5]; 1268 1269 bzero(&inq, sizeof(inq)); 1270 1271 switch (sc->sc_vd_mtype) { 1272 case VD_MEDIA_TYPE_CD: 1273 case VD_MEDIA_TYPE_DVD: 1274 inq.device = T_CDROM; 1275 break; 1276 1277 case VD_MEDIA_TYPE_FIXED: 1278 default: 1279 inq.device = T_DIRECT; 1280 break; 1281 } 1282 1283 inq.version = 0x05; /* SPC-3 */ 1284 inq.response_format = 2; 1285 inq.additional_length = 32; 1286 inq.flags3 |= SID_CmdQue; 1287 bcopy("SUN ", inq.vendor, sizeof(inq.vendor)); 1288 bcopy("Virtual Disk ", inq.product, sizeof(inq.product)); 1289 snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor); 1290 bcopy(buf, inq.revision, sizeof(inq.revision)); 1291 1292 bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen)); 1293 1294 vdsk_scsi_done(xs, XS_NOERROR); 1295 } 1296 1297 void 1298 vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1299 { 1300 1301 struct scsipi_read_capacity_10_data rcd; 1302 uint64_t capacity; 1303 1304 bzero(&rcd, sizeof(rcd)); 1305 1306 capacity = sc->sc_vdisk_size - 1; 1307 if (capacity > 0xffffffff) 1308 capacity = 0xffffffff; 1309 1310 _lto4b(capacity, rcd.addr); 1311 _lto4b(sc->sc_vdisk_block_size, rcd.length); 1312 1313 DPRINTF(("%s() capacity %lu block size %u\n", 1314 __FUNCTION__, capacity, sc->sc_vdisk_block_size)); 1315 1316 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen)); 1317 1318 vdsk_scsi_done(xs, XS_NOERROR); 1319 } 1320 1321 void 1322 vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1323 { 1324 1325 struct scsipi_read_capacity_16_data rcd; 1326 uint64_t capacity; 1327 1328 bzero(&rcd, sizeof(rcd)); 1329 1330 capacity = sc->sc_vdisk_size - 1; 1331 1332 _lto8b(capacity, rcd.addr); 1333 _lto4b(sc->sc_vdisk_block_size, rcd.length); 1334 1335 DPRINTF(("%s() capacity %lu block size %u\n", 1336 __FUNCTION__, capacity, sc->sc_vdisk_block_size)); 1337 1338 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen)); 1339 1340 vdsk_scsi_done(xs, XS_NOERROR); 1341 } 1342 1343 void 1344 vdsk_scsi_report_luns(struct vdsk_softc *sc, struct scsipi_xfer *xs) 1345 { 1346 vdsk_scsi_done(xs, XS_NOERROR); 1347 } 1348 1349 void 1350 vdsk_scsi_done(struct scsipi_xfer *xs, int error) 1351 { 1352 1353 xs->error = error; 1354 1355 scsipi_done(xs); 1356 } 1357