1 /* $OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $ */ 2 /* 3 * Copyright (c) 2009, 2011 Mark Kettenis 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include <sys/param.h> 19 #include <sys/buf.h> 20 #include <sys/device.h> 21 #include <sys/malloc.h> 22 #include <sys/systm.h> 23 24 #include <machine/autoconf.h> 25 #include <machine/hypervisor.h> 26 27 #include <uvm/uvm_extern.h> 28 29 #include <scsi/scsi_all.h> 30 #include <scsi/cd.h> 31 #include <scsi/scsi_disk.h> 32 #include <scsi/scsiconf.h> 33 34 #include <sparc64/dev/cbusvar.h> 35 #include <sparc64/dev/ldcvar.h> 36 #include <sparc64/dev/viovar.h> 37 38 #ifdef VDSK_DEBUG 39 #define DPRINTF(x) printf x 40 #else 41 #define DPRINTF(x) 42 #endif 43 44 #define VDSK_TX_ENTRIES 32 45 #define VDSK_RX_ENTRIES 32 46 47 struct vd_attr_info { 48 struct vio_msg_tag tag; 49 uint8_t xfer_mode; 50 uint8_t vd_type; 51 uint8_t vd_mtype; 52 uint8_t _reserved1; 53 uint32_t vdisk_block_size; 54 uint64_t operations; 55 uint64_t vdisk_size; 56 uint64_t max_xfer_sz; 57 uint64_t _reserved2[2]; 58 }; 59 60 #define VD_DISK_TYPE_SLICE 0x01 61 #define VD_DISK_TYPE_DISK 0x02 62 63 #define VD_MEDIA_TYPE_FIXED 0x01 64 #define VD_MEDIA_TYPE_CD 0x02 65 #define VD_MEDIA_TYPE_DVD 0x03 66 67 /* vDisk version 1.0. */ 68 #define VD_OP_BREAD 0x01 69 #define VD_OP_BWRITE 0x02 70 #define VD_OP_FLUSH 0x03 71 #define VD_OP_GET_WCE 0x04 72 #define VD_OP_SET_WCE 0x05 73 #define VD_OP_GET_VTOC 0x06 74 #define VD_OP_SET_VTOC 0x07 75 #define VD_OP_GET_DISKGEOM 0x08 76 #define VD_OP_SET_DISKGEOM 0x09 77 #define VD_OP_GET_DEVID 0x0b 78 #define VD_OP_GET_EFI 0x0c 79 #define VD_OP_SET_EFI 0x0d 80 81 /* vDisk version 1.1 */ 82 #define VD_OP_SCSICMD 0x0a 83 #define VD_OP_RESET 0x0e 84 #define VD_OP_GET_ACCESS 0x0f 85 #define VD_OP_SET_ACCESS 0x10 86 #define VD_OP_GET_CAPACITY 0x11 87 88 struct vd_desc { 89 struct vio_dring_hdr hdr; 90 uint64_t req_id; 91 uint8_t operation; 92 uint8_t slice; 93 uint16_t _reserved1; 94 uint32_t status; 95 uint64_t offset; 96 uint64_t size; 97 uint32_t ncookies; 98 uint32_t _reserved2; 99 struct ldc_cookie cookie[MAXPHYS / PAGE_SIZE]; 100 }; 101 102 #define VD_SLICE_NONE 0xff 103 104 struct vdsk_dring { 105 bus_dmamap_t vd_map; 106 bus_dma_segment_t vd_seg; 107 struct vd_desc *vd_desc; 108 int vd_nentries; 109 }; 110 111 struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int); 112 void vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *); 113 114 /* 115 * We support vDisk 1.0 and 1.1. 116 */ 117 #define VDSK_MAJOR 1 118 #define VDSK_MINOR 1 119 120 struct vdsk_soft_desc { 121 int vsd_map_idx[MAXPHYS / PAGE_SIZE]; 122 struct scsi_xfer *vsd_xs; 123 int vsd_ncookies; 124 }; 125 126 struct vdsk_softc { 127 struct device sc_dv; 128 bus_space_tag_t sc_bustag; 129 bus_dma_tag_t sc_dmatag; 130 131 void *sc_tx_ih; 132 void *sc_rx_ih; 133 134 struct ldc_conn sc_lc; 135 136 uint16_t sc_vio_state; 137 #define VIO_SND_VER_INFO 0x0001 138 #define VIO_ACK_VER_INFO 0x0002 139 #define VIO_SND_ATTR_INFO 0x0004 140 #define VIO_ACK_ATTR_INFO 0x0008 141 #define VIO_SND_DRING_REG 0x0010 142 #define VIO_ACK_DRING_REG 0x0020 143 #define VIO_SND_RDX 0x0040 144 #define VIO_ACK_RDX 0x0080 145 #define VIO_ESTABLISHED 0x00ff 146 147 uint16_t sc_major; 148 uint16_t sc_minor; 149 150 uint32_t sc_local_sid; 151 uint64_t sc_dring_ident; 152 uint64_t sc_seq_no; 153 154 int sc_tx_cnt; 155 int sc_tx_prod; 156 int sc_tx_cons; 157 158 struct ldc_map *sc_lm; 159 struct vdsk_dring *sc_vd; 160 struct vdsk_soft_desc *sc_vsd; 161 162 struct scsi_iopool sc_iopool; 163 struct scsi_adapter sc_switch; 164 struct scsi_link sc_link; 165 166 uint32_t sc_vdisk_block_size; 167 uint64_t sc_vdisk_size; 168 uint8_t sc_vd_mtype; 169 }; 170 171 int vdsk_match(struct device *, void *, void *); 172 void vdsk_attach(struct device *, struct device *, void *); 173 174 struct cfattach vdsk_ca = { 175 sizeof(struct vdsk_softc), vdsk_match, vdsk_attach 176 }; 177 178 struct cfdriver vdsk_cd = { 179 NULL, "vdsk", DV_DULL 180 }; 181 182 int vdsk_tx_intr(void *); 183 int vdsk_rx_intr(void *); 184 185 void vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *); 186 void vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *); 187 void vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *); 188 void vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *); 189 void vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *); 190 void vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *); 191 void vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *); 192 void vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *); 193 194 void vdsk_ldc_reset(struct ldc_conn *); 195 void vdsk_ldc_start(struct ldc_conn *); 196 197 void vdsk_sendmsg(struct vdsk_softc *, void *, size_t); 198 void vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t); 199 void vdsk_send_attr_info(struct vdsk_softc *); 200 void vdsk_send_dring_reg(struct vdsk_softc *); 201 void vdsk_send_rdx(struct vdsk_softc *); 202 203 void *vdsk_io_get(void *); 204 void vdsk_io_put(void *, void *); 205 206 void vdsk_scsi_cmd(struct scsi_xfer *); 207 int vdsk_submit_cmd(struct scsi_xfer *); 208 void vdsk_complete_cmd(struct scsi_xfer *, int); 209 int vdsk_dev_probe(struct scsi_link *); 210 void vdsk_dev_free(struct scsi_link *); 211 212 void vdsk_scsi_inq(struct scsi_xfer *); 213 void vdsk_scsi_inquiry(struct scsi_xfer *); 214 void vdsk_scsi_capacity(struct scsi_xfer *); 215 void vdsk_scsi_capacity16(struct scsi_xfer *); 216 void vdsk_scsi_done(struct scsi_xfer *, int); 217 218 int 219 vdsk_match(struct device *parent, void *match, void *aux) 220 { 221 struct cbus_attach_args *ca = aux; 222 223 if (strcmp(ca->ca_name, "disk") == 0) 224 return (1); 225 226 return (0); 227 } 228 229 void 230 vdsk_attach(struct device *parent, struct device *self, void *aux) 231 { 232 struct vdsk_softc *sc = (struct vdsk_softc *)self; 233 struct cbus_attach_args *ca = aux; 234 struct scsibus_attach_args saa; 235 struct ldc_conn *lc; 236 int err, s; 237 int timeout; 238 239 sc->sc_bustag = ca->ca_bustag; 240 sc->sc_dmatag = ca->ca_dmatag; 241 242 printf(": ivec 0x%llx, 0x%llx", ca->ca_tx_ino, ca->ca_rx_ino); 243 244 /* 245 * Un-configure queues before registering interrupt handlers, 246 * such that we dont get any stale LDC packets or events. 247 */ 248 hv_ldc_tx_qconf(ca->ca_id, 0, 0); 249 hv_ldc_rx_qconf(ca->ca_id, 0, 0); 250 251 sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino, 252 IPL_BIO, 0, vdsk_tx_intr, sc, sc->sc_dv.dv_xname); 253 sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino, 254 IPL_BIO, 0, vdsk_rx_intr, sc, sc->sc_dv.dv_xname); 255 if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) { 256 printf(", can't establish interrupt\n"); 257 return; 258 } 259 260 lc = &sc->sc_lc; 261 lc->lc_id = ca->ca_id; 262 lc->lc_sc = sc; 263 lc->lc_reset = vdsk_ldc_reset; 264 lc->lc_start = vdsk_ldc_start; 265 lc->lc_rx_data = vdsk_rx_data; 266 267 lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES); 268 if (lc->lc_txq == NULL) { 269 printf(", can't allocate tx queue\n"); 270 return; 271 } 272 273 lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES); 274 if (lc->lc_rxq == NULL) { 275 printf(", can't allocate rx queue\n"); 276 goto free_txqueue; 277 } 278 279 sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048); 280 if (sc->sc_lm == NULL) { 281 printf(", can't allocate LDC mapping table\n"); 282 goto free_rxqueue; 283 } 284 285 err = hv_ldc_set_map_table(lc->lc_id, 286 sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries); 287 if (err != H_EOK) { 288 printf("hv_ldc_set_map_table %d\n", err); 289 goto free_map; 290 } 291 292 sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32); 293 if (sc->sc_vd == NULL) { 294 printf(", can't allocate dring\n"); 295 goto free_map; 296 } 297 sc->sc_vsd = malloc(32 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT); 298 if (sc->sc_vsd == NULL) { 299 printf(", can't allocate software ring\n"); 300 goto free_dring; 301 } 302 303 sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr; 304 sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK; 305 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW; 306 sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W; 307 sc->sc_lm->lm_next = 1; 308 sc->sc_lm->lm_count = 1; 309 310 err = hv_ldc_tx_qconf(lc->lc_id, 311 lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries); 312 if (err != H_EOK) 313 printf("hv_ldc_tx_qconf %d\n", err); 314 315 err = hv_ldc_rx_qconf(lc->lc_id, 316 lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries); 317 if (err != H_EOK) 318 printf("hv_ldc_rx_qconf %d\n", err); 319 320 cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED); 321 cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED); 322 323 ldc_send_vers(lc); 324 325 printf("\n"); 326 327 /* 328 * Interrupts aren't enabled during autoconf, so poll for VIO 329 * peer-to-peer hanshake completion. 330 */ 331 s = splbio(); 332 timeout = 1000; 333 do { 334 if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED) 335 break; 336 337 delay(1000); 338 } while(--timeout > 0); 339 splx(s); 340 341 if (sc->sc_vio_state != VIO_ESTABLISHED) 342 return; 343 344 scsi_iopool_init(&sc->sc_iopool, sc, vdsk_io_get, vdsk_io_put); 345 346 sc->sc_switch.scsi_cmd = vdsk_scsi_cmd; 347 sc->sc_switch.scsi_minphys = scsi_minphys; 348 sc->sc_switch.dev_probe = vdsk_dev_probe; 349 sc->sc_switch.dev_free = vdsk_dev_free; 350 351 sc->sc_link.adapter = &sc->sc_switch; 352 sc->sc_link.adapter_softc = self; 353 sc->sc_link.adapter_buswidth = 2; 354 sc->sc_link.luns = 1; /* XXX slices should be presented as luns? */ 355 sc->sc_link.adapter_target = 2; 356 sc->sc_link.openings = sc->sc_vd->vd_nentries - 1; 357 sc->sc_link.pool = &sc->sc_iopool; 358 359 bzero(&saa, sizeof(saa)); 360 saa.saa_sc_link = &sc->sc_link; 361 config_found(self, &saa, scsiprint); 362 363 return; 364 365 free_dring: 366 vdsk_dring_free(sc->sc_dmatag, sc->sc_vd); 367 free_map: 368 hv_ldc_set_map_table(lc->lc_id, 0, 0); 369 ldc_map_free(sc->sc_dmatag, sc->sc_lm); 370 free_rxqueue: 371 ldc_queue_free(sc->sc_dmatag, lc->lc_rxq); 372 free_txqueue: 373 ldc_queue_free(sc->sc_dmatag, lc->lc_txq); 374 } 375 376 int 377 vdsk_tx_intr(void *arg) 378 { 379 struct vdsk_softc *sc = arg; 380 struct ldc_conn *lc = &sc->sc_lc; 381 uint64_t tx_head, tx_tail, tx_state; 382 383 hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state); 384 if (tx_state != lc->lc_tx_state) { 385 switch (tx_state) { 386 case LDC_CHANNEL_DOWN: 387 DPRINTF(("Tx link down\n")); 388 break; 389 case LDC_CHANNEL_UP: 390 DPRINTF(("Tx link up\n")); 391 break; 392 case LDC_CHANNEL_RESET: 393 DPRINTF(("Tx link reset\n")); 394 break; 395 } 396 lc->lc_tx_state = tx_state; 397 } 398 399 return (1); 400 } 401 402 int 403 vdsk_rx_intr(void *arg) 404 { 405 struct vdsk_softc *sc = arg; 406 struct ldc_conn *lc = &sc->sc_lc; 407 uint64_t rx_head, rx_tail, rx_state; 408 struct ldc_pkt *lp; 409 int err; 410 411 err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state); 412 if (err == H_EINVAL) 413 return (0); 414 if (err != H_EOK) { 415 printf("hv_ldc_rx_get_state %d\n", err); 416 return (0); 417 } 418 419 if (rx_state != lc->lc_rx_state) { 420 sc->sc_vio_state = 0; 421 lc->lc_tx_seqid = 0; 422 lc->lc_state = 0; 423 switch (rx_state) { 424 case LDC_CHANNEL_DOWN: 425 DPRINTF(("Rx link down\n")); 426 break; 427 case LDC_CHANNEL_UP: 428 DPRINTF(("Rx link up\n")); 429 ldc_send_vers(lc); 430 break; 431 case LDC_CHANNEL_RESET: 432 DPRINTF(("Rx link reset\n")); 433 break; 434 } 435 lc->lc_rx_state = rx_state; 436 hv_ldc_rx_set_qhead(lc->lc_id, rx_tail); 437 return (1); 438 } 439 440 if (rx_head == rx_tail) 441 return (0); 442 443 lp = (struct ldc_pkt *)(lc->lc_rxq->lq_va + rx_head); 444 switch (lp->type) { 445 case LDC_CTRL: 446 ldc_rx_ctrl(lc, lp); 447 break; 448 449 case LDC_DATA: 450 ldc_rx_data(lc, lp); 451 break; 452 453 default: 454 DPRINTF(("%0x02/%0x02/%0x02\n", lp->type, lp->stype, 455 lp->ctrl)); 456 ldc_reset(lc); 457 break; 458 } 459 460 if (lc->lc_state == 0) 461 return (1); 462 463 rx_head += sizeof(*lp); 464 rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1); 465 err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head); 466 if (err != H_EOK) 467 printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err); 468 469 return (1); 470 } 471 472 void 473 vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp) 474 { 475 struct vio_msg *vm = (struct vio_msg *)lp; 476 477 switch (vm->type) { 478 case VIO_TYPE_CTRL: 479 if ((lp->env & LDC_FRAG_START) == 0 && 480 (lp->env & LDC_FRAG_STOP) == 0) 481 return; 482 vdsk_rx_vio_ctrl(lc->lc_sc, vm); 483 break; 484 485 case VIO_TYPE_DATA: 486 if((lp->env & LDC_FRAG_START) == 0) 487 return; 488 vdsk_rx_vio_data(lc->lc_sc, vm); 489 break; 490 491 default: 492 DPRINTF(("Unhandled packet type 0x%02x\n", vm->type)); 493 ldc_reset(lc); 494 break; 495 } 496 } 497 498 void 499 vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm) 500 { 501 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 502 503 switch (tag->stype_env) { 504 case VIO_VER_INFO: 505 vdsk_rx_vio_ver_info(sc, tag); 506 break; 507 case VIO_ATTR_INFO: 508 vdsk_rx_vio_attr_info(sc, tag); 509 break; 510 case VIO_DRING_REG: 511 vdsk_rx_vio_dring_reg(sc, tag); 512 break; 513 case VIO_RDX: 514 vdsk_rx_vio_rdx(sc, tag); 515 break; 516 default: 517 DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 518 break; 519 } 520 } 521 522 void 523 vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag) 524 { 525 struct vio_ver_info *vi = (struct vio_ver_info *)tag; 526 527 switch (vi->tag.stype) { 528 case VIO_SUBTYPE_INFO: 529 DPRINTF(("CTRL/INFO/VER_INFO\n")); 530 break; 531 532 case VIO_SUBTYPE_ACK: 533 DPRINTF(("CTRL/ACK/VER_INFO\n")); 534 if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) { 535 ldc_reset(&sc->sc_lc); 536 break; 537 } 538 sc->sc_major = vi->major; 539 sc->sc_minor = vi->minor; 540 sc->sc_vio_state |= VIO_ACK_VER_INFO; 541 break; 542 543 default: 544 DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype)); 545 break; 546 } 547 548 if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO)) 549 vdsk_send_attr_info(sc); 550 } 551 552 void 553 vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag) 554 { 555 struct vd_attr_info *ai = (struct vd_attr_info *)tag; 556 557 switch (ai->tag.stype) { 558 case VIO_SUBTYPE_INFO: 559 DPRINTF(("CTRL/INFO/ATTR_INFO\n")); 560 break; 561 562 case VIO_SUBTYPE_ACK: 563 DPRINTF(("CTRL/ACK/ATTR_INFO\n")); 564 if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) { 565 ldc_reset(&sc->sc_lc); 566 break; 567 } 568 569 sc->sc_vdisk_block_size = ai->vdisk_block_size; 570 sc->sc_vdisk_size = ai->vdisk_size; 571 if (sc->sc_major > 1 || sc->sc_minor >= 1) 572 sc->sc_vd_mtype = ai->vd_mtype; 573 else 574 sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED; 575 576 sc->sc_vio_state |= VIO_ACK_ATTR_INFO; 577 break; 578 579 default: 580 DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype)); 581 break; 582 } 583 584 if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO)) 585 vdsk_send_dring_reg(sc); 586 587 } 588 589 void 590 vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag) 591 { 592 struct vio_dring_reg *dr = (struct vio_dring_reg *)tag; 593 594 switch (dr->tag.stype) { 595 case VIO_SUBTYPE_INFO: 596 DPRINTF(("CTRL/INFO/DRING_REG\n")); 597 break; 598 599 case VIO_SUBTYPE_ACK: 600 DPRINTF(("CTRL/ACK/DRING_REG\n")); 601 if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) { 602 ldc_reset(&sc->sc_lc); 603 break; 604 } 605 606 sc->sc_dring_ident = dr->dring_ident; 607 sc->sc_seq_no = 1; 608 609 sc->sc_vio_state |= VIO_ACK_DRING_REG; 610 break; 611 612 default: 613 DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype)); 614 break; 615 } 616 617 if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG)) 618 vdsk_send_rdx(sc); 619 } 620 621 void 622 vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag) 623 { 624 switch(tag->stype) { 625 case VIO_SUBTYPE_INFO: 626 DPRINTF(("CTRL/INFO/RDX\n")); 627 break; 628 629 case VIO_SUBTYPE_ACK: 630 { 631 int prod; 632 633 DPRINTF(("CTRL/ACK/RDX\n")); 634 if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) { 635 ldc_reset(&sc->sc_lc); 636 break; 637 } 638 sc->sc_vio_state |= VIO_ACK_RDX; 639 640 /* 641 * If this ACK is the result of a reconnect, we may 642 * have pending I/O that we need to resubmit. We need 643 * to rebuild the ring descriptors though since the 644 * vDisk server on the other side may have touched 645 * them already. So we just clean up the ring and the 646 * LDC map and resubmit the SCSI commands based on our 647 * soft descriptors. 648 */ 649 prod = sc->sc_tx_prod; 650 sc->sc_tx_prod = sc->sc_tx_cons; 651 sc->sc_tx_cnt = 0; 652 sc->sc_lm->lm_next = 1; 653 sc->sc_lm->lm_count = 1; 654 while (sc->sc_tx_prod != prod) 655 vdsk_submit_cmd(sc->sc_vsd[sc->sc_tx_prod].vsd_xs); 656 657 scsi_iopool_run(&sc->sc_iopool); 658 break; 659 } 660 661 default: 662 DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype)); 663 break; 664 } 665 } 666 667 void 668 vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm) 669 { 670 struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type; 671 672 if (sc->sc_vio_state != VIO_ESTABLISHED) { 673 DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype, 674 tag->stype_env)); 675 return; 676 } 677 678 switch(tag->stype_env) { 679 case VIO_DRING_DATA: 680 vdsk_rx_vio_dring_data(sc, tag); 681 break; 682 683 default: 684 DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env)); 685 break; 686 } 687 } 688 689 void 690 vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag) 691 { 692 switch(tag->stype) { 693 case VIO_SUBTYPE_INFO: 694 DPRINTF(("DATA/INFO/DRING_DATA\n")); 695 break; 696 697 case VIO_SUBTYPE_ACK: 698 { 699 struct scsi_xfer *xs; 700 int cons; 701 702 cons = sc->sc_tx_cons; 703 while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) { 704 xs = sc->sc_vsd[cons].vsd_xs; 705 if (ISSET(xs->flags, SCSI_POLL) == 0) 706 vdsk_complete_cmd(xs, cons); 707 cons++; 708 cons &= (sc->sc_vd->vd_nentries - 1); 709 } 710 sc->sc_tx_cons = cons; 711 break; 712 } 713 714 case VIO_SUBTYPE_NACK: 715 DPRINTF(("DATA/NACK/DRING_DATA\n")); 716 break; 717 718 default: 719 DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype)); 720 break; 721 } 722 } 723 724 void 725 vdsk_ldc_reset(struct ldc_conn *lc) 726 { 727 struct vdsk_softc *sc = lc->lc_sc; 728 729 sc->sc_vio_state = 0; 730 } 731 732 void 733 vdsk_ldc_start(struct ldc_conn *lc) 734 { 735 struct vdsk_softc *sc = lc->lc_sc; 736 737 vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR); 738 } 739 740 void 741 vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len) 742 { 743 struct ldc_conn *lc = &sc->sc_lc; 744 int err; 745 746 err = ldc_send_unreliable(lc, msg, len); 747 if (err) 748 printf("%s: ldc_send_unreliable: %d\n", __func__, err); 749 } 750 751 void 752 vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor) 753 { 754 struct vio_ver_info vi; 755 756 /* Allocate new session ID. */ 757 sc->sc_local_sid = tick(); 758 759 bzero(&vi, sizeof(vi)); 760 vi.tag.type = VIO_TYPE_CTRL; 761 vi.tag.stype = VIO_SUBTYPE_INFO; 762 vi.tag.stype_env = VIO_VER_INFO; 763 vi.tag.sid = sc->sc_local_sid; 764 vi.major = major; 765 vi.minor = minor; 766 vi.dev_class = VDEV_DISK; 767 vdsk_sendmsg(sc, &vi, sizeof(vi)); 768 769 sc->sc_vio_state |= VIO_SND_VER_INFO; 770 } 771 772 void 773 vdsk_send_attr_info(struct vdsk_softc *sc) 774 { 775 struct vd_attr_info ai; 776 777 bzero(&ai, sizeof(ai)); 778 ai.tag.type = VIO_TYPE_CTRL; 779 ai.tag.stype = VIO_SUBTYPE_INFO; 780 ai.tag.stype_env = VIO_ATTR_INFO; 781 ai.tag.sid = sc->sc_local_sid; 782 ai.xfer_mode = VIO_DRING_MODE; 783 ai.vdisk_block_size = DEV_BSIZE; 784 ai.max_xfer_sz = MAXPHYS / DEV_BSIZE; 785 vdsk_sendmsg(sc, &ai, sizeof(ai)); 786 787 sc->sc_vio_state |= VIO_SND_ATTR_INFO; 788 } 789 790 void 791 vdsk_send_dring_reg(struct vdsk_softc *sc) 792 { 793 struct vio_dring_reg dr; 794 795 bzero(&dr, sizeof(dr)); 796 dr.tag.type = VIO_TYPE_CTRL; 797 dr.tag.stype = VIO_SUBTYPE_INFO; 798 dr.tag.stype_env = VIO_DRING_REG; 799 dr.tag.sid = sc->sc_local_sid; 800 dr.dring_ident = 0; 801 dr.num_descriptors = sc->sc_vd->vd_nentries; 802 dr.descriptor_size = sizeof(struct vd_desc); 803 dr.options = VIO_TX_RING | VIO_RX_RING; 804 dr.ncookies = 1; 805 dr.cookie[0].addr = 0; 806 dr.cookie[0].size = PAGE_SIZE; 807 vdsk_sendmsg(sc, &dr, sizeof(dr)); 808 809 sc->sc_vio_state |= VIO_SND_DRING_REG; 810 }; 811 812 void 813 vdsk_send_rdx(struct vdsk_softc *sc) 814 { 815 struct vio_rdx rdx; 816 817 bzero(&rdx, sizeof(rdx)); 818 rdx.tag.type = VIO_TYPE_CTRL; 819 rdx.tag.stype = VIO_SUBTYPE_INFO; 820 rdx.tag.stype_env = VIO_RDX; 821 rdx.tag.sid = sc->sc_local_sid; 822 vdsk_sendmsg(sc, &rdx, sizeof(rdx)); 823 824 sc->sc_vio_state |= VIO_SND_RDX; 825 } 826 827 struct vdsk_dring * 828 vdsk_dring_alloc(bus_dma_tag_t t, int nentries) 829 { 830 struct vdsk_dring *vd; 831 bus_size_t size; 832 caddr_t va; 833 int nsegs; 834 int i; 835 836 vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT); 837 if (vd == NULL) 838 return NULL; 839 840 size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE); 841 842 if (bus_dmamap_create(t, size, 1, size, 0, 843 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0) 844 return (NULL); 845 846 if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1, 847 &nsegs, BUS_DMA_NOWAIT) != 0) 848 goto destroy; 849 850 if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va, 851 BUS_DMA_NOWAIT) != 0) 852 goto free; 853 854 if (bus_dmamap_load(t, vd->vd_map, va, size, NULL, 855 BUS_DMA_NOWAIT) != 0) 856 goto unmap; 857 858 vd->vd_desc = (struct vd_desc *)va; 859 vd->vd_nentries = nentries; 860 bzero(vd->vd_desc, nentries * sizeof(struct vd_desc)); 861 for (i = 0; i < vd->vd_nentries; i++) 862 vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE; 863 return (vd); 864 865 unmap: 866 bus_dmamem_unmap(t, va, size); 867 free: 868 bus_dmamem_free(t, &vd->vd_seg, 1); 869 destroy: 870 bus_dmamap_destroy(t, vd->vd_map); 871 872 return (NULL); 873 } 874 875 void 876 vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd) 877 { 878 bus_size_t size; 879 880 size = vd->vd_nentries * sizeof(struct vd_desc); 881 size = roundup(size, PAGE_SIZE); 882 883 bus_dmamap_unload(t, vd->vd_map); 884 bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size); 885 bus_dmamem_free(t, &vd->vd_seg, 1); 886 bus_dmamap_destroy(t, vd->vd_map); 887 free(vd, M_DEVBUF, 0); 888 } 889 890 void * 891 vdsk_io_get(void *xsc) 892 { 893 struct vdsk_softc *sc = xsc; 894 void *rv = sc; /* just has to be !NULL */ 895 int s; 896 897 s = splbio(); 898 if (sc->sc_vio_state != VIO_ESTABLISHED || 899 sc->sc_tx_cnt >= sc->sc_vd->vd_nentries) 900 rv = NULL; 901 else 902 sc->sc_tx_cnt++; 903 splx(s); 904 905 return (rv); 906 } 907 908 void 909 vdsk_io_put(void *xsc, void *io) 910 { 911 struct vdsk_softc *sc = xsc; 912 int s; 913 914 #ifdef DIAGNOSTIC 915 if (sc != io) 916 panic("vsdk_io_put: unexpected io"); 917 #endif 918 919 s = splbio(); 920 sc->sc_tx_cnt--; 921 splx(s); 922 } 923 924 void 925 vdsk_scsi_cmd(struct scsi_xfer *xs) 926 { 927 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 928 int timeout, s; 929 int desc; 930 931 switch (xs->cmd->opcode) { 932 case READ_BIG: 933 case READ_COMMAND: 934 case READ_12: 935 case READ_16: 936 case WRITE_BIG: 937 case WRITE_COMMAND: 938 case WRITE_12: 939 case WRITE_16: 940 case SYNCHRONIZE_CACHE: 941 break; 942 943 case INQUIRY: 944 vdsk_scsi_inq(xs); 945 return; 946 case READ_CAPACITY: 947 vdsk_scsi_capacity(xs); 948 return; 949 case READ_CAPACITY_16: 950 vdsk_scsi_capacity16(xs); 951 return; 952 953 case TEST_UNIT_READY: 954 case START_STOP: 955 case PREVENT_ALLOW: 956 vdsk_scsi_done(xs, XS_NOERROR); 957 return; 958 959 default: 960 printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode); 961 case MODE_SENSE: 962 case MODE_SENSE_BIG: 963 case REPORT_LUNS: 964 case READ_TOC: 965 vdsk_scsi_done(xs, XS_DRIVER_STUFFUP); 966 return; 967 } 968 969 s = splbio(); 970 desc = vdsk_submit_cmd(xs); 971 972 if (!ISSET(xs->flags, SCSI_POLL)) { 973 splx(s); 974 return; 975 } 976 977 timeout = 1000; 978 do { 979 if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) 980 break; 981 982 delay(1000); 983 } while(--timeout > 0); 984 if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) { 985 vdsk_complete_cmd(xs, desc); 986 } else { 987 ldc_reset(&sc->sc_lc); 988 vdsk_scsi_done(xs, XS_TIMEOUT); 989 } 990 splx(s); 991 } 992 993 int 994 vdsk_submit_cmd(struct scsi_xfer *xs) 995 { 996 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 997 struct ldc_map *map = sc->sc_lm; 998 struct vio_dring_msg dm; 999 struct scsi_rw *rw; 1000 struct scsi_rw_big *rwb; 1001 struct scsi_rw_12 *rw12; 1002 struct scsi_rw_16 *rw16; 1003 u_int64_t lba; 1004 u_int32_t sector_count; 1005 uint8_t operation; 1006 vaddr_t va; 1007 paddr_t pa; 1008 psize_t nbytes; 1009 int len, ncookies; 1010 int desc; 1011 1012 switch (xs->cmd->opcode) { 1013 case READ_BIG: 1014 case READ_COMMAND: 1015 case READ_12: 1016 case READ_16: 1017 operation = VD_OP_BREAD; 1018 break; 1019 1020 case WRITE_BIG: 1021 case WRITE_COMMAND: 1022 case WRITE_12: 1023 case WRITE_16: 1024 operation = VD_OP_BWRITE; 1025 break; 1026 1027 case SYNCHRONIZE_CACHE: 1028 operation = VD_OP_FLUSH; 1029 break; 1030 } 1031 1032 /* 1033 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same 1034 * layout as 10-byte READ/WRITE commands. 1035 */ 1036 if (xs->cmdlen == 6) { 1037 rw = (struct scsi_rw *)xs->cmd; 1038 lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff); 1039 sector_count = rw->length ? rw->length : 0x100; 1040 } else if (xs->cmdlen == 10) { 1041 rwb = (struct scsi_rw_big *)xs->cmd; 1042 lba = _4btol(rwb->addr); 1043 sector_count = _2btol(rwb->length); 1044 } else if (xs->cmdlen == 12) { 1045 rw12 = (struct scsi_rw_12 *)xs->cmd; 1046 lba = _4btol(rw12->addr); 1047 sector_count = _4btol(rw12->length); 1048 } else if (xs->cmdlen == 16) { 1049 rw16 = (struct scsi_rw_16 *)xs->cmd; 1050 lba = _8btol(rw16->addr); 1051 sector_count = _4btol(rw16->length); 1052 } 1053 1054 desc = sc->sc_tx_prod; 1055 1056 ncookies = 0; 1057 len = xs->datalen; 1058 va = (vaddr_t)xs->data; 1059 while (len > 0) { 1060 KASSERT(ncookies < MAXPHYS / PAGE_SIZE); 1061 pmap_extract(pmap_kernel(), va, &pa); 1062 while (map->lm_slot[map->lm_next].entry != 0) { 1063 map->lm_next++; 1064 map->lm_next &= (map->lm_nentries - 1); 1065 } 1066 map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK); 1067 map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW; 1068 map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW; 1069 map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W; 1070 map->lm_count++; 1071 1072 nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK)); 1073 1074 sc->sc_vd->vd_desc[desc].cookie[ncookies].addr = 1075 map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK); 1076 sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes; 1077 1078 sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next; 1079 va += nbytes; 1080 len -= nbytes; 1081 ncookies++; 1082 } 1083 1084 if (ISSET(xs->flags, SCSI_POLL) == 0) 1085 sc->sc_vd->vd_desc[desc].hdr.ack = 1; 1086 else 1087 sc->sc_vd->vd_desc[desc].hdr.ack = 0; 1088 sc->sc_vd->vd_desc[desc].operation = operation; 1089 sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE; 1090 sc->sc_vd->vd_desc[desc].status = 0xffffffff; 1091 sc->sc_vd->vd_desc[desc].offset = lba; 1092 sc->sc_vd->vd_desc[desc].size = xs->datalen; 1093 sc->sc_vd->vd_desc[desc].ncookies = ncookies; 1094 membar(Sync); 1095 sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY; 1096 1097 sc->sc_vsd[desc].vsd_xs = xs; 1098 sc->sc_vsd[desc].vsd_ncookies = ncookies; 1099 1100 sc->sc_tx_prod++; 1101 sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1); 1102 1103 bzero(&dm, sizeof(dm)); 1104 dm.tag.type = VIO_TYPE_DATA; 1105 dm.tag.stype = VIO_SUBTYPE_INFO; 1106 dm.tag.stype_env = VIO_DRING_DATA; 1107 dm.tag.sid = sc->sc_local_sid; 1108 dm.seq_no = sc->sc_seq_no++; 1109 dm.dring_ident = sc->sc_dring_ident; 1110 dm.start_idx = dm.end_idx = desc; 1111 vdsk_sendmsg(sc, &dm, sizeof(dm)); 1112 1113 return desc; 1114 } 1115 1116 void 1117 vdsk_complete_cmd(struct scsi_xfer *xs, int desc) 1118 { 1119 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 1120 struct ldc_map *map = sc->sc_lm; 1121 int cookie, idx; 1122 int error; 1123 1124 cookie = 0; 1125 while (cookie < sc->sc_vsd[desc].vsd_ncookies) { 1126 idx = sc->sc_vsd[desc].vsd_map_idx[cookie++]; 1127 map->lm_slot[idx].entry = 0; 1128 map->lm_count--; 1129 } 1130 1131 error = XS_NOERROR; 1132 if (sc->sc_vd->vd_desc[desc].status != 0) 1133 error = XS_DRIVER_STUFFUP; 1134 xs->resid = xs->datalen - 1135 sc->sc_vd->vd_desc[desc].size; 1136 vdsk_scsi_done(xs, error); 1137 1138 sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE; 1139 } 1140 1141 void 1142 vdsk_scsi_inq(struct scsi_xfer *xs) 1143 { 1144 struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd; 1145 1146 if (ISSET(inq->flags, SI_EVPD)) 1147 vdsk_scsi_done(xs, XS_DRIVER_STUFFUP); 1148 else 1149 vdsk_scsi_inquiry(xs); 1150 } 1151 1152 void 1153 vdsk_scsi_inquiry(struct scsi_xfer *xs) 1154 { 1155 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 1156 struct scsi_inquiry_data inq; 1157 char buf[5]; 1158 1159 bzero(&inq, sizeof(inq)); 1160 1161 switch (sc->sc_vd_mtype) { 1162 case VD_MEDIA_TYPE_CD: 1163 case VD_MEDIA_TYPE_DVD: 1164 inq.device = T_CDROM; 1165 break; 1166 1167 case VD_MEDIA_TYPE_FIXED: 1168 default: 1169 inq.device = T_DIRECT; 1170 break; 1171 } 1172 1173 inq.version = 0x05; /* SPC-3 */ 1174 inq.response_format = 2; 1175 inq.additional_length = 32; 1176 inq.flags |= SID_CmdQue; 1177 bcopy("SUN ", inq.vendor, sizeof(inq.vendor)); 1178 bcopy("Virtual Disk ", inq.product, sizeof(inq.product)); 1179 snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor); 1180 bcopy(buf, inq.revision, sizeof(inq.revision)); 1181 1182 bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen)); 1183 1184 vdsk_scsi_done(xs, XS_NOERROR); 1185 } 1186 1187 void 1188 vdsk_scsi_capacity(struct scsi_xfer *xs) 1189 { 1190 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 1191 struct scsi_read_cap_data rcd; 1192 uint64_t capacity; 1193 1194 bzero(&rcd, sizeof(rcd)); 1195 1196 capacity = sc->sc_vdisk_size - 1; 1197 if (capacity > 0xffffffff) 1198 capacity = 0xffffffff; 1199 1200 _lto4b(capacity, rcd.addr); 1201 _lto4b(sc->sc_vdisk_block_size, rcd.length); 1202 1203 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen)); 1204 1205 vdsk_scsi_done(xs, XS_NOERROR); 1206 } 1207 1208 void 1209 vdsk_scsi_capacity16(struct scsi_xfer *xs) 1210 { 1211 struct vdsk_softc *sc = xs->sc_link->adapter_softc; 1212 struct scsi_read_cap_data_16 rcd; 1213 1214 bzero(&rcd, sizeof(rcd)); 1215 1216 _lto8b(sc->sc_vdisk_size - 1, rcd.addr); 1217 _lto4b(sc->sc_vdisk_block_size, rcd.length); 1218 1219 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen)); 1220 1221 vdsk_scsi_done(xs, XS_NOERROR); 1222 } 1223 1224 void 1225 vdsk_scsi_done(struct scsi_xfer *xs, int error) 1226 { 1227 xs->error = error; 1228 1229 scsi_done(xs); 1230 } 1231 1232 int 1233 vdsk_dev_probe(struct scsi_link *link) 1234 { 1235 KASSERT(link->lun == 0); 1236 1237 if (link->target == 0) 1238 return (0); 1239 1240 return (ENODEV); 1241 } 1242 1243 void 1244 vdsk_dev_free(struct scsi_link *link) 1245 { 1246 printf("%s\n", __func__); 1247 } 1248