/*	$OpenBSD: vdsk.c,v 1.53 2020/02/18 17:20:12 krw Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/cd.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1 */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);

/*
 * We support vDisk 1.0 and 1.1.
 */
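
/*
 * Operations and attributes are gated by the negotiated version:
 * the VD_OP_* codes above are grouped by the protocol version that
 * introduced them, and the vd_mtype attribute field is only valid
 * once version 1.1 or later has been negotiated.
 */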
#define VDSK_MAJOR	1
#define VDSK_MINOR	1

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	struct device	sc_dv;
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	struct scsi_iopool sc_iopool;
	struct scsi_link sc_link;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(struct device *, void *, void *);
void	vdsk_attach(struct device *, struct device *, void *);

struct cfattach vdsk_ca = {
	sizeof(struct vdsk_softc), vdsk_match, vdsk_attach
};

struct cfdriver vdsk_cd = {
	NULL, "vdsk", DV_DULL
};

void	vdsk_scsi_cmd(struct scsi_xfer *);
int	vdsk_dev_probe(struct scsi_link *);
void	vdsk_dev_free(struct scsi_link *);

struct scsi_adapter vdsk_switch = {
	vdsk_scsi_cmd, NULL, vdsk_dev_probe, vdsk_dev_free, NULL
};

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

int	vdsk_submit_cmd(struct scsi_xfer *);
void	vdsk_complete_cmd(struct scsi_xfer *, int);

void	vdsk_scsi_inq(struct scsi_xfer *);
void	vdsk_scsi_inquiry(struct scsi_xfer *);
void	vdsk_scsi_capacity(struct scsi_xfer *);
void	vdsk_scsi_capacity16(struct scsi_xfer *);
void	vdsk_scsi_done(struct scsi_xfer *, int);

int
vdsk_match(struct device *parent, void *match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}
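
/*
 * Autoconf attachment: set up the LDC queues and the shared memory
 * for the descriptor ring, kick off the VIO handshake, and, if the
 * session reaches VIO_ESTABLISHED, attach a SCSI bus on top of the
 * virtual disk.
 */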
void
vdsk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vdsk_softc *sc = (struct vdsk_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct scsibus_attach_args saa;
	struct ldc_conn *lc;
	int err, s;
	int timeout;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx", ca->ca_tx_ino, ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, 0, vdsk_tx_intr, sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, 0, vdsk_rx_intr, sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
	if (sc->sc_lm == NULL) {
		printf(", can't allocate LDC mapping table\n");
		goto free_rxqueue;
	}

	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}

	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
	if (sc->sc_vd == NULL) {
		printf(", can't allocate dring\n");
		goto free_map;
	}
	sc->sc_vsd = malloc(32 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT);
	if (sc->sc_vsd == NULL) {
		printf(", can't allocate software ring\n");
		goto free_dring;
	}

	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED)
		return;
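
	/*
	 * Handshake complete; attach the virtual disk as the single
	 * target on a SCSI bus (vdsk_dev_probe only accepts target 0),
	 * with openings backed by descriptor ring slots.
	 */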
	scsi_iopool_init(&sc->sc_iopool, sc, vdsk_io_get, vdsk_io_put);

	sc->sc_link.adapter = &vdsk_switch;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1; /* XXX slices should be presented as luns? */
	sc->sc_link.adapter_target = 2;
	sc->sc_link.openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	config_found(self, &saa, scsiprint);

	return;

free_dring:
	vdsk_dring_free(sc->sc_dmatag, sc->sc_vd);
free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}

int
vdsk_tx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("%s: Tx link down\n", __func__));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("%s: Tx link up\n", __func__));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Tx link reset\n", __func__));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL)
		return (0);
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("%s: Rx link down\n", __func__));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("%s: Rx link up\n", __func__));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Rx link reset\n", __func__));
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
		    lp->ctrl));
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0)
			return;
		vdsk_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0)
			return;
		vdsk_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}
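
/*
 * Dispatch incoming VIO control messages to the appropriate
 * handshake handler based on the subtype envelope.
 */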
void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vdsk_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vdsk_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vdsk_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vdsk_rx_vio_rdx(sc, tag);
		break;
	default:
		DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_major = vi->major;
		sc->sc_minor = vi->minor;
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_vdisk_block_size = ai->vdisk_block_size;
		sc->sc_vdisk_size = ai->vdisk_size;
		if (sc->sc_major > 1 || sc->sc_minor >= 1)
			sc->sc_vd_mtype = ai->vd_mtype;
		else
			sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);

}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}
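
/*
 * RDX is the last step of the handshake; once it has been ACKed,
 * sc_vio_state equals VIO_ESTABLISHED and data transfers may flow.
 */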
void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		int prod;

		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;

		/*
		 * If this ACK is the result of a reconnect, we may
		 * have pending I/O that we need to resubmit.  We need
		 * to rebuild the ring descriptors though since the
		 * vDisk server on the other side may have touched
		 * them already.  So we just clean up the ring and the
		 * LDC map and resubmit the SCSI commands based on our
		 * soft descriptors.
		 */
		prod = sc->sc_tx_prod;
		sc->sc_tx_prod = sc->sc_tx_cons;
		sc->sc_tx_cnt = 0;
		sc->sc_lm->lm_next = 1;
		sc->sc_lm->lm_count = 1;
		while (sc->sc_tx_prod != prod)
			vdsk_submit_cmd(sc->sc_vsd[sc->sc_tx_prod].vsd_xs);

		scsi_iopool_run(&sc->sc_iopool);
		break;
	}

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DRING_DATA:
		vdsk_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		struct scsi_xfer *xs;
		int cons;

		cons = sc->sc_tx_cons;
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			xs = sc->sc_vsd[cons].vsd_xs;
			if (ISSET(xs->flags, SCSI_POLL) == 0)
				vdsk_complete_cmd(xs, cons);
			cons++;
			cons &= (sc->sc_vd->vd_nentries - 1);
		}
		sc->sc_tx_cons = cons;
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}
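
/*
 * Handshake senders.  Each function records the corresponding
 * VIO_SND_* bit in sc_vio_state; the RX handlers above advance to
 * the next step once the matching ACK arrives.
 */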
void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = tick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vdsk_dring *vd;
	bus_size_t size;
	caddr_t va;
	int nsegs;
	int i;

	vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto error;

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

unmap:
	bus_dmamem_unmap(t, va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
error:
	free(vd, M_DEVBUF, sizeof(struct vdsk_dring));

	return (NULL);
}

void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
	free(vd, M_DEVBUF, 0);
}
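
/*
 * I/O pool backend.  There is no per-request structure to hand out;
 * a dummy token is returned as long as the session is established
 * and a descriptor ring slot is available.
 */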
void *
vdsk_io_get(void *xsc)
{
	struct vdsk_softc *sc = xsc;
	void *rv = sc;	/* just has to be !NULL */
	int s;

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
	struct vdsk_softc *sc = xsc;
	int s;

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	int timeout, s;
	int desc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
	case SYNCHRONIZE_CACHE:
		break;

	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
		/* FALLTHROUGH */
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	s = splbio();
	desc = vdsk_submit_cmd(xs);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		splx(s);
		return;
	}

	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}
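
/*
 * Build and post a ring descriptor for a READ/WRITE/FLUSH command:
 * decode the LBA from the CDB, map the data buffer page by page into
 * the LDC map table, and notify the server with a DRING_DATA message.
 */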
int
vdsk_submit_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;

	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	desc = sc->sc_tx_prod;

	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
	while (len > 0) {
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}

	if (ISSET(xs->flags, SCSI_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;
	/* Descriptor contents must be stable before it is marked ready. */
	membar_sync();
	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

void
vdsk_complete_cmd(struct scsi_xfer *xs, int desc)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen -
	    sc->sc_vd->vd_desc[desc].size;
	vdsk_scsi_done(xs, error);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(inq->flags, SI_EVPD))
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
	else
		vdsk_scsi_inquiry(xs);
}
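
/*
 * Synthesize INQUIRY data from the negotiated attributes: the media
 * type picks the SCSI device type and the protocol version is
 * reported in the revision field.
 */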
void
vdsk_scsi_inquiry(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
	case VD_MEDIA_TYPE_CD:
	case VD_MEDIA_TYPE_DVD:
		inq.device = T_CDROM;
		break;

	case VD_MEDIA_TYPE_FIXED:
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;

	scsi_done(xs);
}

int
vdsk_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);

	if (link->target == 0)
		return (0);

	return (ENODEV);
}

void
vdsk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}