/*	$NetBSD: vdsk.c,v 1.5 2019/11/19 20:07:30 palle Exp $	*/
/*	$OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipi_cd.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

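/*
 * VIO attribute message exchanged with the vDisk server during the
 * handshake.  The server's ACK carries the disk geometry (block size,
 * size in blocks, media type) that the INQUIRY/READ CAPACITY
 * emulation below reports later.
 */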
struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1. */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

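/*
 * Descriptor ring entry.  Each request carries up to MAXPHYS bytes of
 * data, described by one LDC cookie per page; the server writes the
 * completion status back into the descriptor.
 */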
struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

#if OPENBSD_BUSDMA
struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);
#else
struct vdsk_dring *vdsk_dring_alloc(int);
void	vdsk_dring_free(struct vdsk_dring *);
#endif

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsipi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	device_t sc_dv;

	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

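	/*
	 * VIO handshake progress.  Each phase sets its SND bit when we
	 * send the message and its ACK bit when the peer acknowledges
	 * it; VIO_ESTABLISHED is the union of all eight bits.
	 */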
	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(device_t, cfdata_t, void *);
void	vdsk_attach(device_t, device_t, void *);
void	vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
			     void *);

CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc),
		  vdsk_match, vdsk_attach, NULL, NULL);

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

void	vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
int	vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int);
void	vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_done(struct scsipi_xfer *, int);

int
vdsk_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}

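/*
 * Attach: establish the LDC channel to the vDisk server, export the
 * descriptor ring through the LDC map table, and then poll for the
 * VIO handshake to complete before hooking into scsipi.
 */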
void
vdsk_attach(device_t parent, device_t self, void *aux)
{
	struct vdsk_softc *sc = device_private(self);
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	int err, s;
	int timeout;
	vaddr_t va;
	paddr_t pa;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx",
	       (long long unsigned int)ca->ca_tx_ino,
	       (long long unsigned int)ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * so that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, vdsk_tx_intr, sc);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, vdsk_rx_intr, sc);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

#if OPENBSD_BUSDMA
	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
#else
	lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
#else
	lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES);
#endif
#if OPENBSD_BUSDMA
	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
#else
	sc->sc_lm = ldc_map_alloc(2048);
#endif
#if OPENBSD_BUSDMA
	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
#else
	va = (vaddr_t)sc->sc_lm->lm_slot;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
#endif
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}
#if OPENBSD_BUSDMA
	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
#else
	sc->sc_vd = vdsk_dring_alloc(32);
#endif
	sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_SLEEP);

#if OPENBSD_BUSDMA
	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
#else
	va = (vaddr_t)sc->sc_vd->vd_desc;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);

	sc->sc_lm->lm_slot[0].entry = pa;
#endif
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;
	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);
	va = (vaddr_t)lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 10 * 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		printf("vio not established: %d\n", sc->sc_vio_state);
		return;
	}

	sc->sc_dv = self;

	sc->sc_adapter.adapt_dev = sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1;

	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = vdsk_scsipi_request;

	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 2; /* XXX why not 1? */
	sc->sc_channel.chan_nluns = 1; /* XXX slices should be presented as luns? */
	sc->sc_channel.chan_id = 0;
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);

	return;

free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
#if OPENBSD_BUSDMA
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
#else
	ldc_map_free(sc->sc_lm);
#endif
}

void
vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{

	struct vdsk_softc *sc;
	struct scsipi_xfer *xs;

	sc = device_private(chan->chan_adapter->adapt_dev);

	xs = arg;

	switch (req) {
		case ADAPTER_REQ_RUN_XFER:
			vdsk_scsi_cmd(sc, xs);
			break;
		case ADAPTER_REQ_GROW_RESOURCES:
		case ADAPTER_REQ_SET_XFER_MODE:
			/* Ignored */
			break;
		default:
			panic("req unhandled: %x", req);
	}

}

int
vdsk_tx_intr(void *arg)
{
	panic("%s: not verified yet", __func__);

	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Tx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Tx link up\n"));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Tx link reset\n"));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

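/*
 * Receive interrupt: process one LDC packet from the rx queue and
 * advance the queue head.  A channel state change throws away the
 * VIO handshake state and, on link-up, restarts version negotiation.
 */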
int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL) {
		printf("hv_ldc_rx_get_state H_EINVAL\n");
		return (0);
	}
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Rx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Rx link up\n"));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Rx link reset\n"));
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
		case LDC_CTRL:
			ldc_rx_ctrl(lc, lp);
			break;

		case LDC_DATA:
			ldc_rx_data(lc, lp);
			break;

		default:
			DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
			    lp->ctrl));
			ldc_reset(lc);
			break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

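/*
 * Dispatch a received LDC data packet to the VIO control or data
 * handlers.  Multi-fragment messages are not reassembled: control
 * packets are ignored unless they carry a FRAG_START or FRAG_STOP
 * mark, and data packets are ignored unless they start a message.
 */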
void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
		case VIO_TYPE_CTRL:
			if ((lp->env & LDC_FRAG_START) == 0 &&
			    (lp->env & LDC_FRAG_STOP) == 0)
				return;
			vdsk_rx_vio_ctrl(lc->lc_sc, vm);
			break;

		case VIO_TYPE_DATA:
			if ((lp->env & LDC_FRAG_START) == 0)
				return;
			vdsk_rx_vio_data(lc->lc_sc, vm);
			break;

		default:
			DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
			ldc_reset(lc);
			break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
		case VIO_VER_INFO:
			vdsk_rx_vio_ver_info(sc, tag);
			break;
		case VIO_ATTR_INFO:
			vdsk_rx_vio_attr_info(sc, tag);
			break;
		case VIO_DRING_REG:
			vdsk_rx_vio_dring_reg(sc, tag);
			break;
		case VIO_RDX:
			vdsk_rx_vio_rdx(sc, tag);
			break;
		default:
			DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
			break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/VER_INFO\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/VER_INFO\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
				ldc_reset(&sc->sc_lc);
				break;
			}
			sc->sc_major = vi->major;
			sc->sc_minor = vi->minor;
			sc->sc_vio_state |= VIO_ACK_VER_INFO;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
				ldc_reset(&sc->sc_lc);
				break;
			}

			sc->sc_vdisk_block_size = ai->vdisk_block_size;
			sc->sc_vdisk_size = ai->vdisk_size;
			if (sc->sc_major > 1 || sc->sc_minor >= 1)
				sc->sc_vd_mtype = ai->vd_mtype;
			else
				sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

			sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/DRING_REG\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/DRING_REG\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
				ldc_reset(&sc->sc_lc);
				break;
			}

			sc->sc_dring_ident = dr->dring_ident;
			sc->sc_seq_no = 1;

			sc->sc_vio_state |= VIO_ACK_DRING_REG;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/RDX\n"));
			break;

		case VIO_SUBTYPE_ACK:
		{
			int prod;

			DPRINTF(("CTRL/ACK/RDX\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
				ldc_reset(&sc->sc_lc);
				break;
			}
			sc->sc_vio_state |= VIO_ACK_RDX;

			/*
			 * If this ACK is the result of a reconnect, we may
			 * have pending I/O that we need to resubmit.  We need
			 * to rebuild the ring descriptors though, since the
			 * vDisk server on the other side may have touched
			 * them already.  So we just clean up the ring and the
			 * LDC map and resubmit the SCSI commands based on our
			 * soft descriptors.
			 */
			prod = sc->sc_tx_prod;
			sc->sc_tx_prod = sc->sc_tx_cons;
			sc->sc_tx_cnt = 0;
			sc->sc_lm->lm_next = 1;
			sc->sc_lm->lm_count = 1;
			while (sc->sc_tx_prod != prod)
				vdsk_submit_cmd(sc, sc->sc_vsd[sc->sc_tx_prod].vsd_xs);
			break;
		}

		default:
			DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
			break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
		case VIO_DRING_DATA:
			vdsk_rx_vio_dring_data(sc, tag);
			break;

		default:
			DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
			break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("DATA/INFO/DRING_DATA\n"));
			break;

		case VIO_SUBTYPE_ACK:
		{
			struct scsipi_xfer *xs;
			int cons;

			cons = sc->sc_tx_cons;
			while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
				xs = sc->sc_vsd[cons].vsd_xs;
				if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
					vdsk_complete_cmd(sc, xs, cons);
				cons++;
				cons &= (sc->sc_vd->vd_nentries - 1);
			}
			sc->sc_tx_cons = cons;
			break;
		}

		case VIO_SUBTYPE_NACK:
			DPRINTF(("DATA/NACK/DRING_DATA\n"));
			break;

		default:
			DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
			break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{

	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{

	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{

	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{

	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = gettick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

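/*
 * Allocate the descriptor ring, either through bus_dma (the
 * OpenBSD-style code path) or from kmem, with pmap_extract() used
 * later to learn the physical address.  All descriptors start out
 * VIO_DESC_FREE.
 */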
#if OPENBSD_BUSDMA
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
#else
struct vdsk_dring *
vdsk_dring_alloc(int nentries)
#endif
{

	struct vdsk_dring *vd;
	bus_size_t size;
	vaddr_t va;
#if OPENBSD_BUSDMA
	int nsegs;
#endif
	int i;

	vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_SLEEP);

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto fail;

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void*)&va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, (void*)va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;
#else
	va = (vaddr_t)kmem_zalloc(size, KM_SLEEP);
#endif
	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void*)va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
fail:
	kmem_free(vd, sizeof(struct vdsk_dring));
#endif
	return (NULL);
}

#if OPENBSD_BUSDMA
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
#else
void
vdsk_dring_free(struct vdsk_dring *vd)
#endif
{

	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

#if OPENBSD_BUSDMA
	bus_dmamap_unload(t, vd->vd_map);

	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
#else
	kmem_free(vd->vd_desc, size);
#endif
	kmem_free(vd, sizeof(struct vdsk_dring));
}

void *
vdsk_io_get(void *xsc)
{

	panic("%s: not verified yet", __func__);

	struct vdsk_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */
	int s;

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{

	panic("%s: not verified yet", __func__);

	struct vdsk_softc *sc = xsc;
	int s;

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

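/*
 * Entry point for scsipi commands.  Reads, writes and cache flushes
 * are queued on the descriptor ring; INQUIRY and READ CAPACITY are
 * synthesized locally from the negotiated attributes; the remaining
 * recognized opcodes complete immediately.  Polled commands busy-wait
 * here for up to a second before forcing an LDC reset.
 */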
void
vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	int timeout, s;
	int desc;

	DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode));

	switch (xs->cmd->opcode) {

		case SCSI_READ_6_COMMAND:
		case READ_10:
		case READ_12:
		case READ_16:
		case SCSI_WRITE_6_COMMAND:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
		case SCSI_SYNCHRONIZE_CACHE_10:
			break;

		case INQUIRY:
			vdsk_scsi_inq(sc, xs);
			return;

		case READ_CAPACITY_10:
			vdsk_scsi_capacity(sc, xs);
			return;

		case READ_CAPACITY_16:
			vdsk_scsi_capacity16(sc, xs);
			return;

		case SCSI_TEST_UNIT_READY:
		case START_STOP:
		case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		case SCSI_MODE_SENSE_6:
		case SCSI_MAINTENANCE_IN:
			vdsk_scsi_done(xs, XS_NOERROR);
			return;

		case SCSI_MODE_SENSE_10:
		case READ_TOC:
			vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
			return;

		default:
			panic("%s unhandled cmd 0x%02x",
			      __func__, xs->cmd->opcode);
	}

	s = splbio();
	desc = vdsk_submit_cmd(sc, xs);

	if (!ISSET(xs->xs_control, XS_CTL_POLL)) {
		splx(s);
		return;
	}
	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(sc, xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

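/*
 * Translate a SCSI read/write/flush into a ring descriptor: decode
 * the LBA from the CDB, bind each data page into a free LDC map slot,
 * record one cookie per page, and notify the server with a DRING_DATA
 * message.  Returns the ring index used, so polled callers can watch
 * the descriptor for completion.
 */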
int
vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw_6 *rw6;
	struct scsipi_rw_10 *rw10;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t lba = 0;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {

		case SCSI_READ_6_COMMAND:
		case READ_10:
		case READ_12:
		case READ_16:
			operation = VD_OP_BREAD;
			break;

		case SCSI_WRITE_6_COMMAND:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
			operation = VD_OP_BWRITE;
			break;

		case SCSI_SYNCHRONIZE_CACHE_10:
			operation = VD_OP_FLUSH;
			break;

		default:
			panic("%s unhandled cmd opcode 0x%x",
			      __func__, xs->cmd->opcode);
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has the
	 * same layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw6 = (struct scsi_rw_6 *)xs->cmd;
		lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff);
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsipi_rw_10 *)xs->cmd;
		lba = _4btol(rw10->addr);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
	}

	DPRINTF(("lba = %lu\n", lba));

	desc = sc->sc_tx_prod;
	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
	while (len > 0) {
		DPRINTF(("len = %d\n", len));
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pa = 0;
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}
	if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;

	membar_Sync();

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

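/*
 * Complete a finished descriptor: release its LDC map slots, convert
 * the server status into a scsipi error code and hand the xfer back
 * to the midlayer before marking the descriptor free again.
 */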
void
vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc)
{
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen - sc->sc_vd->vd_desc[desc].size;

	/*
	 * scsipi_done() called by vdsk_scsi_done() requires
	 * the kernel to be locked.
	 */
	KERNEL_LOCK(1, curlwp);
	vdsk_scsi_done(xs, error);
	KERNEL_UNLOCK_ONE(curlwp);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{

	vdsk_scsi_inquiry(sc, xs);
}

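/*
 * Synthesize INQUIRY data for the virtual disk.  The device type is
 * derived from the media type negotiated during the handshake, and
 * the revision field carries the negotiated vDisk protocol version.
 */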
void
vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{

	struct scsipi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
		case VD_MEDIA_TYPE_CD:
		case VD_MEDIA_TYPE_DVD:
			inq.device = T_CDROM;
			break;

		case VD_MEDIA_TYPE_FIXED:
		default:
			inq.device = T_DIRECT;
			break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags3 |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

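/*
 * Synthesize READ CAPACITY(10) data.  The returned address is the
 * last LBA, clamped to 0xffffffff so that larger disks force the
 * caller to fall back to READ CAPACITY(16).
 */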
void
vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{

	struct scsipi_read_capacity_10_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu  block size %u\n",
		 __func__, capacity, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{

	struct scsipi_read_capacity_16_data rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu  block size %u\n",
		 __func__, sc->sc_vdisk_size - 1, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsipi_xfer *xs, int error)
{

	xs->error = error;

	scsipi_done(xs);
}