/*	$NetBSD: vdsk.c,v 1.3 2017/03/03 21:09:25 palle Exp $	*/
/*	$OpenBSD: vdsk.c,v 1.46 2015/01/25 21:42:13 kettenis Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipi_cd.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

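/*
 * Attribute exchange payload, sent once the VIO version has been
 * negotiated.  The guest proposes xfer_mode, vdisk_block_size and
 * max_xfer_sz; the vDisk server's ACK fills in the disk parameters
 * (vdisk_size, vd_type/vd_mtype) that the SCSI emulation below
 * reports to scsipi.
 */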
struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1 */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

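/*
 * Ring descriptor, one per in-flight request.  The cookie array is
 * sized so that a transfer of up to MAXPHYS bytes can be described
 * with one page-granular LDC cookie per page (with 8 KB pages and the
 * usual 64 KB MAXPHYS on sparc64, that is 8 cookies).
 */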
struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

#if OPENBSD_BUSDMA
struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);
#else
struct vdsk_dring *vdsk_dring_alloc(int);
void	vdsk_dring_free(struct vdsk_dring *);
#endif

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1
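/*
 * The version actually negotiated with the vDisk server ends up in
 * sc_major/sc_minor; the media type (vd_mtype) is only valid when the
 * negotiated version is 1.1 or later.
 */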

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsipi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	device_t sc_dv;

	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff
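/*
 * Each SND bit is set when we send the corresponding handshake
 * message, each ACK bit when the peer acknowledges it; the session
 * is up once all eight bits are set (VIO_ESTABLISHED).
 */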

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(device_t, cfdata_t, void *);
void	vdsk_attach(device_t, device_t, void *);
void	vdsk_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
			     void *);

CFATTACH_DECL_NEW(vdsk, sizeof(struct vdsk_softc),
		  vdsk_match, vdsk_attach, NULL, NULL);

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

void	vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
int	vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *, int);
void	vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *);
void	vdsk_scsi_done(struct scsipi_xfer *, int);

int
vdsk_match(device_t parent, cfdata_t match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}

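/*
 * Attach a vDisk instance: set up the LDC channel (interrupt handlers,
 * transmit/receive queues, map table), allocate the descriptor ring,
 * export the ring through map-table slot 0, poll for the VIO handshake
 * to complete, and finally attach a scsibus on top of the virtual disk.
 */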
void
vdsk_attach(device_t parent, device_t self, void *aux)
{
	struct vdsk_softc *sc = device_private(self);
	struct cbus_attach_args *ca = aux;
	struct ldc_conn *lc;
	int err, s;
	int timeout;
	vaddr_t va;
	paddr_t pa;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx",
	    (long long unsigned int)ca->ca_tx_ino,
	    (long long unsigned int)ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * so that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, vdsk_tx_intr, sc);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, vdsk_rx_intr, sc);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

#if OPENBSD_BUSDMA
	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
#else
	lc->lc_txq = ldc_queue_alloc(VDSK_TX_ENTRIES);
#endif
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}
#if OPENBSD_BUSDMA
	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
#else
	lc->lc_rxq = ldc_queue_alloc(VDSK_RX_ENTRIES);
#endif
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

#if OPENBSD_BUSDMA
	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
#else
	sc->sc_lm = ldc_map_alloc(2048);
#endif
	if (sc->sc_lm == NULL) {
		printf(", can't allocate LDC mapping table\n");
		goto free_rxqueue;
	}

#if OPENBSD_BUSDMA
	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
#else
	va = (vaddr_t)sc->sc_lm->lm_slot;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_set_map_table(lc->lc_id, pa, 2048);
#endif
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}
#if OPENBSD_BUSDMA
	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
#else
	sc->sc_vd = vdsk_dring_alloc(32);
#endif
	if (sc->sc_vd == NULL) {
		printf(", can't allocate dring\n");
		goto free_map;
	}
	sc->sc_vsd = kmem_zalloc(32 * sizeof(*sc->sc_vsd), KM_NOSLEEP);
	if (sc->sc_vsd == NULL) {
		printf(", can't allocate software ring\n");
		goto free_dring;
	}

#if OPENBSD_BUSDMA
	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
#else
	va = (vaddr_t)sc->sc_vd->vd_desc;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	sc->sc_lm->lm_slot[0].entry = pa;
#endif
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;
	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);
	va = (vaddr_t)lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 10 * 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		printf("vio not established: %d\n", sc->sc_vio_state);
		return;
	}

	sc->sc_dv = self;

	sc->sc_adapter.adapt_dev = sc->sc_dv;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_adapter.adapt_max_periph = sc->sc_vd->vd_nentries - 1;

	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = vdsk_scsipi_request;

	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 2; /* XXX why not 1? */
	sc->sc_channel.chan_nluns = 1; /* XXX slices should be presented as luns? */
	sc->sc_channel.chan_id = 0;
	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(self, &sc->sc_channel, scsiprint);

	return;

free_dring:
#if OPENBSD_BUSDMA
	vdsk_dring_free(sc->sc_dmatag, sc->sc_vd);
#else
	vdsk_dring_free(sc->sc_vd);
#endif
free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
#if OPENBSD_BUSDMA
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
#else
	ldc_map_free(sc->sc_lm);
#endif
free_rxqueue:
#if OPENBSD_BUSDMA
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
#else
	ldc_queue_free(lc->lc_rxq);
#endif
free_txqueue:
#if OPENBSD_BUSDMA
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
#else
	ldc_queue_free(lc->lc_txq);
#endif
}

void
vdsk_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{
	struct vdsk_softc *sc;
	struct scsipi_xfer *xs;

	sc = device_private(chan->chan_adapter->adapt_dev);

	xs = arg;

	switch (req) {
		case ADAPTER_REQ_RUN_XFER:
			vdsk_scsi_cmd(sc, xs);
			break;
		case ADAPTER_REQ_GROW_RESOURCES:
		case ADAPTER_REQ_SET_XFER_MODE:
			/* Ignored */
			break;
		default:
			panic("req unhandled: %x", req);
	}
}

int
vdsk_tx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	panic("%s: not verified yet", __func__);

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Tx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Tx link up\n"));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Tx link reset\n"));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

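/*
 * Receive interrupt: on a channel state change, restart the VIO
 * handshake; otherwise hand one queued LDC packet to the control or
 * data path and advance the receive queue head past it.
 */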
int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL) {
		printf("hv_ldc_rx_get_state H_EINVAL\n");
		return (0);
	}
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Rx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Rx link up\n"));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Rx link reset\n"));
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(uintptr_t)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
		case LDC_CTRL:
			ldc_rx_ctrl(lc, lp);
			break;

		case LDC_DATA:
			ldc_rx_data(lc, lp);
			break;

		default:
			DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
			    lp->ctrl));
			ldc_reset(lc);
			break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
		case VIO_TYPE_CTRL:
			if ((lp->env & LDC_FRAG_START) == 0 &&
			    (lp->env & LDC_FRAG_STOP) == 0)
				return;
			vdsk_rx_vio_ctrl(lc->lc_sc, vm);
			break;

		case VIO_TYPE_DATA:
			if ((lp->env & LDC_FRAG_START) == 0)
				return;
			vdsk_rx_vio_data(lc->lc_sc, vm);
			break;

		default:
			DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
			ldc_reset(lc);
			break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
		case VIO_VER_INFO:
			vdsk_rx_vio_ver_info(sc, tag);
			break;
		case VIO_ATTR_INFO:
			vdsk_rx_vio_attr_info(sc, tag);
			break;
		case VIO_DRING_REG:
			vdsk_rx_vio_dring_reg(sc, tag);
			break;
		case VIO_RDX:
			vdsk_rx_vio_rdx(sc, tag);
			break;
		default:
			DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
			break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/VER_INFO\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/VER_INFO\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
				ldc_reset(&sc->sc_lc);
				break;
			}
			sc->sc_major = vi->major;
			sc->sc_minor = vi->minor;
			sc->sc_vio_state |= VIO_ACK_VER_INFO;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
				ldc_reset(&sc->sc_lc);
				break;
			}

			sc->sc_vdisk_block_size = ai->vdisk_block_size;
			sc->sc_vdisk_size = ai->vdisk_size;
			if (sc->sc_major > 1 || sc->sc_minor >= 1)
				sc->sc_vd_mtype = ai->vd_mtype;
			else
				sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

			sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/DRING_REG\n"));
			break;

		case VIO_SUBTYPE_ACK:
			DPRINTF(("CTRL/ACK/DRING_REG\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
				ldc_reset(&sc->sc_lc);
				break;
			}

			sc->sc_dring_ident = dr->dring_ident;
			sc->sc_seq_no = 1;

			sc->sc_vio_state |= VIO_ACK_DRING_REG;
			break;

		default:
			DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
			break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("CTRL/INFO/RDX\n"));
			break;

		case VIO_SUBTYPE_ACK:
		{
			int prod;

			DPRINTF(("CTRL/ACK/RDX\n"));
			if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
				ldc_reset(&sc->sc_lc);
				break;
			}
			sc->sc_vio_state |= VIO_ACK_RDX;

			/*
			 * If this ACK is the result of a reconnect, we may
			 * have pending I/O that we need to resubmit.  We need
			 * to rebuild the ring descriptors though since the
			 * vDisk server on the other side may have touched
			 * them already.  So we just clean up the ring and the
			 * LDC map and resubmit the SCSI commands based on our
			 * soft descriptors.
			 */
			prod = sc->sc_tx_prod;
			sc->sc_tx_prod = sc->sc_tx_cons;
			sc->sc_tx_cnt = 0;
			sc->sc_lm->lm_next = 1;
			sc->sc_lm->lm_count = 1;
			while (sc->sc_tx_prod != prod)
				vdsk_submit_cmd(sc,
				    sc->sc_vsd[sc->sc_tx_prod].vsd_xs);
			break;
		}

		default:
			DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
			break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
		case VIO_DRING_DATA:
			vdsk_rx_vio_dring_data(sc, tag);
			break;

		default:
			DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
			break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
		case VIO_SUBTYPE_INFO:
			DPRINTF(("DATA/INFO/DRING_DATA\n"));
			break;

		case VIO_SUBTYPE_ACK:
		{
			struct scsipi_xfer *xs;
			int cons;

			cons = sc->sc_tx_cons;
			while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
				xs = sc->sc_vsd[cons].vsd_xs;
				if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
					vdsk_complete_cmd(sc, xs, cons);
				cons++;
				cons &= (sc->sc_vd->vd_nentries - 1);
			}
			sc->sc_tx_cons = cons;
			break;
		}

		case VIO_SUBTYPE_NACK:
			DPRINTF(("DATA/NACK/DRING_DATA\n"));
			break;

		default:
			DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
			break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = gettick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
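	/*
	 * The ring is exported through a single LDC cookie.  Its address
	 * encodes map-table slot 0, which vdsk_attach() pointed at the
	 * physical page holding the descriptor ring.
	 */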
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

#if OPENBSD_BUSDMA
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
#else
struct vdsk_dring *
vdsk_dring_alloc(int nentries)
#endif
{
	struct vdsk_dring *vd;
	bus_size_t size;
	vaddr_t va;
#if OPENBSD_BUSDMA
	int nsegs;
#endif
	int i;

	vd = kmem_zalloc(sizeof(struct vdsk_dring), KM_NOSLEEP);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

#if OPENBSD_BUSDMA
	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto free_vd;

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, (void *)&va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, (void *)va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;
#else
	va = (vaddr_t)kmem_zalloc(size, KM_NOSLEEP);
	if (va == 0)
		goto free_vd;
#endif
	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

#if OPENBSD_BUSDMA
unmap:
	bus_dmamem_unmap(t, (void *)va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
#endif
free_vd:
	kmem_free(vd, sizeof(struct vdsk_dring));
	return (NULL);
}

#if OPENBSD_BUSDMA
void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
#else
void
vdsk_dring_free(struct vdsk_dring *vd)
#endif
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

#if OPENBSD_BUSDMA
	bus_dmamap_unload(t, vd->vd_map);

	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
#else
	kmem_free(vd->vd_desc, size);
#endif
	kmem_free(vd, sizeof(struct vdsk_dring));
}

void *
vdsk_io_get(void *xsc)
{
	struct vdsk_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */
	int s;

	panic("%s: not verified yet", __func__);

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
	struct vdsk_softc *sc = xsc;
	int s;

	panic("%s: not verified yet", __func__);

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

void
vdsk_scsi_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	int timeout, s;
	int desc;

	DPRINTF(("vdsk_scsi_cmd() opcode %x\n", xs->cmd->opcode));

	switch (xs->cmd->opcode) {

		case SCSI_READ_6_COMMAND:
		case READ_10:
		case READ_12:
		case READ_16:
		case SCSI_WRITE_6_COMMAND:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
		case SCSI_SYNCHRONIZE_CACHE_10:
			break;

		case INQUIRY:
			vdsk_scsi_inq(sc, xs);
			return;

		case READ_CAPACITY_10:
			vdsk_scsi_capacity(sc, xs);
			return;

		case READ_CAPACITY_16:
			vdsk_scsi_capacity16(sc, xs);
			return;

		case SCSI_TEST_UNIT_READY:
		case START_STOP:
		case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		case SCSI_MODE_SENSE_6:
			vdsk_scsi_done(xs, XS_NOERROR);
			return;

		case SCSI_MODE_SENSE_10:
		case READ_TOC:
			vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
			return;

		default:
			panic("%s unhandled cmd 0x%02x\n",
			      __func__, xs->cmd->opcode);
	}

	s = splbio();
	desc = vdsk_submit_cmd(sc, xs);

	if (!ISSET(xs->xs_control, XS_CTL_POLL)) {
		splx(s);
		return;
	}
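	/*
	 * Polled command (e.g. during autoconf or dump): spin for up to
	 * a second waiting for the server to mark the descriptor done,
	 * and reset the channel if it never does.
	 */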
	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(sc, xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

int
vdsk_submit_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw_6 *rw6;
	struct scsipi_rw_10 *rw10;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t lba = 0;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {

		case SCSI_READ_6_COMMAND:
		case READ_10:
		case READ_12:
		case READ_16:
			operation = VD_OP_BREAD;
			break;

		case SCSI_WRITE_6_COMMAND:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
			operation = VD_OP_BWRITE;
			break;

		case SCSI_SYNCHRONIZE_CACHE_10:
			operation = VD_OP_FLUSH;
			break;

		default:
			panic("%s unhandled cmd opcode 0x%x",
			      __func__, xs->cmd->opcode);
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the same
	 * layout as 10-byte READ/WRITE commands.
	 */
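	/*
	 * Note that the 6-byte CDB only carries a 21-bit LBA: the top
	 * bits of the first address byte are masked with SRW_TOPADDR
	 * below.
	 */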
	if (xs->cmdlen == 6) {
		rw6 = (struct scsi_rw_6 *)xs->cmd;
		lba = _3btol(rw6->addr) & (SRW_TOPADDR << 16 | 0xffff);
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsipi_rw_10 *)xs->cmd;
		lba = _4btol(rw10->addr);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
	}

	DPRINTF(("lba = %lu\n", lba));

	desc = sc->sc_tx_prod;
	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
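	/*
	 * Walk the data buffer a page at a time: find a free map-table
	 * slot for each page and build an LDC cookie whose address
	 * encodes the slot index in the upper bits and the offset into
	 * the page in the lower bits.
	 */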
	while (len > 0) {
		DPRINTF(("len = %d\n", len));
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pa = 0;
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}
	if (ISSET(xs->xs_control, XS_CTL_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;

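	/*
	 * Make sure the descriptor contents above are globally visible
	 * before the state change below marks it ready for the server.
	 */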
	membar_Sync();

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

void
vdsk_complete_cmd(struct vdsk_softc *sc, struct scsipi_xfer *xs, int desc)
{
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen -
		sc->sc_vd->vd_desc[desc].size;

	/*
	 * scsi_done() called by vdsk_scsi_done() requires
	 * the kernel to be locked.
	 */
	KERNEL_LOCK(1, curlwp);
	vdsk_scsi_done(xs, error);
	KERNEL_UNLOCK_ONE(curlwp);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	vdsk_scsi_inquiry(sc, xs);
}

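/*
 * Synthesize INQUIRY data for the virtual disk: the device type comes
 * from the negotiated media type, with a fixed "SUN/Virtual Disk"
 * identification and the negotiated vDisk version as the revision.
 */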
void
vdsk_scsi_inquiry(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
		case VD_MEDIA_TYPE_CD:
		case VD_MEDIA_TYPE_DVD:
			inq.device = T_CDROM;
			break;

		case VD_MEDIA_TYPE_FIXED:
		default:
			inq.device = T_DIRECT;
			break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags3 |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_10_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

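	/*
	 * READ CAPACITY reports the address of the last block, hence
	 * size - 1; the 10-byte form can only express 32-bit block
	 * addresses, so larger disks are clamped and the caller is
	 * expected to retry with READ CAPACITY (16).
	 */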
	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu  block size %u\n",
	    __func__, capacity, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct vdsk_softc *sc, struct scsipi_xfer *xs)
{
	struct scsipi_read_capacity_16_data rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	DPRINTF(("%s() capacity %lu  block size %u\n",
	    __func__, sc->sc_vdisk_size - 1, sc->sc_vdisk_block_size));

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsipi_xfer *xs, int error)
{
	xs->error = error;

	scsipi_done(xs);
}
1387