/*	$OpenBSD: xbf.c,v 1.24 2017/02/24 16:58:12 mikeb Exp $	*/

/*
 * Copyright (c) 2016 Mike Belopuhov
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <scsi/scsi_all.h>
#include <scsi/cd.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

/* #define XBF_DEBUG */

#ifdef XBF_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

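/*
 * Operation codes mirror the Xen block interface (blkif) requests;
 * the comments name the XenStore node through which a backend
 * advertises support for each optional operation.
 */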
#define XBF_OP_READ		0
#define XBF_OP_WRITE		1
#define XBF_OP_BARRIER		2 /* feature-barrier */
#define XBF_OP_FLUSH		3 /* feature-flush-cache */
#define XBF_OP_DISCARD		5 /* feature-discard */
#define XBF_OP_INDIRECT		6 /* feature-max-indirect-segments */

#define XBF_MAX_SGE		11
#define XBF_MAX_ISGE		8

#define XBF_SEC_SHIFT		9

#define XBF_CDROM		1
#define XBF_REMOVABLE		2
#define XBF_READONLY		4

#define XBF_OK			0
#define XBF_EIO			-1 /* generic failure */
#define XBF_EOPNOTSUPP		-2 /* only for XBF_OP_BARRIER */

struct xbf_sge {
	uint32_t		 sge_ref;
	uint8_t			 sge_first;
	uint8_t			 sge_last;
	uint16_t		 sge_pad;
} __packed;
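
/*
 * Each SGE carries a grant reference for one page of the data buffer
 * in sge_ref; sge_first and sge_last select the first and the last
 * 512-byte sector of that page taking part in the transfer.
 */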

/* Generic I/O request */
struct xbf_req {
	uint8_t			 req_op;
	uint8_t			 req_nsegs;
	uint16_t		 req_unit;
#ifdef __amd64__
	uint32_t		 req_pad;
#endif
	uint64_t		 req_id;
	uint64_t		 req_sector;
	struct xbf_sge		 req_sgl[XBF_MAX_SGE];
} __packed;

/* Indirect I/O request */
struct xbf_ireq {
	uint8_t			 req_op;
	uint8_t			 req_iop;
	uint16_t		 req_nsegs;
#ifdef __amd64__
	uint32_t		 req_pad;
#endif
	uint64_t		 req_id;
	uint64_t		 req_sector;
	uint16_t		 req_unit;
	uint32_t		 req_gref[XBF_MAX_ISGE];
#ifdef __i386__
	uint64_t		 req_pad;
#endif
} __packed;

struct xbf_rsp {
	uint64_t		 rsp_id;
	uint8_t			 rsp_op;
	uint8_t			 rsp_pad1;
	int16_t			 rsp_status;
#ifdef __amd64__
	uint32_t		 rsp_pad2;
#endif
} __packed;

union xbf_ring_desc {
	struct xbf_req		 xrd_req;
	struct xbf_ireq		 xrd_ireq;
	struct xbf_rsp		 xrd_rsp;
} __packed;

#define XBF_MIN_RING_SIZE	1
#define XBF_MAX_RING_SIZE	8
#define XBF_MAX_REQS		256 /* must be a power of 2 */

struct xbf_ring {
	volatile uint32_t	 xr_prod;
	volatile uint32_t	 xr_prod_event;
	volatile uint32_t	 xr_cons;
	volatile uint32_t	 xr_cons_event;
	uint32_t		 xr_reserved[12];
	union xbf_ring_desc	 xr_desc[0];
} __packed;
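
/*
 * The ring header matches Xen's blkif_sring: xr_prod/xr_prod_event
 * correspond to req_prod/req_event and xr_cons/xr_cons_event to
 * rsp_prod/rsp_event.  Both indexes are free-running and are masked
 * with (sc_xr_ndesc - 1) to address xr_desc; the *_event fields
 * implement notification suppression.
 */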

struct xbf_dma_mem {
	bus_size_t		 dma_size;
	bus_dma_tag_t		 dma_tag;
	bus_dmamap_t		 dma_map;
	bus_dma_segment_t	*dma_seg;
	int			 dma_nsegs; /* allocated segment count */
	int			 dma_rsegs; /* segments actually used */
	caddr_t			 dma_vaddr;
};

struct xbf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	xen_intr_handle_t	 sc_xih;

	int			 sc_state;
#define  XBF_CONNECTED		  4
#define  XBF_CLOSING		  5

	int			 sc_caps;
#define  XBF_CAP_BARRIER	  0x0001
#define  XBF_CAP_FLUSH		  0x0002

	uint32_t		 sc_type;
	uint32_t		 sc_unit;
	char			 sc_dtype[16];
	char			 sc_prod[16];

	uint32_t		 sc_maxphys;
	uint64_t		 sc_disk_size;
	uint32_t		 sc_block_size;

	struct xbf_ring		*sc_xr;
	uint32_t		 sc_xr_cons;
	uint32_t		 sc_xr_prod;
	uint32_t		 sc_xr_size; /* in pages */
	struct xbf_dma_mem	 sc_xr_dma;
	uint32_t		 sc_xr_ref[XBF_MAX_RING_SIZE];
	int			 sc_xr_ndesc;

	struct scsi_xfer	**sc_xs;
	bus_dmamap_t		*sc_xs_map;
	int			 sc_xs_avail;
	struct xbf_dma_mem	*sc_xs_bb;

	struct scsi_iopool	 sc_iopool;
	struct scsi_adapter	 sc_switch;
	struct scsi_link	 sc_link;
	struct device		*sc_scsibus;
};

int	xbf_match(struct device *, void *, void *);
void	xbf_attach(struct device *, struct device *, void *);
int	xbf_detach(struct device *, int);

struct cfdriver xbf_cd = {
	NULL, "xbf", DV_DULL
};

const struct cfattach xbf_ca = {
	sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
};

void	xbf_intr(void *);

void	*xbf_io_get(void *);
void	xbf_io_put(void *, void *);

int	xbf_load_xs(struct scsi_xfer *, int);
int	xbf_bounce_xs(struct scsi_xfer *, int);
void	xbf_reclaim_xs(struct scsi_xfer *, int);

void	xbf_scsi_cmd(struct scsi_xfer *);
int	xbf_submit_cmd(struct scsi_xfer *);
int	xbf_poll_cmd(struct scsi_xfer *, int, int);
void	xbf_complete_cmd(struct scsi_xfer *, int);
int	xbf_dev_probe(struct scsi_link *);

void	xbf_scsi_minphys(struct buf *, struct scsi_link *);
void	xbf_scsi_inq(struct scsi_xfer *);
void	xbf_scsi_inquiry(struct scsi_xfer *);
void	xbf_scsi_capacity(struct scsi_xfer *);
void	xbf_scsi_capacity16(struct scsi_xfer *);
void	xbf_scsi_done(struct scsi_xfer *, int);

int	xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
	    bus_size_t, int, int);
void	xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);

int	xbf_get_type(struct xbf_softc *);
int	xbf_init(struct xbf_softc *);
int	xbf_ring_create(struct xbf_softc *);
void	xbf_ring_destroy(struct xbf_softc *);
void	xbf_stop(struct xbf_softc *);

int
xbf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vbd", xa->xa_name))
		return (0);

	return (1);
}

void
xbf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xbf_softc *sc = (struct xbf_softc *)self;
	struct scsibus_attach_args saa;

	sc->sc_parent = parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
	memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);

	if (xbf_get_type(sc))
		return;

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
	    sc->sc_dev.dv_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
	    sc->sc_dtype);

	scsi_iopool_init(&sc->sc_iopool, sc, xbf_io_get, xbf_io_put);

	if (xbf_init(sc))
		goto error;

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n",
		    sc->sc_dev.dv_xname);
		goto error;
	}

	sc->sc_switch.scsi_cmd = xbf_scsi_cmd;
	sc->sc_switch.scsi_minphys = xbf_scsi_minphys;
	sc->sc_switch.dev_probe = xbf_dev_probe;

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	sc->sc_link.openings = sc->sc_xr_ndesc - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	sc->sc_scsibus = config_found(self, &saa, scsiprint);

	xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);

	return;

 error:
	xen_intr_disestablish(sc->sc_xih);
}

int
xbf_detach(struct device *self, int flags)
{
	struct xbf_softc *sc = (struct xbf_softc *)self;
	int ostate = sc->sc_state;

	sc->sc_state = XBF_CLOSING;

	xen_intr_mask(sc->sc_xih);
	xen_intr_barrier(sc->sc_xih);

	if (ostate == XBF_CONNECTED) {
		xen_intr_disestablish(sc->sc_xih);
		xbf_stop(sc);
	}

	if (sc->sc_scsibus)
		return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));

	return (0);
}

void
xbf_intr(void *xsc)
{
	struct xbf_softc *sc = xsc;
	struct xbf_ring *xr = sc->sc_xr;
	struct xbf_dma_mem *dma = &sc->sc_xr_dma;
	struct scsi_xfer *xs;
	uint32_t cons;
	int desc;

	bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
		desc = cons & (sc->sc_xr_ndesc - 1);
		xs = sc->sc_xs[desc];
		if (xs != NULL)
			xbf_complete_cmd(xs, desc);
	}

	sc->sc_xr_cons = cons;
}

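/*
 * The iopool hands out no per-command resource: every ring slot is
 * tied to an opening, so xbf_io_get() merely returns a non-NULL token
 * while the device is in a state that can accept or drain commands,
 * and xbf_io_put() has nothing to release.
 */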
void *
xbf_io_get(void *xsc)
{
	struct xbf_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */

	if (sc->sc_state != XBF_CONNECTED &&
	    sc->sc_state != XBF_CLOSING)
		rv = NULL;

	return (rv);
}

void
xbf_io_put(void *xsc, void *io)
{
#ifdef DIAGNOSTIC
	struct xbf_softc *sc = xsc;

	if (sc != io)
		panic("xbf_io_put: unexpected io");
#endif
}

void
xbf_scsi_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	int desc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		if (sc->sc_state != XBF_CONNECTED) {
			xbf_scsi_done(xs, XS_SELTIMEOUT);
			return;
		}
		break;
	case SYNCHRONIZE_CACHE:
		if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
			xbf_scsi_done(xs, XS_NOERROR);
			return;
		}
		break;
	case INQUIRY:
		xbf_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		xbf_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		xbf_scsi_capacity16(xs);
		return;
	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		xbf_scsi_done(xs, XS_NOERROR);
		return;
	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	desc = xbf_submit_cmd(xs);
	if (desc < 0) {
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs, desc, 1000)) {
		printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
		    xs->cmd->opcode);
		if (sc->sc_state == XBF_CONNECTED) {
			sc->sc_xs[desc] = NULL;
			xbf_reclaim_xs(xs, desc);
			xbf_scsi_done(xs, XS_TIMEOUT);
		}
		return;
	}
}

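/*
 * Load the xfer data directly for DMA.  With the Xen bus_dma tag the
 * backend domain ID is encoded in the upper 16 bits of the map flags
 * and each resulting segment's ds_addr holds a grant reference for
 * that domain rather than a physical address, which is why ds_addr
 * can be copied straight into sge_ref below.
 */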
int
xbf_load_xs(struct scsi_xfer *xs, int desc)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_sge *sge;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	int i, error, mapflags;

	xrd = &sc->sc_xr->xr_desc[desc];
	map = sc->sc_xs_map[desc];

	mapflags = (sc->sc_domid << 16);
	if (ISSET(xs->flags, SCSI_NOSLEEP))
		mapflags |= BUS_DMA_NOWAIT;
	else
		mapflags |= BUS_DMA_WAITOK;
	if (ISSET(xs->flags, SCSI_DATA_IN))
		mapflags |= BUS_DMA_READ;
	else
		mapflags |= BUS_DMA_WRITE;

	error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
	    NULL, mapflags);
	if (error) {
		DPRINTF("%s: failed to load %u bytes of data\n",
		    sc->sc_dev.dv_xname, xs->datalen);
		return (-1);
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		sge = &xrd->xrd_req.req_sgl[i];
		sge->sge_ref = map->dm_segs[i].ds_addr;
		sge->sge_first = i > 0 ? 0 :
		    ((vaddr_t)xs->data & PAGE_MASK) >> XBF_SEC_SHIFT;
		sge->sge_last = sge->sge_first +
		    (map->dm_segs[i].ds_len >> XBF_SEC_SHIFT) - 1;

		DPRINTF("%s:   seg %d/%d ref %lu len %lu first %u last %u\n",
		    sc->sc_dev.dv_xname, i + 1, map->dm_nsegs,
		    map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len,
		    sge->sge_first, sge->sge_last);

		KASSERT(sge->sge_last <= 7);
	}

	xrd->xrd_req.req_nsegs = map->dm_nsegs;

	return (0);
}

int
xbf_bounce_xs(struct scsi_xfer *xs, int desc)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_sge *sge;
	struct xbf_dma_mem *dma;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	bus_size_t size;
	int i, error, mapflags;

	xrd = &sc->sc_xr->xr_desc[desc];
	dma = &sc->sc_xs_bb[desc];

	size = roundup(xs->datalen, PAGE_SIZE);
	if (size > sc->sc_maxphys)
		return (EFBIG);

	mapflags = (sc->sc_domid << 16);
	if (ISSET(xs->flags, SCSI_NOSLEEP))
		mapflags |= BUS_DMA_NOWAIT;
	else
		mapflags |= BUS_DMA_WAITOK;
	if (ISSET(xs->flags, SCSI_DATA_IN))
		mapflags |= BUS_DMA_READ;
	else
		mapflags |= BUS_DMA_WRITE;

	error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
	if (error) {
		DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
		    sc->sc_dev.dv_xname, size);
		return (error);
	}

	map = dma->dma_map;

	DPRINTF("%s: bouncing %d bytes via %ld size map with %d segments\n",
	    sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);

	if (ISSET(xs->flags, SCSI_DATA_OUT))
		memcpy((caddr_t)dma->dma_vaddr, xs->data, xs->datalen);

	for (i = 0; i < map->dm_nsegs; i++) {
		sge = &xrd->xrd_req.req_sgl[i];
		sge->sge_ref = map->dm_segs[i].ds_addr;
		sge->sge_first = i > 0 ? 0 :
		    ((vaddr_t)xs->data & PAGE_MASK) >> XBF_SEC_SHIFT;
		sge->sge_last = sge->sge_first +
		    (map->dm_segs[i].ds_len >> XBF_SEC_SHIFT) - 1;

		DPRINTF("%s:   seg %d/%d ref %lu len %lu first %u last %u\n",
		    sc->sc_dev.dv_xname, i + 1, map->dm_nsegs,
		    map->dm_segs[i].ds_addr, map->dm_segs[i].ds_len,
		    sge->sge_first, sge->sge_last);

		KASSERT(sge->sge_last <= 7);
	}

	xrd->xrd_req.req_nsegs = map->dm_nsegs;

	return (0);
}

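/*
 * Undo the effects of xbf_bounce_xs(): copy the data back out of the
 * bounce buffer for reads and free it.  Transfers loaded directly by
 * xbf_load_xs() leave dma_size at zero, so there is nothing to do.
 */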
void
xbf_reclaim_xs(struct scsi_xfer *xs, int desc)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_dma_mem *dma;

	dma = &sc->sc_xs_bb[desc];
	if (dma->dma_size == 0)
		return;

	if (ISSET(xs->flags, SCSI_DATA_IN))
		memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);

	xbf_dma_free(sc, dma);
}

int
xbf_submit_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	uint64_t lba = 0;
	uint32_t nblk = 0;
	uint8_t operation = 0;
	int desc, error;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = XBF_OP_READ;
		break;

	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = XBF_OP_WRITE;
		break;

	case SYNCHRONIZE_CACHE:
		if (sc->sc_caps & XBF_CAP_FLUSH)
			operation = XBF_OP_FLUSH;
		else if (sc->sc_caps & XBF_CAP_BARRIER)
			operation = XBF_OP_BARRIER;
		break;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
	 * has the same layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		nblk = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		nblk = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		nblk = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		nblk = _4btol(rw16->length);
	}

	desc = sc->sc_xr_prod & (sc->sc_xr_ndesc - 1);
	xrd = &sc->sc_xr->xr_desc[desc];
	map = sc->sc_xs_map[desc];

	xrd->xrd_req.req_op = operation;
	xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
	xrd->xrd_req.req_sector = lba;

	if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
		DPRINTF("%s: desc %u %s%s lba %llu nsec %u len %u\n",
		    sc->sc_dev.dv_xname, desc, operation == XBF_OP_READ ?
		    "read" : "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" :
		    "", lba, nblk, xs->datalen);

		if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
			error = xbf_load_xs(xs, desc);
		else
			error = xbf_bounce_xs(xs, desc);
		if (error)
			return (-1); /* the caller checks for desc < 0 */
	} else {
		DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
		    desc, operation == XBF_OP_FLUSH ? "flush" : "barrier",
		    ISSET(xs->flags, SCSI_POLL) ? "-poll" : "", lba);
		xrd->xrd_req.req_nsegs = 0;
	}

	sc->sc_xs[desc] = xs;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

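	/*
	 * Publish the request: advance the private producer, expose it
	 * in the shared ring and set the response-event threshold so
	 * the backend notifies us once it has responded to everything
	 * submitted so far.
	 */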
	sc->sc_xr_prod++;
	sc->sc_xr->xr_prod = sc->sc_xr_prod;
	sc->sc_xr->xr_cons_event = sc->sc_xr_prod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	xen_intr_signal(sc->sc_xih);

	return (desc);
}

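/*
 * Busy-wait for a SCSI_POLL transfer to complete: returns zero once
 * ITSDONE is set and nonzero if the timeout expired first.
 */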
int
xbf_poll_cmd(struct scsi_xfer *xs, int desc, int timo)
{
	do {
		if (ISSET(xs->flags, ITSDONE))
			break;
		if (ISSET(xs->flags, SCSI_NOSLEEP))
			delay(10);
		else
			tsleep(xs, PRIBIO, "xbfpoll", 1);
	} while (--timo > 0);

	/* Report a timeout if the transfer never completed. */
	return (ISSET(xs->flags, ITSDONE) ? 0 : -1);
}

void
xbf_complete_cmd(struct scsi_xfer *xs, int desc)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	uint64_t id;
	int error;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	xrd = &sc->sc_xr->xr_desc[desc];
	error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
	    XS_DRIVER_STUFFUP;

	if (sc->sc_xs_bb[desc].dma_size > 0)
		map = sc->sc_xs_bb[desc].dma_map;
	else
		map = sc->sc_xs_map[desc];
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);

	sc->sc_xs[desc] = NULL;

	DPRINTF("%s: completing desc %u(%llu) op %u with error %d\n",
	    sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
	    xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);

	id = xrd->xrd_rsp.rsp_id;
	memset(xrd, 0, sizeof(*xrd));
	xrd->xrd_req.req_id = id;

	xs->resid = 0;

	xbf_reclaim_xs(xs, desc);
	xbf_scsi_done(xs, error);
}

void
xbf_scsi_minphys(struct buf *bp, struct scsi_link *sl)
{
	struct xbf_softc *sc = sl->adapter_softc;

	if (bp->b_bcount > sc->sc_maxphys)
		bp->b_bcount = sc->sc_maxphys;
}

void
xbf_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(inq->flags, SI_EVPD))
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
	else
		xbf_scsi_inquiry(xs);
}

void
xbf_scsi_inquiry(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_inquiry_data inq;

	bzero(&inq, sizeof(inq));

	switch (sc->sc_type) {
	case XBF_CDROM:
		inq.device = T_CDROM;
		break;
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("Xen     ", inq.vendor, sizeof(inq.vendor));
	bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
	bcopy("0000", inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_capacity(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_disk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_capacity16(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_disk_size - 1, rcd.addr);
	_lto4b(sc->sc_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_done(struct scsi_xfer *xs, int error)
{
	int s;

	xs->error = error;

	s = splbio();
	scsi_done(xs);
	splx(s);
}

int
xbf_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);

	if (link->target == 0)
		return (0);

	return (ENODEV);
}

int
xbf_get_type(struct xbf_softc *sc)
{
	unsigned long long res;
	const char *prop;
	char val[32];
	int error;

	prop = "type";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
	    sizeof(val))) != 0)
		goto errout;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);

	prop = "dev";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
	    sizeof(val))) != 0)
		goto errout;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);

	prop = "virtual-device";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
		goto errout;
	sc->sc_unit = (uint32_t)res;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);

	prop = "device-type";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
	    sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
		goto errout;
	if (!strcmp(sc->sc_dtype, "cdrom"))
		sc->sc_type = XBF_CDROM;

	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

int
xbf_init(struct xbf_softc *sc)
{
	unsigned long long res;
	const char *action, *prop;
	char pbuf[sizeof("ring-refXX")];
	int i, error;

	prop = "max-ring-page-order";
	error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
	if (error == 0)
		sc->sc_xr_size = 1 << res;
	if (error == ENOENT) {
		prop = "max-ring-pages";
		error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
		if (error == 0)
			sc->sc_xr_size = res;
	}
	/* Fall back to the known minimum */
	if (error)
		sc->sc_xr_size = XBF_MIN_RING_SIZE;

	if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
		sc->sc_xr_size = XBF_MIN_RING_SIZE;
	if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
		sc->sc_xr_size = XBF_MAX_RING_SIZE;
	if (!powerof2(sc->sc_xr_size))
		sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);

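	/*
	 * The descriptor count is whatever fits into the ring pages
	 * after the header, rounded down to a power of two so that the
	 * free-running ring indexes can be masked, and capped at
	 * XBF_MAX_REQS.
	 */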
	sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
	    sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
	if (!powerof2(sc->sc_xr_ndesc))
		sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
	if (sc->sc_xr_ndesc > XBF_MAX_REQS)
		sc->sc_xr_ndesc = XBF_MAX_REQS;

	DPRINTF("%s: %u ring pages, %u requests\n",
	    sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);

	if (xbf_ring_create(sc))
		return (-1);

	action = "set";

	for (i = 0; i < sc->sc_xr_size; i++) {
		if (i == 0 && sc->sc_xr_size == 1)
			snprintf(pbuf, sizeof(pbuf), "ring-ref");
		else
			snprintf(pbuf, sizeof(pbuf), "ring-ref%u", i);
		prop = pbuf;
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    sc->sc_xr_ref[i]))
			goto errout;
	}

	if (sc->sc_xr_size > 1) {
		prop = "num-ring-pages";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    sc->sc_xr_size))
			goto errout;
		prop = "ring-page-order";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    fls(sc->sc_xr_size) - 1))
			goto errout;
	}

	prop = "event-channel";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
		goto errout;

	prop = "protocol";
#ifdef __amd64__
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
	    strlen("x86_64-abi")))
		goto errout;
#else
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
	    strlen("x86_32-abi")))
		goto errout;
#endif

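	/*
	 * XenStore handshake: announce INITIALIZED, wait for the
	 * backend to reach CONNECTED, pick up the disk geometry and
	 * feature nodes it has published and finally switch our own
	 * state to CONNECTED.
	 */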
	if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
	    XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
		printf("%s: failed to set state to INITIALIZED\n",
		    sc->sc_dev.dv_xname);
		xbf_ring_destroy(sc);
		return (-1);
	}

	if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
	    XEN_STATE_CONNECTED, 10000)) {
		printf("%s: timed out waiting for backend to connect\n",
		    sc->sc_dev.dv_xname);
		xbf_ring_destroy(sc);
		return (-1);
	}

	action = "read";

	prop = "sectors";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
		goto errout;
	sc->sc_disk_size = res;

	prop = "sector-size";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
		goto errout;
	sc->sc_block_size = res;

	prop = "feature-barrier";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XBF_CAP_BARRIER;

	prop = "feature-flush-cache";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XBF_CAP_FLUSH;

#ifdef XBF_DEBUG
	if (sc->sc_caps) {
		printf("%s: features:", sc->sc_dev.dv_xname);
		if (sc->sc_caps & XBF_CAP_BARRIER)
			printf(" BARRIER");
		if (sc->sc_caps & XBF_CAP_FLUSH)
			printf(" FLUSH");
		printf("\n");
	}
#endif

	if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
	    XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
		printf("%s: failed to set state to CONNECTED\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	sc->sc_state = XBF_CONNECTED;

	return (0);

 errout:
	printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
	    action, prop, error);
	xbf_ring_destroy(sc);
	return (-1);
}

int
xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
    bus_size_t size, int nsegs, int mapflags)
{
	int error;

	dma->dma_tag = sc->sc_dmat;

	dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (dma->dma_seg == NULL) {
		printf("%s: failed to allocate a segment array\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		printf("%s: failed to create a memory map (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto errout;
	}

	error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
	    dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to allocate DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto destroy;
	}

	error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to map DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto free;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, NULL, mapflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to load DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto unmap;
	}

	dma->dma_size = size;
	dma->dma_nsegs = nsegs;
	return (0);

 unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
 free:
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
 destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
 errout:
	free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (error);
}

void
xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
{
	if (dma->dma_tag == NULL || dma->dma_map == NULL)
		return;
	bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs *
	    sizeof(bus_dma_segment_t));
	dma->dma_seg = NULL;
	dma->dma_map = NULL;
	dma->dma_size = 0;
}

int
xbf_ring_create(struct xbf_softc *sc)
{
	int i, error, nsegs;

	if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
	    sc->sc_xr_size, sc->sc_domid << 16))
		return (-1);
	for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
		sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;

	sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;

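	/*
	 * Prime both event thresholds so that the first request and
	 * the first response each raise a notification.
	 */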
	sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;

	/* SCSI transfer map */
	sc->sc_xs_map = mallocarray(sc->sc_xr_ndesc, sizeof(bus_dmamap_t),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_xs_map == NULL) {
		printf("%s: failed to allocate scsi transfer map\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_xs = mallocarray(sc->sc_xr_ndesc, sizeof(struct scsi_xfer *),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_xs == NULL) {
		printf("%s: failed to allocate scsi transfer array\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}
	sc->sc_xs_avail = sc->sc_xr_ndesc;

	/* Bounce buffer maps for unaligned buffers */
	sc->sc_xs_bb = mallocarray(sc->sc_xr_ndesc, sizeof(struct xbf_dma_mem),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_xs_bb == NULL) {
		printf("%s: failed to allocate bounce buffer maps\n",
		    sc->sc_dev.dv_xname);
		goto errout;
	}

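	/*
	 * A generic request carries at most XBF_MAX_SGE pages, which
	 * bounds the largest transfer this adapter will accept.
	 */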
	nsegs = MIN(MAXPHYS / PAGE_SIZE, XBF_MAX_SGE);
	sc->sc_maxphys = nsegs * PAGE_SIZE;

	for (i = 0; i < sc->sc_xr_ndesc; i++) {
		error = bus_dmamap_create(sc->sc_dmat, sc->sc_maxphys, nsegs,
		    PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_xs_map[i]);
		if (error) {
			printf("%s: failed to create a memory map for "
			    "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
			    error);
			goto errout;
		}
		sc->sc_xr->xr_desc[i].xrd_req.req_id = i;
	}

	return (0);

 errout:
	xbf_ring_destroy(sc);
	return (-1);
}

void
xbf_ring_destroy(struct xbf_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_xr_ndesc; i++) {
		if (sc->sc_xs_map == NULL)
			break;
		if (sc->sc_xs_map[i] == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_xs_map[i], 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_xs_map[i]);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_xs_map[i]);
		sc->sc_xs_map[i] = NULL;
	}

	free(sc->sc_xs, M_DEVBUF, sc->sc_xr_ndesc *
	    sizeof(struct scsi_xfer *));
	sc->sc_xs = NULL;

	free(sc->sc_xs_map, M_DEVBUF, sc->sc_xr_ndesc *
	    sizeof(bus_dmamap_t));
	sc->sc_xs_map = NULL;

	free(sc->sc_xs_bb, M_DEVBUF, sc->sc_xr_ndesc *
	    sizeof(struct xbf_dma_mem));
	sc->sc_xs_bb = NULL;

	xbf_dma_free(sc, &sc->sc_xr_dma);
	sc->sc_xr = NULL;
}

void
xbf_stop(struct xbf_softc *sc)
{
	union xbf_ring_desc *xrd;
	struct scsi_xfer *xs;
	bus_dmamap_t map;
	int desc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	for (desc = 0; desc < sc->sc_xr_ndesc; desc++) {
		xs = sc->sc_xs[desc];
		if (xs == NULL)
			continue;
		xrd = &sc->sc_xr->xr_desc[desc];
		DPRINTF("%s: aborting desc %u(%llu) op %u\n",
		    sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
		    xrd->xrd_rsp.rsp_op);
		if (sc->sc_xs_bb[desc].dma_size > 0)
			map = sc->sc_xs_bb[desc].dma_map;
		else
			map = sc->sc_xs_map[desc];
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		xbf_reclaim_xs(xs, desc);
		xbf_scsi_done(xs, XS_SELTIMEOUT);
		sc->sc_xs[desc] = NULL;
	}

	xbf_ring_destroy(sc);
}
1254