/*	$OpenBSD: xbf.c,v 1.36 2020/02/12 14:08:56 krw Exp $	*/

/*
 * Copyright (c) 2016, 2017 Mike Belopuhov
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

#include <scsi/scsi_all.h>
#include <scsi/cd.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

/* #define XBF_DEBUG */

#ifdef XBF_DEBUG
#define DPRINTF(x...)		printf(x)
#else
#define DPRINTF(x...)
#endif

#define XBF_OP_READ		0
#define XBF_OP_WRITE		1
#define XBF_OP_BARRIER		2 /* feature-barrier */
#define XBF_OP_FLUSH		3 /* feature-flush-cache */
#define XBF_OP_DISCARD		5 /* feature-discard */
#define XBF_OP_INDIRECT		6 /* feature-max-indirect-segments */

#define XBF_MAX_SGE		11
#define XBF_MAX_ISGE		8

#define XBF_SEC_SHIFT		9

#define XBF_CDROM		1
#define XBF_REMOVABLE		2
#define XBF_READONLY		4

#define XBF_OK			0
#define XBF_EIO			-1 /* generic failure */
#define XBF_EOPNOTSUPP		-2 /* only for XBF_OP_BARRIER */

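/*
 * A scatter-gather element describes a single granted page: sge_ref
 * carries the grant reference while sge_first and sge_last select the
 * range of 512-byte sectors within that page taking part in the
 * transfer.
 */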
struct xbf_sge {
	uint32_t		 sge_ref;
	uint8_t			 sge_first;
	uint8_t			 sge_last;
	uint16_t		 sge_pad;
} __packed;

/* Generic I/O request */
struct xbf_req {
	uint8_t			 req_op;
	uint8_t			 req_nsegs;
	uint16_t		 req_unit;
#ifdef __amd64__
	uint32_t		 req_pad;
#endif
	uint64_t		 req_id;
	uint64_t		 req_sector;
	struct xbf_sge		 req_sgl[XBF_MAX_SGE];
} __packed;

/* Indirect I/O request */
struct xbf_ireq {
	uint8_t			 req_op;
	uint8_t			 req_iop;
	uint16_t		 req_nsegs;
#ifdef __amd64__
	uint32_t		 req_pad;
#endif
	uint64_t		 req_id;
	uint64_t		 req_sector;
	uint16_t		 req_unit;
	uint32_t		 req_gref[XBF_MAX_ISGE];
#ifdef __i386__
	uint64_t		 req_pad;
#endif
} __packed;

struct xbf_rsp {
	uint64_t		 rsp_id;
	uint8_t			 rsp_op;
	uint8_t			 rsp_pad1;
	int16_t			 rsp_status;
#ifdef __amd64__
	uint32_t		 rsp_pad2;
#endif
} __packed;

union xbf_ring_desc {
	struct xbf_req		 xrd_req;
	struct xbf_ireq		 xrd_ireq;
	struct xbf_rsp		 xrd_rsp;
} __packed;

#define XBF_MIN_RING_SIZE	1
#define XBF_MAX_RING_SIZE	8
#define XBF_MAX_REQS		256 /* must be a power of 2 */

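/*
 * Shared ring page layout.  The first 64 bytes hold the producer and
 * consumer indices along with the event counters used to moderate
 * notifications; xr_reserved pads this header so that the descriptors
 * start at a 64-byte boundary.  The frontend advances xr_prod as it
 * posts requests and the backend advances xr_cons as it returns
 * responses in the same descriptor slots.
 */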
struct xbf_ring {
	volatile uint32_t	 xr_prod;
	volatile uint32_t	 xr_prod_event;
	volatile uint32_t	 xr_cons;
	volatile uint32_t	 xr_cons_event;
	uint32_t		 xr_reserved[12];
	union xbf_ring_desc	 xr_desc[0];
} __packed;

struct xbf_dma_mem {
	bus_size_t		 dma_size;
	bus_dma_tag_t		 dma_tag;
	bus_dmamap_t		 dma_map;
	bus_dma_segment_t	*dma_seg;
	int			 dma_nsegs; /* total number of segments */
	int			 dma_rsegs; /* number of segments in use */
	caddr_t			 dma_vaddr;
};

struct xbf_ccb {
	struct scsi_xfer	*ccb_xfer;  /* associated transfer */
	bus_dmamap_t		 ccb_dmap;  /* transfer map */
	struct xbf_dma_mem	 ccb_bbuf;  /* bounce buffer */
	uint32_t		 ccb_first; /* first descriptor */
	uint32_t		 ccb_last;  /* last descriptor */
	uint16_t		 ccb_want;  /* expected chunks */
	uint16_t		 ccb_seen;  /* completed chunks */
	TAILQ_ENTRY(xbf_ccb)	 ccb_link;
};
TAILQ_HEAD(xbf_ccb_queue, xbf_ccb);

struct xbf_softc {
	struct device		 sc_dev;
	struct device		*sc_parent;
	char			 sc_node[XEN_MAX_NODE_LEN];
	char			 sc_backend[XEN_MAX_BACKEND_LEN];
	bus_dma_tag_t		 sc_dmat;
	int			 sc_domid;

	xen_intr_handle_t	 sc_xih;

	int			 sc_state;
#define  XBF_CONNECTED		  4
#define  XBF_CLOSING		  5

	int			 sc_caps;
#define  XBF_CAP_BARRIER	  0x0001
#define  XBF_CAP_FLUSH		  0x0002

	uint32_t		 sc_type;
	uint32_t		 sc_unit;
	char			 sc_dtype[16];
	char			 sc_prod[16];

	uint64_t		 sc_disk_size;
	uint32_t		 sc_block_size;

	/* Ring */
	struct xbf_ring		*sc_xr;
	uint32_t		 sc_xr_cons;
	uint32_t		 sc_xr_prod;
	uint32_t		 sc_xr_size; /* in pages */
	struct xbf_dma_mem	 sc_xr_dma;
	uint32_t		 sc_xr_ref[XBF_MAX_RING_SIZE];
	int			 sc_xr_ndesc;

	/* Maximum number of blocks that one descriptor may refer to */
	int			 sc_xrd_nblk;

	/* CCBs */
	int			 sc_nccb;
	struct xbf_ccb		*sc_ccbs;
	struct xbf_ccb_queue	 sc_ccb_fq; /* free queue */
	struct xbf_ccb_queue	 sc_ccb_sq; /* pending requests */
	struct mutex		 sc_ccb_fqlck;
	struct mutex		 sc_ccb_sqlck;

	struct scsi_iopool	 sc_iopool;
	struct scsi_link	 sc_link;
	struct device		*sc_scsibus;
};

int	xbf_match(struct device *, void *, void *);
void	xbf_attach(struct device *, struct device *, void *);
int	xbf_detach(struct device *, int);

struct cfdriver xbf_cd = {
	NULL, "xbf", DV_DULL
};

const struct cfattach xbf_ca = {
	sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
};

void	xbf_intr(void *);

int	xbf_load_cmd(struct scsi_xfer *);
int	xbf_bounce_cmd(struct scsi_xfer *);
void	xbf_reclaim_cmd(struct scsi_xfer *);

void	xbf_scsi_cmd(struct scsi_xfer *);
int	xbf_submit_cmd(struct scsi_xfer *);
int	xbf_poll_cmd(struct scsi_xfer *);
void	xbf_complete_cmd(struct xbf_softc *, struct xbf_ccb_queue *, int);
int	xbf_dev_probe(struct scsi_link *);

struct scsi_adapter xbf_switch = {
	xbf_scsi_cmd, NULL, xbf_dev_probe, NULL, NULL
};

void	xbf_scsi_inq(struct scsi_xfer *);
void	xbf_scsi_inquiry(struct scsi_xfer *);
void	xbf_scsi_capacity(struct scsi_xfer *);
void	xbf_scsi_capacity16(struct scsi_xfer *);
void	xbf_scsi_done(struct scsi_xfer *, int);

int	xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
	    bus_size_t, int, int);
void	xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);

int	xbf_get_type(struct xbf_softc *);
int	xbf_init(struct xbf_softc *);
int	xbf_ring_create(struct xbf_softc *);
void	xbf_ring_destroy(struct xbf_softc *);
void	xbf_stop(struct xbf_softc *);

int	xbf_alloc_ccbs(struct xbf_softc *);
void	xbf_free_ccbs(struct xbf_softc *);
void	*xbf_get_ccb(void *);
void	xbf_put_ccb(void *, void *);

int
xbf_match(struct device *parent, void *match, void *aux)
{
	struct xen_attach_args *xa = aux;

	if (strcmp("vbd", xa->xa_name))
		return (0);

	return (1);
}

void
xbf_attach(struct device *parent, struct device *self, void *aux)
{
	struct xen_attach_args *xa = aux;
	struct xbf_softc *sc = (struct xbf_softc *)self;
	struct scsibus_attach_args saa;

	sc->sc_parent = parent;
	sc->sc_dmat = xa->xa_dmat;
	sc->sc_domid = xa->xa_domid;

	memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
	memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);

	if (xbf_get_type(sc))
		return;

	if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
	    sc->sc_dev.dv_xname)) {
		printf(": failed to establish an interrupt\n");
		return;
	}
	xen_intr_mask(sc->sc_xih);

	printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
	    sc->sc_dtype);

	if (xbf_init(sc))
		goto error;

	if (xen_intr_unmask(sc->sc_xih)) {
		printf("%s: failed to enable interrupts\n",
		    sc->sc_dev.dv_xname);
		goto error;
	}

	sc->sc_link.adapter = &xbf_switch;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1;
	sc->sc_link.adapter_target = 2;
	sc->sc_link.openings = sc->sc_nccb;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	sc->sc_scsibus = config_found(self, &saa, scsiprint);

	xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);

	return;

 error:
	xen_intr_disestablish(sc->sc_xih);
}

int
xbf_detach(struct device *self, int flags)
{
	struct xbf_softc *sc = (struct xbf_softc *)self;
	int ostate = sc->sc_state;

	sc->sc_state = XBF_CLOSING;

	xen_intr_mask(sc->sc_xih);
	xen_intr_barrier(sc->sc_xih);

	if (ostate == XBF_CONNECTED) {
		xen_intr_disestablish(sc->sc_xih);
		xbf_stop(sc);
	}

	if (sc->sc_scsibus)
		return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));

	return (0);
}

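/*
 * The interrupt handler drains responses from the shared ring until
 * the backend stops producing them.  Completed transfers are first
 * collected on a local queue and then finished with scsi_done() under
 * the kernel lock at splbio.
 */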
void
xbf_intr(void *xsc)
{
	struct xbf_softc *sc = xsc;
	struct xbf_ring *xr = sc->sc_xr;
	struct xbf_dma_mem *dma = &sc->sc_xr_dma;
	struct xbf_ccb_queue cq;
	struct xbf_ccb *ccb, *nccb;
	uint32_t cons;
	int desc, s;

	TAILQ_INIT(&cq);

	for (;;) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
			desc = cons & (sc->sc_xr_ndesc - 1);
			xbf_complete_cmd(sc, &cq, desc);
		}

		sc->sc_xr_cons = cons;

		if (TAILQ_EMPTY(&cq))
			break;

		s = splbio();
		KERNEL_LOCK();
		TAILQ_FOREACH_SAFE(ccb, &cq, ccb_link, nccb) {
			TAILQ_REMOVE(&cq, ccb, ccb_link);
			xbf_reclaim_cmd(ccb->ccb_xfer);
			scsi_done(ccb->ccb_xfer);
		}
		KERNEL_UNLOCK();
		splx(s);
	}
}

void
xbf_scsi_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		if (sc->sc_state != XBF_CONNECTED) {
			xbf_scsi_done(xs, XS_SELTIMEOUT);
			return;
		}
		break;
	case SYNCHRONIZE_CACHE:
		if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
			xbf_scsi_done(xs, XS_NOERROR);
			return;
		}
		break;
	case INQUIRY:
		xbf_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		xbf_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		xbf_scsi_capacity16(xs);
		return;
	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		xbf_scsi_done(xs, XS_NOERROR);
		return;
	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (xbf_submit_cmd(xs)) {
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs)) {
		printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
		    xs->cmd->opcode);
		if (sc->sc_state == XBF_CONNECTED) {
			xbf_reclaim_cmd(xs);
			xbf_scsi_done(xs, XS_TIMEOUT);
		}
		return;
	}
}

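/*
 * Map the transfer buffer directly and translate the resulting DMA
 * segments into ring SGEs.  One descriptor holds at most XBF_MAX_SGE
 * elements; larger transfers continue in the subsequent descriptors
 * prepared by xbf_submit_cmd().  Only the first SGE of a descriptor
 * may start at a non-zero sector offset within its page.
 */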
int
xbf_load_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_ccb *ccb = xs->io;
	struct xbf_sge *sge;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	int error, mapflags, nsg, seg;
	int desc, ndesc = 0;

	map = ccb->ccb_dmap;

	mapflags = (sc->sc_domid << 16);
	if (ISSET(xs->flags, SCSI_NOSLEEP))
		mapflags |= BUS_DMA_NOWAIT;
	else
		mapflags |= BUS_DMA_WAITOK;
	if (ISSET(xs->flags, SCSI_DATA_IN))
		mapflags |= BUS_DMA_READ;
	else
		mapflags |= BUS_DMA_WRITE;

	error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
	    NULL, mapflags);
	if (error) {
		printf("%s: failed to load %d bytes of data\n",
		    sc->sc_dev.dv_xname, xs->datalen);
		return (error);
	}

	xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
	/* seg is the segment map iterator, nsg is the s-g list iterator */
	for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
		if (nsg == XBF_MAX_SGE) {
			/* Number of segments so far */
			xrd->xrd_req.req_nsegs = nsg;
			/* Pick next descriptor */
			ndesc++;
			desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
			xrd = &sc->sc_xr->xr_desc[desc];
			nsg = 0;
		}
		sge = &xrd->xrd_req.req_sgl[nsg];
		sge->sge_ref = map->dm_segs[seg].ds_addr;
		sge->sge_first = nsg > 0 ? 0 :
		    (((vaddr_t)xs->data + ndesc * sc->sc_xrd_nblk *
			(1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
		sge->sge_last = sge->sge_first +
		    (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;

		DPRINTF("%s:   seg %d/%d ref %lu len %lu first %u last %u\n",
		    sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
		    map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
		    sge->sge_first, sge->sge_last);

		KASSERT(sge->sge_last <= 7);
	}

	xrd->xrd_req.req_nsegs = nsg;

	return (0);
}

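/*
 * Like xbf_load_cmd(), but for transfer buffers that are not sector
 * aligned: the data is staged through a page-aligned bounce buffer
 * allocated here and copied in or out around the transfer.
 */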
int
xbf_bounce_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_ccb *ccb = xs->io;
	struct xbf_sge *sge;
	struct xbf_dma_mem *dma;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	bus_size_t size;
	int error, mapflags, nsg, seg;
	int desc, ndesc = 0;

	size = roundup(xs->datalen, PAGE_SIZE);
	if (size > MAXPHYS)
		return (EFBIG);

	mapflags = (sc->sc_domid << 16);
	if (ISSET(xs->flags, SCSI_NOSLEEP))
		mapflags |= BUS_DMA_NOWAIT;
	else
		mapflags |= BUS_DMA_WAITOK;
	if (ISSET(xs->flags, SCSI_DATA_IN))
		mapflags |= BUS_DMA_READ;
	else
		mapflags |= BUS_DMA_WRITE;

	dma = &ccb->ccb_bbuf;
	error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
	if (error) {
		DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
		    sc->sc_dev.dv_xname, size);
		return (error);
	}

	map = dma->dma_map;

	DPRINTF("%s: bouncing %d bytes via %lu size map with %d segments\n",
	    sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);

	if (ISSET(xs->flags, SCSI_DATA_OUT))
		memcpy(dma->dma_vaddr, xs->data, xs->datalen);

	xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
	/* seg is the map segment iterator, nsg is the s-g element iterator */
	for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
		if (nsg == XBF_MAX_SGE) {
			/* Number of segments so far */
			xrd->xrd_req.req_nsegs = nsg;
			/* Pick next descriptor */
			ndesc++;
			desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
			xrd = &sc->sc_xr->xr_desc[desc];
			nsg = 0;
		}
		sge = &xrd->xrd_req.req_sgl[nsg];
		sge->sge_ref = map->dm_segs[seg].ds_addr;
		sge->sge_first = nsg > 0 ? 0 :
		    (((vaddr_t)dma->dma_vaddr + ndesc * sc->sc_xrd_nblk *
			(1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
		sge->sge_last = sge->sge_first +
		    (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;

		DPRINTF("%s:   seg %d/%d ref %lu len %lu first %u last %u\n",
		    sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
		    map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
		    sge->sge_first, sge->sge_last);

		KASSERT(sge->sge_last <= 7);
	}

	xrd->xrd_req.req_nsegs = nsg;

	return (0);
}

void
xbf_reclaim_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_ccb *ccb = xs->io;
	struct xbf_dma_mem *dma = &ccb->ccb_bbuf;

	if (dma->dma_size == 0)
		return;

	if (ISSET(xs->flags, SCSI_DATA_IN))
		memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);

	xbf_dma_free(sc, &ccb->ccb_bbuf);
}

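/*
 * Translate a SCSI read, write or synchronize command into ring
 * descriptors.  Transfers larger than sc_xrd_nblk blocks are split
 * across consecutive descriptors; every descriptor claimed is recorded
 * as a bit in ccb_want so that xbf_complete_cmd() can match responses
 * chunk by chunk.
 */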
int
xbf_submit_cmd(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct xbf_ccb *ccb = xs->io;
	union xbf_ring_desc *xrd;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	uint64_t lba = 0;
	uint32_t nblk = 0;
	uint8_t operation = 0;
	unsigned int ndesc = 0;
	int desc, error;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = XBF_OP_READ;
		break;

	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = XBF_OP_WRITE;
		break;

	case SYNCHRONIZE_CACHE:
		if (sc->sc_caps & XBF_CAP_FLUSH)
			operation = XBF_OP_FLUSH;
		else if (sc->sc_caps & XBF_CAP_BARRIER)
			operation = XBF_OP_BARRIER;
		break;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
	 * has the same layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		nblk = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		nblk = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		nblk = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		nblk = _4btol(rw16->length);
	}

	ccb->ccb_want = ccb->ccb_seen = 0;

	do {
		desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
		if (ndesc == 0)
			ccb->ccb_first = desc;

		xrd = &sc->sc_xr->xr_desc[desc];
		xrd->xrd_req.req_op = operation;
		xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
		xrd->xrd_req.req_sector = lba + ndesc * sc->sc_xrd_nblk;

		ccb->ccb_want |= 1 << ndesc;
		ndesc++;
	} while (ndesc * sc->sc_xrd_nblk < nblk);

	ccb->ccb_last = desc;

	if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
		DPRINTF("%s: desc %u,%u %s%s lba %llu nsec %u "
		    "len %d\n", sc->sc_dev.dv_xname, ccb->ccb_first,
		    ccb->ccb_last, operation == XBF_OP_READ ? "read" :
		    "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
		    lba, nblk, xs->datalen);

		if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
			error = xbf_load_cmd(xs);
		else
			error = xbf_bounce_cmd(xs);
		if (error)
			return (-1);
	} else {
		DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
		    ccb->ccb_first, operation == XBF_OP_FLUSH ? "flush" :
		    "barrier", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
		    lba);
		xrd->xrd_req.req_nsegs = 0;
	}

	ccb->ccb_xfer = xs;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0,
	    ccb->ccb_dmap->dm_mapsize, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	mtx_enter(&sc->sc_ccb_sqlck);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_sq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_sqlck);

	sc->sc_xr_prod += ndesc;
	sc->sc_xr->xr_prod = sc->sc_xr_prod;
	sc->sc_xr->xr_cons_event = sc->sc_xr_prod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	xen_intr_signal(sc->sc_xih);

	return (0);
}

int
xbf_poll_cmd(struct scsi_xfer *xs)
{
	int timo = 1000;

	do {
		if (ISSET(xs->flags, ITSDONE))
			break;
		if (ISSET(xs->flags, SCSI_NOSLEEP))
			delay(10);
		else
			tsleep_nsec(xs, PRIBIO, "xbfpoll", USEC_TO_NSEC(10));
		xbf_intr(xs->sc_link->adapter_softc);
	} while (--timo > 0);

	/* Report a timeout if the transfer still hasn't completed */
	return (!ISSET(xs->flags, ITSDONE));
}

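/*
 * Handle one response descriptor: locate the owning CCB by checking
 * which submitted descriptor interval the response id falls into, mark
 * the chunk as seen and, once all expected chunks have arrived, sync
 * and unload the data map and queue the transfer for completion.
 */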
void
xbf_complete_cmd(struct xbf_softc *sc, struct xbf_ccb_queue *cq, int desc)
{
	struct xbf_ccb *ccb;
	union xbf_ring_desc *xrd;
	bus_dmamap_t map;
	uint32_t id, chunk;
	int error;

	xrd = &sc->sc_xr->xr_desc[desc];
	error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
	    XS_DRIVER_STUFFUP;

	mtx_enter(&sc->sc_ccb_sqlck);

	/*
	 * To find a CCB for id equal to x within an interval [a, b] we must
	 * locate a CCB such that (x - a) mod N <= (b - a) mod N, where a is
	 * the first descriptor, b is the last one and N is the ring size.
	 */
	id = (uint32_t)xrd->xrd_rsp.rsp_id;
	TAILQ_FOREACH(ccb, &sc->sc_ccb_sq, ccb_link) {
		if (((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)) <=
		    ((ccb->ccb_last - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)))
			break;
	}
	KASSERT(ccb != NULL);

	/* Assert that this chunk belongs to this CCB */
	chunk = 1 << ((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1));
	KASSERT((ccb->ccb_want & chunk) != 0);
	KASSERT((ccb->ccb_seen & chunk) == 0);

	/* When all chunks are collected remove the CCB from the queue */
	ccb->ccb_seen |= chunk;
	if (ccb->ccb_seen == ccb->ccb_want)
		TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);

	mtx_leave(&sc->sc_ccb_sqlck);

	DPRINTF("%s: completing desc %d(%llu) op %u with error %d\n",
	    sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
	    xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);

	memset(xrd, 0, sizeof(*xrd));
	xrd->xrd_req.req_id = desc;

	if (ccb->ccb_seen != ccb->ccb_want)
		return;

	if (ccb->ccb_bbuf.dma_size > 0)
		map = ccb->ccb_bbuf.dma_map;
	else
		map = ccb->ccb_dmap;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);

	ccb->ccb_xfer->resid = 0;
	ccb->ccb_xfer->error = error;
	TAILQ_INSERT_TAIL(cq, ccb, ccb_link);
}

void
xbf_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(inq->flags, SI_EVPD))
		xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
	else
		xbf_scsi_inquiry(xs);
}

void
xbf_scsi_inquiry(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_inquiry_data inq;

	bzero(&inq, sizeof(inq));

	switch (sc->sc_type) {
	case XBF_CDROM:
		inq.device = T_CDROM;
		break;
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("Xen     ", inq.vendor, sizeof(inq.vendor));
	bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
	bcopy("0000", inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_capacity(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_disk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_capacity16(struct scsi_xfer *xs)
{
	struct xbf_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_disk_size - 1, rcd.addr);
	_lto4b(sc->sc_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	xbf_scsi_done(xs, XS_NOERROR);
}

void
xbf_scsi_done(struct scsi_xfer *xs, int error)
{
	int s;

	xs->error = error;

	s = splbio();
	scsi_done(xs);
	splx(s);
}

int
xbf_dev_probe(struct scsi_link *link)
{
	if (link->target == 0)
		return (0);

	return (ENODEV);
}

int
xbf_get_type(struct xbf_softc *sc)
{
	unsigned long long res;
	const char *prop;
	char val[32];
	int error;

	prop = "type";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
	    sizeof(val))) != 0)
		goto errout;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);

	prop = "dev";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
	    sizeof(val))) != 0)
		goto errout;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);

	prop = "virtual-device";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
		goto errout;
	sc->sc_unit = (uint32_t)res;
	snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);

	prop = "device-type";
	if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
	    sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
		goto errout;
	if (!strcmp(sc->sc_dtype, "cdrom"))
		sc->sc_type = XBF_CDROM;

	return (0);

 errout:
	printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
	    prop);
	return (-1);
}

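/*
 * XenStore handshake with the backend: size and allocate the shared
 * ring, publish the ring references, event channel and ABI, switch the
 * frontend state to Initialized, wait for the backend to connect and
 * finally read back the disk geometry and the barrier/flush features.
 */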
int
xbf_init(struct xbf_softc *sc)
{
	unsigned long long res;
	const char *action, *prop;
	char pbuf[sizeof("ring-refXX")];
	unsigned int i;
	int error;

	prop = "max-ring-page-order";
	error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
	if (error == 0)
		sc->sc_xr_size = 1 << res;
	if (error == ENOENT) {
		prop = "max-ring-pages";
		error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
		if (error == 0)
			sc->sc_xr_size = res;
	}
	/* Fallback to the known minimum */
	if (error)
		sc->sc_xr_size = XBF_MIN_RING_SIZE;

	if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
		sc->sc_xr_size = XBF_MIN_RING_SIZE;
	if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
		sc->sc_xr_size = XBF_MAX_RING_SIZE;
	if (!powerof2(sc->sc_xr_size))
		sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);

	sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
	    sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
	if (!powerof2(sc->sc_xr_ndesc))
		sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
	if (sc->sc_xr_ndesc > XBF_MAX_REQS)
		sc->sc_xr_ndesc = XBF_MAX_REQS;

	DPRINTF("%s: %u ring pages, %d requests\n",
	    sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);

	if (xbf_ring_create(sc))
		return (-1);

	action = "set";

	for (i = 0; i < sc->sc_xr_size; i++) {
		if (i == 0 && sc->sc_xr_size == 1)
			snprintf(pbuf, sizeof(pbuf), "ring-ref");
		else
			snprintf(pbuf, sizeof(pbuf), "ring-ref%d", i);
		prop = pbuf;
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    sc->sc_xr_ref[i]))
			goto errout;
	}

	if (sc->sc_xr_size > 1) {
		prop = "num-ring-pages";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    sc->sc_xr_size))
			goto errout;
		prop = "ring-page-order";
		if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
		    fls(sc->sc_xr_size) - 1))
			goto errout;
	}

	prop = "event-channel";
	if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
		goto errout;

	prop = "protocol";
#ifdef __amd64__
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
	    strlen("x86_64-abi")))
		goto errout;
#else
	if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
	    strlen("x86_32-abi")))
		goto errout;
#endif

	if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
	    XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
		printf("%s: failed to set state to INITIALIZED\n",
		    sc->sc_dev.dv_xname);
		xbf_ring_destroy(sc);
		return (-1);
	}

	if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
	    XEN_STATE_CONNECTED, 10000)) {
		printf("%s: timed out waiting for backend to connect\n",
		    sc->sc_dev.dv_xname);
		xbf_ring_destroy(sc);
		return (-1);
	}

	action = "read";

	prop = "sectors";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
		goto errout;
	sc->sc_disk_size = res;

	prop = "sector-size";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
		goto errout;
	sc->sc_block_size = res;

	prop = "feature-barrier";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XBF_CAP_BARRIER;

	prop = "feature-flush-cache";
	if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
	    && error != ENOENT)
		goto errout;
	if (error == 0 && res == 1)
		sc->sc_caps |= XBF_CAP_FLUSH;

#ifdef XBF_DEBUG
	if (sc->sc_caps) {
		printf("%s: features:", sc->sc_dev.dv_xname);
		if (sc->sc_caps & XBF_CAP_BARRIER)
			printf(" BARRIER");
		if (sc->sc_caps & XBF_CAP_FLUSH)
			printf(" FLUSH");
		printf("\n");
	}
#endif

	if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
	    XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
		printf("%s: failed to set state to CONNECTED\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	sc->sc_state = XBF_CONNECTED;

	return (0);

 errout:
	printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
	    action, prop, error);
	xbf_ring_destroy(sc);
	return (-1);
}

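/*
 * Allocate wired, page-aligned DMA memory and load it through the
 * parent's bus_dma tag.  The backend domain ID encoded in the upper
 * 16 bits of the map flags allows the Xen bus_dma implementation to
 * grant the pages to that domain; the grant references come back as
 * the segment addresses of the loaded map.
 */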
int
xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
    bus_size_t size, int nsegs, int mapflags)
{
	int error;

	dma->dma_tag = sc->sc_dmat;

	dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (dma->dma_seg == NULL) {
		printf("%s: failed to allocate a segment array\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		printf("%s: failed to create a memory map (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto errout;
	}

	error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
	    dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to allocate DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto destroy;
	}

	error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
	    size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to map DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto free;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, NULL, mapflags | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: failed to load DMA memory (%d)\n",
		    sc->sc_dev.dv_xname, error);
		goto unmap;
	}

	dma->dma_size = size;
	dma->dma_nsegs = nsegs;
	return (0);

 unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
 free:
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
 destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
 errout:
	free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (error);
}

void
xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
{
	if (dma->dma_tag == NULL || dma->dma_map == NULL)
		return;
	bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs * sizeof(bus_dma_segment_t));
	dma->dma_seg = NULL;
	dma->dma_map = NULL;
	dma->dma_size = 0;
}

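/*
 * Allocate the shared ring, record the grant reference of each ring
 * page for the XenStore handshake and seed every descriptor with its
 * own index as the request id so that responses can be matched back
 * to ring slots.
 */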
int
xbf_ring_create(struct xbf_softc *sc)
{
	int i;

	if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
	    sc->sc_xr_size, sc->sc_domid << 16))
		return (-1);
	for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
		sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;

	sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;

	sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;

	for (i = 0; i < sc->sc_xr_ndesc; i++)
		sc->sc_xr->xr_desc[i].xrd_req.req_id = i;

	/* The number of contiguous blocks addressable by one descriptor */
	sc->sc_xrd_nblk = (PAGE_SIZE * XBF_MAX_SGE) / (1 << XBF_SEC_SHIFT);

	if (xbf_alloc_ccbs(sc)) {
		xbf_ring_destroy(sc);
		return (-1);
	}

	return (0);
}

void
xbf_ring_destroy(struct xbf_softc *sc)
{
	xbf_free_ccbs(sc);
	xbf_dma_free(sc, &sc->sc_xr_dma);
	sc->sc_xr = NULL;
}

void
xbf_stop(struct xbf_softc *sc)
{
	struct xbf_ccb *ccb, *nccb;
	bus_dmamap_t map;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
	    sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	TAILQ_FOREACH_SAFE(ccb, &sc->sc_ccb_sq, ccb_link, nccb) {
		TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);

		if (ccb->ccb_bbuf.dma_size > 0)
			map = ccb->ccb_bbuf.dma_map;
		else
			map = ccb->ccb_dmap;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		xbf_reclaim_cmd(ccb->ccb_xfer);
		xbf_scsi_done(ccb->ccb_xfer, XS_SELTIMEOUT);
	}

	xbf_ring_destroy(sc);
}

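/*
 * The CCB pool is sized at half the number of ring descriptors: a
 * single transfer may occupy up to two descriptors, so this guarantees
 * ring space for every outstanding CCB.
 */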
int
xbf_alloc_ccbs(struct xbf_softc *sc)
{
	int i, error;

	TAILQ_INIT(&sc->sc_ccb_fq);
	TAILQ_INIT(&sc->sc_ccb_sq);
	mtx_init(&sc->sc_ccb_fqlck, IPL_BIO);
	mtx_init(&sc->sc_ccb_sqlck, IPL_BIO);

	sc->sc_nccb = sc->sc_xr_ndesc / 2;

	sc->sc_ccbs = mallocarray(sc->sc_nccb, sizeof(struct xbf_ccb),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_ccbs == NULL) {
		printf("%s: failed to allocate CCBs\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	for (i = 0; i < sc->sc_nccb; i++) {
		/*
		 * Each CCB is set up to use up to 2 descriptors and
		 * each descriptor can transfer XBF_MAX_SGE number of
		 * pages.
		 */
		error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2 *
		    XBF_MAX_SGE, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
		    &sc->sc_ccbs[i].ccb_dmap);
		if (error) {
			printf("%s: failed to create a memory map for "
			    "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
			    error);
			goto errout;
		}

		xbf_put_ccb(sc, &sc->sc_ccbs[i]);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, xbf_get_ccb, xbf_put_ccb);

	return (0);

 errout:
	xbf_free_ccbs(sc);
	return (-1);
}

void
xbf_free_ccbs(struct xbf_softc *sc)
{
	struct xbf_ccb *ccb;
	int i;

	for (i = 0; i < sc->sc_nccb; i++) {
		ccb = &sc->sc_ccbs[i];
		if (ccb->ccb_dmap == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0, 0,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmap);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmap);
	}

	free(sc->sc_ccbs, M_DEVBUF, sc->sc_nccb * sizeof(struct xbf_ccb));
	sc->sc_ccbs = NULL;
	sc->sc_nccb = 0;
}

void *
xbf_get_ccb(void *xsc)
{
	struct xbf_softc *sc = xsc;
	struct xbf_ccb *ccb;

	if (sc->sc_state != XBF_CONNECTED &&
	    sc->sc_state != XBF_CLOSING)
		return (NULL);

	mtx_enter(&sc->sc_ccb_fqlck);
	ccb = TAILQ_FIRST(&sc->sc_ccb_fq);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_fq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_fqlck);

	return (ccb);
}

void
xbf_put_ccb(void *xsc, void *io)
{
	struct xbf_softc *sc = xsc;
	struct xbf_ccb *ccb = io;

	ccb->ccb_xfer = NULL;

	mtx_enter(&sc->sc_ccb_fqlck);
	TAILQ_INSERT_HEAD(&sc->sc_ccb_fq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_fqlck);
}