1 /* $OpenBSD: xbf.c,v 1.54 2024/05/24 10:05:55 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2016, 2017 Mike Belopuhov
5 * Copyright (c) 2009, 2011 Mark Kettenis
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/atomic.h>
23 #include <sys/device.h>
24 #include <sys/malloc.h>
25 #include <sys/task.h>
26
27 #include <machine/bus.h>
28
29 #include <dev/pv/xenreg.h>
30 #include <dev/pv/xenvar.h>
31
32 #include <scsi/scsi_all.h>
33 #include <scsi/cd.h>
34 #include <scsi/scsi_disk.h>
35 #include <scsi/scsiconf.h>
36
37 /* #define XBF_DEBUG */
38
39 #ifdef XBF_DEBUG
40 #define DPRINTF(x...) printf(x)
41 #else
42 #define DPRINTF(x...)
43 #endif
44
45 #define XBF_OP_READ 0
46 #define XBF_OP_WRITE 1
47 #define XBF_OP_BARRIER 2 /* feature-barrier */
48 #define XBF_OP_FLUSH 3 /* feature-flush-cache */
49 #define XBF_OP_DISCARD 5 /* feature-discard */
50 #define XBF_OP_INDIRECT 6 /* feature-max-indirect-segments */
51
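/*
 * A generic request references at most XBF_MAX_SGE pages of data; an
 * indirect request header carries up to XBF_MAX_ISGE references to
 * indirect segment pages.
 */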
52 #define XBF_MAX_SGE 11
53 #define XBF_MAX_ISGE 8
54
55 #define XBF_SEC_SHIFT 9
56 #define XBF_SEC_SIZE (1 << XBF_SEC_SHIFT)
57
58 #define XBF_CDROM 1
59 #define XBF_REMOVABLE 2
60 #define XBF_READONLY 4
61
62 #define XBF_OK 0
63 #define XBF_EIO -1 /* generic failure */
64 #define XBF_EOPNOTSUPP -2 /* only for XBF_OP_BARRIER */
65
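/*
 * Scatter-gather element: a reference to a page of data plus the first
 * and the last 512-byte sector used within that page.
 */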
66 struct xbf_sge {
67 uint32_t sge_ref;
68 uint8_t sge_first;
69 uint8_t sge_last;
70 uint16_t sge_pad;
71 } __packed;
72
73 /* Generic I/O request */
74 struct xbf_req {
75 uint8_t req_op;
76 uint8_t req_nsegs;
77 uint16_t req_unit;
78 #ifdef __amd64__
79 uint32_t req_pad;
80 #endif
81 uint64_t req_id;
82 uint64_t req_sector;
83 struct xbf_sge req_sgl[XBF_MAX_SGE];
84 } __packed;
85
86 /* Indirect I/O request */
87 struct xbf_ireq {
88 uint8_t req_op;
89 uint8_t req_iop;
90 uint16_t req_nsegs;
91 #ifdef __amd64__
92 uint32_t req_pad;
93 #endif
94 uint64_t req_id;
95 uint64_t req_sector;
96 uint16_t req_unit;
97 uint32_t req_gref[XBF_MAX_ISGE];
98 #ifdef __i386__
99 uint64_t req_pad;
100 #endif
101 } __packed;
102
103 struct xbf_rsp {
104 uint64_t rsp_id;
105 uint8_t rsp_op;
106 uint8_t rsp_pad1;
107 int16_t rsp_status;
108 #ifdef __amd64__
109 uint32_t rsp_pad2;
110 #endif
111 } __packed;
112
113 union xbf_ring_desc {
114 struct xbf_req xrd_req;
115 struct xbf_ireq xrd_ireq;
116 struct xbf_rsp xrd_rsp;
117 } __packed;
118
119 #define XBF_MIN_RING_SIZE 1
120 #define XBF_MAX_RING_SIZE 8
121 #define XBF_MAX_REQS 256 /* must be a power of 2 */
122
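/*
 * Shared ring: xr_prod/xr_prod_event index the requests posted by the
 * frontend, xr_cons/xr_cons_event index the responses posted by the
 * backend.  The 64-byte header is followed by the ring descriptors.
 */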
123 struct xbf_ring {
124 volatile uint32_t xr_prod;
125 volatile uint32_t xr_prod_event;
126 volatile uint32_t xr_cons;
127 volatile uint32_t xr_cons_event;
128 uint32_t xr_reserved[12];
129 union xbf_ring_desc xr_desc[0];
130 } __packed;
131
132 struct xbf_dma_mem {
133 bus_size_t dma_size;
134 bus_dma_tag_t dma_tag;
135 bus_dmamap_t dma_map;
136 bus_dma_segment_t *dma_seg;
137 int dma_nsegs; /* segments allocated */
138 int dma_rsegs; /* segments in use */
139 caddr_t dma_vaddr;
140 };
141
142 struct xbf_ccb {
143 struct scsi_xfer *ccb_xfer; /* associated transfer */
144 bus_dmamap_t ccb_dmap; /* transfer map */
145 struct xbf_dma_mem ccb_bbuf; /* bounce buffer */
146 uint32_t ccb_first; /* first descriptor */
147 uint32_t ccb_last; /* last descriptor */
148 uint16_t ccb_want; /* expected chunks */
149 uint16_t ccb_seen; /* completed chunks */
150 TAILQ_ENTRY(xbf_ccb) ccb_link;
151 };
152 TAILQ_HEAD(xbf_ccb_queue, xbf_ccb);
153
154 struct xbf_softc {
155 struct device sc_dev;
156 struct device *sc_parent;
157 char sc_node[XEN_MAX_NODE_LEN];
158 char sc_backend[XEN_MAX_BACKEND_LEN];
159 bus_dma_tag_t sc_dmat;
160 int sc_domid;
161
162 xen_intr_handle_t sc_xih;
163
164 int sc_state;
165 #define XBF_CONNECTED 4
166 #define XBF_CLOSING 5
167
168 int sc_caps;
169 #define XBF_CAP_BARRIER 0x0001
170 #define XBF_CAP_FLUSH 0x0002
171
172 uint32_t sc_type;
173 uint32_t sc_unit;
174 char sc_dtype[16];
175 char sc_prod[16];
176
177 uint64_t sc_disk_size;
178 uint32_t sc_block_size;
179
180 /* Ring */
181 struct xbf_ring *sc_xr;
182 uint32_t sc_xr_cons;
183 uint32_t sc_xr_prod;
184 uint32_t sc_xr_size; /* in pages */
185 struct xbf_dma_mem sc_xr_dma;
186 uint32_t sc_xr_ref[XBF_MAX_RING_SIZE];
187 int sc_xr_ndesc;
188
189 /* Maximum number of blocks that one descriptor may refer to */
190 int sc_xrd_nblk;
191
192 /* CCBs */
193 int sc_nccb;
194 struct xbf_ccb *sc_ccbs;
195 struct xbf_ccb_queue sc_ccb_fq; /* free queue */
196 struct xbf_ccb_queue sc_ccb_sq; /* pending requests */
197 struct mutex sc_ccb_fqlck;
198 struct mutex sc_ccb_sqlck;
199
200 struct scsi_iopool sc_iopool;
201 struct device *sc_scsibus;
202 };
203
204 int xbf_match(struct device *, void *, void *);
205 void xbf_attach(struct device *, struct device *, void *);
206 int xbf_detach(struct device *, int);
207
208 struct cfdriver xbf_cd = {
209 NULL, "xbf", DV_DULL
210 };
211
212 const struct cfattach xbf_ca = {
213 sizeof(struct xbf_softc), xbf_match, xbf_attach, xbf_detach
214 };
215
216 void xbf_intr(void *);
217
218 int xbf_load_cmd(struct scsi_xfer *);
219 int xbf_bounce_cmd(struct scsi_xfer *);
220 void xbf_reclaim_cmd(struct scsi_xfer *);
221
222 void xbf_scsi_cmd(struct scsi_xfer *);
223 int xbf_submit_cmd(struct scsi_xfer *);
224 int xbf_poll_cmd(struct scsi_xfer *);
225 void xbf_complete_cmd(struct xbf_softc *, struct xbf_ccb_queue *, int);
226
227 const struct scsi_adapter xbf_switch = {
228 xbf_scsi_cmd, NULL, NULL, NULL, NULL
229 };
230
231 void xbf_scsi_inq(struct scsi_xfer *);
232 void xbf_scsi_inquiry(struct scsi_xfer *);
233 void xbf_scsi_capacity(struct scsi_xfer *);
234 void xbf_scsi_capacity16(struct scsi_xfer *);
235 void xbf_scsi_done(struct scsi_xfer *, int);
236
237 int xbf_dma_alloc(struct xbf_softc *, struct xbf_dma_mem *,
238 bus_size_t, int, int);
239 void xbf_dma_free(struct xbf_softc *, struct xbf_dma_mem *);
240
241 int xbf_get_type(struct xbf_softc *);
242 int xbf_init(struct xbf_softc *);
243 int xbf_ring_create(struct xbf_softc *);
244 void xbf_ring_destroy(struct xbf_softc *);
245 void xbf_stop(struct xbf_softc *);
246
247 int xbf_alloc_ccbs(struct xbf_softc *);
248 void xbf_free_ccbs(struct xbf_softc *);
249 void *xbf_get_ccb(void *);
250 void xbf_put_ccb(void *, void *);
251
252 int
253 xbf_match(struct device *parent, void *match, void *aux)
254 {
255 struct xen_attach_args *xa = aux;
256
257 if (strcmp("vbd", xa->xa_name))
258 return (0);
259
260 return (1);
261 }
262
263 void
264 xbf_attach(struct device *parent, struct device *self, void *aux)
265 {
266 struct xen_attach_args *xa = aux;
267 struct xbf_softc *sc = (struct xbf_softc *)self;
268 struct scsibus_attach_args saa;
269
270 sc->sc_parent = parent;
271 sc->sc_dmat = xa->xa_dmat;
272 sc->sc_domid = xa->xa_domid;
273
274 memcpy(sc->sc_node, xa->xa_node, XEN_MAX_NODE_LEN);
275 memcpy(sc->sc_backend, xa->xa_backend, XEN_MAX_BACKEND_LEN);
276
277 if (xbf_get_type(sc))
278 return;
279
280 if (xen_intr_establish(0, &sc->sc_xih, sc->sc_domid, xbf_intr, sc,
281 sc->sc_dev.dv_xname)) {
282 printf(": failed to establish an interrupt\n");
283 return;
284 }
285 xen_intr_mask(sc->sc_xih);
286
287 printf(" backend %d channel %u: %s\n", sc->sc_domid, sc->sc_xih,
288 sc->sc_dtype);
289
290 if (xbf_init(sc))
291 goto error;
292
293 if (xen_intr_unmask(sc->sc_xih)) {
294 printf("%s: failed to enable interrupts\n",
295 sc->sc_dev.dv_xname);
296 goto error;
297 }
298
299 saa.saa_adapter = &xbf_switch;
300 saa.saa_adapter_softc = self;
301 saa.saa_adapter_buswidth = 1;
302 saa.saa_luns = 1;
303 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
304 saa.saa_openings = sc->sc_nccb;
305 saa.saa_pool = &sc->sc_iopool;
306 saa.saa_quirks = saa.saa_flags = 0;
307 saa.saa_wwpn = saa.saa_wwnn = 0;
308
309 sc->sc_scsibus = config_found(self, &saa, scsiprint);
310
311 xen_unplug_emulated(parent, XEN_UNPLUG_IDE | XEN_UNPLUG_IDESEC);
312
313 return;
314
315 error:
316 xen_intr_disestablish(sc->sc_xih);
317 }
318
319 int
320 xbf_detach(struct device *self, int flags)
321 {
322 struct xbf_softc *sc = (struct xbf_softc *)self;
323 int ostate = sc->sc_state;
324
325 sc->sc_state = XBF_CLOSING;
326
327 xen_intr_mask(sc->sc_xih);
328 xen_intr_barrier(sc->sc_xih);
329
330 if (ostate == XBF_CONNECTED) {
331 xen_intr_disestablish(sc->sc_xih);
332 xbf_stop(sc);
333 }
334
335 if (sc->sc_scsibus)
336 return (config_detach(sc->sc_scsibus, flags | DETACH_FORCE));
337
338 return (0);
339 }
340
341 void
342 xbf_intr(void *xsc)
343 {
344 struct xbf_softc *sc = xsc;
345 struct xbf_ring *xr = sc->sc_xr;
346 struct xbf_dma_mem *dma = &sc->sc_xr_dma;
347 struct xbf_ccb_queue cq;
348 struct xbf_ccb *ccb, *nccb;
349 uint32_t cons;
350 int desc, s;
351
352 TAILQ_INIT(&cq);
353
354 for (;;) {
355 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
356 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
357
358 for (cons = sc->sc_xr_cons; cons != xr->xr_cons; cons++) {
359 desc = cons & (sc->sc_xr_ndesc - 1);
360 xbf_complete_cmd(sc, &cq, desc);
361 }
362
363 sc->sc_xr_cons = cons;
364
365 if (TAILQ_EMPTY(&cq))
366 break;
367
368 s = splbio();
369 KERNEL_LOCK();
370 TAILQ_FOREACH_SAFE(ccb, &cq, ccb_link, nccb) {
371 TAILQ_REMOVE(&cq, ccb, ccb_link);
372 xbf_reclaim_cmd(ccb->ccb_xfer);
373 scsi_done(ccb->ccb_xfer);
374 }
375 KERNEL_UNLOCK();
376 splx(s);
377 }
378 }
379
380 void
381 xbf_scsi_cmd(struct scsi_xfer *xs)
382 {
383 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
384
385 switch (xs->cmd.opcode) {
386 case READ_COMMAND:
387 case READ_10:
388 case READ_12:
389 case READ_16:
390 case WRITE_COMMAND:
391 case WRITE_10:
392 case WRITE_12:
393 case WRITE_16:
394 if (sc->sc_state != XBF_CONNECTED) {
395 xbf_scsi_done(xs, XS_SELTIMEOUT);
396 return;
397 }
398 break;
399 case SYNCHRONIZE_CACHE:
400 if (!(sc->sc_caps & (XBF_CAP_BARRIER|XBF_CAP_FLUSH))) {
401 xbf_scsi_done(xs, XS_NOERROR);
402 return;
403 }
404 break;
405 case INQUIRY:
406 xbf_scsi_inq(xs);
407 return;
408 case READ_CAPACITY:
409 xbf_scsi_capacity(xs);
410 return;
411 case READ_CAPACITY_16:
412 xbf_scsi_capacity16(xs);
413 return;
414 case TEST_UNIT_READY:
415 case START_STOP:
416 case PREVENT_ALLOW:
417 xbf_scsi_done(xs, XS_NOERROR);
418 return;
419 default:
420 printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
421 case MODE_SENSE:
422 case MODE_SENSE_BIG:
423 case REPORT_LUNS:
424 case READ_TOC:
425 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
426 return;
427 }
428
429 if (xbf_submit_cmd(xs)) {
430 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
431 return;
432 }
433
434 if (ISSET(xs->flags, SCSI_POLL) && xbf_poll_cmd(xs)) {
435 printf("%s: op %#x timed out\n", sc->sc_dev.dv_xname,
436 xs->cmd.opcode);
437 if (sc->sc_state == XBF_CONNECTED) {
438 xbf_reclaim_cmd(xs);
439 xbf_scsi_done(xs, XS_TIMEOUT);
440 }
441 return;
442 }
443 }
444
445 int
446 xbf_load_cmd(struct scsi_xfer *xs)
447 {
448 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
449 struct xbf_ccb *ccb = xs->io;
450 struct xbf_sge *sge;
451 union xbf_ring_desc *xrd;
452 bus_dmamap_t map;
453 int error, mapflags, nsg, seg;
454 int desc, ndesc = 0;
455
456 map = ccb->ccb_dmap;
457
458 mapflags = (sc->sc_domid << 16);
459 if (ISSET(xs->flags, SCSI_NOSLEEP))
460 mapflags |= BUS_DMA_NOWAIT;
461 else
462 mapflags |= BUS_DMA_WAITOK;
463 if (ISSET(xs->flags, SCSI_DATA_IN))
464 mapflags |= BUS_DMA_READ;
465 else
466 mapflags |= BUS_DMA_WRITE;
467
468 error = bus_dmamap_load(sc->sc_dmat, map, xs->data, xs->datalen,
469 NULL, mapflags);
470 if (error) {
471 printf("%s: failed to load %d bytes of data\n",
472 sc->sc_dev.dv_xname, xs->datalen);
473 return (error);
474 }
475
476 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
477 /* seg is the map segment iterator, nsg is the s-g list iterator */
478 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
479 if (nsg == XBF_MAX_SGE) {
480 /* Number of segments so far */
481 xrd->xrd_req.req_nsegs = nsg;
482 /* Pick next descriptor */
483 ndesc++;
484 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
485 xrd = &sc->sc_xr->xr_desc[desc];
486 nsg = 0;
487 }
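		/*
		 * sge_first/sge_last are 512-byte sector offsets within the
		 * referenced page; only the first element of a descriptor
		 * may start at a non-zero offset.
		 */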
488 sge = &xrd->xrd_req.req_sgl[nsg];
489 sge->sge_ref = map->dm_segs[seg].ds_addr;
490 sge->sge_first = nsg > 0 ? 0 :
491 (((vaddr_t)xs->data + ndesc * sc->sc_xrd_nblk *
492 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
493 sge->sge_last = sge->sge_first +
494 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
495
496 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
497 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
498 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
499 sge->sge_first, sge->sge_last);
500
501 KASSERT(sge->sge_last <= 7);
502 }
503
504 xrd->xrd_req.req_nsegs = nsg;
505
506 return (0);
507 }
508
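/*
 * Used for transfers that are not sector aligned: the data is staged
 * through a page-aligned bounce buffer allocated for the duration of
 * the command and, for reads, copied back in xbf_reclaim_cmd().
 */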
509 int
510 xbf_bounce_cmd(struct scsi_xfer *xs)
511 {
512 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
513 struct xbf_ccb *ccb = xs->io;
514 struct xbf_sge *sge;
515 struct xbf_dma_mem *dma;
516 union xbf_ring_desc *xrd;
517 bus_dmamap_t map;
518 bus_size_t size;
519 int error, mapflags, nsg, seg;
520 int desc, ndesc = 0;
521
522 size = roundup(xs->datalen, PAGE_SIZE);
523 if (size > MAXPHYS)
524 return (EFBIG);
525
526 mapflags = (sc->sc_domid << 16);
527 if (ISSET(xs->flags, SCSI_NOSLEEP))
528 mapflags |= BUS_DMA_NOWAIT;
529 else
530 mapflags |= BUS_DMA_WAITOK;
531 if (ISSET(xs->flags, SCSI_DATA_IN))
532 mapflags |= BUS_DMA_READ;
533 else
534 mapflags |= BUS_DMA_WRITE;
535
536 dma = &ccb->ccb_bbuf;
537 error = xbf_dma_alloc(sc, dma, size, size / PAGE_SIZE, mapflags);
538 if (error) {
539 DPRINTF("%s: failed to allocate a %lu byte bounce buffer\n",
540 sc->sc_dev.dv_xname, size);
541 return (error);
542 }
543
544 map = dma->dma_map;
545
546 DPRINTF("%s: bouncing %d bytes via %lu size map with %d segments\n",
547 sc->sc_dev.dv_xname, xs->datalen, size, map->dm_nsegs);
548
549 if (ISSET(xs->flags, SCSI_DATA_OUT))
550 memcpy(dma->dma_vaddr, xs->data, xs->datalen);
551
552 xrd = &sc->sc_xr->xr_desc[ccb->ccb_first];
553 /* seg is the map segment iterator, nsg is the s-g element iterator */
554 for (seg = 0, nsg = 0; seg < map->dm_nsegs; seg++, nsg++) {
555 if (nsg == XBF_MAX_SGE) {
556 /* Number of segments so far */
557 xrd->xrd_req.req_nsegs = nsg;
558 /* Pick next descriptor */
559 ndesc++;
560 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
561 xrd = &sc->sc_xr->xr_desc[desc];
562 nsg = 0;
563 }
564 sge = &xrd->xrd_req.req_sgl[nsg];
565 sge->sge_ref = map->dm_segs[seg].ds_addr;
566 sge->sge_first = nsg > 0 ? 0 :
567 (((vaddr_t)dma->dma_vaddr + ndesc * sc->sc_xrd_nblk *
568 (1 << XBF_SEC_SHIFT)) & PAGE_MASK) >> XBF_SEC_SHIFT;
569 sge->sge_last = sge->sge_first +
570 (map->dm_segs[seg].ds_len >> XBF_SEC_SHIFT) - 1;
571
572 DPRINTF("%s: seg %d/%d ref %lu len %lu first %u last %u\n",
573 sc->sc_dev.dv_xname, nsg + 1, map->dm_nsegs,
574 map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len,
575 sge->sge_first, sge->sge_last);
576
577 KASSERT(sge->sge_last <= 7);
578 }
579
580 xrd->xrd_req.req_nsegs = nsg;
581
582 return (0);
583 }
584
585 void
586 xbf_reclaim_cmd(struct scsi_xfer *xs)
587 {
588 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
589 struct xbf_ccb *ccb = xs->io;
590 struct xbf_dma_mem *dma = &ccb->ccb_bbuf;
591
592 if (dma->dma_size == 0)
593 return;
594
595 if (ISSET(xs->flags, SCSI_DATA_IN))
596 memcpy(xs->data, (caddr_t)dma->dma_vaddr, xs->datalen);
597
598 xbf_dma_free(sc, &ccb->ccb_bbuf);
599 }
600
601 int
602 xbf_submit_cmd(struct scsi_xfer *xs)
603 {
604 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
605 struct xbf_ccb *ccb = xs->io;
606 union xbf_ring_desc *xrd;
607 struct scsi_rw *rw;
608 struct scsi_rw_10 *rw10;
609 struct scsi_rw_12 *rw12;
610 struct scsi_rw_16 *rw16;
611 uint64_t lba = 0;
612 uint32_t nblk = 0;
613 uint8_t operation = 0;
614 unsigned int ndesc = 0;
615 int desc, error;
616
617 switch (xs->cmd.opcode) {
618 case READ_COMMAND:
619 case READ_10:
620 case READ_12:
621 case READ_16:
622 operation = XBF_OP_READ;
623 break;
624
625 case WRITE_COMMAND:
626 case WRITE_10:
627 case WRITE_12:
628 case WRITE_16:
629 operation = XBF_OP_WRITE;
630 break;
631
632 case SYNCHRONIZE_CACHE:
633 if (sc->sc_caps & XBF_CAP_FLUSH)
634 operation = XBF_OP_FLUSH;
635 else if (sc->sc_caps & XBF_CAP_BARRIER)
636 operation = XBF_OP_BARRIER;
637 break;
638 }
639
640 /*
641 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE
642 * has the same layout as 10-byte READ/WRITE commands.
643 */
644 if (xs->cmdlen == 6) {
645 rw = (struct scsi_rw *)&xs->cmd;
646 lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
647 nblk = rw->length ? rw->length : 0x100;
648 } else if (xs->cmdlen == 10) {
649 rw10 = (struct scsi_rw_10 *)&xs->cmd;
650 lba = _4btol(rw10->addr);
651 nblk = _2btol(rw10->length);
652 } else if (xs->cmdlen == 12) {
653 rw12 = (struct scsi_rw_12 *)&xs->cmd;
654 lba = _4btol(rw12->addr);
655 nblk = _4btol(rw12->length);
656 } else if (xs->cmdlen == 16) {
657 rw16 = (struct scsi_rw_16 *)&xs->cmd;
658 lba = _8btol(rw16->addr);
659 nblk = _4btol(rw16->length);
660 }
661
662 /* SCSI lba/nblk are in sc_block_size units; the ring needs XBF_SEC_SIZE sectors. */
663 lba *= sc->sc_block_size / XBF_SEC_SIZE;
664 nblk *= sc->sc_block_size / XBF_SEC_SIZE;
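	/* E.g. a 4096-byte block size turns one SCSI block into 8 ring sectors. */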
665
666 ccb->ccb_want = ccb->ccb_seen = 0;
667
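	/*
	 * Split the transfer across as many consecutive descriptors as
	 * needed; ccb_want accumulates one bit per descriptor that must
	 * complete before the CCB is done.
	 */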
668 do {
669 desc = (sc->sc_xr_prod + ndesc) & (sc->sc_xr_ndesc - 1);
670 if (ndesc == 0)
671 ccb->ccb_first = desc;
672
673 xrd = &sc->sc_xr->xr_desc[desc];
674 xrd->xrd_req.req_op = operation;
675 xrd->xrd_req.req_unit = (uint16_t)sc->sc_unit;
676 xrd->xrd_req.req_sector = lba + ndesc * sc->sc_xrd_nblk;
677
678 ccb->ccb_want |= 1 << ndesc;
679 ndesc++;
680 } while (ndesc * sc->sc_xrd_nblk < nblk);
681
682 ccb->ccb_last = desc;
683
684 if (operation == XBF_OP_READ || operation == XBF_OP_WRITE) {
685 DPRINTF("%s: desc %u,%u %s%s lba %llu nsec %u "
686 "len %d\n", sc->sc_dev.dv_xname, ccb->ccb_first,
687 ccb->ccb_last, operation == XBF_OP_READ ? "read" :
688 "write", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
689 lba, nblk, xs->datalen);
690
691 if (((vaddr_t)xs->data & ((1 << XBF_SEC_SHIFT) - 1)) == 0)
692 error = xbf_load_cmd(xs);
693 else
694 error = xbf_bounce_cmd(xs);
695 if (error)
696 return (-1);
697 } else {
698 DPRINTF("%s: desc %u %s%s lba %llu\n", sc->sc_dev.dv_xname,
699 ccb->ccb_first, operation == XBF_OP_FLUSH ? "flush" :
700 "barrier", ISSET(xs->flags, SCSI_POLL) ? "-poll" : "",
701 lba);
702 xrd->xrd_req.req_nsegs = 0;
703 }
704
705 ccb->ccb_xfer = xs;
706
707 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0,
708 ccb->ccb_dmap->dm_mapsize, BUS_DMASYNC_PREREAD |
709 BUS_DMASYNC_PREWRITE);
710
711 mtx_enter(&sc->sc_ccb_sqlck);
712 TAILQ_INSERT_TAIL(&sc->sc_ccb_sq, ccb, ccb_link);
713 mtx_leave(&sc->sc_ccb_sqlck);
714
715 sc->sc_xr_prod += ndesc;
716 sc->sc_xr->xr_prod = sc->sc_xr_prod;
717 sc->sc_xr->xr_cons_event = sc->sc_xr_prod;
718
719 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
720 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD |
721 BUS_DMASYNC_PREWRITE);
722
723 xen_intr_signal(sc->sc_xih);
724
725 return (0);
726 }
727
728 int
729 xbf_poll_cmd(struct scsi_xfer *xs)
730 {
731 int timo = 1000;
732
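	/* Poll in 10us steps for roughly 10ms in total. */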
733 do {
734 if (ISSET(xs->flags, ITSDONE))
735 break;
736 if (ISSET(xs->flags, SCSI_NOSLEEP))
737 delay(10);
738 else
739 tsleep_nsec(xs, PRIBIO, "xbfpoll", USEC_TO_NSEC(10));
740 xbf_intr(xs->sc_link->bus->sb_adapter_softc);
741 } while(--timo > 0);
742
743 return (0);
744 }
745
746 void
747 xbf_complete_cmd(struct xbf_softc *sc, struct xbf_ccb_queue *cq, int desc)
748 {
749 struct xbf_ccb *ccb;
750 union xbf_ring_desc *xrd;
751 bus_dmamap_t map;
752 uint32_t id, chunk;
753 int error;
754
755 xrd = &sc->sc_xr->xr_desc[desc];
756 error = xrd->xrd_rsp.rsp_status == XBF_OK ? XS_NOERROR :
757 XS_DRIVER_STUFFUP;
758
759 mtx_enter(&sc->sc_ccb_sqlck);
760
761 /*
762 * To find a CCB for id equal to x within an interval [a, b] we must
763 * locate a CCB such that (x - a) mod N <= (b - a) mod N, where a is
764 * the first descriptor, b is the last one and N is the ring size.
765 */
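	/*
	 * E.g. with a 32-entry ring and a CCB spanning descriptors 30..1,
	 * ids 30, 31, 0 and 1 satisfy (x - 30) mod 32 <= (1 - 30) mod 32 == 3,
	 * while id 2 does not.
	 */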
766 id = (uint32_t)xrd->xrd_rsp.rsp_id;
767 TAILQ_FOREACH(ccb, &sc->sc_ccb_sq, ccb_link) {
768 if (((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)) <=
769 ((ccb->ccb_last - ccb->ccb_first) & (sc->sc_xr_ndesc - 1)))
770 break;
771 }
772 KASSERT(ccb != NULL);
773
774 /* Assert that this chunk belongs to this CCB */
775 chunk = 1 << ((id - ccb->ccb_first) & (sc->sc_xr_ndesc - 1));
776 KASSERT((ccb->ccb_want & chunk) != 0);
777 KASSERT((ccb->ccb_seen & chunk) == 0);
778
779 /* When all chunks are collected remove the CCB from the queue */
780 ccb->ccb_seen |= chunk;
781 if (ccb->ccb_seen == ccb->ccb_want)
782 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
783
784 mtx_leave(&sc->sc_ccb_sqlck);
785
786 DPRINTF("%s: completing desc %d(%llu) op %u with error %d\n",
787 sc->sc_dev.dv_xname, desc, xrd->xrd_rsp.rsp_id,
788 xrd->xrd_rsp.rsp_op, xrd->xrd_rsp.rsp_status);
789
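	/* Recycle the descriptor; its default request id is its own index. */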
790 memset(xrd, 0, sizeof(*xrd));
791 xrd->xrd_req.req_id = desc;
792
793 if (ccb->ccb_seen != ccb->ccb_want)
794 return;
795
796 if (ccb->ccb_bbuf.dma_size > 0)
797 map = ccb->ccb_bbuf.dma_map;
798 else
799 map = ccb->ccb_dmap;
800
801 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
802 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
803 bus_dmamap_unload(sc->sc_dmat, map);
804
805 ccb->ccb_xfer->resid = 0;
806 ccb->ccb_xfer->error = error;
807 TAILQ_INSERT_TAIL(cq, ccb, ccb_link);
808 }
809
810 void
811 xbf_scsi_inq(struct scsi_xfer *xs)
812 {
813 struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
814
815 if (ISSET(inq->flags, SI_EVPD))
816 xbf_scsi_done(xs, XS_DRIVER_STUFFUP);
817 else
818 xbf_scsi_inquiry(xs);
819 }
820
821 void
822 xbf_scsi_inquiry(struct scsi_xfer *xs)
823 {
824 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
825 struct scsi_inquiry_data inq;
826
827 bzero(&inq, sizeof(inq));
828
829 switch (sc->sc_type) {
830 case XBF_CDROM:
831 inq.device = T_CDROM;
832 break;
833 default:
834 inq.device = T_DIRECT;
835 break;
836 }
837
838 inq.version = SCSI_REV_SPC3;
839 inq.response_format = SID_SCSI2_RESPONSE;
840 inq.additional_length = SID_SCSI2_ALEN;
841 inq.flags |= SID_CmdQue;
842 bcopy("Xen ", inq.vendor, sizeof(inq.vendor));
843 bcopy(sc->sc_prod, inq.product, sizeof(inq.product));
844 bcopy("0000", inq.revision, sizeof(inq.revision));
845
846 scsi_copy_internal_data(xs, &inq, sizeof(inq));
847
848 xbf_scsi_done(xs, XS_NOERROR);
849 }
850
851 void
852 xbf_scsi_capacity(struct scsi_xfer *xs)
853 {
854 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
855 struct scsi_read_cap_data rcd;
856 uint64_t capacity;
857
858 bzero(&rcd, sizeof(rcd));
859
860 /* addr/length are in sc_block_size units; sc_disk_size is in XBF_SEC_SIZE sectors. */
861 capacity = (sc->sc_disk_size * XBF_SEC_SIZE) / sc->sc_block_size - 1;
862 if (capacity > 0xffffffff)
863 capacity = 0xffffffff;
864
865 _lto4b(capacity, rcd.addr);
866 _lto4b(sc->sc_block_size, rcd.length);
867
868 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
869
870 xbf_scsi_done(xs, XS_NOERROR);
871 }
872
873 void
874 xbf_scsi_capacity16(struct scsi_xfer *xs)
875 {
876 struct xbf_softc *sc = xs->sc_link->bus->sb_adapter_softc;
877 struct scsi_read_cap_data_16 rcd;
878 uint64_t capacity;
879
880 bzero(&rcd, sizeof(rcd));
881
882 /* addr/length are in sc_block_size units; sc_disk_size is in XBF_SEC_SIZE sectors. */
883 capacity = (sc->sc_disk_size * XBF_SEC_SIZE) / sc->sc_block_size - 1;
884 _lto8b(capacity, rcd.addr);
885 _lto4b(sc->sc_block_size, rcd.length);
886
887 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
888
889 xbf_scsi_done(xs, XS_NOERROR);
890 }
891
892 void
893 xbf_scsi_done(struct scsi_xfer *xs, int error)
894 {
895 int s;
896
897 xs->error = error;
898
899 s = splbio();
900 scsi_done(xs);
901 splx(s);
902 }
903
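/*
 * Assemble the product string and unit number from the "type", "dev",
 * "virtual-device" and "device-type" XenStore properties and flag
 * CD-ROM backends.
 */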
904 int
905 xbf_get_type(struct xbf_softc *sc)
906 {
907 unsigned long long res;
908 const char *prop;
909 char val[32];
910 int error;
911
912 prop = "type";
913 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
914 sizeof(val))) != 0)
915 goto errout;
916 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s", val);
917
918 prop = "dev";
919 if ((error = xs_getprop(sc->sc_parent, sc->sc_backend, prop, val,
920 sizeof(val))) != 0)
921 goto errout;
922 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %s", sc->sc_prod, val);
923
924 prop = "virtual-device";
925 if ((error = xs_getnum(sc->sc_parent, sc->sc_node, prop, &res)) != 0)
926 goto errout;
927 sc->sc_unit = (uint32_t)res;
928 snprintf(sc->sc_prod, sizeof(sc->sc_prod), "%s %llu", sc->sc_prod, res);
929
930 prop = "device-type";
931 if ((error = xs_getprop(sc->sc_parent, sc->sc_node, prop,
932 sc->sc_dtype, sizeof(sc->sc_dtype))) != 0)
933 goto errout;
934 if (!strcmp(sc->sc_dtype, "cdrom"))
935 sc->sc_type = XBF_CDROM;
936
937 return (0);
938
939 errout:
940 printf("%s: failed to read \"%s\" property\n", sc->sc_dev.dv_xname,
941 prop);
942 return (-1);
943 }
944
945 int
946 xbf_init(struct xbf_softc *sc)
947 {
948 unsigned long long res;
949 const char *action, *prop;
950 char pbuf[sizeof("ring-refXX")];
951 unsigned int i;
952 int error;
953
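	/*
	 * Negotiate the ring size: prefer the backend's "max-ring-page-order",
	 * fall back to the legacy "max-ring-pages", then clamp the result to a
	 * supported power of two.
	 */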
954 prop = "max-ring-page-order";
955 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
956 if (error == 0)
957 sc->sc_xr_size = 1 << res;
958 if (error == ENOENT) {
959 prop = "max-ring-pages";
960 error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res);
961 if (error == 0)
962 sc->sc_xr_size = res;
963 }
964 /* Fallback to the known minimum */
965 if (error)
966 sc->sc_xr_size = XBF_MIN_RING_SIZE;
967
968 if (sc->sc_xr_size < XBF_MIN_RING_SIZE)
969 sc->sc_xr_size = XBF_MIN_RING_SIZE;
970 if (sc->sc_xr_size > XBF_MAX_RING_SIZE)
971 sc->sc_xr_size = XBF_MAX_RING_SIZE;
972 if (!powerof2(sc->sc_xr_size))
973 sc->sc_xr_size = 1 << (fls(sc->sc_xr_size) - 1);
974
975 sc->sc_xr_ndesc = ((sc->sc_xr_size * PAGE_SIZE) -
976 sizeof(struct xbf_ring)) / sizeof(union xbf_ring_desc);
977 if (!powerof2(sc->sc_xr_ndesc))
978 sc->sc_xr_ndesc = 1 << (fls(sc->sc_xr_ndesc) - 1);
979 if (sc->sc_xr_ndesc > XBF_MAX_REQS)
980 sc->sc_xr_ndesc = XBF_MAX_REQS;
981
982 DPRINTF("%s: %u ring pages, %d requests\n",
983 sc->sc_dev.dv_xname, sc->sc_xr_size, sc->sc_xr_ndesc);
984
985 if (xbf_ring_create(sc))
986 return (-1);
987
988 action = "set";
989
990 for (i = 0; i < sc->sc_xr_size; i++) {
991 if (i == 0 && sc->sc_xr_size == 1)
992 snprintf(pbuf, sizeof(pbuf), "ring-ref");
993 else
994 snprintf(pbuf, sizeof(pbuf), "ring-ref%d", i);
995 prop = pbuf;
996 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
997 sc->sc_xr_ref[i]))
998 goto errout;
999 }
1000
1001 if (sc->sc_xr_size > 1) {
1002 prop = "num-ring-pages";
1003 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1004 sc->sc_xr_size))
1005 goto errout;
1006 prop = "ring-page-order";
1007 if (xs_setnum(sc->sc_parent, sc->sc_node, prop,
1008 fls(sc->sc_xr_size) - 1))
1009 goto errout;
1010 }
1011
1012 prop = "event-channel";
1013 if (xs_setnum(sc->sc_parent, sc->sc_node, prop, sc->sc_xih))
1014 goto errout;
1015
1016 prop = "protocol";
1017 #ifdef __amd64__
1018 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_64-abi",
1019 strlen("x86_64-abi")))
1020 goto errout;
1021 #else
1022 if (xs_setprop(sc->sc_parent, sc->sc_node, prop, "x86_32-abi",
1023 strlen("x86_32-abi")))
1024 goto errout;
1025 #endif
1026
1027 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1028 XEN_STATE_INITIALIZED, strlen(XEN_STATE_INITIALIZED))) {
1029 printf("%s: failed to set state to INITIALIZED\n",
1030 sc->sc_dev.dv_xname);
1031 xbf_ring_destroy(sc);
1032 return (-1);
1033 }
1034
1035 if (xs_await_transition(sc->sc_parent, sc->sc_backend, "state",
1036 XEN_STATE_CONNECTED, 10000)) {
1037 printf("%s: timed out waiting for backend to connect\n",
1038 sc->sc_dev.dv_xname);
1039 xbf_ring_destroy(sc);
1040 return (-1);
1041 }
1042
1043 action = "read";
1044
1045 prop = "sectors";
1046 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1047 goto errout;
1048 sc->sc_disk_size = res;
1049
1050 prop = "sector-size";
1051 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0)
1052 goto errout;
1053 sc->sc_block_size = res;
1054
1055 prop = "feature-barrier";
1056 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1057 && error != ENOENT)
1058 goto errout;
1059 if (error == 0 && res == 1)
1060 sc->sc_caps |= XBF_CAP_BARRIER;
1061
1062 prop = "feature-flush-cache";
1063 if ((error = xs_getnum(sc->sc_parent, sc->sc_backend, prop, &res)) != 0
1064 && error != ENOENT)
1065 goto errout;
1066 if (error == 0 && res == 1)
1067 sc->sc_caps |= XBF_CAP_FLUSH;
1068
1069 #ifdef XBF_DEBUG
1070 if (sc->sc_caps) {
1071 printf("%s: features:", sc->sc_dev.dv_xname);
1072 if (sc->sc_caps & XBF_CAP_BARRIER)
1073 printf(" BARRIER");
1074 if (sc->sc_caps & XBF_CAP_FLUSH)
1075 printf(" FLUSH");
1076 printf("\n");
1077 }
1078 #endif
1079
1080 if (xs_setprop(sc->sc_parent, sc->sc_node, "state",
1081 XEN_STATE_CONNECTED, strlen(XEN_STATE_CONNECTED))) {
1082 printf("%s: failed to set state to CONNECTED\n",
1083 sc->sc_dev.dv_xname);
1084 return (-1);
1085 }
1086
1087 sc->sc_state = XBF_CONNECTED;
1088
1089 return (0);
1090
1091 errout:
1092 printf("%s: failed to %s \"%s\" property (%d)\n", sc->sc_dev.dv_xname,
1093 action, prop, error);
1094 xbf_ring_destroy(sc);
1095 return (-1);
1096 }
1097
1098 int
1099 xbf_dma_alloc(struct xbf_softc *sc, struct xbf_dma_mem *dma,
1100 bus_size_t size, int nsegs, int mapflags)
1101 {
1102 int error;
1103
1104 dma->dma_tag = sc->sc_dmat;
1105
1106 dma->dma_seg = mallocarray(nsegs, sizeof(bus_dma_segment_t), M_DEVBUF,
1107 M_ZERO | M_NOWAIT);
1108 if (dma->dma_seg == NULL) {
1109 printf("%s: failed to allocate a segment array\n",
1110 sc->sc_dev.dv_xname);
1111 return (ENOMEM);
1112 }
1113
1114 error = bus_dmamap_create(dma->dma_tag, size, nsegs, PAGE_SIZE, 0,
1115 BUS_DMA_NOWAIT, &dma->dma_map);
1116 if (error) {
1117 printf("%s: failed to create a memory map (%d)\n",
1118 sc->sc_dev.dv_xname, error);
1119 goto errout;
1120 }
1121
1122 error = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
1123 dma->dma_seg, nsegs, &dma->dma_rsegs, BUS_DMA_ZERO |
1124 BUS_DMA_NOWAIT);
1125 if (error) {
1126 printf("%s: failed to allocate DMA memory (%d)\n",
1127 sc->sc_dev.dv_xname, error);
1128 goto destroy;
1129 }
1130
1131 error = bus_dmamem_map(dma->dma_tag, dma->dma_seg, dma->dma_rsegs,
1132 size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
1133 if (error) {
1134 printf("%s: failed to map DMA memory (%d)\n",
1135 sc->sc_dev.dv_xname, error);
1136 goto free;
1137 }
1138
1139 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1140 size, NULL, mapflags | BUS_DMA_NOWAIT);
1141 if (error) {
1142 printf("%s: failed to load DMA memory (%d)\n",
1143 sc->sc_dev.dv_xname, error);
1144 goto unmap;
1145 }
1146
1147 dma->dma_size = size;
1148 dma->dma_nsegs = nsegs;
1149 return (0);
1150
1151 unmap:
1152 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1153 free:
1154 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1155 destroy:
1156 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1157 errout:
1158 free(dma->dma_seg, M_DEVBUF, nsegs * sizeof(bus_dma_segment_t));
1159 dma->dma_map = NULL;
1160 dma->dma_tag = NULL;
1161 return (error);
1162 }
1163
1164 void
1165 xbf_dma_free(struct xbf_softc *sc, struct xbf_dma_mem *dma)
1166 {
1167 if (dma->dma_tag == NULL || dma->dma_map == NULL)
1168 return;
1169 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, dma->dma_size,
1170 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1171 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1172 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1173 bus_dmamem_free(dma->dma_tag, dma->dma_seg, dma->dma_rsegs);
1174 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1175 free(dma->dma_seg, M_DEVBUF, dma->dma_nsegs * sizeof(bus_dma_segment_t));
1176 dma->dma_seg = NULL;
1177 dma->dma_map = NULL;
1178 dma->dma_size = 0;
1179 }
1180
1181 int
1182 xbf_ring_create(struct xbf_softc *sc)
1183 {
1184 int i;
1185
1186 if (xbf_dma_alloc(sc, &sc->sc_xr_dma, sc->sc_xr_size * PAGE_SIZE,
1187 sc->sc_xr_size, sc->sc_domid << 16))
1188 return (-1);
1189 for (i = 0; i < sc->sc_xr_dma.dma_map->dm_nsegs; i++)
1190 sc->sc_xr_ref[i] = sc->sc_xr_dma.dma_map->dm_segs[i].ds_addr;
1191
1192 sc->sc_xr = (struct xbf_ring *)sc->sc_xr_dma.dma_vaddr;
1193
1194 sc->sc_xr->xr_prod_event = sc->sc_xr->xr_cons_event = 1;
1195
1196 for (i = 0; i < sc->sc_xr_ndesc; i++)
1197 sc->sc_xr->xr_desc[i].xrd_req.req_id = i;
1198
1199 /* The number of contiguous blocks addressable by one descriptor */
1200 sc->sc_xrd_nblk = (PAGE_SIZE * XBF_MAX_SGE) / (1 << XBF_SEC_SHIFT);
1201
1202 if (xbf_alloc_ccbs(sc)) {
1203 xbf_ring_destroy(sc);
1204 return (-1);
1205 }
1206
1207 return (0);
1208 }
1209
1210 void
1211 xbf_ring_destroy(struct xbf_softc *sc)
1212 {
1213 xbf_free_ccbs(sc);
1214 xbf_dma_free(sc, &sc->sc_xr_dma);
1215 sc->sc_xr = NULL;
1216 }
1217
1218 void
1219 xbf_stop(struct xbf_softc *sc)
1220 {
1221 struct xbf_ccb *ccb, *nccb;
1222 bus_dmamap_t map;
1223
1224 bus_dmamap_sync(sc->sc_dmat, sc->sc_xr_dma.dma_map, 0,
1225 sc->sc_xr_dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD |
1226 BUS_DMASYNC_POSTWRITE);
1227
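	/* Fail all outstanding commands and release their DMA resources. */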
1228 TAILQ_FOREACH_SAFE(ccb, &sc->sc_ccb_sq, ccb_link, nccb) {
1229 TAILQ_REMOVE(&sc->sc_ccb_sq, ccb, ccb_link);
1230
1231 if (ccb->ccb_bbuf.dma_size > 0)
1232 map = ccb->ccb_bbuf.dma_map;
1233 else
1234 map = ccb->ccb_dmap;
1235 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1236 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1237 bus_dmamap_unload(sc->sc_dmat, map);
1238
1239 xbf_reclaim_cmd(ccb->ccb_xfer);
1240 xbf_scsi_done(ccb->ccb_xfer, XS_SELTIMEOUT);
1241 }
1242
1243 xbf_ring_destroy(sc);
1244 }
1245
1246 int
1247 xbf_alloc_ccbs(struct xbf_softc *sc)
1248 {
1249 int i, error;
1250
1251 TAILQ_INIT(&sc->sc_ccb_fq);
1252 TAILQ_INIT(&sc->sc_ccb_sq);
1253 mtx_init(&sc->sc_ccb_fqlck, IPL_BIO);
1254 mtx_init(&sc->sc_ccb_sqlck, IPL_BIO);
1255
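	/* Each CCB may consume up to two ring descriptors (see below). */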
1256 sc->sc_nccb = sc->sc_xr_ndesc / 2;
1257
1258 sc->sc_ccbs = mallocarray(sc->sc_nccb, sizeof(struct xbf_ccb),
1259 M_DEVBUF, M_ZERO | M_NOWAIT);
1260 if (sc->sc_ccbs == NULL) {
1261 printf("%s: failed to allocate CCBs\n", sc->sc_dev.dv_xname);
1262 return (-1);
1263 }
1264
1265 for (i = 0; i < sc->sc_nccb; i++) {
1266 /*
1267 * Each CCB is set up to use up to 2 descriptors and
1268 * each descriptor can transfer XBF_MAX_SGE number of
1269 * pages.
1270 */
1271 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2 *
1272 XBF_MAX_SGE, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
1273 &sc->sc_ccbs[i].ccb_dmap);
1274 if (error) {
1275 printf("%s: failed to create a memory map for "
1276 "the xfer %d (%d)\n", sc->sc_dev.dv_xname, i,
1277 error);
1278 goto errout;
1279 }
1280
1281 xbf_put_ccb(sc, &sc->sc_ccbs[i]);
1282 }
1283
1284 scsi_iopool_init(&sc->sc_iopool, sc, xbf_get_ccb, xbf_put_ccb);
1285
1286 return (0);
1287
1288 errout:
1289 xbf_free_ccbs(sc);
1290 return (-1);
1291 }
1292
1293 void
1294 xbf_free_ccbs(struct xbf_softc *sc)
1295 {
1296 struct xbf_ccb *ccb;
1297 int i;
1298
1299 for (i = 0; i < sc->sc_nccb; i++) {
1300 ccb = &sc->sc_ccbs[i];
1301 if (ccb->ccb_dmap == NULL)
1302 continue;
1303 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmap, 0, 0,
1304 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1305 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmap);
1306 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmap);
1307 }
1308
1309 free(sc->sc_ccbs, M_DEVBUF, sc->sc_nccb * sizeof(struct xbf_ccb));
1310 sc->sc_ccbs = NULL;
1311 sc->sc_nccb = 0;
1312 }
1313
1314 void *
1315 xbf_get_ccb(void *xsc)
1316 {
1317 struct xbf_softc *sc = xsc;
1318 struct xbf_ccb *ccb;
1319
1320 if (sc->sc_state != XBF_CONNECTED &&
1321 sc->sc_state != XBF_CLOSING)
1322 return (NULL);
1323
1324 mtx_enter(&sc->sc_ccb_fqlck);
1325 ccb = TAILQ_FIRST(&sc->sc_ccb_fq);
1326 if (ccb != NULL)
1327 TAILQ_REMOVE(&sc->sc_ccb_fq, ccb, ccb_link);
1328 mtx_leave(&sc->sc_ccb_fqlck);
1329
1330 return (ccb);
1331 }
1332
1333 void
1334 xbf_put_ccb(void *xsc, void *io)
1335 {
1336 struct xbf_softc *sc = xsc;
1337 struct xbf_ccb *ccb = io;
1338
1339 ccb->ccb_xfer = NULL;
1340
1341 mtx_enter(&sc->sc_ccb_fqlck);
1342 TAILQ_INSERT_HEAD(&sc->sc_ccb_fq, ccb, ccb_link);
1343 mtx_leave(&sc->sc_ccb_fqlck);
1344 }
1345