xref: /netbsd-src/sys/dev/ic/ciss.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
1 /*	$NetBSD: ciss.c,v 1.50 2020/07/16 14:39:33 jdolecek Exp $	*/
2 /*	$OpenBSD: ciss.c,v 1.68 2013/05/30 16:15:02 deraadt Exp $	*/
3 
4 /*
5  * Copyright (c) 2005,2006 Michael Shalayeff
6  * All rights reserved.
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
17  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
18  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: ciss.c,v 1.50 2020/07/16 14:39:33 jdolecek Exp $");
23 
24 #include "bio.h"
25 
26 /* #define CISS_DEBUG */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/buf.h>
31 #include <sys/ioctl.h>
32 #include <sys/device.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 
37 #include <sys/bus.h>
38 
39 #include <dev/scsipi/scsi_all.h>
40 #include <dev/scsipi/scsi_disk.h>
41 #include <dev/scsipi/scsiconf.h>
42 #include <dev/scsipi/scsipi_all.h>
43 
44 #include <dev/ic/cissreg.h>
45 #include <dev/ic/cissvar.h>
46 
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50 
51 #ifdef CISS_DEBUG
52 #define	CISS_DPRINTF(m,a)	if (ciss_debug & (m)) printf a
53 #define	CISS_D_CMD	0x0001
54 #define	CISS_D_INTR	0x0002
55 #define	CISS_D_MISC	0x0004
56 #define	CISS_D_DMA	0x0008
57 #define	CISS_D_IOCTL	0x0010
58 #define	CISS_D_ERR	0x0020
59 int ciss_debug = 0
60 	| CISS_D_CMD
61 	| CISS_D_INTR
62 	| CISS_D_MISC
63 	| CISS_D_DMA
64 	| CISS_D_IOCTL
65 	| CISS_D_ERR
66 	;
67 #else
68 #define	CISS_DPRINTF(m,a)	/* m, a */
69 #endif
70 
71 static void	ciss_scsi_cmd(struct scsipi_channel *chan,
72 			scsipi_adapter_req_t req, void *arg);
73 static int	ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
74 	    void *addr, int flag, struct proc *p);
75 static void	cissminphys(struct buf *bp);
76 
77 static int	ciss_sync(struct ciss_softc *sc);
78 static void	ciss_heartbeat(void *v);
79 static void	ciss_shutdown(void *v);
80 
81 static struct ciss_ccb *ciss_get_ccb(struct ciss_softc *);
82 static void	ciss_put_ccb(struct ciss_softc *, struct ciss_ccb *);
83 static int	ciss_cmd(struct ciss_softc *, struct ciss_ccb *, int, int);
84 static int	ciss_done(struct ciss_softc *, struct ciss_ccb *);
85 static int	ciss_error(struct ciss_softc *, struct ciss_ccb *);
86 struct ciss_ld *ciss_pdscan(struct ciss_softc *sc, int ld);
87 static int	ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq);
88 int	ciss_ldid(struct ciss_softc *, int, struct ciss_ldid *);
89 int	ciss_ldstat(struct ciss_softc *, int, struct ciss_ldstat *);
90 static int	ciss_ldmap(struct ciss_softc *sc);
91 int	ciss_pdid(struct ciss_softc *, u_int8_t, struct ciss_pdid *, int);
92 
93 #if NBIO > 0
94 int		ciss_ioctl(device_t, u_long, void *);
95 int		ciss_ioctl_vol(struct ciss_softc *, struct bioc_vol *);
96 int		ciss_blink(struct ciss_softc *, int, int, int, struct ciss_blink *);
97 int		ciss_create_sensors(struct ciss_softc *);
98 void		ciss_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
99 #endif /* NBIO > 0 */
100 
101 static struct ciss_ccb *
102 ciss_get_ccb(struct ciss_softc *sc)
103 {
104 	struct ciss_ccb *ccb;
105 
106 	mutex_enter(&sc->sc_mutex);
107 	if ((ccb = TAILQ_LAST(&sc->sc_free_ccb, ciss_queue_head))) {
108 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
109 		ccb->ccb_state = CISS_CCB_READY;
110 	}
111 	mutex_exit(&sc->sc_mutex);
112 	return ccb;
113 }
114 
115 static void
116 ciss_put_ccb(struct ciss_softc *sc, struct ciss_ccb *ccb)
117 {
118 	ccb->ccb_state = CISS_CCB_FREE;
119 	mutex_enter(&sc->sc_mutex);
120 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
121 	mutex_exit(&sc->sc_mutex);
122 }
123 
/*
 * Set up the "performant" transport mode: read the transport-method
 * block from controller config space, allocate/map/load the DMA'd
 * reply queue that the controller posts completion tags to, and
 * preload the SG fetch-count table.  Returns 0 on success, -1 on
 * failure (everything allocated here is released on the error paths).
 * Called from ciss_attach() before the configuration is written back.
 */
static int
ciss_init_perf(struct ciss_softc *sc)
{
	struct ciss_perf_config *pc = &sc->perfcfg;
	int error, total, rseg;

	/* Performant mode may allow more outstanding commands. */
	if (sc->cfg.max_perfomant_mode_cmds)
		sc->maxcmd = sc->cfg.max_perfomant_mode_cmds;

	/* Snapshot the current transport config from chip memory. */
	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff,
	    (u_int32_t *)pc, sizeof(*pc) / 4);

	/* One 64bit reply descriptor per possible outstanding command. */
	total = sizeof(uint64_t) * sc->maxcmd;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
	    sc->replyseg, 1, &rseg, BUS_DMA_WAITOK))) {
		aprint_error(": cannot allocate perf area (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->replyseg, rseg, total,
	    (void **)&sc->perf_reply, BUS_DMA_WAITOK))) {
		aprint_error(": cannot map perf area (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
	    total, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->replymap))) {
		aprint_error(": cannot create perf dmamap (%d)\n", error);
		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
		sc->perf_reply = NULL;
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->replymap, sc->perf_reply,
	    total, NULL, BUS_DMA_WAITOK))) {
		aprint_error(": cannot load perf dmamap (%d)\n", error);
		bus_dmamap_destroy(sc->sc_dmat, sc->replymap);
		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
		sc->perf_reply = NULL;
		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
		return -1;
	}

	memset(sc->perf_reply, 0, total);

	/*
	 * The reply ring starts with cycle bit 1; the controller toggles
	 * the bit each time it wraps (see ciss_completed_perf()).
	 */
	sc->perf_cycle = 0x1;
	sc->perf_rqidx = 0;

	/*
	* Preload the fetch table with common command sizes.  This allows the
	* hardware to not waste bus cycles for typical i/o commands, but also
	* not tax the driver to be too exact in choosing sizes.  The table
	* is optimized for page-aligned i/o's, but since most i/o comes
	* from the various pagers, it's a reasonable assumption to make.
	*/
	/* Number of 16-byte units covering a command with x SG entries. */
#define CISS_FETCH_COUNT(x)	\
    (sizeof(struct ciss_cmd) + sizeof(struct ciss_sg_entry) * (x - 1) + 15) / 16

	pc->fetch_count[CISS_SG_FETCH_NONE] = CISS_FETCH_COUNT(0);
	pc->fetch_count[CISS_SG_FETCH_1] = CISS_FETCH_COUNT(1);
	pc->fetch_count[CISS_SG_FETCH_2] = CISS_FETCH_COUNT(2);
	pc->fetch_count[CISS_SG_FETCH_4] = CISS_FETCH_COUNT(4);
	pc->fetch_count[CISS_SG_FETCH_8] = CISS_FETCH_COUNT(8);
	pc->fetch_count[CISS_SG_FETCH_16] = CISS_FETCH_COUNT(16);
	pc->fetch_count[CISS_SG_FETCH_32] = CISS_FETCH_COUNT(32);
	pc->fetch_count[CISS_SG_FETCH_MAX] = (sc->ccblen + 15) / 16;

	pc->rq_size = sc->maxcmd;
	pc->rq_count = 1;	/* Hardcode for a single queue */
	pc->rq_bank_hi = 0;
	pc->rq_bank_lo = 0;
	pc->rq[0].rq_addr_hi = 0x0;
	pc->rq[0].rq_addr_lo = sc->replymap->dm_segs[0].ds_addr;

	/*
	 * Write back the changed configuration.  It will be picked up
	 * by the controller together with the general configuration
	 * later on (ciss_attach() rings CISS_IDB_CFG).
	 */
	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff,
	    (u_int32_t *)pc, sizeof(*pc) / 4);
	bus_space_barrier(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + sc->cfg.troff, sizeof(*pc),
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	return 0;
}
215 
/*
 * Common controller attachment, called by the bus front-end after it
 * has mapped the register windows and read the config block into
 * sc->cfg.  Validates the configuration, negotiates a transport
 * method (performant or simple), allocates the CCB pool and scratch
 * DMA page, queries the adapter, maps the logical drives and attaches
 * the scsipi channel.  Returns 0 on success, -1 on fatal error.
 * NOTE(review): several error paths below leak previously allocated
 * resources (see the existing TODO); kept as-is here.
 */
int
ciss_attach(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_inquiry *inq;
	bus_dma_segment_t seg[1];
	int error, i, total, rseg, maxfer;
	paddr_t pa;

	if (sc->cfg.signature != CISS_SIGNATURE) {
		aprint_error(": bad sign 0x%08x\n", sc->cfg.signature);
		return -1;
	}

	if (!(sc->cfg.methods & (CISS_METH_SIMPL|CISS_METH_PERF))) {
		aprint_error(": no supported method 0x%08x\n", sc->cfg.methods);
		return -1;
	}

	if (!sc->cfg.maxsg)
		sc->cfg.maxsg = MAXPHYS / PAGE_SIZE + 1;

	sc->maxcmd = sc->cfg.maxcmd;
	sc->maxsg = sc->cfg.maxsg;
	if (sc->maxsg > MAXPHYS / PAGE_SIZE + 1)
		sc->maxsg = MAXPHYS / PAGE_SIZE + 1;
	/* Round the per-ccb size up to the next power of two (>= 16). */
	i = sizeof(struct ciss_ccb) +
	    sizeof(ccb->ccb_cmd.sgl[0]) * (sc->maxsg - 1);
	for (sc->ccblen = 0x10; sc->ccblen < i; sc->ccblen <<= 1);

	sc->cfg.paddr_lim = 0;			/* 32bit addrs */
	sc->cfg.int_delay = 0;			/* disable coalescing */
	sc->cfg.int_count = 0;
	strlcpy(sc->cfg.hostname, "HUMPPA", sizeof(sc->cfg.hostname));
	sc->cfg.driverf |= CISS_DRV_PRF;	/* enable prefetch */
	if (CISS_PERF_SUPPORTED(sc)) {
		sc->cfg.rmethod = CISS_METH_PERF | CISS_METH_SHORT_TAG;
		if (ciss_init_perf(sc) != 0) {
			/* Don't try to fallback, just bail out */
			return -1;
		}
	} else {
		sc->cfg.rmethod = CISS_METH_SIMPL;
	}

	/* Push the modified configuration back to the controller ... */
	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);
	bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff, sizeof(sc->cfg),
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	/* ... and ring the doorbell, then wait up to 1s for the ack. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IDB, CISS_IDB_CFG);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
	    BUS_SPACE_BARRIER_WRITE);
	for (i = 1000; i--; DELAY(1000)) {
		/* XXX maybe IDB is really 64bit? - hp dl380 needs this */
		(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB + 4);
		if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG))
			break;
		bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG) {
		aprint_error(": cannot set config\n");
		return -1;
	}

	/* Re-read the config to learn which method the firmware chose. */
	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);

	if (!(sc->cfg.amethod & (CISS_METH_SIMPL|CISS_METH_PERF))) {
		aprint_error(": cannot set method 0x%08x\n", sc->cfg.amethod);
		return -1;
	}

	/* wait up to 30s for the controller to report itself ready */
	for (i = 30000; i--; DELAY(1000)) {
		if (bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)
			break;
		bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod), 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (!(bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
	    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)) {
		aprint_error(": she never came ready for me 0x%08x\n",
		    sc->cfg.amethod);
		return -1;
	}

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_mutex_scratch, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_condvar, "ciss_cmd");

	/* Allocate one contiguous DMA-able chunk holding all the ccbs. */
	total = sc->ccblen * sc->maxcmd;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
	    sc->cmdseg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot allocate CCBs (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->cmdseg, rseg, total,
	    (void **)&sc->ccbs, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot map CCBs (%d)\n", error);
		return -1;
	}
	memset(sc->ccbs, 0, total);

	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
	    total, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->cmdmap))) {
		aprint_error(": cannot create CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->cmdmap, sc->ccbs, total,
	    NULL, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot load CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	TAILQ_INIT(&sc->sc_free_ccb);

	/* Initialize each ccb and give it a per-transfer dmamap. */
	maxfer = sc->maxsg * PAGE_SIZE;
	for (i = 0; total > 0 && i < sc->maxcmd; i++, total -= sc->ccblen) {
		ccb = (struct ciss_ccb *) ((char *)sc->ccbs + i * sc->ccblen);
		cmd = &ccb->ccb_cmd;
		pa = sc->cmdseg[0].ds_addr + i * sc->ccblen;

		ccb->ccb_cmdpa = pa + offsetof(struct ciss_ccb, ccb_cmd);
		ccb->ccb_state = CISS_CCB_FREE;

		/* Tag = index << 2; low 2 bits carry completion flags. */
		cmd->id = htole32(i << 2);
		cmd->id_hi = htole32(0);
		cmd->sgin = sc->maxsg;
		cmd->sglen = htole16((u_int16_t)cmd->sgin);
		cmd->err_len = htole32(sizeof(ccb->ccb_err));
		pa += offsetof(struct ciss_ccb, ccb_err);
		cmd->err_pa = htole64((u_int64_t)pa);

		if ((error = bus_dmamap_create(sc->sc_dmat, maxfer, sc->maxsg,
		    maxfer, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap)))
			break;

		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	}

	/* Partial success is tolerated; zero usable ccbs is fatal. */
	if (i < sc->maxcmd) {
		aprint_error(": cannot create ccb#%d dmamap (%d)\n", i, error);
		if (i == 0) {
			/* TODO leaking cmd's dmamaps and shitz */
			bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
			bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
			return -1;
		}
	}

	/* One page of scratch space for internal commands (inq/ldmap/...). */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot allocate scratch buffer (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seg, rseg, PAGE_SIZE,
	    (void **)&sc->scratch, BUS_DMA_NOWAIT))) {
		aprint_error(": cannot map scratch buffer (%d)\n", error);
		return -1;
	}
	memset(sc->scratch, 0, PAGE_SIZE);
	sc->sc_waitflag = XS_CTL_NOSLEEP;		/* can't sleep yet */

	mutex_enter(&sc->sc_mutex_scratch);	/* is this really needed? */
	inq = sc->scratch;
	if (ciss_inq(sc, inq)) {
		aprint_error(": adapter inquiry failed\n");
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	if (!(inq->flags & CISS_INQ_BIGMAP)) {
		aprint_error(": big map is not supported, flags=0x%x\n",
		    inq->flags);
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	sc->maxunits = inq->numld;
	sc->nbus = inq->nscsi_bus;
	sc->ndrives = inq->buswidth ? inq->buswidth : 256;
	aprint_normal(": %d LD%s, HW rev %d, FW %4.4s/%4.4s",
	    inq->numld, inq->numld == 1? "" : "s",
	    inq->hw_rev, inq->fw_running, inq->fw_stored);

	if (sc->cfg.methods & CISS_METH_FIFO64)
		aprint_normal(", 64bit fifo");
	else if (sc->cfg.methods & CISS_METH_FIFO64_RRO)
		aprint_normal(", 64bit fifo rro");
	aprint_normal(", method %s %#x",
	    CISS_IS_PERF(sc) ? "perf" : "simple",
	    sc->cfg.amethod);
	aprint_normal("\n");

	mutex_exit(&sc->sc_mutex_scratch);

	/* Periodic heartbeat check, every 3 seconds. */
	callout_init(&sc->sc_hb, 0);
	callout_setfunc(&sc->sc_hb, ciss_heartbeat, sc);
	callout_schedule(&sc->sc_hb, hz * 3);

	/* map LDs */
	if (ciss_ldmap(sc)) {
		aprint_error_dev(sc->sc_dev, "adapter LD map failed\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	sc->sc_lds = malloc(sc->maxunits * sizeof(*sc->sc_lds),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/* Flush the cache on shutdown. */
	sc->sc_flush = CISS_FLUSH_ENABLE;
	if (!(sc->sc_sh = shutdownhook_establish(ciss_shutdown, sc))) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish shutdown hook\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	/* Logical drives appear as targets on a single-LUN channel. */
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = sc->maxunits;
	sc->sc_channel.chan_nluns = 1;	/* ciss doesn't really have SCSI luns */
	sc->sc_channel.chan_openings = sc->maxcmd;
#if NBIO > 0
	/* XXX Reserve some ccb's for sensor and bioctl. */
	if (sc->sc_channel.chan_openings > 2)
		sc->sc_channel.chan_openings -= 2;
#endif
	sc->sc_channel.chan_flags = 0;
	sc->sc_channel.chan_id = sc->maxunits;

	sc->sc_adapter.adapt_dev = sc->sc_dev;
	sc->sc_adapter.adapt_openings = sc->sc_channel.chan_openings;
	sc->sc_adapter.adapt_max_periph = uimin(sc->sc_adapter.adapt_openings, 256);
	sc->sc_adapter.adapt_request = ciss_scsi_cmd;
	sc->sc_adapter.adapt_minphys = cissminphys;
	sc->sc_adapter.adapt_ioctl = ciss_scsi_ioctl;
	sc->sc_adapter.adapt_nchannels = 1;
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);

#if 0
	sc->sc_link_raw.adapter_softc = sc;
	sc->sc_link.openings = sc->sc_channel.chan_openings;
	sc->sc_link_raw.adapter = &ciss_raw_switch;
	sc->sc_link_raw.adapter_target = sc->ndrives;
	sc->sc_link_raw.adapter_buswidth = sc->ndrives;
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
#endif

#if NBIO > 0
	/* now map all the physdevs into their lds */
	/* XXX currently we assign all of them into ld0 */
	for (i = 0; i < sc->maxunits && i < 1; i++)
		if (!(sc->sc_lds[i] = ciss_pdscan(sc, i))) {
			sc->sc_waitflag = 0;	/* we can sleep now */
			return 0;
		}

	if (bio_register(sc->sc_dev, ciss_ioctl) != 0)
		aprint_error_dev(sc->sc_dev, "controller registration failed");
	else
		sc->sc_ioctl = ciss_ioctl;
	if (ciss_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors");
#endif
	sc->sc_waitflag = 0;			/* we can sleep now */

	return 0;
}
506 
507 static void
508 ciss_shutdown(void *v)
509 {
510 	struct ciss_softc *sc = v;
511 
512 	sc->sc_flush = CISS_FLUSH_DISABLE;
513 	/* timeout_del(&sc->sc_hb); */
514 	ciss_sync(sc);
515 }
516 
/*
 * Clamp a transfer to the adapter's limits.  The per-adapter clamp is
 * not implemented (no softc is reachable from here), so this simply
 * defers to the generic minphys().
 */
static void
cissminphys(struct buf *bp)
{
#if 0	/* TODO */
#define	CISS_MAXFER	(PAGE_SIZE * (sc->maxsg + 1))
	if (bp->b_bcount > CISS_MAXFER)
		bp->b_bcount = CISS_MAXFER;
#endif
	minphys(bp);
}
527 
528 static void
529 ciss_enqueue(struct ciss_softc *sc, ciss_queue_head *q, uint32_t id)
530 {
531 	struct ciss_ccb *ccb;
532 
533 	KASSERT(mutex_owned(&sc->sc_mutex));
534 
535 	KASSERT((id >> 2) <= sc->maxcmd);
536 	ccb = (struct ciss_ccb *) ((char *)sc->ccbs + (id >> 2) * sc->ccblen);
537 	ccb->ccb_cmd.id = htole32(id);
538 	ccb->ccb_cmd.id_hi = htole32(0);
539 	TAILQ_INSERT_TAIL(q, ccb, ccb_link);
540 }
541 
/*
 * Drain the simple-mode outbound FIFO, queueing every completed
 * command onto q.  An empty FIFO reads back as 0xffffffff.  Three
 * register layouts are handled: 64bit FIFO (high word read first),
 * 64bit FIFO with reversed read order (low word first, high word
 * discarded), and the classic 32bit FIFO.  Must be called with
 * sc_mutex held (required by ciss_enqueue()).
 */
static void
ciss_completed_simple(struct ciss_softc *sc, ciss_queue_head *q)
{
	uint32_t id;

	KASSERT(mutex_owned(&sc->sc_mutex));

	for (;;) {
		if (sc->cfg.methods & CISS_METH_FIFO64) {
			/* High word first; 0xffffffff means FIFO empty. */
			if (bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_HI) == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_LO);
		} else if (sc->cfg.methods & CISS_METH_FIFO64_RRO) {
			/* Reversed read order: low word first. */
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_LO);
			if (id == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
			/* High word must still be consumed to pop the entry. */
			(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ64_HI);
		} else {
			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    CISS_OUTQ);
			if (id == 0xffffffff) {
				CISS_DPRINTF(CISS_D_CMD, ("Q"));
				break;
			}
		}

		CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
		ciss_enqueue(sc, q, id);
	}
}
580 
/*
 * Drain the performant-mode reply ring, queueing every completed
 * command onto q.  A reply slot is valid when its cycle bit matches
 * the driver's current cycle; the expected cycle flips each time the
 * ring index wraps (ring written by the controller, set up in
 * ciss_init_perf()).  Must be called with sc_mutex held.
 */
static void
ciss_completed_perf(struct ciss_softc *sc, ciss_queue_head *q)
{
	uint32_t id;

	KASSERT(mutex_owned(&sc->sc_mutex));

	for (;;) {
		id = sc->perf_reply[sc->perf_rqidx];
		/* Stale cycle bit: controller hasn't written this slot yet. */
		if ((id & CISS_CYCLE_MASK) != sc->perf_cycle)
			break;

		/* Advance, toggling the expected cycle on wrap-around. */
		if (++sc->perf_rqidx == sc->maxcmd) {
			sc->perf_rqidx = 0;
			sc->perf_cycle ^= 1;
		}

		CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
		ciss_enqueue(sc, q, id);
	}
}
602 
/*
 * Busy-wait (up to "ms" milliseconds, in 10us steps) for ccb to
 * complete, harvesting completions with interrupts effectively
 * ignored.  Any OTHER commands that complete meanwhile are processed
 * through ciss_done() as well.  Returns 0 once the target ccb is
 * done, ETIMEDOUT otherwise (caller, ciss_cmd(), then forces a
 * timeout completion).
 */
static int
ciss_poll(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
{
	ciss_queue_head q;
	struct ciss_ccb *ccb1;

	TAILQ_INIT(&q);
	ms /= 10;

	while (ms-- > 0) {
		DELAY(10);
		/* Harvest whatever has completed into the local queue. */
		mutex_enter(&sc->sc_mutex);
		if (CISS_IS_PERF(sc))
			ciss_completed_perf(sc, &q);
		else
			ciss_completed_simple(sc, &q);
		mutex_exit(&sc->sc_mutex);

		while (!TAILQ_EMPTY(&q)) {
			ccb1 = TAILQ_FIRST(&q);
			TAILQ_REMOVE(&q, ccb1, ccb_link);

			KASSERT(ccb1->ccb_state == CISS_CCB_ONQ);
			ciss_done(sc, ccb1);
			if (ccb1 == ccb) {
				/*
				 * The target completed; queue must be empty
				 * since we stop draining here.
				 */
				KASSERT(TAILQ_EMPTY(&q));
				return 0;
			}
		}
	}

	return ETIMEDOUT;
}
636 
/*
 * Sleep (up to "ms" milliseconds) until the interrupt handler marks
 * ccb as completed (CISS_CCB_ONQ), then finish it via ciss_done().
 * Returns 0 on completion, EWOULDBLOCK on timeout.
 *
 * NOTE(review): the ccb_state check happens only after each
 * cv_timedwait() wakeup; a completion signalled before the first
 * wait appears to be recovered only via the timeout path — confirm
 * against the interrupt handler's broadcast ordering.
 */
static int
ciss_wait(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
{
	int tohz, etick;

	/* Convert to ticks, always sleeping at least one tick. */
	tohz = mstohz(ms);
	if (tohz == 0)
		tohz = 1;
	etick = getticks() + tohz;	/* absolute deadline in ticks */

	for (;;) {
		CISS_DPRINTF(CISS_D_CMD, ("cv_timedwait(%d) ", tohz));
		mutex_enter(&sc->sc_mutex);
		if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, tohz)
		    == EWOULDBLOCK) {
			mutex_exit(&sc->sc_mutex);
			return EWOULDBLOCK;
		}
		mutex_exit(&sc->sc_mutex);
		if (ccb->ccb_state == CISS_CCB_ONQ) {
			ciss_done(sc, ccb);
			return 0;
		}
		/* Spurious wakeup: recompute the remaining time. */
		tohz = etick - getticks();
		if (tohz <= 0)
			return EWOULDBLOCK;
		CISS_DPRINTF(CISS_D_CMD, ("T"));
	}
}
666 
667 /*
668  * submit a command and optionally wait for completition.
669  * wait arg abuses XS_CTL_POLL|XS_CTL_NOSLEEP flags to request
670  * to wait (XS_CTL_POLL) and to allow tsleep() (!XS_CTL_NOSLEEP)
671  * instead of busy loop waiting
672  */
/*
 * Submit ccb to the controller.  Loads and syncs the data dmamap (if
 * the ccb carries data), selects the SG fetch hint for performant
 * mode, writes the command's physical address into the inbound FIFO,
 * and — when XS_CTL_POLL is set in "wait" — waits for completion by
 * sleeping (ciss_wait()) or busy-polling with interrupts masked
 * (ciss_poll()).  "flags" is passed to bus_dmamap_load().  Returns 0
 * or an errno; on failure before submission the ccb is returned to
 * the free list.
 */
static int
ciss_cmd(struct ciss_softc *sc, struct ciss_ccb *ccb, int flags, int wait)
{
	struct ciss_cmd *cmd = &ccb->ccb_cmd;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int64_t addr;
	int i, error = 0;
	/* POLL without NOSLEEP: wait by sleeping rather than spinning. */
	const bool pollsleep = ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) ==
	    XS_CTL_POLL);

	if (ccb->ccb_state != CISS_CCB_READY) {
		printf("%s: ccb %d not ready state=0x%x\n", device_xname(sc->sc_dev),
		    cmd->id, ccb->ccb_state);
		return (EINVAL);
	}

	if (ccb->ccb_data) {
		bus_dma_segment_t *sgd;

		if ((error = bus_dmamap_load(sc->sc_dmat, dmap, ccb->ccb_data,
		    ccb->ccb_len, NULL, flags))) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", sc->maxsg);
			else
				printf("error %d loading dma map\n", error);
			ciss_put_ccb(sc, ccb);
			return (error);
		}
		cmd->sgin = dmap->dm_nsegs;

		/* Fill the command's scatter/gather list (64bit addresses). */
		sgd = dmap->dm_segs;
		CISS_DPRINTF(CISS_D_DMA, ("data=%p/%zu<%#" PRIxPADDR "/%zu",
		    ccb->ccb_data, ccb->ccb_len, sgd->ds_addr, sgd->ds_len));

		for (i = 0; i < dmap->dm_nsegs; sgd++, i++) {
			cmd->sgl[i].addr_lo = htole32(sgd->ds_addr);
			cmd->sgl[i].addr_hi =
			    htole32((u_int64_t)sgd->ds_addr >> 32);
			cmd->sgl[i].len = htole32(sgd->ds_len);
			cmd->sgl[i].flags = htole32(0);
			if (i) {
				CISS_DPRINTF(CISS_D_DMA,
				    (",%#" PRIxPADDR "/%zu", sgd->ds_addr,
				    sgd->ds_len));
			}
		}

		CISS_DPRINTF(CISS_D_DMA, ("> "));

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * SG fetch hint for performant mode: tells the hardware
		 * how much of the command block to prefetch (see the
		 * fetch table set up in ciss_init_perf()).
		 */
		if (dmap->dm_nsegs == 0)
			ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
		else if (dmap->dm_nsegs == 1)
			ccb->ccb_sg_tag = CISS_SG_FETCH_1;
		else if (dmap->dm_nsegs == 2)
			ccb->ccb_sg_tag = CISS_SG_FETCH_2;
		else if (dmap->dm_nsegs <= 4)
			ccb->ccb_sg_tag = CISS_SG_FETCH_4;
		else if (dmap->dm_nsegs <= 8)
			ccb->ccb_sg_tag = CISS_SG_FETCH_8;
		else if (dmap->dm_nsegs <= 16)
			ccb->ccb_sg_tag = CISS_SG_FETCH_16;
		else if (dmap->dm_nsegs <= 32)
			ccb->ccb_sg_tag = CISS_SG_FETCH_32;
		else
			ccb->ccb_sg_tag = CISS_SG_FETCH_MAX;
	} else {
		ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
		cmd->sgin = 0;
	}
	cmd->sglen = htole16((u_int16_t)cmd->sgin);
	memset(&ccb->ccb_err, 0, sizeof(ccb->ccb_err));

	bus_dmamap_sync(sc->sc_dmat, sc->cmdmap, 0, sc->cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Busy-poll mode: mask interrupts while we spin. */
	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) | sc->iem);

	if (!pollsleep)
		ccb->ccb_state = CISS_CCB_ONQ;
	else
		ccb->ccb_state = CISS_CCB_POLL;
	CISS_DPRINTF(CISS_D_CMD, ("submit=0x%x ", cmd->id));

	addr = (u_int64_t)ccb->ccb_cmdpa;
	if (CISS_IS_PERF(sc)) {
		KASSERT((addr & 0xf) == 0);
		/*
		 * The bits in addr in performant mean:
		 * - performant mode bit (bit 0)
		 * - pull count (bits 1-3)
		 * There is no support for ioaccel mode
		 */
		addr |= 1 | (ccb->ccb_sg_tag << 1);
	}
	if (sc->cfg.methods & (CISS_METH_FIFO64|CISS_METH_FIFO64_RRO)) {
		/*
		 * Write the upper 32bits immediately before the lower
		 * 32bits and set bit 63 to indicate 64bit FIFO mode.
		 */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_HI,
		    (addr >> 32) | 0x80000000);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_LO,
		    addr & 0x00000000ffffffffULL);
	} else
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ,
		    (uint32_t)addr);

	if (wait & XS_CTL_POLL) {
		int ms;
		CISS_DPRINTF(CISS_D_CMD, ("waiting "));

		/* Internal commands have no xfer; default to 60s. */
		ms = ccb->ccb_xs ? ccb->ccb_xs->timeout : 60000;
		if (pollsleep)
			error = ciss_wait(sc, ccb, ms);
		else
			error = ciss_poll(sc, ccb, ms);

		/* if never got a chance to be done above... */
		if (ccb->ccb_state != CISS_CCB_FREE) {
			KASSERT(error);
			ccb->ccb_err.cmd_stat = CISS_ERR_TMO;
			error = ciss_done(sc, ccb);
		}

		CISS_DPRINTF(CISS_D_CMD, ("done %d:%d",
		    ccb->ccb_err.cmd_stat, ccb->ccb_err.scsi_stat));
	}

	/* Re-enable interrupts if we masked them above. */
	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) & ~sc->iem);

	return (error);
}
812 
/*
 * Finish a completed ccb: decode any error, sync and unload the data
 * dmamap, return the ccb to the free list and, when the command came
 * from scsipi, hand the xfer back via scsipi_done().  Returns the
 * errno derived from the controller's error info (0 on success), or
 * 1 if the ccb was not in the expected ONQ state.
 */
static int
ciss_done(struct ciss_softc *sc, struct ciss_ccb *ccb)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct ciss_cmd *cmd;
	int error = 0;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_done(%p) ", ccb));

	if (ccb->ccb_state != CISS_CCB_ONQ) {
		printf("%s: unqueued ccb %p ready, state=0x%x\n",
		    device_xname(sc->sc_dev), ccb, ccb->ccb_state);
		return 1;
	}

	ccb->ccb_state = CISS_CCB_READY;

	/* Low tag bits were set by the hardware on error completion. */
	if (ccb->ccb_cmd.id & CISS_CMD_ERR)
		error = ciss_error(sc, ccb);

	cmd = &ccb->ccb_cmd;
	if (ccb->ccb_data) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, (cmd->flags & CISS_CDB_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		ccb->ccb_xs = NULL;
		ccb->ccb_data = NULL;
	}

	ciss_put_ccb(sc, ccb);

	if (xs) {
		xs->resid = 0;
		CISS_DPRINTF(CISS_D_CMD, ("scsipi_done(%p) ", xs));
		if (xs->cmd->opcode == INQUIRY) {
			/*
			 * Quirk: if the firmware reports ANSI version 0
			 * but claims command queueing, raise the reported
			 * version to 2 so scsipi enables tagged queueing.
			 */
			struct scsipi_inquiry_data *inq;
			inq = (struct scsipi_inquiry_data *)xs->data;
			if ((inq->version & SID_ANSII) == 0 &&
			    (inq->flags3 & SID_CmdQue) != 0) {
				inq->version |= 2;
			}
		}
		scsipi_done(xs);
	}

	return error;
}
861 
862 static int
863 ciss_error(struct ciss_softc *sc, struct ciss_ccb *ccb)
864 {
865 	struct ciss_error *err = &ccb->ccb_err;
866 	struct scsipi_xfer *xs = ccb->ccb_xs;
867 	int rv;
868 
869 	switch ((rv = le16toh(err->cmd_stat))) {
870 	case CISS_ERR_OK:
871 		rv = 0;
872 		break;
873 
874 	case CISS_ERR_INVCMD:
875 		if (xs == NULL ||
876 		    xs->cmd->opcode != SCSI_SYNCHRONIZE_CACHE_10)
877 			printf("%s: invalid cmd 0x%x: 0x%x is not valid @ 0x%x[%d]\n",
878 			    device_xname(sc->sc_dev), ccb->ccb_cmd.id,
879 			    err->err_info, err->err_type[3], err->err_type[2]);
880 		if (xs) {
881 			memset(&xs->sense, 0, sizeof(xs->sense));
882 			xs->sense.scsi_sense.response_code =
883 				SSD_RCODE_CURRENT | SSD_RCODE_VALID;
884 			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
885 			xs->sense.scsi_sense.asc = 0x24; /* ill field */
886 			xs->sense.scsi_sense.ascq = 0x0;
887 			xs->error = XS_SENSE;
888 		}
889 		rv = EIO;
890 		break;
891 
892 	case CISS_ERR_TMO:
893 		xs->error = XS_TIMEOUT;
894 		rv = ETIMEDOUT;
895 		break;
896 
897 	case CISS_ERR_UNRUN:
898 		/* Underrun */
899 		xs->resid = le32toh(err->resid);
900 		CISS_DPRINTF(CISS_D_CMD, (" underrun resid=0x%x ",
901 					  xs->resid));
902 		rv = EIO;
903 		break;
904 	default:
905 		if (xs) {
906 			CISS_DPRINTF(CISS_D_CMD, ("scsi_stat=%x ", err->scsi_stat));
907 			switch (err->scsi_stat) {
908 			case SCSI_CHECK:
909 				xs->error = XS_SENSE;
910 				memcpy(&xs->sense, &err->sense[0],
911 				    sizeof(xs->sense));
912 				CISS_DPRINTF(CISS_D_CMD, (" sense=%02x %02x %02x %02x ",
913 					     err->sense[0], err->sense[1], err->sense[2], err->sense[3]));
914 				rv = EIO;
915 				break;
916 
917 			case XS_BUSY:
918 				xs->error = XS_BUSY;
919 				rv = EBUSY;
920 				break;
921 
922 			default:
923 				CISS_DPRINTF(CISS_D_ERR, ("%s: "
924 				    "cmd_stat=%x scsi_stat=0x%x resid=0x%x\n",
925 				    device_xname(sc->sc_dev), rv, err->scsi_stat,
926 				    le32toh(err->resid)));
927 				printf("ciss driver stuffup in %s:%d: %s()\n",
928 				       __FILE__, __LINE__, __func__);
929 				xs->error = XS_DRIVER_STUFFUP;
930 				rv = EIO;
931 				break;
932 			}
933 			xs->resid = le32toh(err->resid);
934 		} else
935 			rv = EIO;
936 	}
937 	ccb->ccb_cmd.id &= htole32(~3);
938 
939 	return rv;
940 }
941 
942 static int
943 ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq)
944 {
945 	struct ciss_ccb *ccb;
946 	struct ciss_cmd *cmd;
947 
948 	ccb = ciss_get_ccb(sc);
949 	ccb->ccb_len = sizeof(*inq);
950 	ccb->ccb_data = inq;
951 	ccb->ccb_xs = NULL;
952 	cmd = &ccb->ccb_cmd;
953 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
954 	cmd->tgt2 = 0;
955 	cmd->cdblen = 10;
956 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
957 	cmd->tmo = htole16(0);
958 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
959 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
960 	cmd->cdb[6] = CISS_CMS_CTRL_CTRL;
961 	cmd->cdb[7] = sizeof(*inq) >> 8;	/* biiiig endian */
962 	cmd->cdb[8] = sizeof(*inq) & 0xff;
963 
964 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
965 }
966 
967 static int
968 ciss_ldmap(struct ciss_softc *sc)
969 {
970 	struct ciss_ccb *ccb;
971 	struct ciss_cmd *cmd;
972 	struct ciss_ldmap *lmap;
973 	int total, rv;
974 
975 	mutex_enter(&sc->sc_mutex_scratch);
976 	lmap = sc->scratch;
977 	lmap->size = htobe32(sc->maxunits * sizeof(lmap->map));
978 	total = sizeof(*lmap) + (sc->maxunits - 1) * sizeof(lmap->map);
979 
980 	ccb = ciss_get_ccb(sc);
981 	ccb->ccb_len = total;
982 	ccb->ccb_data = lmap;
983 	ccb->ccb_xs = NULL;
984 	cmd = &ccb->ccb_cmd;
985 	cmd->tgt = CISS_CMD_MODE_PERIPH;
986 	cmd->tgt2 = 0;
987 	cmd->cdblen = 12;
988 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
989 	cmd->tmo = htole16(30);
990 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
991 	cmd->cdb[0] = CISS_CMD_LDMAP;
992 	cmd->cdb[8] = total >> 8;	/* biiiig endian */
993 	cmd->cdb[9] = total & 0xff;
994 
995 	rv = ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
996 
997 	if (rv) {
998 		mutex_exit(&sc->sc_mutex_scratch);
999 		return rv;
1000 	}
1001 
1002 	CISS_DPRINTF(CISS_D_MISC, ("lmap %x:%x\n",
1003 	    lmap->map[0].tgt, lmap->map[0].tgt2));
1004 
1005 	mutex_exit(&sc->sc_mutex_scratch);
1006 	return 0;
1007 }
1008 
1009 static int
1010 ciss_sync(struct ciss_softc *sc)
1011 {
1012 	struct ciss_ccb *ccb;
1013 	struct ciss_cmd *cmd;
1014 	struct ciss_flush *flush;
1015 	int rv;
1016 
1017 	mutex_enter(&sc->sc_mutex_scratch);
1018 	flush = sc->scratch;
1019 	memset(flush, 0, sizeof(*flush));
1020 	flush->flush = sc->sc_flush;
1021 
1022 	ccb = ciss_get_ccb(sc);
1023 	ccb->ccb_len = sizeof(*flush);
1024 	ccb->ccb_data = flush;
1025 	ccb->ccb_xs = NULL;
1026 	cmd = &ccb->ccb_cmd;
1027 	cmd->tgt = CISS_CMD_MODE_PERIPH;
1028 	cmd->tgt2 = 0;
1029 	cmd->cdblen = 10;
1030 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1031 	cmd->tmo = 0;
1032 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1033 	cmd->cdb[0] = CISS_CMD_CTRL_SET;
1034 	cmd->cdb[6] = CISS_CMS_CTRL_FLUSH;
1035 	cmd->cdb[7] = sizeof(*flush) >> 8;	/* biiiig endian */
1036 	cmd->cdb[8] = sizeof(*flush) & 0xff;
1037 
1038 	rv = ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
1039 	mutex_exit(&sc->sc_mutex_scratch);
1040 
1041 	return rv;
1042 }
1043 
1044 int
1045 ciss_ldid(struct ciss_softc *sc, int target, struct ciss_ldid *id)
1046 {
1047 	struct ciss_ccb *ccb;
1048 	struct ciss_cmd *cmd;
1049 
1050 	ccb = ciss_get_ccb(sc);
1051 	if (ccb == NULL)
1052 		return ENOMEM;
1053 	ccb->ccb_len = sizeof(*id);
1054 	ccb->ccb_data = id;
1055 	ccb->ccb_xs = NULL;
1056 	cmd = &ccb->ccb_cmd;
1057 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1058 	cmd->tgt2 = 0;
1059 	cmd->cdblen = 10;
1060 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1061 	cmd->tmo = htole16(0);
1062 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1063 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1064 	cmd->cdb[1] = target;
1065 	cmd->cdb[6] = CISS_CMS_CTRL_LDIDEXT;
1066 	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
1067 	cmd->cdb[8] = sizeof(*id) & 0xff;
1068 
1069 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1070 }
1071 
1072 int
1073 ciss_ldstat(struct ciss_softc *sc, int target, struct ciss_ldstat *stat)
1074 {
1075 	struct ciss_ccb *ccb;
1076 	struct ciss_cmd *cmd;
1077 
1078 	ccb = ciss_get_ccb(sc);
1079 	if (ccb == NULL)
1080 		return ENOMEM;
1081 	ccb->ccb_len = sizeof(*stat);
1082 	ccb->ccb_data = stat;
1083 	ccb->ccb_xs = NULL;
1084 	cmd = &ccb->ccb_cmd;
1085 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1086 	cmd->tgt2 = 0;
1087 	cmd->cdblen = 10;
1088 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1089 	cmd->tmo = htole16(0);
1090 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1091 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1092 	cmd->cdb[1] = target;
1093 	cmd->cdb[6] = CISS_CMS_CTRL_LDSTAT;
1094 	cmd->cdb[7] = sizeof(*stat) >> 8;	/* biiiig endian */
1095 	cmd->cdb[8] = sizeof(*stat) & 0xff;
1096 
1097 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1098 }
1099 
1100 int
1101 ciss_pdid(struct ciss_softc *sc, u_int8_t drv, struct ciss_pdid *id, int wait)
1102 {
1103 	struct ciss_ccb *ccb;
1104 	struct ciss_cmd *cmd;
1105 
1106 	ccb = ciss_get_ccb(sc);
1107 	if (ccb == NULL)
1108 		return ENOMEM;
1109 	ccb->ccb_len = sizeof(*id);
1110 	ccb->ccb_data = id;
1111 	ccb->ccb_xs = NULL;
1112 	cmd = &ccb->ccb_cmd;
1113 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1114 	cmd->tgt2 = 0;
1115 	cmd->cdblen = 10;
1116 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1117 	cmd->tmo = htole16(0);
1118 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1119 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1120 	cmd->cdb[2] = drv;
1121 	cmd->cdb[6] = CISS_CMS_CTRL_PDID;
1122 	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
1123 	cmd->cdb[8] = sizeof(*id) & 0xff;
1124 
1125 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, wait);
1126 }
1127 
1128 
1129 struct ciss_ld *
1130 ciss_pdscan(struct ciss_softc *sc, int ld)
1131 {
1132 	struct ciss_pdid *pdid;
1133 	struct ciss_ld *ldp;
1134 	u_int8_t drv, buf[128];
1135 	int i, j, k = 0;
1136 
1137 	mutex_enter(&sc->sc_mutex_scratch);
1138 	pdid = sc->scratch;
1139 	if (sc->ndrives == 256) {
1140 		for (i = 0; i < CISS_BIGBIT; i++)
1141 			if (!ciss_pdid(sc, i, pdid,
1142 					XS_CTL_POLL|XS_CTL_NOSLEEP) &&
1143 			    (pdid->present & CISS_PD_PRESENT))
1144 				buf[k++] = i;
1145 	} else
1146 		for (i = 0; i < sc->nbus; i++)
1147 			for (j = 0; j < sc->ndrives; j++) {
1148 				drv = CISS_BIGBIT + i * sc->ndrives + j;
1149 				if (!ciss_pdid(sc, drv, pdid,
1150 						XS_CTL_POLL|XS_CTL_NOSLEEP))
1151 					buf[k++] = drv;
1152 			}
1153 	mutex_exit(&sc->sc_mutex_scratch);
1154 
1155 	if (!k)
1156 		return NULL;
1157 
1158 	ldp = malloc(sizeof(*ldp) + (k-1), M_DEVBUF, M_WAITOK);
1159 	memset(&ldp->bling, 0, sizeof(ldp->bling));
1160 	ldp->ndrives = k;
1161 	ldp->xname[0] = 0;
1162 	memcpy(ldp->tgts, buf, k);
1163 	return ldp;
1164 }
1165 
/*
 * scsipi adapter request entry point.  ADAPTER_REQ_RUN_XFER translates
 * the scsipi_xfer into a CISS ccb addressed in logical-drive mode and
 * submits it via ciss_cmd(); completion is reported through
 * scsipi_done(), either here on early failure or later from the
 * interrupt/done path.  The other two request types are mode/resource
 * notifications.
 */
static void
ciss_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req,
	void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_xfer_mode *xm;
	struct ciss_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	u_int8_t target;
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_scsi_cmd "));

	switch (req)
	{
	case ADAPTER_REQ_RUN_XFER:
		xs = (struct scsipi_xfer *) arg;
		target = xs->xs_periph->periph_target;
		CISS_DPRINTF(CISS_D_CMD, ("targ=%d ", target));
		/*
		 * CDBs longer than the firmware pass-through allows cannot
		 * be issued; fail the transfer immediately.
		 */
		if (xs->cmdlen > CISS_MAX_CDB) {
			CISS_DPRINTF(CISS_D_CMD, ("CDB too big %p ", xs));
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->error = XS_SENSE;
			printf("ciss driver stuffup in %s:%d: %s()\n",
			       __FILE__, __LINE__, __func__);
			scsipi_done(xs);
			break;
		}

		xs->error = XS_NOERROR;

		/* XXX emulate SYNCHRONIZE_CACHE ??? */

		/*
		 * NOTE(review): ciss_get_ccb() can return NULL; presumably
		 * the scsipi channel openings are sized to the ccb pool so
		 * this cannot happen here -- confirm.
		 */
		ccb = ciss_get_ccb(sc);
		cmd = &ccb->ccb_cmd;
		ccb->ccb_len = xs->datalen;
		ccb->ccb_data = xs->data;
		ccb->ccb_xs = xs;
		/* logical-drive addressing; target selects the volume */
		cmd->tgt = CISS_CMD_MODE_LD | target;
		cmd->tgt2 = 0;
		cmd->cdblen = xs->cmdlen;
		cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL;
		if (xs->xs_control & XS_CTL_DATA_IN)
			cmd->flags |= CISS_CDB_IN;
		else if (xs->xs_control & XS_CTL_DATA_OUT)
			cmd->flags |= CISS_CDB_OUT;
		/* firmware timeout is in seconds; round sub-second up to 1 */
		cmd->tmo = htole16(xs->timeout < 1000? 1 : xs->timeout / 1000);
		memcpy(&cmd->cdb[0], xs->cmd, xs->cmdlen);
		CISS_DPRINTF(CISS_D_CMD, ("cmd=%02x %02x %02x %02x %02x %02x ",
			     cmd->cdb[0], cmd->cdb[1], cmd->cdb[2],
			     cmd->cdb[3], cmd->cdb[4], cmd->cdb[5]));

		if (ciss_cmd(sc, ccb, BUS_DMA_WAITOK,
		    xs->xs_control & (XS_CTL_POLL|XS_CTL_NOSLEEP))) {
			printf("ciss driver stuffup in %s:%d: %s()\n",
			       __FILE__, __LINE__, __func__);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			return;
		}

		break;
	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		xm = (struct scsipi_xfer_mode *)arg;
		xm->xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		break;
	default:
		printf("%s: %d %d unsupported\n", __func__, __LINE__, req);
	}
}
1246 
1247 static void
1248 ciss_completed_process(struct ciss_softc *sc, ciss_queue_head *q)
1249 {
1250 	struct ciss_ccb *ccb;
1251 
1252 	while (!TAILQ_EMPTY(q)) {
1253 		ccb = TAILQ_FIRST(q);
1254 		TAILQ_REMOVE(q, ccb, ccb_link);
1255 
1256 		if (ccb->ccb_state == CISS_CCB_POLL) {
1257 			ccb->ccb_state = CISS_CCB_ONQ;
1258 			mutex_enter(&sc->sc_mutex);
1259 			cv_broadcast(&sc->sc_condvar);
1260 			mutex_exit(&sc->sc_mutex);
1261 		} else
1262 			ciss_done(sc, ccb);
1263 	}
1264 }
1265 
1266 int
1267 ciss_intr_simple_intx(void *v)
1268 {
1269 	struct ciss_softc *sc = v;
1270 	ciss_queue_head q;
1271 	int hit = 0;
1272 
1273 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1274 
1275 	/* XXX shouldn't be necessary, intr triggers only if enabled */
1276 	if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_ISR) & sc->iem))
1277 		return 0;
1278 
1279 	TAILQ_INIT(&q);
1280 	mutex_enter(&sc->sc_mutex);
1281 	ciss_completed_simple(sc, &q);
1282 	mutex_exit(&sc->sc_mutex);
1283 
1284 	hit = (!TAILQ_EMPTY(&q));
1285 	ciss_completed_process(sc, &q);
1286 
1287 	KASSERT(TAILQ_EMPTY(&q));
1288 	CISS_DPRINTF(CISS_D_INTR, ("exit\n"));
1289 
1290 	return hit;
1291 }
1292 
1293 int
1294 ciss_intr_perf_intx(void *v)
1295 {
1296 	struct ciss_softc *sc = v;
1297 
1298 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1299 
1300 	/* Clear the interrupt and flush the bridges.  Docs say that the flush
1301 	 * needs to be done twice, which doesn't seem right.
1302 	 */
1303 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_OSR);
1304 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_ODC, CISS_ODC_CLEAR);
1305 
1306 	return ciss_intr_perf_msi(sc);
1307 }
1308 
1309 int
1310 ciss_intr_perf_msi(void *v)
1311 {
1312 	struct ciss_softc *sc = v;
1313 	ciss_queue_head q;
1314 
1315 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1316 
1317 	TAILQ_INIT(&q);
1318 	mutex_enter(&sc->sc_mutex);
1319 	ciss_completed_perf(sc, &q);
1320 	mutex_exit(&sc->sc_mutex);
1321 
1322 	ciss_completed_process(sc, &q);
1323 
1324 	KASSERT(TAILQ_EMPTY(&q));
1325 	CISS_DPRINTF(CISS_D_INTR, ("exit"));
1326 
1327 	return 1;
1328 }
1329 
1330 static void
1331 ciss_heartbeat(void *v)
1332 {
1333 	struct ciss_softc *sc = v;
1334 	u_int32_t hb;
1335 
1336 	hb = bus_space_read_4(sc->sc_iot, sc->cfg_ioh,
1337 	    sc->cfgoff + offsetof(struct ciss_config, heartbeat));
1338 	if (hb == sc->heartbeat) {
1339 		sc->fibrillation++;
1340 		CISS_DPRINTF(CISS_D_ERR, ("%s: fibrillation #%d (value=%d)\n",
1341 		    device_xname(sc->sc_dev), sc->fibrillation, hb));
1342 		if (sc->fibrillation >= 11) {
1343 			/* No heartbeat for 33 seconds */
1344 			panic("%s: dead", device_xname(sc->sc_dev));	/* XXX reset! */
1345 		}
1346 	} else {
1347 		sc->heartbeat = hb;
1348 		if (sc->fibrillation) {
1349 			CISS_DPRINTF(CISS_D_ERR, ("%s: "
1350 			    "fibrillation ended (value=%d)\n",
1351 			    device_xname(sc->sc_dev), hb));
1352 		}
1353 		sc->fibrillation = 0;
1354 	}
1355 
1356 	callout_schedule(&sc->sc_hb, hz * 3);
1357 }
1358 
/*
 * scsipi adapter ioctl hook: forward bio(4) management requests to
 * ciss_ioctl() when bio support is compiled in, otherwise reject with
 * ENOTTY.  `flag' and `p' are unused.
 */
static int
ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *addr, int flag, struct proc *p)
{
#if NBIO > 0
	return ciss_ioctl(chan->chan_adapter->adapt_dev, cmd, addr);
#else
	return ENOTTY;
#endif
}
1369 
1370 #if NBIO > 0
/*
 * Map the firmware's logical-drive fault-tolerance code (ldid->type)
 * to a bio(4) RAID level number.  NOTE(review): ordering is presumably
 * RAID0, RAID4, RAID1, RAID5, RAID51, ADG/RAID6(=7) -- confirm against
 * the CISS specification.
 */
const int ciss_level[] = { 0, 4, 1, 5, 51, 7 };
/*
 * Map the firmware's logical-drive status code (ldstat->stat) to a
 * bio(4) BIOC_SV* volume status value.
 */
const int ciss_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
    BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
    BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
    BIOC_SVOFFLINE, BIOC_SVBUILDING };
1376 
1377 int
1378 ciss_ioctl(device_t dev, u_long cmd, void *addr)
1379 {
1380 	struct ciss_softc	*sc = device_private(dev);
1381 	struct bioc_inq *bi;
1382 	struct bioc_disk *bd;
1383 	struct bioc_blink *bb;
1384 	struct ciss_ldstat *ldstat;
1385 	struct ciss_pdid *pdid;
1386 	struct ciss_blink *blink;
1387 	struct ciss_ld *ldp;
1388 	u_int8_t drv;
1389 	int ld, pd, error = 0;
1390 
1391 	switch (cmd) {
1392 	case BIOCINQ:
1393 		bi = (struct bioc_inq *)addr;
1394 		strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1395 		bi->bi_novol = sc->maxunits;
1396 		bi->bi_nodisk = sc->sc_lds[0]->ndrives;
1397 		break;
1398 
1399 	case BIOCVOL:
1400 		error = ciss_ioctl_vol(sc, (struct bioc_vol *)addr);
1401 		break;
1402 
1403 	case BIOCDISK_NOVOL:
1404 /*
1405  * XXX since we don't know how to associate physical drives with logical drives
1406  * yet, BIOCDISK_NOVOL is equivalent to BIOCDISK to the volume that we've
1407  * associated all physical drives to.
1408  * Maybe assoicate all physical drives to all logical volumes, but only return
1409  * physical drives on one logical volume.  Which one?  Either 1st volume that
1410  * is degraded, rebuilding, or failed?
1411  */
1412 		bd = (struct bioc_disk *)addr;
1413 		bd->bd_volid = 0;
1414 		bd->bd_disknovol = true;
1415 		/* FALLTHROUGH */
1416 	case BIOCDISK:
1417 		bd = (struct bioc_disk *)addr;
1418 		if (bd->bd_volid < 0 || bd->bd_volid > sc->maxunits) {
1419 			error = EINVAL;
1420 			break;
1421 		}
1422 		ldp = sc->sc_lds[0];
1423 		if (!ldp || (pd = bd->bd_diskid) < 0 || pd > ldp->ndrives) {
1424 			error = EINVAL;
1425 			break;
1426 		}
1427 		ldstat = sc->scratch;
1428 		if ((error = ciss_ldstat(sc, bd->bd_volid, ldstat))) {
1429 			break;
1430 		}
1431 		bd->bd_status = -1;
1432 		if (ldstat->stat == CISS_LD_REBLD &&
1433 		    ldstat->bigrebuild == ldp->tgts[pd])
1434 			bd->bd_status = BIOC_SDREBUILD;
1435 		if (ciss_bitset(ldp->tgts[pd] & (~CISS_BIGBIT),
1436 		    ldstat->bigfailed)) {
1437 			bd->bd_status = BIOC_SDFAILED;
1438 			bd->bd_size = 0;
1439 			bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1440 			    sc->ndrives;
1441 			bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1442 			bd->bd_lun = 0;
1443 			bd->bd_vendor[0] = '\0';
1444 			bd->bd_serial[0] = '\0';
1445 			bd->bd_procdev[0] = '\0';
1446 		} else {
1447 			pdid = sc->scratch;
1448 			if ((error = ciss_pdid(sc, ldp->tgts[pd], pdid,
1449 			    XS_CTL_POLL))) {
1450 				bd->bd_status = BIOC_SDFAILED;
1451 				bd->bd_size = 0;
1452 				bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1453 				    sc->ndrives;
1454 				bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1455 				bd->bd_lun = 0;
1456 				bd->bd_vendor[0] = '\0';
1457 				bd->bd_serial[0] = '\0';
1458 				bd->bd_procdev[0] = '\0';
1459 				error = 0;
1460 				break;
1461 			}
1462 			if (bd->bd_status < 0) {
1463 				if (pdid->config & CISS_PD_SPARE)
1464 					bd->bd_status = BIOC_SDHOTSPARE;
1465 				else if (pdid->present & CISS_PD_PRESENT)
1466 					bd->bd_status = BIOC_SDONLINE;
1467 				else
1468 					bd->bd_status = BIOC_SDINVALID;
1469 			}
1470 			bd->bd_size = (u_int64_t)le32toh(pdid->nblocks) *
1471 			    le16toh(pdid->blksz);
1472 			bd->bd_channel = pdid->bus;
1473 			bd->bd_target = pdid->target;
1474 			bd->bd_lun = 0;
1475 			strlcpy(bd->bd_vendor, pdid->model,
1476 			    sizeof(bd->bd_vendor));
1477 			strlcpy(bd->bd_serial, pdid->serial,
1478 			    sizeof(bd->bd_serial));
1479 			bd->bd_procdev[0] = '\0';
1480 		}
1481 		break;
1482 
1483 	case BIOCBLINK:
1484 		bb = (struct bioc_blink *)addr;
1485 		blink = sc->scratch;
1486 		error = EINVAL;
1487 		/* XXX workaround completely dumb scsi addressing */
1488 		for (ld = 0; ld < sc->maxunits; ld++) {
1489 			ldp = sc->sc_lds[ld];
1490 			if (!ldp)
1491 				continue;
1492 			if (sc->ndrives == 256)
1493 				drv = bb->bb_target;
1494 			else
1495 				drv = CISS_BIGBIT +
1496 				    bb->bb_channel * sc->ndrives +
1497 				    bb->bb_target;
1498 			for (pd = 0; pd < ldp->ndrives; pd++)
1499 				if (ldp->tgts[pd] == drv)
1500 					error = ciss_blink(sc, ld, pd,
1501 					    bb->bb_status, blink);
1502 		}
1503 		break;
1504 
1505 	default:
1506 		error = EINVAL;
1507 	}
1508 
1509 	return (error);
1510 }
1511 
1512 int
1513 ciss_ioctl_vol(struct ciss_softc *sc, struct bioc_vol *bv)
1514 {
1515 	struct ciss_ldid *ldid;
1516 	struct ciss_ld *ldp;
1517 	struct ciss_ldstat *ldstat;
1518 	struct ciss_pdid *pdid;
1519 	int error = 0;
1520 	u_int blks;
1521 
1522 	if (bv->bv_volid < 0 || bv->bv_volid > sc->maxunits) {
1523 		return EINVAL;
1524 	}
1525 	ldp = sc->sc_lds[bv->bv_volid];
1526 	ldid = sc->scratch;
1527 	if ((error = ciss_ldid(sc, bv->bv_volid, ldid))) {
1528 		return error;
1529 	}
1530 	bv->bv_status = BIOC_SVINVALID;
1531 	blks = (u_int)le16toh(ldid->nblocks[1]) << 16 |
1532 	    le16toh(ldid->nblocks[0]);
1533 	bv->bv_size = blks * (u_quad_t)le16toh(ldid->blksize);
1534 	bv->bv_level = ciss_level[ldid->type];
1535 /*
1536  * XXX Should only return bv_nodisk for logigal volume that we've associated
1537  * the physical drives to:  either the 1st degraded, rebuilding, or failed
1538  * volume else volume 0?
1539  */
1540 	if (ldp) {
1541 		bv->bv_nodisk = ldp->ndrives;
1542 		strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1543 	}
1544 	strlcpy(bv->bv_vendor, "CISS", sizeof(bv->bv_vendor));
1545 	ldstat = sc->scratch;
1546 	memset(ldstat, 0, sizeof(*ldstat));
1547 	if ((error = ciss_ldstat(sc, bv->bv_volid, ldstat))) {
1548 		return error;
1549 	}
1550 	bv->bv_percent = -1;
1551 	bv->bv_seconds = 0;
1552 	if (ldstat->stat < sizeof(ciss_stat)/sizeof(ciss_stat[0]))
1553 		bv->bv_status = ciss_stat[ldstat->stat];
1554 	if (bv->bv_status == BIOC_SVREBUILD ||
1555 	    bv->bv_status == BIOC_SVBUILDING) {
1556 	 	u_int64_t prog;
1557 
1558 		ldp = sc->sc_lds[0];
1559 		if (ldp) {
1560 			bv->bv_nodisk = ldp->ndrives;
1561 			strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1562 		}
1563 /*
1564  * XXX ldstat->prog is blocks remaining on physical drive being rebuilt
1565  * blks is only correct for a RAID1 set;  RAID5 needs to determine the
1566  * size of the physical device - which we don't yet know.
1567  * ldstat->bigrebuild has physical device target, so could be used with
1568  * pdid to get size.   Another way is to save pd information in sc so it's
1569  * easy to reference.
1570  */
1571 		prog = (u_int64_t)((ldstat->prog[3] << 24) |
1572 		    (ldstat->prog[2] << 16) | (ldstat->prog[1] << 8) |
1573 		    ldstat->prog[0]);
1574 		pdid = sc->scratch;
1575 		if (!ciss_pdid(sc, ldstat->bigrebuild, pdid, XS_CTL_POLL)) {
1576 			blks = le32toh(pdid->nblocks);
1577 			bv->bv_percent = (blks - prog) * 1000ULL / blks;
1578 		 }
1579 	}
1580 	return 0;
1581 }
1582 
1583 int
1584 ciss_blink(struct ciss_softc *sc, int ld, int pd, int stat,
1585     struct ciss_blink *blink)
1586 {
1587 	struct ciss_ccb *ccb;
1588 	struct ciss_cmd *cmd;
1589 	struct ciss_ld *ldp;
1590 
1591 	if (ld > sc->maxunits)
1592 		return EINVAL;
1593 
1594 	ldp = sc->sc_lds[ld];
1595 	if (!ldp || pd > ldp->ndrives)
1596 		return EINVAL;
1597 
1598 	ldp->bling.pdtab[ldp->tgts[pd]] = stat == BIOC_SBUNBLINK? 0 :
1599 	    CISS_BLINK_ALL;
1600 	memcpy(blink, &ldp->bling, sizeof(*blink));
1601 
1602 	ccb = ciss_get_ccb(sc);
1603 	if (ccb == NULL)
1604 		return ENOMEM;
1605 	ccb->ccb_len = sizeof(*blink);
1606 	ccb->ccb_data = blink;
1607 	ccb->ccb_xs = NULL;
1608 	cmd = &ccb->ccb_cmd;
1609 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1610 	cmd->tgt2 = 0;
1611 	cmd->cdblen = 10;
1612 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1613 	cmd->tmo = htole16(0);
1614 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1615 	cmd->cdb[0] = CISS_CMD_CTRL_SET;
1616 	cmd->cdb[6] = CISS_CMS_CTRL_PDBLINK;
1617 	cmd->cdb[7] = sizeof(*blink) >> 8;	/* biiiig endian */
1618 	cmd->cdb[8] = sizeof(*blink) & 0xff;
1619 
1620 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL);
1621 }
1622 
1623 int
1624 ciss_create_sensors(struct ciss_softc *sc)
1625 {
1626 	int			i;
1627 	int nsensors = sc->maxunits;
1628 
1629 	if (nsensors == 0) {
1630 		return 0;
1631 	}
1632 
1633 	sc->sc_sme = sysmon_envsys_create();
1634 	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1635 		M_DEVBUF, M_WAITOK | M_ZERO);
1636 
1637 	for (i = 0; i < nsensors; i++) {
1638 		sc->sc_sensor[i].units = ENVSYS_DRIVE;
1639 		sc->sc_sensor[i].state = ENVSYS_SINVALID;
1640 		sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
1641 		/* Enable monitoring for drive state changes */
1642 		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1643 		/* logical drives */
1644 		snprintf(sc->sc_sensor[i].desc,
1645 		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
1646 		    device_xname(sc->sc_dev), i);
1647 		if (sysmon_envsys_sensor_attach(sc->sc_sme,
1648 		    &sc->sc_sensor[i]))
1649 			goto out;
1650 	}
1651 
1652 	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1653 	sc->sc_sme->sme_cookie = sc;
1654 	sc->sc_sme->sme_refresh = ciss_sensor_refresh;
1655 	if (sysmon_envsys_register(sc->sc_sme)) {
1656 		printf("%s: unable to register with sysmon\n",
1657 		    device_xname(sc->sc_dev));
1658 		return(1);
1659 	}
1660 	return (0);
1661 
1662 out:
1663 	free(sc->sc_sensor, M_DEVBUF);
1664 	sysmon_envsys_destroy(sc->sc_sme);
1665 	return EINVAL;
1666 }
1667 
1668 void
1669 ciss_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1670 {
1671 	struct ciss_softc	*sc = sme->sme_cookie;
1672 	struct bioc_vol		bv;
1673 
1674 	if (edata->sensor >= sc->maxunits)
1675 		return;
1676 
1677 	memset(&bv, 0, sizeof(bv));
1678 	bv.bv_volid = edata->sensor;
1679 	if (ciss_ioctl_vol(sc, &bv))
1680 		bv.bv_status = BIOC_SVINVALID;
1681 
1682 	bio_vol_to_envsys(edata, &bv);
1683 }
1684 #endif /* NBIO > 0 */
1685