xref: /netbsd-src/sys/dev/ic/ciss.c (revision 53c3768c8855585de50bfb3001104ad54e215c2f)
1 /*	$NetBSD: ciss.c,v 1.56 2024/02/19 14:54:04 msaitoh Exp $	*/
2 /*	$OpenBSD: ciss.c,v 1.68 2013/05/30 16:15:02 deraadt Exp $	*/
3 
4 /*
5  * Copyright (c) 2005,2006 Michael Shalayeff
6  * All rights reserved.
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
17  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
18  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __KERNEL_RCSID(0, "$NetBSD: ciss.c,v 1.56 2024/02/19 14:54:04 msaitoh Exp $");
23 
24 #include "bio.h"
25 
26 /* #define CISS_DEBUG */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/buf.h>
31 #include <sys/ioctl.h>
32 #include <sys/device.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 
37 #include <sys/bus.h>
38 
39 #include <dev/scsipi/scsi_all.h>
40 #include <dev/scsipi/scsi_disk.h>
41 #include <dev/scsipi/scsiconf.h>
42 #include <dev/scsipi/scsipi_all.h>
43 
44 #include <dev/ic/cissreg.h>
45 #include <dev/ic/cissvar.h>
46 
47 #if NBIO > 0
48 #include <dev/biovar.h>
49 #endif /* NBIO > 0 */
50 
51 #ifdef CISS_DEBUG
52 #define	CISS_DPRINTF(m,a)	if (ciss_debug & (m)) printf a
53 #define	CISS_D_CMD	0x0001
54 #define	CISS_D_INTR	0x0002
55 #define	CISS_D_MISC	0x0004
56 #define	CISS_D_DMA	0x0008
57 #define	CISS_D_IOCTL	0x0010
58 #define	CISS_D_ERR	0x0020
59 int ciss_debug = 0
60 	| CISS_D_CMD
61 	| CISS_D_INTR
62 	| CISS_D_MISC
63 	| CISS_D_DMA
64 	| CISS_D_IOCTL
65 	| CISS_D_ERR
66 	;
67 #else
68 #define	CISS_DPRINTF(m,a)	/* m, a */
69 #endif
70 
71 static void	ciss_scsi_cmd(struct scsipi_channel *chan,
72 			scsipi_adapter_req_t req, void *arg);
73 static int	ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
74 	    void *addr, int flag, struct proc *p);
75 static void	cissminphys(struct buf *bp);
76 
77 static int	ciss_sync(struct ciss_softc *sc);
78 static void	ciss_heartbeat(void *v);
79 static void	ciss_shutdown(void *v);
80 
81 static struct ciss_ccb *ciss_get_ccb(struct ciss_softc *);
82 static void	ciss_put_ccb(struct ciss_softc *, struct ciss_ccb *);
83 static int	ciss_cmd(struct ciss_softc *, struct ciss_ccb *, int, int);
84 static int	ciss_done(struct ciss_softc *, struct ciss_ccb *);
85 static int	ciss_error(struct ciss_softc *, struct ciss_ccb *);
86 struct ciss_ld *ciss_pdscan(struct ciss_softc *sc, int ld);
87 static int	ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq);
88 int	ciss_ldid(struct ciss_softc *, int, struct ciss_ldid *);
89 int	ciss_ldstat(struct ciss_softc *, int, struct ciss_ldstat *);
90 static int	ciss_ldmap(struct ciss_softc *sc);
91 int	ciss_pdid(struct ciss_softc *, u_int8_t, struct ciss_pdid *, int);
92 
93 #if NBIO > 0
94 int		ciss_ioctl(device_t, u_long, void *);
95 int		ciss_ioctl_vol(struct ciss_softc *, struct bioc_vol *);
96 int		ciss_blink(struct ciss_softc *, int, int, int, struct ciss_blink *);
97 int		ciss_create_sensors(struct ciss_softc *);
98 void		ciss_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
99 #endif /* NBIO > 0 */
100 
101 static struct ciss_ccb *
102 ciss_get_ccb(struct ciss_softc *sc)
103 {
104 	struct ciss_ccb *ccb;
105 
106 	mutex_enter(&sc->sc_mutex);
107 	if ((ccb = TAILQ_LAST(&sc->sc_free_ccb, ciss_queue_head))) {
108 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
109 		ccb->ccb_state = CISS_CCB_READY;
110 	}
111 	mutex_exit(&sc->sc_mutex);
112 	return ccb;
113 }
114 
115 static void
116 ciss_put_ccb(struct ciss_softc *sc, struct ciss_ccb *ccb)
117 {
118 	ccb->ccb_state = CISS_CCB_FREE;
119 	mutex_enter(&sc->sc_mutex);
120 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
121 	mutex_exit(&sc->sc_mutex);
122 }
123 
124 static int
125 ciss_init_perf(struct ciss_softc *sc)
126 {
127 	struct ciss_perf_config *pc = &sc->perfcfg;
128 	int error, total, rseg;
129 
130 	if (sc->cfg.max_perfomant_mode_cmds)
131 		sc->maxcmd = sc->cfg.max_perfomant_mode_cmds;
132 
133 	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh,
134 	    sc->cfgoff + sc->cfg.troff,
135 	    (u_int32_t *)pc, sizeof(*pc) / 4);
136 
137 	total = sizeof(uint64_t) * sc->maxcmd;
138 
139 	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
140 	    sc->replyseg, 1, &rseg, BUS_DMA_WAITOK))) {
141 		aprint_error(": cannot allocate perf area (%d)\n", error);
142 		return -1;
143 	}
144 
145 	if ((error = bus_dmamem_map(sc->sc_dmat, sc->replyseg, rseg, total,
146 	    (void **)&sc->perf_reply, BUS_DMA_WAITOK))) {
147 		aprint_error(": cannot map perf area (%d)\n", error);
148 		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
149 		return -1;
150 	}
151 
152 	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
153 	    total, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &sc->replymap))) {
154 		aprint_error(": cannot create perf dmamap (%d)\n", error);
155 		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
156 		sc->perf_reply = NULL;
157 		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
158 		return -1;
159 	}
160 
161 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->replymap, sc->perf_reply,
162 	    total, NULL, BUS_DMA_WAITOK))) {
163 		aprint_error(": cannot load perf dmamap (%d)\n", error);
164 		bus_dmamap_destroy(sc->sc_dmat, sc->replymap);
165 		bus_dmamem_unmap(sc->sc_dmat, sc->perf_reply, total);
166 		sc->perf_reply = NULL;
167 		bus_dmamem_free(sc->sc_dmat, sc->replyseg, 1);
168 		return -1;
169 	}
170 
171 	memset(sc->perf_reply, 0, total);
172 
173 	sc->perf_cycle = 0x1;
174 	sc->perf_rqidx = 0;
175 
176 	/*
177 	 * Preload the fetch table with common command sizes.  This allows the
178 	 * hardware to not waste bus cycles for typical i/o commands, but also
179 	 * not tax the driver to be too exact in choosing sizes.  The table
180 	 * is optimized for page-aligned i/o's, but since most i/o comes
181 	 * from the various pagers, it's a reasonable assumption to make.
182 	 */
183 #define CISS_FETCH_COUNT(x)	\
184     (sizeof(struct ciss_cmd) + sizeof(struct ciss_sg_entry) * (x - 1) + 15) / 16
185 
186 	pc->fetch_count[CISS_SG_FETCH_NONE] = CISS_FETCH_COUNT(0);
187 	pc->fetch_count[CISS_SG_FETCH_1] = CISS_FETCH_COUNT(1);
188 	pc->fetch_count[CISS_SG_FETCH_2] = CISS_FETCH_COUNT(2);
189 	pc->fetch_count[CISS_SG_FETCH_4] = CISS_FETCH_COUNT(4);
190 	pc->fetch_count[CISS_SG_FETCH_8] = CISS_FETCH_COUNT(8);
191 	pc->fetch_count[CISS_SG_FETCH_16] = CISS_FETCH_COUNT(16);
192 	pc->fetch_count[CISS_SG_FETCH_32] = CISS_FETCH_COUNT(32);
193 	pc->fetch_count[CISS_SG_FETCH_MAX] = (sc->ccblen + 15) / 16;
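	/*
	 * For reference, CISS_FETCH_COUNT(n) is the size of a command block
	 * carrying n scatter/gather entries, rounded up to 16-byte units
	 * (struct ciss_cmd already embeds one sg entry, hence the "n - 1").
	 * A sketch of the arithmetic, with the struct sizes left to cissreg.h:
	 *
	 *	CISS_FETCH_COUNT(4) ==
	 *	    (sizeof(struct ciss_cmd) +
	 *	     sizeof(struct ciss_sg_entry) * 3 + 15) / 16
	 *
	 * i.e. the number of 16-byte chunks to pull for a command tagged
	 * CISS_SG_FETCH_4.
	 */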
194 
195 	pc->rq_size = sc->maxcmd;
196 	pc->rq_count = 1;	/* Hardcode for a single queue */
197 	pc->rq_bank_hi = 0;
198 	pc->rq_bank_lo = 0;
199 	pc->rq[0].rq_addr_hi = 0x0;
200 	pc->rq[0].rq_addr_lo = sc->replymap->dm_segs[0].ds_addr;
201 
202 	/*
203 	 * Write back the changed configuration. It will be picked up
204 	 * by the controller together with the general configuration later on.
205 	 */
206 	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh,
207 	    sc->cfgoff + sc->cfg.troff,
208 	    (u_int32_t *)pc, sizeof(*pc) / 4);
209 	bus_space_barrier(sc->sc_iot, sc->cfg_ioh,
210 	    sc->cfgoff + sc->cfg.troff, sizeof(*pc),
211 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
212 
213 	return 0;
214 }
215 
216 int
217 ciss_attach(struct ciss_softc *sc)
218 {
219 	struct ciss_ccb *ccb;
220 	struct ciss_cmd *cmd;
221 	struct ciss_inquiry *inq;
222 	bus_dma_segment_t seg[1];
223 	int error, i, total, rseg, maxfer;
224 	paddr_t pa;
225 
226 	if (sc->cfg.signature != CISS_SIGNATURE) {
227 		aprint_error(": bad sign 0x%08x\n", sc->cfg.signature);
228 		return -1;
229 	}
230 
231 	if (!(sc->cfg.methods & (CISS_METH_SIMPL|CISS_METH_PERF))) {
232 		aprint_error(": no supported method 0x%08x\n", sc->cfg.methods);
233 		return -1;
234 	}
235 
236 	if (!sc->cfg.maxsg)
237 		sc->cfg.maxsg = MAXPHYS / PAGE_SIZE + 1;
238 
239 	sc->maxcmd = sc->cfg.maxcmd;
240 	sc->maxsg = sc->cfg.maxsg;
241 	if (sc->maxsg > MAXPHYS / PAGE_SIZE + 1)
242 		sc->maxsg = MAXPHYS / PAGE_SIZE + 1;
243 	i = sizeof(struct ciss_ccb) +
244 	    sizeof(ccb->ccb_cmd.sgl[0]) * (sc->maxsg - 1);
245 	for (sc->ccblen = 0x10; sc->ccblen < i; sc->ccblen <<= 1);
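	/* ccblen is now the per-CCB size rounded up to a power of two (>= 16) */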
246 
247 	sc->cfg.paddr_lim = 0;			/* 32bit addrs */
248 	sc->cfg.int_delay = 0;			/* disable coalescing */
249 	sc->cfg.int_count = 0;
250 	strlcpy(sc->cfg.hostname, "HUMPPA", sizeof(sc->cfg.hostname));
251 	sc->cfg.driverf |= CISS_DRV_PRF;	/* enable prefetch */
252 	if (CISS_PERF_SUPPORTED(sc)) {
253 		sc->cfg.rmethod = CISS_METH_PERF | CISS_METH_SHORT_TAG;
254 		if (ciss_init_perf(sc) != 0) {
255 			/* Don't try to fallback, just bail out */
256 			return -1;
257 		}
258 	} else {
259 		sc->cfg.rmethod = CISS_METH_SIMPL;
260 	}
261 
262 	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
263 	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);
264 	bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff, sizeof(sc->cfg),
265 	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
266 
267 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IDB, CISS_IDB_CFG);
268 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
269 	    BUS_SPACE_BARRIER_WRITE);
270 	for (i = 1000; i--; DELAY(1000)) {
271 		/* XXX maybe IDB is really 64bit? - hp dl380 needs this */
272 		(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB + 4);
273 		if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG))
274 			break;
275 		bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
276 		    BUS_SPACE_BARRIER_READ);
277 	}
278 
279 	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG) {
280 		aprint_error(": cannot set config\n");
281 		return -1;
282 	}
283 
284 	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
285 	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);
286 
287 	if (!(sc->cfg.amethod & (CISS_METH_SIMPL|CISS_METH_PERF))) {
288 		aprint_error(": cannot set method 0x%08x\n", sc->cfg.amethod);
289 		return -1;
290 	}
291 
292 	/* i'm ready for you and i hope you're ready for me */
293 	for (i = 30000; i--; DELAY(1000)) {
294 		if (bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
295 		    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)
296 			break;
297 		bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
298 		    offsetof(struct ciss_config, amethod), 4,
299 		    BUS_SPACE_BARRIER_READ);
300 	}
301 
302 	if (!(bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
303 	    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)) {
304 		aprint_error(": she never came ready for me 0x%08x\n",
305 		    sc->cfg.amethod);
306 		return -1;
307 	}
308 
309 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
310 	mutex_init(&sc->sc_mutex_scratch, MUTEX_DEFAULT, IPL_VM);
311 	cv_init(&sc->sc_condvar, "ciss_cmd");
312 
313 	total = sc->ccblen * sc->maxcmd;
314 	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
315 	    sc->cmdseg, 1, &rseg, BUS_DMA_NOWAIT))) {
316 		aprint_error(": cannot allocate CCBs (%d)\n", error);
317 		return -1;
318 	}
319 
320 	if ((error = bus_dmamem_map(sc->sc_dmat, sc->cmdseg, rseg, total,
321 	    (void **)&sc->ccbs, BUS_DMA_NOWAIT))) {
322 		aprint_error(": cannot map CCBs (%d)\n", error);
323 		return -1;
324 	}
325 	memset(sc->ccbs, 0, total);
326 
327 	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
328 	    total, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->cmdmap))) {
329 		aprint_error(": cannot create CCBs dmamap (%d)\n", error);
330 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
331 		return -1;
332 	}
333 
334 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->cmdmap, sc->ccbs, total,
335 	    NULL, BUS_DMA_NOWAIT))) {
336 		aprint_error(": cannot load CCBs dmamap (%d)\n", error);
337 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
338 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
339 		return -1;
340 	}
341 
342 	TAILQ_INIT(&sc->sc_free_ccb);
343 
344 	maxfer = sc->maxsg * PAGE_SIZE;
345 	for (i = 0; total > 0 && i < sc->maxcmd; i++, total -= sc->ccblen) {
346 		ccb = (struct ciss_ccb *) ((char *)sc->ccbs + i * sc->ccblen);
347 		cmd = &ccb->ccb_cmd;
348 		pa = sc->cmdseg[0].ds_addr + i * sc->ccblen;
349 
350 		ccb->ccb_cmdpa = pa + offsetof(struct ciss_ccb, ccb_cmd);
351 		ccb->ccb_state = CISS_CCB_FREE;
352 
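		/*
		 * The tag is the CCB index shifted left by two: the low two
		 * bits of the id carry completion status (CISS_CMD_ERR),
		 * ciss_enqueue() recovers the index with (id >> 2), and
		 * ciss_error() masks the low bits off again.
		 */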
353 		cmd->id = htole32(i << 2);
354 		cmd->id_hi = htole32(0);
355 		cmd->sgin = sc->maxsg;
356 		cmd->sglen = htole16((u_int16_t)cmd->sgin);
357 		cmd->err_len = htole32(sizeof(ccb->ccb_err));
358 		pa += offsetof(struct ciss_ccb, ccb_err);
359 		cmd->err_pa = htole64((u_int64_t)pa);
360 
361 		if ((error = bus_dmamap_create(sc->sc_dmat, maxfer, sc->maxsg,
362 		    maxfer, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
363 		    &ccb->ccb_dmamap)))
364 			break;
365 
366 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
367 	}
368 
369 	if (i < sc->maxcmd) {
370 		aprint_error(": cannot create ccb#%d dmamap (%d)\n", i, error);
371 		if (i == 0) {
372 			/* TODO leaking cmd's dmamaps and shitz */
373 			bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
374 			bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
375 			return -1;
376 		}
377 	}
378 
379 	if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
380 	    seg, 1, &rseg, BUS_DMA_NOWAIT))) {
381 		aprint_error(": cannot allocate scratch buffer (%d)\n", error);
382 		return -1;
383 	}
384 
385 	if ((error = bus_dmamem_map(sc->sc_dmat, seg, rseg, PAGE_SIZE,
386 	    (void **)&sc->scratch, BUS_DMA_NOWAIT))) {
387 		aprint_error(": cannot map scratch buffer (%d)\n", error);
388 		return -1;
389 	}
390 	memset(sc->scratch, 0, PAGE_SIZE);
391 	sc->sc_waitflag = XS_CTL_NOSLEEP;		/* can't sleep yet */
392 
393 	mutex_enter(&sc->sc_mutex_scratch);	/* is this really needed? */
394 	inq = sc->scratch;
395 	if (ciss_inq(sc, inq)) {
396 		aprint_error(": adapter inquiry failed\n");
397 		mutex_exit(&sc->sc_mutex_scratch);
398 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
399 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
400 		return -1;
401 	}
402 
403 	if (!(inq->flags & CISS_INQ_BIGMAP)) {
404 		aprint_error(": big map is not supported, flags=0x%x\n",
405 		    inq->flags);
406 		mutex_exit(&sc->sc_mutex_scratch);
407 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
408 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
409 		return -1;
410 	}
411 
412 	sc->maxunits = inq->numld;
413 	sc->nbus = inq->nscsi_bus;
414 	sc->ndrives = inq->buswidth ? inq->buswidth : 256;
415 	aprint_normal(": %d LD%s, HW rev %d, FW %4.4s/%4.4s",
416 	    inq->numld, inq->numld == 1? "" : "s",
417 	    inq->hw_rev, inq->fw_running, inq->fw_stored);
418 
419 	if (sc->cfg.methods & CISS_METH_FIFO64)
420 		aprint_normal(", 64bit fifo");
421 	else if (sc->cfg.methods & CISS_METH_FIFO64_RRO)
422 		aprint_normal(", 64bit fifo rro");
423 	aprint_normal(", method %s %#x",
424 	    CISS_IS_PERF(sc) ? "perf" : "simple",
425 	    sc->cfg.amethod);
426 	aprint_normal("\n");
427 
428 	mutex_exit(&sc->sc_mutex_scratch);
429 
430 	if (sc->maxunits == 0) {
431 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
432 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
433 		aprint_error_dev(sc->sc_dev,
434 		    "No LDs found. This driver can't attach.\n");
435 		return -1;
436 	}
437 
438 	callout_init(&sc->sc_hb, 0);
439 	callout_setfunc(&sc->sc_hb, ciss_heartbeat, sc);
440 	callout_schedule(&sc->sc_hb, hz * 3);
441 
442 	/* map LDs */
443 	if (ciss_ldmap(sc)) {
444 		aprint_error_dev(sc->sc_dev, "adapter LD map failed\n");
445 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
446 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
447 		return -1;
448 	}
449 
450 	sc->sc_lds = malloc(sc->maxunits * sizeof(*sc->sc_lds),
451 	    M_DEVBUF, M_WAITOK | M_ZERO);
452 
453 	sc->sc_flush = CISS_FLUSH_ENABLE;
454 	if (!(sc->sc_sh = shutdownhook_establish(ciss_shutdown, sc))) {
455 		aprint_error_dev(sc->sc_dev,
456 		    "unable to establish shutdown hook\n");
457 		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
458 		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
459 		return -1;
460 	}
461 
462 	sc->sc_channel.chan_adapter = &sc->sc_adapter;
463 	sc->sc_channel.chan_bustype = &scsi_bustype;
464 	sc->sc_channel.chan_channel = 0;
465 	sc->sc_channel.chan_ntargets = sc->maxunits;
466 	sc->sc_channel.chan_nluns = 1;	/* ciss doesn't really have SCSI luns */
467 	sc->sc_channel.chan_openings = sc->maxcmd;
468 #if NBIO > 0
469 	/* XXX Reserve some ccb's for sensor and bioctl. */
470 	if (sc->sc_channel.chan_openings > 2)
471 		sc->sc_channel.chan_openings -= 2;
472 #endif
473 	sc->sc_channel.chan_flags = 0;
474 	sc->sc_channel.chan_id = sc->maxunits;
475 
476 	sc->sc_adapter.adapt_dev = sc->sc_dev;
477 	sc->sc_adapter.adapt_openings = sc->sc_channel.chan_openings;
478 	sc->sc_adapter.adapt_max_periph = uimin(sc->sc_adapter.adapt_openings, 256);
479 	sc->sc_adapter.adapt_request = ciss_scsi_cmd;
480 	sc->sc_adapter.adapt_minphys = cissminphys;
481 	sc->sc_adapter.adapt_ioctl = ciss_scsi_ioctl;
482 	sc->sc_adapter.adapt_nchannels = 1;
483 	config_found(sc->sc_dev, &sc->sc_channel, scsiprint, CFARGS_NONE);
484 
485 #if 0
486 	sc->sc_link_raw.adapter_softc = sc;
487 	sc->sc_link.openings = sc->sc_channel.chan_openings;
488 	sc->sc_link_raw.adapter = &ciss_raw_switch;
489 	sc->sc_link_raw.adapter_target = sc->ndrives;
490 	sc->sc_link_raw.adapter_buswidth = sc->ndrives;
491 	config_found(sc->sc_dev, &sc->sc_channel, scsiprint, CFARGS_NONE);
492 #endif
493 
494 #if NBIO > 0
495 	/* now map all the physdevs into their lds */
496 	/* XXX currently we assign all of them into ld0 */
497 	for (i = 0; i < sc->maxunits && i < 1; i++)
498 		if (!(sc->sc_lds[i] = ciss_pdscan(sc, i))) {
499 			sc->sc_waitflag = 0;	/* we can sleep now */
500 			return 0;
501 		}
502 
503 	if (bio_register(sc->sc_dev, ciss_ioctl) != 0)
504 		aprint_error_dev(sc->sc_dev, "controller registration failed\n");
505 	else
506 		sc->sc_ioctl = ciss_ioctl;
507 	if (ciss_create_sensors(sc) != 0)
508 		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
509 #endif
510 	sc->sc_waitflag = 0;			/* we can sleep now */
511 
512 	return 0;
513 }
514 
515 static void
516 ciss_shutdown(void *v)
517 {
518 	struct ciss_softc *sc = v;
519 
520 	sc->sc_flush = CISS_FLUSH_DISABLE;
521 	/* timeout_del(&sc->sc_hb); */
522 	ciss_sync(sc);
523 }
524 
525 static void
526 cissminphys(struct buf *bp)
527 {
528 #if 0	/* TODO */
529 #define	CISS_MAXFER	(PAGE_SIZE * (sc->maxsg + 1))
530 	if (bp->b_bcount > CISS_MAXFER)
531 		bp->b_bcount = CISS_MAXFER;
532 #endif
533 	minphys(bp);
534 }
535 
536 static void
537 ciss_enqueue(struct ciss_softc *sc, ciss_queue_head *q, uint32_t id)
538 {
539 	struct ciss_ccb *ccb;
540 
541 	KASSERT(mutex_owned(&sc->sc_mutex));
542 
543 	KASSERT((id >> 2) <= sc->maxcmd);
544 	ccb = (struct ciss_ccb *) ((char *)sc->ccbs + (id >> 2) * sc->ccblen);
545 	ccb->ccb_cmd.id = htole32(id);
546 	ccb->ccb_cmd.id_hi = htole32(0);
547 	TAILQ_INSERT_TAIL(q, ccb, ccb_link);
548 }
549 
550 static void
551 ciss_completed_simple(struct ciss_softc *sc, ciss_queue_head *q)
552 {
553 	uint32_t id;
554 
555 	KASSERT(mutex_owned(&sc->sc_mutex));
556 
557 	for (;;) {
558 		if (sc->cfg.methods & CISS_METH_FIFO64) {
559 			if (bus_space_read_4(sc->sc_iot, sc->sc_ioh,
560 			    CISS_OUTQ64_HI) == 0xffffffff) {
561 				CISS_DPRINTF(CISS_D_CMD, ("Q"));
562 				break;
563 			}
564 			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
565 			    CISS_OUTQ64_LO);
566 		} else if (sc->cfg.methods & CISS_METH_FIFO64_RRO) {
567 			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
568 			    CISS_OUTQ64_LO);
569 			if (id == 0xffffffff) {
570 				CISS_DPRINTF(CISS_D_CMD, ("Q"));
571 				break;
572 			}
573 			(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh,
574 			    CISS_OUTQ64_HI);
575 		} else {
576 			id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
577 			    CISS_OUTQ);
578 			if (id == 0xffffffff) {
579 				CISS_DPRINTF(CISS_D_CMD, ("Q"));
580 				break;
581 			}
582 		}
583 
584 		CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
585 		ciss_enqueue(sc, q, id);
586 	}
587 }
588 
589 static void
590 ciss_completed_perf(struct ciss_softc *sc, ciss_queue_head *q)
591 {
592 	uint32_t id;
593 
594 	KASSERT(mutex_owned(&sc->sc_mutex));
595 
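	/*
	 * The reply ring is consumed with a cycle bit: perf_cycle starts at 1
	 * (ciss_init_perf()) and is flipped every time perf_rqidx wraps, and
	 * only entries whose cycle bit matches the current value are taken,
	 * so tags left over from the previous pass through the ring are
	 * ignored.
	 */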
596 	for (;;) {
597 		id = sc->perf_reply[sc->perf_rqidx];
598 		if ((id & CISS_CYCLE_MASK) != sc->perf_cycle)
599 			break;
600 
601 		if (++sc->perf_rqidx == sc->maxcmd) {
602 			sc->perf_rqidx = 0;
603 			sc->perf_cycle ^= 1;
604 		}
605 
606 		CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
607 		ciss_enqueue(sc, q, id);
608 	}
609 }
610 
611 static int
612 ciss_poll(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
613 {
614 	ciss_queue_head q;
615 	struct ciss_ccb *ccb1;
616 
617 	TAILQ_INIT(&q);
618 	ms /= 10;
619 
620 	while (ms-- > 0) {
621 		DELAY(10);
622 		mutex_enter(&sc->sc_mutex);
623 		if (CISS_IS_PERF(sc))
624 			ciss_completed_perf(sc, &q);
625 		else
626 			ciss_completed_simple(sc, &q);
627 		mutex_exit(&sc->sc_mutex);
628 
629 		while (!TAILQ_EMPTY(&q)) {
630 			ccb1 = TAILQ_FIRST(&q);
631 			TAILQ_REMOVE(&q, ccb1, ccb_link);
632 
633 			KASSERT(ccb1->ccb_state == CISS_CCB_ONQ);
634 			ciss_done(sc, ccb1);
635 			if (ccb1 == ccb) {
636 				KASSERT(TAILQ_EMPTY(&q));
637 				return 0;
638 			}
639 		}
640 	}
641 
642 	return ETIMEDOUT;
643 }
644 
645 static int
646 ciss_wait(struct ciss_softc *sc, struct ciss_ccb *ccb, int ms)
647 {
648 	int tohz, etick;
649 
650 	tohz = mstohz(ms);
651 	if (tohz == 0)
652 		tohz = 1;
653 	etick = getticks() + tohz;
654 
655 	for (;;) {
656 		CISS_DPRINTF(CISS_D_CMD, ("cv_timedwait(%d) ", tohz));
657 		mutex_enter(&sc->sc_mutex);
658 		if (cv_timedwait(&sc->sc_condvar, &sc->sc_mutex, tohz)
659 		    == EWOULDBLOCK) {
660 			mutex_exit(&sc->sc_mutex);
661 			return EWOULDBLOCK;
662 		}
663 		mutex_exit(&sc->sc_mutex);
664 		if (ccb->ccb_state == CISS_CCB_ONQ) {
665 			ciss_done(sc, ccb);
666 			return 0;
667 		}
668 		tohz = etick - getticks();
669 		if (tohz <= 0)
670 			return EWOULDBLOCK;
671 		CISS_DPRINTF(CISS_D_CMD, ("T"));
672 	}
673 }
674 
675 /*
676  * submit a command and optionally wait for completion.
677  * wait arg abuses XS_CTL_POLL|XS_CTL_NOSLEEP flags to request
678  * to wait (XS_CTL_POLL) and to allow tsleep() (!XS_CTL_NOSLEEP)
679  * instead of busy loop waiting
680  */
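/*
 * Summary of the wait-flag combinations handled below:
 *	XS_CTL_POLL			sleep-wait for completion on
 *					sc_condvar (ciss_wait())
 *	XS_CTL_POLL|XS_CTL_NOSLEEP	busy-wait via ciss_poll(), with the
 *					adapter interrupt temporarily masked
 *					through CISS_IMR
 *	neither				submit and return; completion is
 *					handled from the interrupt path
 */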
681 static int
682 ciss_cmd(struct ciss_softc *sc, struct ciss_ccb *ccb, int flags, int wait)
683 {
684 	struct ciss_cmd *cmd = &ccb->ccb_cmd;
685 	bus_dmamap_t dmap = ccb->ccb_dmamap;
686 	u_int64_t addr;
687 	int i, error = 0;
688 	const bool pollsleep = ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) ==
689 	    XS_CTL_POLL);
690 
691 	if (ccb->ccb_state != CISS_CCB_READY) {
692 		printf("%s: ccb %d not ready state=0x%x\n", device_xname(sc->sc_dev),
693 		    cmd->id, ccb->ccb_state);
694 		return (EINVAL);
695 	}
696 
697 	if (ccb->ccb_data) {
698 		bus_dma_segment_t *sgd;
699 
700 		if ((error = bus_dmamap_load(sc->sc_dmat, dmap, ccb->ccb_data,
701 		    ccb->ccb_len, NULL, flags))) {
702 			if (error == EFBIG)
703 				printf("more than %d dma segs\n", sc->maxsg);
704 			else
705 				printf("error %d loading dma map\n", error);
706 			ciss_put_ccb(sc, ccb);
707 			return (error);
708 		}
709 		cmd->sgin = dmap->dm_nsegs;
710 
711 		sgd = dmap->dm_segs;
712 		CISS_DPRINTF(CISS_D_DMA, ("data=%p/%zu<%#" PRIxPADDR "/%zu",
713 		    ccb->ccb_data, ccb->ccb_len, sgd->ds_addr, sgd->ds_len));
714 
715 		for (i = 0; i < dmap->dm_nsegs; sgd++, i++) {
716 			cmd->sgl[i].addr_lo = htole32(sgd->ds_addr);
717 			cmd->sgl[i].addr_hi =
718 			    htole32((u_int64_t)sgd->ds_addr >> 32);
719 			cmd->sgl[i].len = htole32(sgd->ds_len);
720 			cmd->sgl[i].flags = htole32(0);
721 			if (i) {
722 				CISS_DPRINTF(CISS_D_DMA,
723 				    (",%#" PRIxPADDR "/%zu", sgd->ds_addr,
724 				    sgd->ds_len));
725 			}
726 		}
727 
728 		CISS_DPRINTF(CISS_D_DMA, ("> "));
729 
730 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
731 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
732 
733 		if (dmap->dm_nsegs == 0)
734 			ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
735 		else if (dmap->dm_nsegs == 1)
736 			ccb->ccb_sg_tag = CISS_SG_FETCH_1;
737 		else if (dmap->dm_nsegs == 2)
738 			ccb->ccb_sg_tag = CISS_SG_FETCH_2;
739 		else if (dmap->dm_nsegs <= 4)
740 			ccb->ccb_sg_tag = CISS_SG_FETCH_4;
741 		else if (dmap->dm_nsegs <= 8)
742 			ccb->ccb_sg_tag = CISS_SG_FETCH_8;
743 		else if (dmap->dm_nsegs <= 16)
744 			ccb->ccb_sg_tag = CISS_SG_FETCH_16;
745 		else if (dmap->dm_nsegs <= 32)
746 			ccb->ccb_sg_tag = CISS_SG_FETCH_32;
747 		else
748 			ccb->ccb_sg_tag = CISS_SG_FETCH_MAX;
749 	} else {
750 		ccb->ccb_sg_tag = CISS_SG_FETCH_NONE;
751 		cmd->sgin = 0;
752 	}
753 	cmd->sglen = htole16((u_int16_t)cmd->sgin);
754 	memset(&ccb->ccb_err, 0, sizeof(ccb->ccb_err));
755 
756 	bus_dmamap_sync(sc->sc_dmat, sc->cmdmap, 0, sc->cmdmap->dm_mapsize,
757 	    BUS_DMASYNC_PREWRITE);
758 
759 	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
760 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
761 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) | sc->iem);
762 
763 	if (!pollsleep)
764 		ccb->ccb_state = CISS_CCB_ONQ;
765 	else
766 		ccb->ccb_state = CISS_CCB_POLL;
767 	CISS_DPRINTF(CISS_D_CMD, ("submit=0x%x ", cmd->id));
768 
769 	addr = (u_int64_t)ccb->ccb_cmdpa;
770 	if (CISS_IS_PERF(sc)) {
771 		KASSERT((addr & 0xf) == 0);
772 		/*
773 		 * The bits in addr in performant mean:
774 		 * - performant mode bit (bit 0)
775 		 * - pull count (bits 1-3)
776 		 * There is no support for ioaccel mode
777 		 */
778 		addr |= 1 | (ccb->ccb_sg_tag << 1);
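		/*
		 * e.g. a two-segment transfer (ccb_sg_tag == CISS_SG_FETCH_2)
		 * is posted as ccb_cmdpa | 1 | (CISS_SG_FETCH_2 << 1); the
		 * pull count selects the matching fetch_count[] slot that
		 * ciss_init_perf() programmed.
		 */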
779 	}
780 	if (sc->cfg.methods & (CISS_METH_FIFO64|CISS_METH_FIFO64_RRO)) {
781 		/*
782 		 * Write the upper 32bits immediately before the lower
783 		 * 32bits and set bit 63 to indicate 64bit FIFO mode.
784 		 */
785 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_HI,
786 		    (addr >> 32) | 0x80000000);
787 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ64_LO,
788 		    addr & 0x00000000ffffffffULL);
789 	} else
790 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ,
791 		    (uint32_t)addr);
792 
793 	if (wait & XS_CTL_POLL) {
794 		int ms;
795 		CISS_DPRINTF(CISS_D_CMD, ("waiting "));
796 
797 		ms = ccb->ccb_xs ? ccb->ccb_xs->timeout : 60000;
798 		if (pollsleep)
799 			error = ciss_wait(sc, ccb, ms);
800 		else
801 			error = ciss_poll(sc, ccb, ms);
802 
803 		/* if never got a chance to be done above... */
804 		if (ccb->ccb_state != CISS_CCB_FREE) {
805 			KASSERT(error);
806 			ccb->ccb_err.cmd_stat = CISS_ERR_TMO;
807 			error = ciss_done(sc, ccb);
808 		}
809 
810 		CISS_DPRINTF(CISS_D_CMD, ("done %d:%d",
811 		    ccb->ccb_err.cmd_stat, ccb->ccb_err.scsi_stat));
812 	}
813 
814 	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
815 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
816 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) & ~sc->iem);
817 
818 	return (error);
819 }
820 
821 static int
822 ciss_done(struct ciss_softc *sc, struct ciss_ccb *ccb)
823 {
824 	struct scsipi_xfer *xs = ccb->ccb_xs;
825 	struct ciss_cmd *cmd;
826 	int error = 0;
827 
828 	CISS_DPRINTF(CISS_D_CMD, ("ciss_done(%p) ", ccb));
829 
830 	if (ccb->ccb_state != CISS_CCB_ONQ) {
831 		printf("%s: unqueued ccb %p ready, state=0x%x\n",
832 		    device_xname(sc->sc_dev), ccb, ccb->ccb_state);
833 		return 1;
834 	}
835 
836 	ccb->ccb_state = CISS_CCB_READY;
837 
838 	if (ccb->ccb_cmd.id & CISS_CMD_ERR)
839 		error = ciss_error(sc, ccb);
840 
841 	cmd = &ccb->ccb_cmd;
842 	if (ccb->ccb_data) {
843 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
844 		    ccb->ccb_dmamap->dm_mapsize, (cmd->flags & CISS_CDB_IN) ?
845 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
846 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
847 		ccb->ccb_xs = NULL;
848 		ccb->ccb_data = NULL;
849 	}
850 
851 	ciss_put_ccb(sc, ccb);
852 
853 	if (xs) {
854 		xs->resid = 0;
855 		CISS_DPRINTF(CISS_D_CMD, ("scsipi_done(%p) ", xs));
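		/*
		 * INQUIRY fixup: the logical drives report ANSI version 0
		 * even though they set CmdQue, so bump the reported version
		 * to let the upper layer enable tagged queueing.
		 */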
856 		if (xs->cmd->opcode == INQUIRY) {
857 			struct scsipi_inquiry_data *inq;
858 			inq = (struct scsipi_inquiry_data *)xs->data;
859 			if ((inq->version & SID_ANSII) == 0 &&
860 			    (inq->flags3 & SID_CmdQue) != 0) {
861 				inq->version |= 2;
862 			}
863 		}
864 		scsipi_done(xs);
865 	}
866 
867 	return error;
868 }
869 
870 static int
871 ciss_error(struct ciss_softc *sc, struct ciss_ccb *ccb)
872 {
873 	struct ciss_error *err = &ccb->ccb_err;
874 	struct scsipi_xfer *xs = ccb->ccb_xs;
875 	int rv;
876 
877 	switch ((rv = le16toh(err->cmd_stat))) {
878 	case CISS_ERR_OK:
879 		rv = 0;
880 		break;
881 
882 	case CISS_ERR_INVCMD:
883 		if (xs == NULL ||
884 		    xs->cmd->opcode != SCSI_SYNCHRONIZE_CACHE_10)
885 			printf("%s: invalid cmd 0x%x: 0x%x is not valid @ 0x%x[%d]\n",
886 			    device_xname(sc->sc_dev), ccb->ccb_cmd.id,
887 			    err->err_info, err->err_type[3], err->err_type[2]);
888 		if (xs) {
889 			memset(&xs->sense, 0, sizeof(xs->sense));
890 			xs->sense.scsi_sense.response_code =
891 				SSD_RCODE_CURRENT | SSD_RCODE_VALID;
892 			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
893 			xs->sense.scsi_sense.asc = 0x24; /* ill field */
894 			xs->sense.scsi_sense.ascq = 0x0;
895 			xs->error = XS_SENSE;
896 		}
897 		rv = EIO;
898 		break;
899 
900 	case CISS_ERR_TMO:
901 		if (xs != NULL) xs->error = XS_TIMEOUT;
902 		rv = ETIMEDOUT;
903 		break;
904 
905 	case CISS_ERR_UNRUN:
906 		/* Underrun */
907 		xs->resid = le32toh(err->resid);
908 		CISS_DPRINTF(CISS_D_CMD, (" underrun resid=0x%x ",
909 					  xs->resid));
910 		rv = EIO;
911 		break;
912 	default:
913 		if (xs) {
914 			CISS_DPRINTF(CISS_D_CMD, ("scsi_stat=%x ", err->scsi_stat));
915 			switch (err->scsi_stat) {
916 			case SCSI_CHECK:
917 				xs->error = XS_SENSE;
918 				memcpy(&xs->sense, &err->sense[0],
919 				    sizeof(xs->sense));
920 				CISS_DPRINTF(CISS_D_CMD, (" sense=%02x %02x %02x %02x ",
921 					     err->sense[0], err->sense[1], err->sense[2], err->sense[3]));
922 				rv = EIO;
923 				break;
924 
925 			case XS_BUSY:
926 				xs->error = XS_BUSY;
927 				rv = EBUSY;
928 				break;
929 
930 			default:
931 				CISS_DPRINTF(CISS_D_ERR, ("%s: "
932 				    "cmd_stat=%x scsi_stat=0x%x resid=0x%x\n",
933 				    device_xname(sc->sc_dev), rv, err->scsi_stat,
934 				    le32toh(err->resid)));
935 				printf("ciss driver stuffup in %s:%d: %s()\n",
936 				       __FILE__, __LINE__, __func__);
937 				xs->error = XS_DRIVER_STUFFUP;
938 				rv = EIO;
939 				break;
940 			}
941 			xs->resid = le32toh(err->resid);
942 		} else
943 			rv = EIO;
944 	}
945 	ccb->ccb_cmd.id &= htole32(~3);
946 
947 	return rv;
948 }
949 
950 static int
951 ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq)
952 {
953 	struct ciss_ccb *ccb;
954 	struct ciss_cmd *cmd;
955 
956 	ccb = ciss_get_ccb(sc);
957 	ccb->ccb_len = sizeof(*inq);
958 	ccb->ccb_data = inq;
959 	ccb->ccb_xs = NULL;
960 	cmd = &ccb->ccb_cmd;
961 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
962 	cmd->tgt2 = 0;
963 	cmd->cdblen = 10;
964 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
965 	cmd->tmo = htole16(0);
966 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
967 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
968 	cmd->cdb[6] = CISS_CMS_CTRL_CTRL;
969 	cmd->cdb[7] = sizeof(*inq) >> 8;	/* biiiig endian */
970 	cmd->cdb[8] = sizeof(*inq) & 0xff;
971 
972 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
973 }
974 
975 static int
976 ciss_ldmap(struct ciss_softc *sc)
977 {
978 	struct ciss_ccb *ccb;
979 	struct ciss_cmd *cmd;
980 	struct ciss_ldmap *lmap;
981 	int total, rv;
982 
983 	mutex_enter(&sc->sc_mutex_scratch);
984 	lmap = sc->scratch;
985 	lmap->size = htobe32(sc->maxunits * sizeof(lmap->map));
986 	total = sizeof(*lmap) + (sc->maxunits - 1) * sizeof(lmap->map);
987 
988 	ccb = ciss_get_ccb(sc);
989 	ccb->ccb_len = total;
990 	ccb->ccb_data = lmap;
991 	ccb->ccb_xs = NULL;
992 	cmd = &ccb->ccb_cmd;
993 	cmd->tgt = CISS_CMD_MODE_PERIPH;
994 	cmd->tgt2 = 0;
995 	cmd->cdblen = 12;
996 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
997 	cmd->tmo = htole16(30);
998 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
999 	cmd->cdb[0] = CISS_CMD_LDMAP;
1000 	cmd->cdb[8] = total >> 8;	/* biiiig endian */
1001 	cmd->cdb[9] = total & 0xff;
1002 
1003 	rv = ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
1004 
1005 	if (rv) {
1006 		mutex_exit(&sc->sc_mutex_scratch);
1007 		return rv;
1008 	}
1009 
1010 	CISS_DPRINTF(CISS_D_MISC, ("lmap %x:%x\n",
1011 	    lmap->map[0].tgt, lmap->map[0].tgt2));
1012 
1013 	mutex_exit(&sc->sc_mutex_scratch);
1014 	return 0;
1015 }
1016 
1017 static int
1018 ciss_sync(struct ciss_softc *sc)
1019 {
1020 	struct ciss_ccb *ccb;
1021 	struct ciss_cmd *cmd;
1022 	struct ciss_flush *flush;
1023 	int rv;
1024 
1025 	mutex_enter(&sc->sc_mutex_scratch);
1026 	flush = sc->scratch;
1027 	memset(flush, 0, sizeof(*flush));
1028 	flush->flush = sc->sc_flush;
1029 
1030 	ccb = ciss_get_ccb(sc);
1031 	ccb->ccb_len = sizeof(*flush);
1032 	ccb->ccb_data = flush;
1033 	ccb->ccb_xs = NULL;
1034 	cmd = &ccb->ccb_cmd;
1035 	cmd->tgt = CISS_CMD_MODE_PERIPH;
1036 	cmd->tgt2 = 0;
1037 	cmd->cdblen = 10;
1038 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1039 	cmd->tmo = 0;
1040 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1041 	cmd->cdb[0] = CISS_CMD_CTRL_SET;
1042 	cmd->cdb[6] = CISS_CMS_CTRL_FLUSH;
1043 	cmd->cdb[7] = sizeof(*flush) >> 8;	/* biiiig endian */
1044 	cmd->cdb[8] = sizeof(*flush) & 0xff;
1045 
1046 	rv = ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
1047 	mutex_exit(&sc->sc_mutex_scratch);
1048 
1049 	return rv;
1050 }
1051 
1052 int
1053 ciss_ldid(struct ciss_softc *sc, int target, struct ciss_ldid *id)
1054 {
1055 	struct ciss_ccb *ccb;
1056 	struct ciss_cmd *cmd;
1057 
1058 	ccb = ciss_get_ccb(sc);
1059 	if (ccb == NULL)
1060 		return ENOMEM;
1061 	ccb->ccb_len = sizeof(*id);
1062 	ccb->ccb_data = id;
1063 	ccb->ccb_xs = NULL;
1064 	cmd = &ccb->ccb_cmd;
1065 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1066 	cmd->tgt2 = 0;
1067 	cmd->cdblen = 10;
1068 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1069 	cmd->tmo = htole16(0);
1070 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1071 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1072 	cmd->cdb[1] = target;
1073 	cmd->cdb[6] = CISS_CMS_CTRL_LDIDEXT;
1074 	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
1075 	cmd->cdb[8] = sizeof(*id) & 0xff;
1076 
1077 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1078 }
1079 
1080 int
1081 ciss_ldstat(struct ciss_softc *sc, int target, struct ciss_ldstat *stat)
1082 {
1083 	struct ciss_ccb *ccb;
1084 	struct ciss_cmd *cmd;
1085 
1086 	ccb = ciss_get_ccb(sc);
1087 	if (ccb == NULL)
1088 		return ENOMEM;
1089 	ccb->ccb_len = sizeof(*stat);
1090 	ccb->ccb_data = stat;
1091 	ccb->ccb_xs = NULL;
1092 	cmd = &ccb->ccb_cmd;
1093 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1094 	cmd->tgt2 = 0;
1095 	cmd->cdblen = 10;
1096 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1097 	cmd->tmo = htole16(0);
1098 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1099 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1100 	cmd->cdb[1] = target;
1101 	cmd->cdb[6] = CISS_CMS_CTRL_LDSTAT;
1102 	cmd->cdb[7] = sizeof(*stat) >> 8;	/* biiiig endian */
1103 	cmd->cdb[8] = sizeof(*stat) & 0xff;
1104 
1105 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
1106 }
1107 
1108 int
1109 ciss_pdid(struct ciss_softc *sc, u_int8_t drv, struct ciss_pdid *id, int wait)
1110 {
1111 	struct ciss_ccb *ccb;
1112 	struct ciss_cmd *cmd;
1113 
1114 	ccb = ciss_get_ccb(sc);
1115 	if (ccb == NULL)
1116 		return ENOMEM;
1117 	ccb->ccb_len = sizeof(*id);
1118 	ccb->ccb_data = id;
1119 	ccb->ccb_xs = NULL;
1120 	cmd = &ccb->ccb_cmd;
1121 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1122 	cmd->tgt2 = 0;
1123 	cmd->cdblen = 10;
1124 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
1125 	cmd->tmo = htole16(0);
1126 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1127 	cmd->cdb[0] = CISS_CMD_CTRL_GET;
1128 	cmd->cdb[2] = drv;
1129 	cmd->cdb[6] = CISS_CMS_CTRL_PDID;
1130 	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
1131 	cmd->cdb[8] = sizeof(*id) & 0xff;
1132 
1133 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, wait);
1134 }
1135 
1136 
1137 struct ciss_ld *
1138 ciss_pdscan(struct ciss_softc *sc, int ld)
1139 {
1140 	struct ciss_pdid *pdid;
1141 	struct ciss_ld *ldp;
1142 	u_int8_t drv, buf[128];
1143 	int i, j, k = 0;
1144 
1145 	mutex_enter(&sc->sc_mutex_scratch);
1146 	pdid = sc->scratch;
1147 	if (sc->ndrives == 256) {
1148 		for (i = 0; i < CISS_BIGBIT; i++)
1149 			if (!ciss_pdid(sc, i, pdid,
1150 					XS_CTL_POLL|XS_CTL_NOSLEEP) &&
1151 			    (pdid->present & CISS_PD_PRESENT))
1152 				buf[k++] = i;
1153 	} else
1154 		for (i = 0; i < sc->nbus; i++)
1155 			for (j = 0; j < sc->ndrives; j++) {
1156 				drv = CISS_BIGBIT + i * sc->ndrives + j;
1157 				if (!ciss_pdid(sc, drv, pdid,
1158 						XS_CTL_POLL|XS_CTL_NOSLEEP))
1159 					buf[k++] = drv;
1160 			}
1161 	mutex_exit(&sc->sc_mutex_scratch);
1162 
1163 	if (!k)
1164 		return NULL;
1165 
1166 	ldp = malloc(sizeof(*ldp) + (k-1), M_DEVBUF, M_WAITOK);
1167 	memset(&ldp->bling, 0, sizeof(ldp->bling));
1168 	ldp->ndrives = k;
1169 	ldp->xname[0] = 0;
1170 	memcpy(ldp->tgts, buf, k);
1171 	return ldp;
1172 }
1173 
1174 static void
1175 ciss_scsi_cmd(struct ciss_softc *chan, scsipi_adapter_req_t req,
1176 	void *arg)
1177 {
1178 	struct scsipi_xfer *xs;
1179 	struct scsipi_xfer_mode *xm;
1180 	struct ciss_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1181 	u_int8_t target;
1182 	struct ciss_ccb *ccb;
1183 	struct ciss_cmd *cmd;
1184 
1185 	CISS_DPRINTF(CISS_D_CMD, ("ciss_scsi_cmd "));
1186 
1187 	switch (req)
1188 	{
1189 	case ADAPTER_REQ_RUN_XFER:
1190 		xs = (struct scsipi_xfer *) arg;
1191 		target = xs->xs_periph->periph_target;
1192 		CISS_DPRINTF(CISS_D_CMD, ("targ=%d ", target));
1193 		if (xs->cmdlen > CISS_MAX_CDB) {
1194 			CISS_DPRINTF(CISS_D_CMD, ("CDB too big %p ", xs));
1195 			memset(&xs->sense, 0, sizeof(xs->sense));
1196 			xs->error = XS_SENSE;
1197 			printf("ciss driver stuffup in %s:%d: %s()\n",
1198 			       __FILE__, __LINE__, __func__);
1199 			scsipi_done(xs);
1200 			break;
1201 		}
1202 
1203 		xs->error = XS_NOERROR;
1204 
1205 		/* XXX emulate SYNCHRONIZE_CACHE ??? */
1206 
1207 		ccb = ciss_get_ccb(sc);
1208 		cmd = &ccb->ccb_cmd;
1209 		ccb->ccb_len = xs->datalen;
1210 		ccb->ccb_data = xs->data;
1211 		ccb->ccb_xs = xs;
1212 		cmd->tgt = CISS_CMD_MODE_LD | target;
1213 		cmd->tgt2 = 0;
1214 		cmd->cdblen = xs->cmdlen;
1215 		cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL;
1216 		if (xs->xs_control & XS_CTL_DATA_IN)
1217 			cmd->flags |= CISS_CDB_IN;
1218 		else if (xs->xs_control & XS_CTL_DATA_OUT)
1219 			cmd->flags |= CISS_CDB_OUT;
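		/* convert the scsipi timeout (ms) to the adapter's units (s) */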
1220 		cmd->tmo = htole16(xs->timeout < 1000? 1 : xs->timeout / 1000);
1221 		memcpy(&cmd->cdb[0], xs->cmd, xs->cmdlen);
1222 		CISS_DPRINTF(CISS_D_CMD, ("cmd=%02x %02x %02x %02x %02x %02x ",
1223 			     cmd->cdb[0], cmd->cdb[1], cmd->cdb[2],
1224 			     cmd->cdb[3], cmd->cdb[4], cmd->cdb[5]));
1225 
1226 		if (ciss_cmd(sc, ccb, BUS_DMA_WAITOK,
1227 		    xs->xs_control & (XS_CTL_POLL|XS_CTL_NOSLEEP))) {
1228 			printf("ciss driver stuffup in %s:%d: %s()\n",
1229 			       __FILE__, __LINE__, __func__);
1230 			xs->error = XS_DRIVER_STUFFUP;
1231 			scsipi_done(xs);
1232 			return;
1233 		}
1234 
1235 		break;
1236 	case ADAPTER_REQ_GROW_RESOURCES:
1237 		/*
1238 		 * Not supported.
1239 		 */
1240 		break;
1241 	case ADAPTER_REQ_SET_XFER_MODE:
1242 		/*
1243 		 * We can't change the transfer mode, but at least let
1244 		 * scsipi know what the adapter has negotiated.
1245 		 */
1246 		xm = (struct scsipi_xfer_mode *)arg;
1247 		xm->xm_mode |= PERIPH_CAP_TQING;
1248 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
1249 		break;
1250 	default:
1251 		printf("%s: %d %d unsupported\n", __func__, __LINE__, req);
1252 	}
1253 }
1254 
1255 static void
1256 ciss_completed_process(struct ciss_softc *sc, ciss_queue_head *q)
1257 {
1258 	struct ciss_ccb *ccb;
1259 
1260 	while (!TAILQ_EMPTY(q)) {
1261 		ccb = TAILQ_FIRST(q);
1262 		TAILQ_REMOVE(q, ccb, ccb_link);
1263 
1264 		if (ccb->ccb_state == CISS_CCB_POLL) {
1265 			ccb->ccb_state = CISS_CCB_ONQ;
1266 			mutex_enter(&sc->sc_mutex);
1267 			cv_broadcast(&sc->sc_condvar);
1268 			mutex_exit(&sc->sc_mutex);
1269 		} else
1270 			ciss_done(sc, ccb);
1271 	}
1272 }
1273 
1274 int
1275 ciss_intr_simple_intx(void *v)
1276 {
1277 	struct ciss_softc *sc = v;
1278 	ciss_queue_head q;
1279 	int hit = 0;
1280 
1281 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1282 
1283 	/* XXX shouldn't be necessary, intr triggers only if enabled */
1284 	if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_ISR) & sc->iem))
1285 		return 0;
1286 
1287 	TAILQ_INIT(&q);
1288 	mutex_enter(&sc->sc_mutex);
1289 	ciss_completed_simple(sc, &q);
1290 	mutex_exit(&sc->sc_mutex);
1291 
1292 	hit = (!TAILQ_EMPTY(&q));
1293 	ciss_completed_process(sc, &q);
1294 
1295 	KASSERT(TAILQ_EMPTY(&q));
1296 	CISS_DPRINTF(CISS_D_INTR, ("exit\n"));
1297 
1298 	return hit;
1299 }
1300 
1301 int
1302 ciss_intr_perf_intx(void *v)
1303 {
1304 	struct ciss_softc *sc = v;
1305 
1306 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1307 
1308 	/* Clear the interrupt and flush the bridges.  Docs say that the flush
1309 	 * needs to be done twice, which doesn't seem right.
1310 	 */
1311 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_OSR);
1312 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_ODC, CISS_ODC_CLEAR);
1313 
1314 	return ciss_intr_perf_msi(sc);
1315 }
1316 
1317 int
1318 ciss_intr_perf_msi(void *v)
1319 {
1320 	struct ciss_softc *sc = v;
1321 	ciss_queue_head q;
1322 
1323 	CISS_DPRINTF(CISS_D_INTR, ("intr "));
1324 
1325 	TAILQ_INIT(&q);
1326 	mutex_enter(&sc->sc_mutex);
1327 	ciss_completed_perf(sc, &q);
1328 	mutex_exit(&sc->sc_mutex);
1329 
1330 	ciss_completed_process(sc, &q);
1331 
1332 	KASSERT(TAILQ_EMPTY(&q));
1333 	CISS_DPRINTF(CISS_D_INTR, ("exit"));
1334 
1335 	return 1;
1336 }
1337 
1338 static void
1339 ciss_heartbeat(void *v)
1340 {
1341 	struct ciss_softc *sc = v;
1342 	u_int32_t hb;
1343 
1344 	hb = bus_space_read_4(sc->sc_iot, sc->cfg_ioh,
1345 	    sc->cfgoff + offsetof(struct ciss_config, heartbeat));
1346 	if (hb == sc->heartbeat) {
1347 		sc->fibrillation++;
1348 		CISS_DPRINTF(CISS_D_ERR, ("%s: fibrillation #%d (value=%d)\n",
1349 		    device_xname(sc->sc_dev), sc->fibrillation, hb));
1350 		if (sc->fibrillation >= 11) {
1351 			/* No heartbeat for 33 seconds */
1352 			panic("%s: dead", device_xname(sc->sc_dev));	/* XXX reset! */
1353 		}
1354 	} else {
1355 		sc->heartbeat = hb;
1356 		if (sc->fibrillation) {
1357 			CISS_DPRINTF(CISS_D_ERR, ("%s: "
1358 			    "fibrillation ended (value=%d)\n",
1359 			    device_xname(sc->sc_dev), hb));
1360 		}
1361 		sc->fibrillation = 0;
1362 	}
1363 
1364 	callout_schedule(&sc->sc_hb, hz * 3);
1365 }
1366 
1367 static int
1368 ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
1369     void *addr, int flag, struct proc *p)
1370 {
1371 #if NBIO > 0
1372 	return ciss_ioctl(chan->chan_adapter->adapt_dev, cmd, addr);
1373 #else
1374 	return ENOTTY;
1375 #endif
1376 }
1377 
1378 #if NBIO > 0
1379 const int ciss_level[] = { 0, 4, 1, 5, 51, 7 };
1380 const int ciss_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
1381     BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
1382     BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
1383     BIOC_SVOFFLINE, BIOC_SVBUILDING };
1384 
1385 int
1386 ciss_ioctl(device_t dev, u_long cmd, void *addr)
1387 {
1388 	struct ciss_softc	*sc = device_private(dev);
1389 	struct bioc_inq *bi;
1390 	struct bioc_disk *bd;
1391 	struct bioc_blink *bb;
1392 	struct ciss_ldstat *ldstat;
1393 	struct ciss_pdid *pdid;
1394 	struct ciss_blink *blink;
1395 	struct ciss_ld *ldp;
1396 	u_int8_t drv;
1397 	int ld, pd, error = 0;
1398 
1399 	switch (cmd) {
1400 	case BIOCINQ:
1401 		bi = (struct bioc_inq *)addr;
1402 		strlcpy(bi->bi_dev, device_xname(sc->sc_dev), sizeof(bi->bi_dev));
1403 		bi->bi_novol = sc->maxunits;
1404 		bi->bi_nodisk = sc->sc_lds[0]->ndrives;
1405 		break;
1406 
1407 	case BIOCVOL:
1408 		error = ciss_ioctl_vol(sc, (struct bioc_vol *)addr);
1409 		break;
1410 
1411 	case BIOCDISK_NOVOL:
1412 /*
1413  * XXX since we don't know how to associate physical drives with logical drives
1414  * yet, BIOCDISK_NOVOL is equivalent to BIOCDISK to the volume that we've
1415  * associated all physical drives to.
1416  * Maybe associate all physical drives to all logical volumes, but only return
1417  * physical drives on one logical volume.  Which one?  Either 1st volume that
1418  * is degraded, rebuilding, or failed?
1419  */
1420 		bd = (struct bioc_disk *)addr;
1421 		bd->bd_volid = 0;
1422 		bd->bd_disknovol = true;
1423 		/* FALLTHROUGH */
1424 	case BIOCDISK:
1425 		bd = (struct bioc_disk *)addr;
1426 		if (bd->bd_volid < 0 || bd->bd_volid >= sc->maxunits) {
1427 			error = EINVAL;
1428 			break;
1429 		}
1430 		ldp = sc->sc_lds[0];
1431 		if (!ldp || (pd = bd->bd_diskid) < 0 || pd >= ldp->ndrives) {
1432 			error = EINVAL;
1433 			break;
1434 		}
1435 		ldstat = sc->scratch;
1436 		if ((error = ciss_ldstat(sc, bd->bd_volid, ldstat))) {
1437 			break;
1438 		}
1439 		bd->bd_status = -1;
1440 		if (ldstat->stat == CISS_LD_REBLD &&
1441 		    ldstat->bigrebuild == ldp->tgts[pd])
1442 			bd->bd_status = BIOC_SDREBUILD;
1443 		if (ciss_bitset(ldp->tgts[pd] & (~CISS_BIGBIT),
1444 		    ldstat->bigfailed)) {
1445 			bd->bd_status = BIOC_SDFAILED;
1446 			bd->bd_size = 0;
1447 			bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1448 			    sc->ndrives;
1449 			bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1450 			bd->bd_lun = 0;
1451 			bd->bd_vendor[0] = '\0';
1452 			bd->bd_serial[0] = '\0';
1453 			bd->bd_procdev[0] = '\0';
1454 		} else {
1455 			pdid = sc->scratch;
1456 			if ((error = ciss_pdid(sc, ldp->tgts[pd], pdid,
1457 			    XS_CTL_POLL))) {
1458 				bd->bd_status = BIOC_SDFAILED;
1459 				bd->bd_size = 0;
1460 				bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
1461 				    sc->ndrives;
1462 				bd->bd_target = ldp->tgts[pd] % sc->ndrives;
1463 				bd->bd_lun = 0;
1464 				bd->bd_vendor[0] = '\0';
1465 				bd->bd_serial[0] = '\0';
1466 				bd->bd_procdev[0] = '\0';
1467 				error = 0;
1468 				break;
1469 			}
1470 			if (bd->bd_status < 0) {
1471 				if (pdid->config & CISS_PD_SPARE)
1472 					bd->bd_status = BIOC_SDHOTSPARE;
1473 				else if (pdid->present & CISS_PD_PRESENT)
1474 					bd->bd_status = BIOC_SDONLINE;
1475 				else
1476 					bd->bd_status = BIOC_SDINVALID;
1477 			}
1478 			bd->bd_size = (u_int64_t)le32toh(pdid->nblocks) *
1479 			    le16toh(pdid->blksz);
1480 			bd->bd_channel = pdid->bus;
1481 			bd->bd_target = pdid->target;
1482 			bd->bd_lun = 0;
1483 			strlcpy(bd->bd_vendor, pdid->model,
1484 			    sizeof(bd->bd_vendor));
1485 			strlcpy(bd->bd_serial, pdid->serial,
1486 			    sizeof(bd->bd_serial));
1487 			bd->bd_procdev[0] = '\0';
1488 		}
1489 		break;
1490 
1491 	case BIOCBLINK:
1492 		bb = (struct bioc_blink *)addr;
1493 		blink = sc->scratch;
1494 		error = EINVAL;
1495 		/* XXX workaround completely dumb scsi addressing */
1496 		for (ld = 0; ld < sc->maxunits; ld++) {
1497 			ldp = sc->sc_lds[ld];
1498 			if (!ldp)
1499 				continue;
1500 			if (sc->ndrives == 256)
1501 				drv = bb->bb_target;
1502 			else
1503 				drv = CISS_BIGBIT +
1504 				    bb->bb_channel * sc->ndrives +
1505 				    bb->bb_target;
1506 			for (pd = 0; pd < ldp->ndrives; pd++)
1507 				if (ldp->tgts[pd] == drv)
1508 					error = ciss_blink(sc, ld, pd,
1509 					    bb->bb_status, blink);
1510 		}
1511 		break;
1512 
1513 	default:
1514 		error = EINVAL;
1515 	}
1516 
1517 	return (error);
1518 }
1519 
1520 int
1521 ciss_ioctl_vol(struct ciss_softc *sc, struct bioc_vol *bv)
1522 {
1523 	struct ciss_ldid *ldid;
1524 	struct ciss_ld *ldp;
1525 	struct ciss_ldstat *ldstat;
1526 	struct ciss_pdid *pdid;
1527 	int error = 0;
1528 	u_int blks;
1529 
1530 	if (bv->bv_volid < 0 || bv->bv_volid >= sc->maxunits) {
1531 		return EINVAL;
1532 	}
1533 	ldp = sc->sc_lds[bv->bv_volid];
1534 	ldid = sc->scratch;
1535 	if ((error = ciss_ldid(sc, bv->bv_volid, ldid))) {
1536 		return error;
1537 	}
1538 	bv->bv_status = BIOC_SVINVALID;
1539 	blks = (u_int)le16toh(ldid->nblocks[1]) << 16 |
1540 	    le16toh(ldid->nblocks[0]);
1541 	bv->bv_size = blks * (u_quad_t)le16toh(ldid->blksize);
1542 	bv->bv_level = ciss_level[ldid->type];
1543 /*
1544  * XXX Should only return bv_nodisk for logical volume that we've associated
1545  * the physical drives to:  either the 1st degraded, rebuilding, or failed
1546  * volume else volume 0?
1547  */
1548 	if (ldp) {
1549 		bv->bv_nodisk = ldp->ndrives;
1550 		strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1551 	}
1552 	strlcpy(bv->bv_vendor, "CISS", sizeof(bv->bv_vendor));
1553 	ldstat = sc->scratch;
1554 	memset(ldstat, 0, sizeof(*ldstat));
1555 	if ((error = ciss_ldstat(sc, bv->bv_volid, ldstat))) {
1556 		return error;
1557 	}
1558 	bv->bv_percent = -1;
1559 	bv->bv_seconds = 0;
1560 	if (ldstat->stat < sizeof(ciss_stat)/sizeof(ciss_stat[0]))
1561 		bv->bv_status = ciss_stat[ldstat->stat];
1562 	if (bv->bv_status == BIOC_SVREBUILD ||
1563 	    bv->bv_status == BIOC_SVBUILDING) {
1564 	 	u_int64_t prog;
1565 
1566 		ldp = sc->sc_lds[0];
1567 		if (ldp) {
1568 			bv->bv_nodisk = ldp->ndrives;
1569 			strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
1570 		}
1571 /*
1572  * XXX ldstat->prog is blocks remaining on physical drive being rebuilt
1573  * blks is only correct for a RAID1 set;  RAID5 needs to determine the
1574  * size of the physical device - which we don't yet know.
1575  * ldstat->bigrebuild has physical device target, so could be used with
1576  * pdid to get size.   Another way is to save pd information in sc so it's
1577  * easy to reference.
1578  */
1579 		prog = (u_int64_t)((ldstat->prog[3] << 24) |
1580 		    (ldstat->prog[2] << 16) | (ldstat->prog[1] << 8) |
1581 		    ldstat->prog[0]);
1582 		pdid = sc->scratch;
1583 		if (!ciss_pdid(sc, ldstat->bigrebuild, pdid, XS_CTL_POLL)) {
1584 			blks = le32toh(pdid->nblocks);
1585 			bv->bv_percent = (blks - prog) * 1000ULL / blks;
1586 		 }
1587 	}
1588 	return 0;
1589 }
1590 
1591 int
1592 ciss_blink(struct ciss_softc *sc, int ld, int pd, int stat,
1593     struct ciss_blink *blink)
1594 {
1595 	struct ciss_ccb *ccb;
1596 	struct ciss_cmd *cmd;
1597 	struct ciss_ld *ldp;
1598 
1599 	if (ld >= sc->maxunits)
1600 		return EINVAL;
1601 
1602 	ldp = sc->sc_lds[ld];
1603 	if (!ldp || pd >= ldp->ndrives)
1604 		return EINVAL;
1605 
1606 	ldp->bling.pdtab[ldp->tgts[pd]] = stat == BIOC_SBUNBLINK? 0 :
1607 	    CISS_BLINK_ALL;
1608 	memcpy(blink, &ldp->bling, sizeof(*blink));
1609 
1610 	ccb = ciss_get_ccb(sc);
1611 	if (ccb == NULL)
1612 		return ENOMEM;
1613 	ccb->ccb_len = sizeof(*blink);
1614 	ccb->ccb_data = blink;
1615 	ccb->ccb_xs = NULL;
1616 	cmd = &ccb->ccb_cmd;
1617 	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
1618 	cmd->tgt2 = 0;
1619 	cmd->cdblen = 10;
1620 	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
1621 	cmd->tmo = htole16(0);
1622 	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
1623 	cmd->cdb[0] = CISS_CMD_CTRL_SET;
1624 	cmd->cdb[6] = CISS_CMS_CTRL_PDBLINK;
1625 	cmd->cdb[7] = sizeof(*blink) >> 8;	/* biiiig endian */
1626 	cmd->cdb[8] = sizeof(*blink) & 0xff;
1627 
1628 	return ciss_cmd(sc, ccb, BUS_DMA_NOWAIT, XS_CTL_POLL);
1629 }
1630 
1631 int
ciss_create_sensors(struct ciss_softc * sc)1632 ciss_create_sensors(struct ciss_softc *sc)
1633 {
1634 	int			i;
1635 	int nsensors = sc->maxunits;
1636 
1637 	if (nsensors == 0) {
1638 		return 0;
1639 	}
1640 
1641 	sc->sc_sme = sysmon_envsys_create();
1642 	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
1643 		M_DEVBUF, M_WAITOK | M_ZERO);
1644 
1645 	for (i = 0; i < nsensors; i++) {
1646 		sc->sc_sensor[i].units = ENVSYS_DRIVE;
1647 		sc->sc_sensor[i].state = ENVSYS_SINVALID;
1648 		sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY;
1649 		/* Enable monitoring for drive state changes */
1650 		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
1651 		/* logical drives */
1652 		snprintf(sc->sc_sensor[i].desc,
1653 		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
1654 		    device_xname(sc->sc_dev), i);
1655 		if (sysmon_envsys_sensor_attach(sc->sc_sme,
1656 		    &sc->sc_sensor[i]))
1657 			goto out;
1658 	}
1659 
1660 	sc->sc_sme->sme_name = device_xname(sc->sc_dev);
1661 	sc->sc_sme->sme_cookie = sc;
1662 	sc->sc_sme->sme_refresh = ciss_sensor_refresh;
1663 	if (sysmon_envsys_register(sc->sc_sme)) {
1664 		printf("%s: unable to register with sysmon\n",
1665 		    device_xname(sc->sc_dev));
1666 		return(1);
1667 	}
1668 	return (0);
1669 
1670 out:
1671 	free(sc->sc_sensor, M_DEVBUF);
1672 	sysmon_envsys_destroy(sc->sc_sme);
1673 	return EINVAL;
1674 }
1675 
1676 void
1677 ciss_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
1678 {
1679 	struct ciss_softc	*sc = sme->sme_cookie;
1680 	struct bioc_vol		bv;
1681 
1682 	if (edata->sensor >= sc->maxunits)
1683 		return;
1684 
1685 	memset(&bv, 0, sizeof(bv));
1686 	bv.bv_volid = edata->sensor;
1687 	if (ciss_ioctl_vol(sc, &bv))
1688 		bv.bv_status = BIOC_SVINVALID;
1689 
1690 	bio_vol_to_envsys(edata, &bv);
1691 }
1692 #endif /* NBIO > 0 */
1693