xref: /openbsd-src/sys/dev/ic/twe.c (revision 3a3fbb3f2e2521ab7c4a56b7ff7462ebd9095ec5)
1 /*	$OpenBSD: twe.c,v 1.15 2001/12/06 09:30:31 mickey Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2001 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Michael Shalayeff.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /* #define	TWE_DEBUG */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/buf.h>
41 #include <sys/device.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 
45 #include <machine/bus.h>
46 
47 #include <scsi/scsi_all.h>
48 #include <scsi/scsi_disk.h>
49 #include <scsi/scsiconf.h>
50 
51 #include <dev/ic/twereg.h>
52 #include <dev/ic/twevar.h>
53 
54 #ifdef TWE_DEBUG
55 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
56 #define	TWE_D_CMD	0x0001
57 #define	TWE_D_INTR	0x0002
58 #define	TWE_D_MISC	0x0004
59 #define	TWE_D_DMA	0x0008
60 #define	TWE_D_AEN	0x0010
61 int twe_debug = 0;
62 #else
63 #define	TWE_DPRINTF(m,a)	/* m, a */
64 #endif
65 
/* autoconf(9) glue: device class for "twe" controllers */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

int	twe_scsi_cmd __P((struct scsi_xfer *));

/* entry points handed to the generic SCSI midlayer */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};

/* no per-device callbacks are needed for the emulated targets */
struct scsi_device twe_dev = {
	NULL, NULL, NULL, NULL
};

/* internal helpers; see function headers below */
static __inline struct twe_ccb *twe_get_ccb __P((struct twe_softc *sc));
static __inline void twe_put_ccb __P((struct twe_ccb *ccb));
void twe_dispose __P((struct twe_softc *sc));
int  twe_cmd __P((struct twe_ccb *ccb, int flags, int wait));
int  twe_start __P((struct twe_ccb *ccb, int wait));
int  twe_complete __P((struct twe_ccb *ccb));
int  twe_done __P((struct twe_softc *sc, int idx));
void twe_copy_internal_data __P((struct scsi_xfer *xs, void *v, size_t size));

89 
90 static __inline struct twe_ccb *
91 twe_get_ccb(sc)
92 	struct twe_softc *sc;
93 {
94 	struct twe_ccb *ccb;
95 
96 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
97 	if (ccb)
98 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
99 	return ccb;
100 }
101 
102 static __inline void
103 twe_put_ccb(ccb)
104 	struct twe_ccb *ccb;
105 {
106 	struct twe_softc *sc = ccb->ccb_sc;
107 
108 	ccb->ccb_state = TWE_CCB_FREE;
109 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
110 }
111 
112 void
113 twe_dispose(sc)
114 	struct twe_softc *sc;
115 {
116 	register struct twe_ccb *ccb;
117 	if (sc->sc_cmdmap != NULL) {
118 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
119 		/* traverse the ccbs and destroy the maps */
120 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
121 			if (ccb->ccb_dmamap)
122 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
123 	}
124 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
125 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
126 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
127 }
128 
/*
 * Bring up the controller: allocate and map the shared command area,
 * carve it into ccbs, soft-reset the card, drain its attention (AEN)
 * queue, probe the configured units and attach the SCSI bus.
 * Returns 0 on success, 1 on any failure (resources released).
 */
int
twe_attach(sc)
	struct twe_softc *sc;
{
	/* this includes a buffer for drive config req, and a capacity req */
	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	struct twe_param *pb = (void *)
	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
	struct twe_ccb	*ccb;
	struct twe_cmd	*cmd;
	u_int32_t	status;
	int		error, i, retry, nunits, nseg;
	const char	*errstr;
	twe_lock_t	lock;
	paddr_t		pa;

	/*
	 * one page-aligned dma area holds all TWE_MAXCMDS command
	 * structures; it is mapped into the kernel and loaded into a
	 * single dmamap so each command's physical address is known.
	 */
	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot allocate commands (%d)\n", error);
		return (1);
	}

	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot map commands (%d)\n", error);
		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
		return (1);
	}

	error = bus_dmamap_create(sc->dmat,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
	if (error) {
		printf(": cannot create ccb cmd dmamap (%d)\n", error);
		twe_dispose(sc);
		return (1);
	}
	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot load command dma map (%d)\n", error);
		twe_dispose(sc);
		return (1);
	}

	TAILQ_INIT(&sc->sc_ccb2q);	/* pre-queued (awaiting card slot) */
	TAILQ_INIT(&sc->sc_ccbq);	/* queued on the card */
	TAILQ_INIT(&sc->sc_free_ccb);	/* available ccbs */

	/*
	 * walk the command area backwards, pairing each command with a
	 * ccb, its physical address and a per-ccb data dmamap, and put
	 * every ccb on the free list.
	 * NOTE(review): the stray second ';' below is harmless.
	 */
	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);;
	for (cmd = sc->sc_cmds + sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {

		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
		ccb = &sc->sc_ccbs[cmd->cmd_index];
		error = bus_dmamap_create(sc->dmat,
		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf(": cannot create ccb dmamap (%d)\n", error);
			twe_dispose(sc);
			return (1);
		}
		ccb->ccb_sc = sc;
		ccb->ccb_cmd = cmd;
		ccb->ccb_cmdpa = pa;
		ccb->ccb_state = TWE_CCB_FREE;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	}

	/*
	 * reset/initialize the card, retrying a few times.  each pass:
	 * wait for the cpu, soft-reset with interrupts masked, wait for
	 * attention, then drain the AEN queue until we have seen the
	 * soft-reset AEN and the queue reports empty.
	 */
	for (errstr = NULL, retry = 3; retry--; ) {
		int		veseen_srst;
		u_int16_t	aen;

		if (errstr)
			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));

		/* up to ~35s for the on-board cpu to come ready */
		for (i = 350000; i--; DELAY(100)) {
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			if (status & TWE_STAT_CPURDY)
				break;
		}

		if (!(status & TWE_STAT_CPURDY)) {
			errstr = ": card CPU is not ready\n";
			continue;
		}

		/* soft reset, disable ints */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_SRST |
		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
		    TWE_CTRL_MINT);

		/* wait for the card to raise attention after the reset */
		for (i = 350000; i--; DELAY(100)) {
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			if (status & TWE_STAT_ATTNI)
				break;
		}

		if (!(status & TWE_STAT_ATTNI)) {
			errstr = ": cannot get card's attention\n";
			continue;
		}

		/* drain aen queue */
		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {

			/*
			 * NOTE(review): this `continue' retries the AEN
			 * loop itself; if the free list stayed empty it
			 * would spin — in practice ccbs are returned by
			 * completed polled commands.
			 */
			if ((ccb = twe_get_ccb(sc)) == NULL) {
				errstr = ": out of ccbs\n";
				continue;
			}

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_param.count = 1;

			/* get-param: AEN table, 2-byte event code */
			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;

			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
				errstr = ": error draining attention queue\n";
				break;
			}
			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
			if (aen == TWE_AEN_SRST)
				veseen_srst++;
		}

		/* the soft-reset AEN must have been observed */
		if (!veseen_srst) {
			errstr = ": we don't get it\n";
			continue;
		}

		if (status & TWE_STAT_CPUERR) {
			errstr = ": card CPU error detected\n";
			continue;
		}

		if (status & TWE_STAT_PCIPAR) {
			errstr = ": PCI parity error detected\n";
			continue;
		}

		if (status & TWE_STAT_QUEUEE ) {
			errstr = ": queuing error detected\n";
			continue;
		}

		if (status & TWE_STAT_PCIABR) {
			errstr = ": PCI abort\n";
			continue;
		}

		/* flush any stale entries off the ready queue */
		while (!(status & TWE_STAT_RQE)) {
			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		}

		break;
	}

	if (retry < 0) {
		/*
		 * NOTE(review): errstr is passed as a format string; all
		 * the strings assigned above contain no '%' so this is
		 * safe today, but printf("%s", errstr) would be safer.
		 */
		printf(errstr);
		twe_dispose(sc);
		return 1;
	}

	/* fetch the unit-configuration table: one byte per unit */
	if ((ccb = twe_get_ccb(sc)) == NULL) {
		printf(": out of ccbs\n");
		twe_dispose(sc);
		return 1;
	}

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd = ccb->ccb_cmd;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_UC;
	pb->param_id = TWE_PARAM_UC;
	pb->param_size = TWE_MAX_UNITS;
	if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
		printf(": failed to fetch unit parameters\n");
		twe_dispose(sc);
		return 1;
	}

	/* we are assuming last read status was good */
	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));

	/* for each configured unit, get its capacity and fake a geometry */
	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
		if (pb->data[i] == 0)
			continue;

		if ((ccb = twe_get_ccb(sc)) == NULL) {
			printf(": out of ccbs\n");
			twe_dispose(sc);
			return 1;
		}

		ccb->ccb_xs = NULL;
		ccb->ccb_data = cap;
		ccb->ccb_length = TWE_SECTOR_SIZE;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
		cmd->cmd_op = TWE_CMD_GPARAM;
		cmd->cmd_param.count = 1;

		cap->table_id = TWE_PARAM_UI + i;
		cap->param_id = 4;
		cap->param_size = 4;	/* 4 bytes */
		lock = TWE_LOCK_TWE(sc);
		if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
			TWE_UNLOCK_TWE(sc, lock);
			printf("%s: error fetching capacity for unit %d\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}
		TWE_UNLOCK_TWE(sc, lock);

		nunits++;
		sc->sc_hdr[i].hd_present = 1;
		sc->sc_hdr[i].hd_devtype = 0;
		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
		/* this is evil. they never learn */
		if (sc->sc_hdr[i].hd_size > 0x200000) {
			sc->sc_hdr[i].hd_secs = 63;
			sc->sc_hdr[i].hd_heads = 255;
		} else {
			sc->sc_hdr[i].hd_secs = 32;
			sc->sc_hdr[i].hd_heads = 64;
		}
		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d secs=%d heads=%d\n",
		    i, sc->sc_hdr[i].hd_size, sc->sc_hdr[i].hd_secs,
		    sc->sc_hdr[i].hd_heads));
	}

	/* avoid a zero divisor in the openings computation below */
	if (!nunits)
		nunits++;

	/* TODO: fetch & print cache params? */

	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter = &twe_switch;
	sc->sc_link.adapter_target = TWE_MAX_UNITS;
	sc->sc_link.device = &twe_dev;
	sc->sc_link.openings = TWE_MAXCMDS / nunits;
	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;

	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);

	return 0;
}
418 
/*
 * Set up and launch one controller command.
 *
 * If the data buffer is not TWE_ALIGN-aligned it is first copied into
 * a freshly allocated aligned bounce buffer (the caller's pointer is
 * parked in ccb_realdata so twe_done() can copy results back).  The
 * buffer is dma-loaded, the segment list is written into the command,
 * maps are synced and the ccb is handed to twe_start().  With `wait'
 * set the command is additionally polled to completion.
 *
 * Returns 0 on success; on any failure the ccb and bounce buffer have
 * already been released.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* unaligned caller buffer: set up a bounce buffer */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			twe_put_ccb(ccb);
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			twe_put_ccb(ccb);
			return (ENOMEM);
		}
		/* preload for writes; reads are copied back in twe_done() */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			twe_put_ccb(ccb);
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			/* segments are little-endian addr/len pairs */
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* make the command structure itself visible to the card */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		twe_put_ccb(ccb);
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
531 
532 int
533 twe_start(ccb, wait)
534 	struct twe_ccb *ccb;
535 	int wait;
536 {
537 	struct twe_softc*sc = ccb->ccb_sc;
538 	struct twe_cmd	*cmd = ccb->ccb_cmd;
539 	u_int32_t	status;
540 	int i;
541 
542 	cmd->cmd_op = htole16(cmd->cmd_op);
543 
544 	if (!wait) {
545 
546 		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
547 		ccb->ccb_state = TWE_CCB_PREQUEUED;
548 		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
549 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
550 		    TWE_CTRL_ECMDI);
551 		return 0;
552 	}
553 
554 	for (i = 1000; i--; DELAY(10)) {
555 
556 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
557 		if (!(status & TWE_STAT_CQF))
558 			break;
559 		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
560 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
561 	}
562 
563 	if (!(status & TWE_STAT_CQF)) {
564 		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
565 		    ccb->ccb_cmdpa);
566 
567 		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
568 		ccb->ccb_state = TWE_CCB_QUEUED;
569 		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
570 		return 0;
571 
572 	} else {
573 
574 		printf("%s: twe_start(%d) timed out\n",
575 		    sc->sc_dev.dv_xname, cmd->cmd_index);
576 
577 		return 1;
578 	}
579 }
580 
/*
 * Poll the ready queue until this ccb completes or the timeout
 * (derived from the xfer's timeout, or ~35s when none) expires.
 * Completions for other outstanding commands encountered along the
 * way are processed too, via twe_done().  Returns 0 when this ccb
 * completed, 1 on timeout.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	u_int32_t	status;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* drain everything the card has finished */
		while (!(status & TWE_STAT_RQE)) {
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			/*
			 * twe_done() put the ccb back on the free list
			 * (state TWE_CCB_FREE) if it was ours
			 */
			if (!twe_done(sc, TWE_READYID(ready)) &&
			    ccb->ccb_state == TWE_CCB_FREE) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
617 
/*
 * Finish the command whose index came off the ready queue: sync and
 * unload its data dma map (direction chosen from the SCSI xfer flags
 * or, for internal commands, from the opcode), copy bounce-buffer
 * contents back to the caller, return the ccb to the free list and
 * call scsi_done() if a SCSI xfer was attached.
 * Returns 0 on success, 1 if the ccb was not in the queued state.
 */
int
twe_done(sc, idx)
	struct twe_softc *sc;
	int	idx;
{
	struct twe_ccb *ccb = &sc->sc_ccbs[idx];
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", idx));

	if (ccb->ccb_state != TWE_CCB_QUEUED) {
		printf("%s: unqueued ccb %d ready\n",
		    sc->sc_dev.dv_xname, idx);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* these two opcodes never had data mapped for dma */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: direction follows the twe opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* bounce buffer in use: copy results back and release it */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK_TWE(sc);
	TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
	twe_put_ccb(ccb);
	TWE_UNLOCK_TWE(sc, lock);

	if (xs) {
		xs->resid = 0;
		xs->flags |= ITSDONE;
		scsi_done(xs);
	}

	return 0;
}
685 void
686 tweminphys(bp)
687 	struct buf *bp;
688 {
689 	if (bp->b_bcount > TWE_MAXFER)
690 		bp->b_bcount = TWE_MAXFER;
691 	minphys(bp);
692 }
693 
694 void
695 twe_copy_internal_data(xs, v, size)
696 	struct scsi_xfer *xs;
697 	void *v;
698 	size_t size;
699 {
700 	size_t copy_cnt;
701 
702 	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
703 
704 	if (!xs->datalen)
705 		printf("uio move is not yet supported\n");
706 	else {
707 		copy_cnt = MIN(size, xs->datalen);
708 		bcopy(v, xs->data, copy_cnt);
709 	}
710 }
711 
/*
 * SCSI command entry point.  Each controller unit is presented as a
 * direct-access target; most informational opcodes are emulated in
 * software here, while reads, writes and cache syncs are translated
 * into native twe commands.
 */
int
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct {
		struct scsi_mode_header hd;
		struct scsi_blk_desc bd;
		union scsi_disk_pages dp;
	} mpd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags;
	twe_lock_t lock;


	/* only lun 0 of present units is addressable */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		/* always succeed; the unit was validated above */
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* fabricate a "no sense" reply */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* fabricate inquiry data from the stored unit header */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strcpy(inq.vendor, "3WARE  ");
		sprintf(inq.product, "Host drive  #%02d", target);
		strcpy(inq.revision, "   ");
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case MODE_SENSE:
		TWE_DPRINTF(TWE_D_CMD, ("MODE SENSE tgt %d ", target));

		bzero(&mpd, sizeof mpd);
		switch (((struct scsi_mode_sense *)xs->cmd)->page) {
		case 4:
			/* rigid disk geometry page, built from the fake
			 * geometry computed in twe_attach() */
			/* scsi_disk.h says this should be 0x16 */
			mpd.dp.rigid_geometry.pg_length = 0x16;
			mpd.hd.data_length = sizeof mpd.hd + sizeof mpd.bd +
			    mpd.dp.rigid_geometry.pg_length;
			mpd.hd.blk_desc_len = sizeof mpd.bd;

			/* XXX */
			mpd.hd.dev_spec =
			    (sc->sc_hdr[target].hd_devtype & 2) ? 0x80 : 0;
			_lto3b(TWE_SECTOR_SIZE, mpd.bd.blklen);
			mpd.dp.rigid_geometry.pg_code = 4;
			_lto3b(sc->sc_hdr[target].hd_size /
			    sc->sc_hdr[target].hd_heads /
			    sc->sc_hdr[target].hd_secs,
			    mpd.dp.rigid_geometry.ncyl);
			mpd.dp.rigid_geometry.nheads =
			    sc->sc_hdr[target].hd_heads;
			twe_copy_internal_data(xs, (u_int8_t *)&mpd,
			    sizeof mpd);
			break;

		default:
			printf("%s: mode sense page %d not simulated\n",
			    sc->sc_dev.dv_xname,
			    ((struct scsi_mode_sense *)xs->cmd)->page);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		/* last addressable block, not the block count */
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		return (COMPLETE);

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		/* translated to a native twe command */
		lock = TWE_LOCK_TWE(sc);

		flags = 0;
		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			/*
			 * NOTE(review): blockno + blockcnt could in
			 * principle wrap a u_int32_t; the first test
			 * bounds blockno itself — confirm whether the
			 * sum can wrap for valid cdbs.
			 */
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				TWE_UNLOCK_TWE(sc, lock);
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		if ((ccb = twe_get_ccb(sc)) == NULL) {
			TWE_UNLOCK_TWE(sc, lock);
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return (COMPLETE);
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);

		/* poll if the midlayer asked; otherwise queue and let
		 * the interrupt handler finish it */
		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), xs->flags & SCSI_POLL))) {

			TWE_UNLOCK_TWE(sc, lock);
			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			if (xs->flags & SCSI_POLL) {
				xs->error = XS_TIMEOUT;
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		TWE_UNLOCK_TWE(sc, lock);

		if (xs->flags & SCSI_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unknown opc %d ", xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	return (COMPLETE);
}
923 
/*
 * Interrupt handler.  Three sources are serviced: "command ready"
 * (feed pre-queued ccbs to the card while its queue has room),
 * "ready" (drain completed commands through twe_done()) and
 * "attention" (drain the card's AEN queue with polled GPARAM
 * commands).  Returns non-zero if any work was done.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb	*ccb;
	struct twe_cmd	*cmd;
	u_int32_t	status;
	twe_lock_t	lock;
	int		rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_CMDI) {

		/* push pre-queued commands while the card has room */
		lock = TWE_LOCK_TWE(sc);
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* nothing left to feed: mask the command interrupt */
		if (TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_MCMDI);

		TWE_UNLOCK_TWE(sc, lock);
	}

	if (status & TWE_STAT_RDYI) {

		/* drain every completed command off the ready queue */
		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			if (!twe_done(sc, TWE_READYID(ready)))
				rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_ATTNI) {
		u_int16_t aen;

		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		/* drain the AEN queue with polled GPARAM commands,
		 * using a stack bounce buffer like twe_attach() does */
		lock = TWE_LOCK_TWE(sc);
		for (aen = -1; aen != TWE_AEN_QEMPTY; ) {
			u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
			struct twe_param *pb = (void *) (((u_long)param_buf +
			    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));

			if ((ccb = twe_get_ccb(sc)) == NULL)
				break;

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_flags = 0;
			cmd->cmd_param.count = 1;

			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;
			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
				printf(": error draining attention queue\n");
				break;
			}
			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
		}
		TWE_UNLOCK_TWE(sc, lock);
	}

	return rv;
}
1044