xref: /openbsd-src/sys/dev/ic/twe.c (revision b2ea75c1b17e1a9a339660e7ed45cd24946b230e)
1 /*	$OpenBSD: twe.c,v 1.12 2001/07/04 22:53:24 espie Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2001 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Michael Shalayeff.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /* #define	TWE_DEBUG */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/buf.h>
41 #include <sys/device.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 
45 #include <machine/bus.h>
46 
47 #include <scsi/scsi_all.h>
48 #include <scsi/scsi_disk.h>
49 #include <scsi/scsiconf.h>
50 
51 #include <dev/ic/twereg.h>
52 #include <dev/ic/twevar.h>
53 
54 #ifdef TWE_DEBUG
55 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
56 #define	TWE_D_CMD	0x0001
57 #define	TWE_D_INTR	0x0002
58 #define	TWE_D_MISC	0x0004
59 #define	TWE_D_DMA	0x0008
60 #define	TWE_D_AEN	0x0010
61 int twe_debug = 0xffff;
62 #else
63 #define	TWE_DPRINTF(m,a)	/* m, a */
64 #endif
65 
/* Autoconf glue: device class description for twe(4). */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

int	twe_scsi_cmd __P((struct scsi_xfer *));

/* SCSI midlayer entry points for the emulated bus presented by the card. */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};

/* Per-device callbacks; the emulated drives need none. */
struct scsi_device twe_dev = {
	NULL, NULL, NULL, NULL
};

/* Internal helpers; see the definitions below for per-function contracts. */
static __inline struct twe_ccb *twe_get_ccb __P((struct twe_softc *sc));
static __inline void twe_put_ccb __P((struct twe_ccb *ccb));
void twe_dispose __P((struct twe_softc *sc));
int  twe_cmd __P((struct twe_ccb *ccb, int flags, int wait));
int  twe_start __P((struct twe_ccb *ccb, int wait));
int  twe_complete __P((struct twe_ccb *ccb));
int  twe_done __P((struct twe_softc *sc, int idx));
void twe_copy_internal_data __P((struct scsi_xfer *xs, void *v, size_t size));
88 
89 
90 static __inline struct twe_ccb *
91 twe_get_ccb(sc)
92 	struct twe_softc *sc;
93 {
94 	struct twe_ccb *ccb;
95 
96 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
97 	if (ccb)
98 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
99 	return ccb;
100 }
101 
102 static __inline void
103 twe_put_ccb(ccb)
104 	struct twe_ccb *ccb;
105 {
106 	struct twe_softc *sc = ccb->ccb_sc;
107 
108 	ccb->ccb_state = TWE_CCB_FREE;
109 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
110 }
111 
112 void
113 twe_dispose(sc)
114 	struct twe_softc *sc;
115 {
116 	register struct twe_ccb *ccb;
117 	if (sc->sc_cmdmap != NULL) {
118 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
119 		/* traverse the ccbs and destroy the maps */
120 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
121 			if (ccb->ccb_dmamap)
122 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
123 	}
124 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
125 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
126 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
127 }
128 
129 int
130 twe_attach(sc)
131 	struct twe_softc *sc;
132 {
133 	/* this includes a buffer for drive config req, and a capacity req */
134 	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
135 	struct twe_param *pb = (void *)
136 	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
137 	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
138 	struct twe_ccb	*ccb;
139 	struct twe_cmd	*cmd;
140 	u_int32_t	status;
141 	int		error, i, retry, nunits, nseg;
142 	const char	*errstr;
143 	twe_lock_t	lock;
144 
145 	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
146 	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
147 	if (error) {
148 		printf(": cannot allocate commands (%d)\n", error);
149 		return (1);
150 	}
151 
152 	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
153 	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
154 	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
155 	if (error) {
156 		printf(": cannot map commands (%d)\n", error);
157 		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
158 		return (1);
159 	}
160 
161 	error = bus_dmamap_create(sc->dmat,
162 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
163 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
164 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
165 	if (error) {
166 		printf(": cannot create ccb cmd dmamap (%d)\n", error);
167 		twe_dispose(sc);
168 		return (1);
169 	}
170 	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
171 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
172 	if (error) {
173 		printf(": cannot load command dma map (%d)\n", error);
174 		twe_dispose(sc);
175 		return (1);
176 	}
177 
178 	TAILQ_INIT(&sc->sc_ccb2q);
179 	TAILQ_INIT(&sc->sc_ccbq);
180 	TAILQ_INIT(&sc->sc_free_ccb);
181 
182 	for (cmd = sc->sc_cmds + sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
183 	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--) {
184 
185 		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
186 		ccb = &sc->sc_ccbs[cmd->cmd_index];
187 		error = bus_dmamap_create(sc->dmat,
188 		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
189 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
190 		if (error) {
191 			printf(": cannot create ccb dmamap (%d)\n", error);
192 			twe_dispose(sc);
193 			return (1);
194 		}
195 		ccb->ccb_sc = sc;
196 		ccb->ccb_cmd = cmd;
197 		ccb->ccb_state = TWE_CCB_FREE;
198 		ccb->ccb_cmdpa = kvtop((caddr_t)cmd);
199 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
200 	}
201 
202 	for (errstr = NULL, retry = 3; retry--; ) {
203 		int		veseen_srst;
204 		u_int16_t	aen;
205 
206 		if (errstr)
207 			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
208 
209 		for (i = 350000; i--; DELAY(100)) {
210 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
211 			if (status & TWE_STAT_CPURDY)
212 				break;
213 		}
214 
215 		if (!(status & TWE_STAT_CPURDY)) {
216 			errstr = ": card CPU is not ready\n";
217 			continue;
218 		}
219 
220 		/* soft reset, disable ints */
221 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
222 		    TWE_CTRL_SRST |
223 		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
224 		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
225 		    TWE_CTRL_MINT);
226 
227 		for (i = 350000; i--; DELAY(100)) {
228 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
229 			if (status & TWE_STAT_ATTNI)
230 				break;
231 		}
232 
233 		if (!(status & TWE_STAT_ATTNI)) {
234 			errstr = ": cannot get card's attention\n";
235 			continue;
236 		}
237 
238 		/* drain aen queue */
239 		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
240 
241 			if ((ccb = twe_get_ccb(sc)) == NULL) {
242 				errstr = ": out of ccbs\n";
243 				continue;
244 			}
245 
246 			ccb->ccb_xs = NULL;
247 			ccb->ccb_data = pb;
248 			ccb->ccb_length = TWE_SECTOR_SIZE;
249 			ccb->ccb_state = TWE_CCB_READY;
250 			cmd = ccb->ccb_cmd;
251 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
252 			cmd->cmd_op = TWE_CMD_GPARAM;
253 			cmd->cmd_param.count = 1;
254 
255 			pb->table_id = TWE_PARAM_AEN;
256 			pb->param_id = 2;
257 			pb->param_size = 2;
258 
259 			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
260 				errstr = ": error draining attention queue\n";
261 				break;
262 			}
263 			aen = *(u_int16_t *)pb->data;
264 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
265 			if (aen == TWE_AEN_SRST)
266 				veseen_srst++;
267 		}
268 
269 		if (!veseen_srst) {
270 			errstr = ": we don't get it\n";
271 			continue;
272 		}
273 
274 		if (status & TWE_STAT_CPUERR) {
275 			errstr = ": card CPU error detected\n";
276 			continue;
277 		}
278 
279 		if (status & TWE_STAT_PCIPAR) {
280 			errstr = ": PCI parity error detected\n";
281 			continue;
282 		}
283 
284 		if (status & TWE_STAT_QUEUEE ) {
285 			errstr = ": queuing error detected\n";
286 			continue;
287 		}
288 
289 		if (status & TWE_STAT_PCIABR) {
290 			errstr = ": PCI abort\n";
291 			continue;
292 		}
293 
294 		while (!(status & TWE_STAT_RQE)) {
295 			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
296 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
297 		}
298 
299 		break;
300 	}
301 
302 	if (retry < 0) {
303 		printf(errstr);
304 		twe_dispose(sc);
305 		return 1;
306 	}
307 
308 	if ((ccb = twe_get_ccb(sc)) == NULL) {
309 		printf(": out of ccbs\n");
310 		twe_dispose(sc);
311 		return 1;
312 	}
313 
314 	ccb->ccb_xs = NULL;
315 	ccb->ccb_data = pb;
316 	ccb->ccb_length = TWE_SECTOR_SIZE;
317 	ccb->ccb_state = TWE_CCB_READY;
318 	cmd = ccb->ccb_cmd;
319 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
320 	cmd->cmd_op = TWE_CMD_GPARAM;
321 	cmd->cmd_param.count = 1;
322 
323 	pb->table_id = TWE_PARAM_UC;
324 	pb->param_id = TWE_PARAM_UC;
325 	pb->param_size = TWE_MAX_UNITS;
326 	if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
327 		printf(": failed to fetch unit parameters\n");
328 		twe_dispose(sc);
329 		return 1;
330 	}
331 
332 	/* we are assuming last read status was good */
333 	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
334 
335 	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
336 		if (pb->data[i] == 0)
337 			continue;
338 
339 		if ((ccb = twe_get_ccb(sc)) == NULL) {
340 			printf(": out of ccbs\n");
341 			twe_dispose(sc);
342 			return 1;
343 		}
344 
345 		ccb->ccb_xs = NULL;
346 		ccb->ccb_data = cap;
347 		ccb->ccb_length = TWE_SECTOR_SIZE;
348 		ccb->ccb_state = TWE_CCB_READY;
349 		cmd = ccb->ccb_cmd;
350 		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
351 		cmd->cmd_op = TWE_CMD_GPARAM;
352 		cmd->cmd_param.count = 1;
353 
354 		cap->table_id = TWE_PARAM_UI + i;
355 		cap->param_id = 4;
356 		cap->param_size = 4;	/* 4 bytes */
357 		lock = TWE_LOCK_TWE(sc);
358 		if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
359 			TWE_UNLOCK_TWE(sc, lock);
360 			printf("%s: error fetching capacity for unit %d\n",
361 			    sc->sc_dev.dv_xname, i);
362 			continue;
363 		}
364 		TWE_UNLOCK_TWE(sc, lock);
365 
366 		nunits++;
367 		sc->sc_hdr[i].hd_present = 1;
368 		sc->sc_hdr[i].hd_devtype = 0;
369 		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
370 		/* this is evil. they never learn */
371 		if (sc->sc_hdr[i].hd_size > 0x200000) {
372 			sc->sc_hdr[i].hd_secs = 63;
373 			sc->sc_hdr[i].hd_heads = 255;
374 		} else {
375 			sc->sc_hdr[i].hd_secs = 32;
376 			sc->sc_hdr[i].hd_heads = 64;
377 		}
378 		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d secs=%d heads=%d\n",
379 		    i, sc->sc_hdr[i].hd_size, sc->sc_hdr[i].hd_secs,
380 		    sc->sc_hdr[i].hd_heads));
381 	}
382 
383 	if (!nunits)
384 		nunits++;
385 
386 	/* TODO: fetch & print cache params? */
387 
388 	sc->sc_link.adapter_softc = sc;
389 	sc->sc_link.adapter = &twe_switch;
390 	sc->sc_link.adapter_target = TWE_MAX_UNITS;
391 	sc->sc_link.device = &twe_dev;
392 	sc->sc_link.openings = TWE_MAXCMDS / nunits;
393 	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
394 
395 	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
396 
397 	/* enable interrupts */
398 	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL, TWE_CTRL_EINT |
399 	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_CATTNI | TWE_CTRL_ERDYI);
400 
401 	return 0;
402 }
403 
/*
 * Prepare and submit one command.  If the data buffer is not
 * TWE_ALIGN-aligned, a DMA-safe bounce buffer is allocated and the data
 * copied in (and back out again in twe_done()).  On any failure the ccb
 * (and any bounce buffer) is released before returning.  With wait set,
 * polls for completion via twe_complete(); otherwise returns as soon as
 * the command is started.  Returns 0 on success, errno-style otherwise.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* bounce unaligned data through a freshly allocated aligned buffer */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			twe_put_ccb(ccb);
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			twe_put_ccb(ccb);
			return (ENOMEM);
		}
		/* pre-fill the bounce buffer; needed for writes only, but
		 * done unconditionally here */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;	/* no bounce in use */

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			/* unwind the bounce buffer, if any, then the ccb */
			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			twe_put_ccb(ccb);
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			/* copy the scatter/gather list into the command,
			 * little-endian as the card expects */
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, BUS_DMASYNC_PREWRITE);
	}
	/* flush the command packet itself before handing it to the card */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		/* submission failed: unload, drop any bounce, free the ccb */
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		twe_put_ccb(ccb);
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
514 
515 int
516 twe_start(ccb, wait)
517 	struct twe_ccb *ccb;
518 	int wait;
519 {
520 	struct twe_softc*sc = ccb->ccb_sc;
521 	struct twe_cmd	*cmd = ccb->ccb_cmd;
522 	u_int32_t	status;
523 	int i;
524 
525 	cmd->cmd_op = htole16(cmd->cmd_op);
526 
527 	if (!wait) {
528 
529 		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
530 		ccb->ccb_state = TWE_CCB_PREQUEUED;
531 		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
532 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
533 		    TWE_CTRL_ECMDI);
534 		return 0;
535 	}
536 
537 	for (i = 1000; i--; DELAY(10)) {
538 
539 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
540 		if (!(status & TWE_STAT_CQF))
541 			break;
542 		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
543 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
544 	}
545 
546 	if (!(status & TWE_STAT_CQF)) {
547 		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
548 		    ccb->ccb_cmdpa);
549 
550 		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
551 		ccb->ccb_state = TWE_CCB_QUEUED;
552 		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
553 		return 0;
554 
555 	} else {
556 
557 		printf("%s: twe_start(%d) timed out\n",
558 		    sc->sc_dev.dv_xname, cmd->cmd_index);
559 
560 		return 1;
561 	}
562 }
563 
/*
 * Poll for completion of a previously queued ccb.  Any other commands
 * that complete while we poll are retired via twe_done() as a side
 * effect; we are finished once our own ccb transitions to FREE.
 * Times out after xs->timeout ms (35s when there is no scsi_xfer).
 * Returns 0 on completion, 1 on timeout.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	u_int32_t	status;
	int i;

	/* outer loop: i * DELAY(10) approximates the ms timeout budget */
	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* drain the ready queue, retiring whatever has finished */
		while (!(status & TWE_STAT_RQE)) {
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			/* our ccb is done once twe_done() has freed it */
			if (!twe_done(sc, TWE_READYID(ready)) &&
			    ccb->ccb_state == TWE_CCB_FREE) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
600 
/*
 * Retire the command at ccb index idx: sync/unload its DMA map, copy
 * bounce-buffer data back to the caller's buffer if one was used,
 * return the ccb to the free pool, and complete the scsi_xfer (if any)
 * via scsi_done().  Returns 0 on success, 1 if the ccb was not in the
 * QUEUED state (a protocol error with the card).
 */
int
twe_done(sc, idx)
	struct twe_softc *sc;
	int	idx;
{
	struct twe_ccb *ccb = &sc->sc_ccbs[idx];
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", idx));

	if (ccb->ccb_state != TWE_CCB_QUEUED) {
		printf("%s: unqueued ccb %d ready\n",
		    sc->sc_dev.dv_xname, idx);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* these two opcodes are emulated without data transfer,
		 * so there is no map to sync/unload */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap,
			    (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: pick sync direction from the opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* bounce buffer in use: copy results back and release it */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK_TWE(sc);
	TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
	twe_put_ccb(ccb);
	TWE_UNLOCK_TWE(sc, lock);

	if (xs) {
		xs->resid = 0;
		xs->flags |= ITSDONE;
		scsi_done(xs);
	}

	return 0;
}
666 void
667 tweminphys(bp)
668 	struct buf *bp;
669 {
670 	if (bp->b_bcount > TWE_MAXFER)
671 		bp->b_bcount = TWE_MAXFER;
672 	minphys(bp);
673 }
674 
675 void
676 twe_copy_internal_data(xs, v, size)
677 	struct scsi_xfer *xs;
678 	void *v;
679 	size_t size;
680 {
681 	size_t copy_cnt;
682 
683 	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
684 
685 	if (!xs->datalen)
686 		printf("uio move is not yet supported\n");
687 	else {
688 		copy_cnt = MIN(size, xs->datalen);
689 		bcopy(v, xs->data, copy_cnt);
690 	}
691 }
692 
/*
 * SCSI midlayer entry point.  Emulates the informational SCSI commands
 * (INQUIRY, READ CAPACITY, MODE SENSE, REQUEST SENSE, ...) directly
 * from the cached unit table, and translates READ/WRITE/SYNCHRONIZE
 * CACHE into native controller commands via twe_cmd().
 */
int
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct {
		struct scsi_mode_header hd;
		struct scsi_blk_desc bd;
		union scsi_disk_pages dp;
	} mpd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags;
	twe_lock_t lock;


	/* only unit 0..TWE_MAX_UNITS-1, LUN 0, and present drives exist */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		/* nothing to do; drives handled by the card are "ready" */
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* fabricate a "no sense" response */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* fabricate inquiry data from the cached devtype bits */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		/* NOTE(review): these strcpy/sprintf calls write the
		 * terminating NUL one byte past each fixed-width field;
		 * the code appears to rely on the fields being adjacent
		 * and filled in this exact order — verify against
		 * struct scsi_inquiry_data's layout */
		strcpy(inq.vendor, "3WARE  ");
		sprintf(inq.product, "Host drive  #%02d", target);
		strcpy(inq.revision, "   ");
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case MODE_SENSE:
		TWE_DPRINTF(TWE_D_CMD, ("MODE SENSE tgt %d ", target));

		bzero(&mpd, sizeof mpd);
		switch (((struct scsi_mode_sense *)xs->cmd)->page) {
		case 4:
			/* scsi_disk.h says this should be 0x16 */
			mpd.dp.rigid_geometry.pg_length = 0x16;
			mpd.hd.data_length = sizeof mpd.hd + sizeof mpd.bd +
			    mpd.dp.rigid_geometry.pg_length;
			mpd.hd.blk_desc_len = sizeof mpd.bd;

			/* XXX */
			mpd.hd.dev_spec =
			    (sc->sc_hdr[target].hd_devtype & 2) ? 0x80 : 0;
			_lto3b(TWE_SECTOR_SIZE, mpd.bd.blklen);
			mpd.dp.rigid_geometry.pg_code = 4;
			/* cylinders derived from the fake geometry chosen
			 * in twe_attach() */
			_lto3b(sc->sc_hdr[target].hd_size /
			    sc->sc_hdr[target].hd_heads /
			    sc->sc_hdr[target].hd_secs,
			    mpd.dp.rigid_geometry.ncyl);
			mpd.dp.rigid_geometry.nheads =
			    sc->sc_hdr[target].hd_heads;
			twe_copy_internal_data(xs, (u_int8_t *)&mpd,
			    sizeof mpd);
			break;

		default:
			printf("%s: mode sense page %d not simulated\n",
			    sc->sc_dev.dv_xname,
			    ((struct scsi_mode_sense *)xs->cmd)->page);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		/* last addressable block, not the block count */
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		return (COMPLETE);

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK_TWE(sc);

		flags = 0;
		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				/* 6-byte CDB: length 0 means 256 blocks */
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			/* reject transfers extending past the unit's end */
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				TWE_UNLOCK_TWE(sc, lock);
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		if ((ccb = twe_get_ccb(sc)) == NULL) {
			TWE_UNLOCK_TWE(sc, lock);
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return (COMPLETE);
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);

		/* twe_cmd() frees the ccb itself on failure */
		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), xs->flags & SCSI_POLL))) {

			TWE_UNLOCK_TWE(sc, lock);
			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			if (xs->flags & SCSI_POLL) {
				xs->error = XS_TIMEOUT;
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		TWE_UNLOCK_TWE(sc, lock);

		/* polled commands are fully finished by now */
		if (xs->flags & SCSI_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unknown opc %d ", xs->cmd->opcode));
		xs->error = XS_DRIVER_STUFFUP;
	}

	return (COMPLETE);
}
904 
/*
 * Interrupt handler.  Three sources are serviced: command-queue-not-full
 * (submit deferred ccbs from the pre-queue), ready-queue-not-empty
 * (retire completed commands via twe_done()), and attention (drain the
 * card's AEN queue).  Returns non-zero iff any work was done, as the
 * interrupt framework expects.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb	*ccb;
	struct twe_cmd	*cmd;
	u_int32_t	status;
	twe_lock_t	lock;
	int		rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	/* room in the command queue: submit deferred (pre-queued) ccbs */
	if (status & TWE_STAT_CMDI) {

		lock = TWE_LOCK_TWE(sc);
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* nothing left to submit: mask the cmd interrupt */
		if (TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_MCMDI);

		TWE_UNLOCK_TWE(sc, lock);
	}

	/* completions pending: drain the ready queue */
	if (status & TWE_STAT_RDYI) {

		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			if (!twe_done(sc, TWE_READYID(ready)))
				rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	/* attention: acknowledge and drain the card's AEN queue */
	if (status & TWE_STAT_ATTNI) {
		u_int16_t aen;

		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		lock = TWE_LOCK_TWE(sc);
		/* read AEN parameters until the card reports queue-empty */
		for (aen = -1; aen != TWE_AEN_QEMPTY; ) {
			u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
			struct twe_param *pb = (void *) (((u_long)param_buf +
			    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));

			if ((ccb = twe_get_ccb(sc)) == NULL)
				break;

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_flags = 0;
			cmd->cmd_param.count = 1;

			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;
			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
				printf(": error draining attention queue\n");
				break;
			}
			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
		}
		TWE_UNLOCK_TWE(sc, lock);
	}

	return rv;
}
1025