xref: /openbsd-src/sys/dev/ic/twe.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /*	$OpenBSD: twe.c,v 1.30 2009/02/16 21:19:07 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* #define	TWE_DEBUG */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/buf.h>
36 #include <sys/device.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/kthread.h>
41 
42 #include <machine/bus.h>
43 
44 #include <scsi/scsi_all.h>
45 #include <scsi/scsi_disk.h>
46 #include <scsi/scsiconf.h>
47 
48 #include <dev/ic/twereg.h>
49 #include <dev/ic/twevar.h>
50 
51 #ifdef TWE_DEBUG
52 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
53 #define	TWE_D_CMD	0x0001
54 #define	TWE_D_INTR	0x0002
55 #define	TWE_D_MISC	0x0004
56 #define	TWE_D_DMA	0x0008
57 #define	TWE_D_AEN	0x0010
58 int twe_debug = 0;
59 #else
60 #define	TWE_DPRINTF(m,a)	/* m, a */
61 #endif
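/*
 * to get debug output, define TWE_DEBUG above and set twe_debug to a
 * mask of the TWE_D_* flags (e.g. TWE_D_CMD | TWE_D_INTR), either here
 * or at run time (e.g. from ddb).
 */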
62 
63 struct cfdriver twe_cd = {
64 	NULL, "twe", DV_DULL
65 };
66 
67 int	twe_scsi_cmd(struct scsi_xfer *);
68 
69 struct scsi_adapter twe_switch = {
70 	twe_scsi_cmd, tweminphys, 0, 0,
71 };
72 
73 struct scsi_device twe_dev = {
74 	NULL, NULL, NULL, NULL
75 };
76 
77 static __inline struct twe_ccb *twe_get_ccb(struct twe_softc *sc);
78 static __inline void twe_put_ccb(struct twe_ccb *ccb);
79 void twe_dispose(struct twe_softc *sc);
80 int  twe_cmd(struct twe_ccb *ccb, int flags, int wait);
81 int  twe_start(struct twe_ccb *ccb, int wait);
82 int  twe_complete(struct twe_ccb *ccb);
83 int  twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
84 void twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
85 void twe_thread_create(void *v);
86 void twe_thread(void *v);
87 
88 
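/*
 * grab a free ccb off the tail of sc_free_ccb; returns NULL when the
 * pool is exhausted.
 */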
89 static __inline struct twe_ccb *
90 twe_get_ccb(sc)
91 	struct twe_softc *sc;
92 {
93 	struct twe_ccb *ccb;
94 
95 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
96 	if (ccb)
97 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
98 	return ccb;
99 }
100 
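/*
 * mark a ccb free again and return it to the tail of the free list.
 */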
101 static __inline void
102 twe_put_ccb(ccb)
103 	struct twe_ccb *ccb;
104 {
105 	struct twe_softc *sc = ccb->ccb_sc;
106 
107 	ccb->ccb_state = TWE_CCB_FREE;
108 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
109 }
110 
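/*
 * release all DMA resources: the per-ccb maps, the shared command map
 * and the command buffer mapping and memory.
 */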
111 void
112 twe_dispose(sc)
113 	struct twe_softc *sc;
114 {
115 	register struct twe_ccb *ccb;
116 	if (sc->sc_cmdmap != NULL) {
117 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
118 		/* traverse the ccbs and destroy the maps */
119 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
120 			if (ccb->ccb_dmamap)
121 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
122 	}
123 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
124 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
125 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
126 }
127 
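/*
 * controller attachment: allocate and map the command area, build the
 * ccb pool, soft-reset the card and drain its AEN queue, fetch the unit
 * configuration and per-unit capacities, attach the SCSI bus and defer
 * creation of the worker thread.  the bus front-end is expected to have
 * filled in iot, ioh and dmat before calling twe_attach().
 */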
128 int
129 twe_attach(sc)
130 	struct twe_softc *sc;
131 {
132 	struct scsibus_attach_args saa;
133 	/* room for a drive config request and a capacity request, plus TWE_ALIGN slack */
134 	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
135 	struct twe_param *pb = (void *)
136 	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
137 	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
138 	struct twe_ccb	*ccb;
139 	struct twe_cmd	*cmd;
140 	u_int32_t	status;
141 	int		error, i, retry, nunits, nseg;
142 	const char	*errstr;
143 	twe_lock_t	lock;
144 	paddr_t		pa;
145 
146 	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
147 	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
148 	if (error) {
149 		printf(": cannot allocate commands (%d)\n", error);
150 		return (1);
151 	}
152 
153 	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
154 	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
155 	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
156 	if (error) {
157 		printf(": cannot map commands (%d)\n", error);
158 		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
159 		return (1);
160 	}
161 
162 	error = bus_dmamap_create(sc->dmat,
163 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
164 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
165 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
166 	if (error) {
167 		printf(": cannot create ccb cmd dmamap (%d)\n", error);
168 		twe_dispose(sc);
169 		return (1);
170 	}
171 	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
172 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
173 	if (error) {
174 		printf(": cannot load command dma map (%d)\n", error);
175 		twe_dispose(sc);
176 		return (1);
177 	}
178 
179 	TAILQ_INIT(&sc->sc_ccb2q);
180 	TAILQ_INIT(&sc->sc_ccbq);
181 	TAILQ_INIT(&sc->sc_free_ccb);
182 	TAILQ_INIT(&sc->sc_done_ccb);
183 
184 	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
185 	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
186 	for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
187 	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
188 
189 		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
190 		ccb = &sc->sc_ccbs[cmd->cmd_index];
191 		error = bus_dmamap_create(sc->dmat,
192 		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
193 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
194 		if (error) {
195 			printf(": cannot create ccb dmamap (%d)\n", error);
196 			twe_dispose(sc);
197 			return (1);
198 		}
199 		ccb->ccb_sc = sc;
200 		ccb->ccb_cmd = cmd;
201 		ccb->ccb_cmdpa = pa;
202 		ccb->ccb_state = TWE_CCB_FREE;
203 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
204 	}
205 
206 	for (errstr = NULL, retry = 3; retry--; ) {
207 		int		veseen_srst;
208 		u_int16_t	aen;
209 
210 		if (errstr)
211 			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
212 
213 		for (i = 350000; i--; DELAY(100)) {
214 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
215 			if (status & TWE_STAT_CPURDY)
216 				break;
217 		}
218 
219 		if (!(status & TWE_STAT_CPURDY)) {
220 			errstr = ": card CPU is not ready\n";
221 			continue;
222 		}
223 
224 		/* soft reset, disable ints */
225 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
226 		    TWE_CTRL_SRST |
227 		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
228 		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
229 		    TWE_CTRL_MINT);
230 
231 		for (i = 350000; i--; DELAY(100)) {
232 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
233 			if (status & TWE_STAT_ATTNI)
234 				break;
235 		}
236 
237 		if (!(status & TWE_STAT_ATTNI)) {
238 			errstr = ": cannot get card's attention\n";
239 			continue;
240 		}
241 
242 		/* drain aen queue */
243 		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
244 
245 			if ((ccb = twe_get_ccb(sc)) == NULL) {
246 				errstr = ": out of ccbs\n";
247 				continue;
248 			}
249 
250 			ccb->ccb_xs = NULL;
251 			ccb->ccb_data = pb;
252 			ccb->ccb_length = TWE_SECTOR_SIZE;
253 			ccb->ccb_state = TWE_CCB_READY;
254 			cmd = ccb->ccb_cmd;
255 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
256 			cmd->cmd_op = TWE_CMD_GPARAM;
257 			cmd->cmd_param.count = 1;
258 
259 			pb->table_id = TWE_PARAM_AEN;
260 			pb->param_id = 2;
261 			pb->param_size = 2;
262 
263 			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
264 				errstr = ": error draining attention queue\n";
265 				break;
266 			}
267 			aen = *(u_int16_t *)pb->data;
268 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
269 			if (aen == TWE_AEN_SRST)
270 				veseen_srst++;
271 		}
272 
273 		if (!veseen_srst) {
274 			errstr = ": no soft reset AEN received\n";
275 			continue;
276 		}
277 
278 		if (status & TWE_STAT_CPUERR) {
279 			errstr = ": card CPU error detected\n";
280 			continue;
281 		}
282 
283 		if (status & TWE_STAT_PCIPAR) {
284 			errstr = ": PCI parity error detected\n";
285 			continue;
286 		}
287 
288 		if (status & TWE_STAT_QUEUEE) {
289 			errstr = ": queuing error detected\n";
290 			continue;
291 		}
292 
293 		if (status & TWE_STAT_PCIABR) {
294 			errstr = ": PCI abort\n";
295 			continue;
296 		}
297 
298 		while (!(status & TWE_STAT_RQE)) {
299 			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
300 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
301 		}
302 
303 		break;
304 	}
305 
306 	if (retry < 0) {
307 		printf("%s", errstr);
308 		twe_dispose(sc);
309 		return 1;
310 	}
311 
312 	if ((ccb = twe_get_ccb(sc)) == NULL) {
313 		printf(": out of ccbs\n");
314 		twe_dispose(sc);
315 		return 1;
316 	}
317 
318 	ccb->ccb_xs = NULL;
319 	ccb->ccb_data = pb;
320 	ccb->ccb_length = TWE_SECTOR_SIZE;
321 	ccb->ccb_state = TWE_CCB_READY;
322 	cmd = ccb->ccb_cmd;
323 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
324 	cmd->cmd_op = TWE_CMD_GPARAM;
325 	cmd->cmd_param.count = 1;
326 
327 	pb->table_id = TWE_PARAM_UC;
328 	pb->param_id = TWE_PARAM_UC;
329 	pb->param_size = TWE_MAX_UNITS;
330 	if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
331 		printf(": failed to fetch unit parameters\n");
332 		twe_dispose(sc);
333 		return 1;
334 	}
335 
336 	/* we assume the status read in the last iteration above is still valid */
337 	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
338 
339 	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
340 		if (pb->data[i] == 0)
341 			continue;
342 
343 		if ((ccb = twe_get_ccb(sc)) == NULL) {
344 			printf(": out of ccbs\n");
345 			twe_dispose(sc);
346 			return 1;
347 		}
348 
349 		ccb->ccb_xs = NULL;
350 		ccb->ccb_data = cap;
351 		ccb->ccb_length = TWE_SECTOR_SIZE;
352 		ccb->ccb_state = TWE_CCB_READY;
353 		cmd = ccb->ccb_cmd;
354 		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
355 		cmd->cmd_op = TWE_CMD_GPARAM;
356 		cmd->cmd_param.count = 1;
357 
358 		cap->table_id = TWE_PARAM_UI + i;
359 		cap->param_id = 4;
360 		cap->param_size = 4;	/* 4 bytes */
361 		lock = TWE_LOCK(sc);
362 		if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
363 			TWE_UNLOCK(sc, lock);
364 			printf("%s: error fetching capacity for unit %d\n",
365 			    sc->sc_dev.dv_xname, i);
366 			continue;
367 		}
368 		TWE_UNLOCK(sc, lock);
369 
370 		nunits++;
371 		sc->sc_hdr[i].hd_present = 1;
372 		sc->sc_hdr[i].hd_devtype = 0;
373 		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
374 		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
375 		    i, sc->sc_hdr[i].hd_size));
376 	}
377 
378 	if (!nunits)
379 		nunits++;
380 
381 	/* TODO: fetch & print cache params? */
382 
383 	sc->sc_link.adapter_softc = sc;
384 	sc->sc_link.adapter = &twe_switch;
385 	sc->sc_link.adapter_target = TWE_MAX_UNITS;
386 	sc->sc_link.device = &twe_dev;
387 	sc->sc_link.openings = TWE_MAXCMDS / nunits;
388 	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
389 
390 	bzero(&saa, sizeof(saa));
391 	saa.saa_sc_link = &sc->sc_link;
392 
393 	config_found(&sc->sc_dev, &saa, scsiprint);
394 
395 	kthread_create_deferred(twe_thread_create, sc);
396 
397 	return (0);
398 }
399 
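/*
 * deferred from attach: create the worker thread, then acknowledge any
 * stale interrupt sources and enable interrupts on the card.
 */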
400 void
401 twe_thread_create(void *v)
402 {
403 	struct twe_softc *sc = v;
404 
405 	if (kthread_create(twe_thread, sc, &sc->sc_thread,
406 	    "%s", sc->sc_dev.dv_xname)) {
407 		/* TODO disable twe */
408 		printf("%s: failed to create kernel thread, disabled\n",
409 		    sc->sc_dev.dv_xname);
410 		return;
411 	}
412 
413 	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
414 	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
415 	/*
416 	 * ack all interrupt sources before enabling any; this cannot
417 	 * be done in one write since the clear bits appear to be
418 	 * ignored when an enable bit is set in the same operation.
419 	 */
420 	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
421 	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
422 	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
423 	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
424 	/* enable interrupts */
425 	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
426 	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
427 	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
428 }
429 
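/*
 * worker thread: runs at splbio, finishing ccbs that the interrupt
 * handler placed on sc_done_ccb and feeding prequeued ccbs from
 * sc_ccb2q into the controller whenever its command queue has room.
 */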
430 void
431 twe_thread(v)
432 	void *v;
433 {
434 	struct twe_softc *sc = v;
435 	struct twe_ccb *ccb;
436 	twe_lock_t lock;
437 	u_int32_t status;
438 	int err;
439 
440 	splbio();
441 	for (;;) {
442 		lock = TWE_LOCK(sc);
443 
444 		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
445 			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
446 			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
447 			if ((err = twe_done(sc, ccb)))
448 				printf("%s: done failed (%d)\n",
449 				    sc->sc_dev.dv_xname, err);
450 		}
451 
452 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
453 		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
454 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
455 		while (!(status & TWE_STAT_CQF) &&
456 		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {
457 
458 			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
459 			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);
460 
461 			ccb->ccb_state = TWE_CCB_QUEUED;
462 			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
463 			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
464 			    ccb->ccb_cmdpa);
465 
466 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
467 			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
468 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
469 		}
470 
471 		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
472 			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
473 			    TWE_CTRL_ECMDI);
474 
475 		TWE_UNLOCK(sc, lock);
476 		sc->sc_thread_on = 1;
477 		tsleep(sc, PWAIT, "twespank", 0);
478 	}
479 }
480 
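/*
 * prepare and submit one command: bounce-buffer data that is not
 * TWE_ALIGN-aligned, load and sync the DMA maps, fill in the
 * scatter/gather list and hand the ccb to twe_start(); with wait set
 * the command is polled to completion via twe_complete().
 */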
481 int
482 twe_cmd(ccb, flags, wait)
483 	struct twe_ccb *ccb;
484 	int flags, wait;
485 {
486 	struct twe_softc *sc = ccb->ccb_sc;
487 	bus_dmamap_t dmap;
488 	struct twe_cmd *cmd;
489 	struct twe_segs *sgp;
490 	int error, i;
491 
492 	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
493 		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
494 		ccb->ccb_realdata = ccb->ccb_data;
495 
496 		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
497 		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
498 		    BUS_DMA_NOWAIT);
499 		if (error) {
500 			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
501 			twe_put_ccb(ccb);
502 			return (ENOMEM);
503 		}
504 
505 		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
506 		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
507 		if (error) {
508 			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
509 			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
510 			twe_put_ccb(ccb);
511 			return (ENOMEM);
512 		}
513 		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
514 	} else
515 		ccb->ccb_realdata = NULL;
516 
517 	dmap = ccb->ccb_dmamap;
518 	cmd = ccb->ccb_cmd;
519 	cmd->cmd_status = 0;
520 
521 	if (ccb->ccb_data) {
522 		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
523 		    ccb->ccb_length, NULL, flags);
524 		if (error) {
525 			if (error == EFBIG)
526 				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
527 			else
528 				printf("error %d loading dma map\n", error);
529 
530 			if (ccb->ccb_realdata) {
531 				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
532 				    ccb->ccb_length);
533 				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
534 				    ccb->ccb_2nseg);
535 			}
536 			twe_put_ccb(ccb);
537 			return error;
538 		}
539 		/* load addresses into command */
540 		switch (cmd->cmd_op) {
541 		case TWE_CMD_GPARAM:
542 		case TWE_CMD_SPARAM:
543 			sgp = cmd->cmd_param.segs;
544 			break;
545 		case TWE_CMD_READ:
546 		case TWE_CMD_WRITE:
547 			sgp = cmd->cmd_io.segs;
548 			break;
549 		default:
550 			/* no data transfer */
551 			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
552 			    cmd->cmd_op));
553 			sgp = NULL;
554 			break;
555 		}
556 		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
557 		if (sgp) {
558 			/*
559 			 * the command size lives in the upper byte of cmd_op;
560 			 * two extra words per segment cannot overflow it.
561 			 */
562 			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
563 			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
564 			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
565 				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
566 				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
567 				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
568 				    dmap->dm_segs[i].ds_addr,
569 				    dmap->dm_segs[i].ds_len));
570 			}
571 		}
572 		TWE_DPRINTF(TWE_D_DMA, ("> "));
573 		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
574 		    BUS_DMASYNC_PREWRITE);
575 	}
576 	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
577 	    BUS_DMASYNC_PREWRITE);
578 
579 	if ((error = twe_start(ccb, wait))) {
580 		bus_dmamap_unload(sc->dmat, dmap);
581 		if (ccb->ccb_realdata) {
582 			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
583 			    ccb->ccb_length);
584 			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
585 			    ccb->ccb_2nseg);
586 		}
587 		twe_put_ccb(ccb);
588 		return (error);
589 	}
590 
591 	return wait? twe_complete(ccb) : 0;
592 }
593 
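/*
 * queue a command on the controller.  without wait the ccb is
 * prequeued for the worker thread; otherwise the command queue is
 * polled briefly and the command is written directly.
 */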
594 int
595 twe_start(ccb, wait)
596 	struct twe_ccb *ccb;
597 	int wait;
598 {
599 	struct twe_softc*sc = ccb->ccb_sc;
600 	struct twe_cmd	*cmd = ccb->ccb_cmd;
601 	u_int32_t	status;
602 	int i;
603 
604 	cmd->cmd_op = htole16(cmd->cmd_op);
605 
606 	if (!wait) {
607 
608 		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
609 		ccb->ccb_state = TWE_CCB_PREQUEUED;
610 		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
611 		wakeup(sc);
612 		return 0;
613 	}
614 
615 	for (i = 1000; i--; DELAY(10)) {
616 
617 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
618 		if (!(status & TWE_STAT_CQF))
619 			break;
620 		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
621 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
622 	}
623 
624 	if (!(status & TWE_STAT_CQF)) {
625 		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
626 		    ccb->ccb_cmdpa);
627 
628 		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
629 		ccb->ccb_state = TWE_CCB_QUEUED;
630 		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
631 		return 0;
632 
633 	} else {
634 
635 		printf("%s: twe_start(%d) timed out\n",
636 		    sc->sc_dev.dv_xname, cmd->cmd_index);
637 
638 		return 1;
639 	}
640 }
641 
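/*
 * poll the ready queue until the given ccb completes or the timeout
 * (taken from the scsi_xfer when present) expires.
 */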
642 int
643 twe_complete(ccb)
644 	struct twe_ccb *ccb;
645 {
646 	struct twe_softc *sc = ccb->ccb_sc;
647 	struct scsi_xfer *xs = ccb->ccb_xs;
648 	int i;
649 
650 	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
651 		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
652 
653 		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
654 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
655 
656 		while (!(status & TWE_STAT_RQE)) {
657 			struct twe_ccb *ccb1;
658 			u_int32_t ready;
659 
660 			ready = bus_space_read_4(sc->iot, sc->ioh,
661 			    TWE_READYQUEUE);
662 
663 			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));
664 
665 			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
666 			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
667 			ccb1->ccb_state = TWE_CCB_DONE;
668 			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
669 				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
670 				return 0;
671 			}
672 
673 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
674 			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
675 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
676 		}
677 	}
678 
679 	return 1;
680 }
681 
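/*
 * command post-processing: sync and unload the data DMA map, copy any
 * bounce buffer back, return the ccb to the free list and, for SCSI
 * originated commands, call scsi_done().
 */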
682 int
683 twe_done(sc, ccb)
684 	struct twe_softc *sc;
685 	struct twe_ccb *ccb;
686 {
687 	struct twe_cmd *cmd = ccb->ccb_cmd;
688 	struct scsi_xfer *xs = ccb->ccb_xs;
689 	bus_dmamap_t	dmap;
690 	twe_lock_t	lock;
691 
692 	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));
693 
694 	if (ccb->ccb_state != TWE_CCB_DONE) {
695 		printf("%s: undone ccb %d ready\n",
696 		     sc->sc_dev.dv_xname, cmd->cmd_index);
697 		return 1;
698 	}
699 
700 	dmap = ccb->ccb_dmamap;
701 	if (xs) {
702 		if (xs->cmd->opcode != PREVENT_ALLOW &&
703 		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
704 			bus_dmamap_sync(sc->dmat, dmap, 0,
705 			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
706 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
707 			bus_dmamap_unload(sc->dmat, dmap);
708 		}
709 	} else {
710 		switch (letoh16(cmd->cmd_op)) {
711 		case TWE_CMD_GPARAM:
712 		case TWE_CMD_READ:
713 			bus_dmamap_sync(sc->dmat, dmap, 0,
714 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
715 			bus_dmamap_unload(sc->dmat, dmap);
716 			break;
717 		case TWE_CMD_SPARAM:
718 		case TWE_CMD_WRITE:
719 			bus_dmamap_sync(sc->dmat, dmap, 0,
720 			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
721 			bus_dmamap_unload(sc->dmat, dmap);
722 			break;
723 		default:
724 			/* no data */
725 			break;
726 		}
727 	}
728 
729 	if (ccb->ccb_realdata) {
730 		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
731 		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
732 		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
733 	}
734 
735 	lock = TWE_LOCK(sc);
736 	twe_put_ccb(ccb);
737 
738 	if (xs) {
739 		xs->resid = 0;
740 		xs->flags |= ITSDONE;
741 		scsi_done(xs);
742 	}
743 	TWE_UNLOCK(sc, lock);
744 
745 	return 0;
746 }
747 
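/*
 * clamp transfers to the controller limit before the generic minphys.
 */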
748 void
749 tweminphys(struct buf *bp, struct scsi_link *sl)
750 {
751 	if (bp->b_bcount > TWE_MAXFER)
752 		bp->b_bcount = TWE_MAXFER;
753 	minphys(bp);
754 }
755 
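/*
 * copy internally generated data (sense, inquiry, capacity) into the
 * scsi_xfer data buffer.
 */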
756 void
757 twe_copy_internal_data(xs, v, size)
758 	struct scsi_xfer *xs;
759 	void *v;
760 	size_t size;
761 {
762 	size_t copy_cnt;
763 
764 	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
765 
766 	if (!xs->datalen)
767 		printf("uio move is not yet supported\n");
768 	else {
769 		copy_cnt = MIN(size, xs->datalen);
770 		bcopy(v, xs->data, copy_cnt);
771 	}
772 }
773 
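/*
 * SCSI command entry point: INQUIRY, REQUEST SENSE, READ CAPACITY and
 * the like are emulated locally; reads, writes and cache flushes are
 * translated into controller commands and issued through twe_cmd().
 */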
774 int
775 twe_scsi_cmd(xs)
776 	struct scsi_xfer *xs;
777 {
778 	struct scsi_link *link = xs->sc_link;
779 	struct twe_softc *sc = link->adapter_softc;
780 	struct twe_ccb *ccb;
781 	struct twe_cmd *cmd;
782 	struct scsi_inquiry_data inq;
783 	struct scsi_sense_data sd;
784 	struct scsi_read_cap_data rcd;
785 	u_int8_t target = link->target;
786 	u_int32_t blockno, blockcnt;
787 	struct scsi_rw *rw;
788 	struct scsi_rw_big *rwb;
789 	int error, op, flags, wait;
790 	twe_lock_t lock;
791 
792 
793 	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
794 	    link->lun != 0) {
795 		xs->error = XS_DRIVER_STUFFUP;
796 		return (COMPLETE);
797 	}
798 
799 	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));
800 
801 	xs->error = XS_NOERROR;
802 
803 	switch (xs->cmd->opcode) {
804 	case TEST_UNIT_READY:
805 	case START_STOP:
806 #if 0
807 	case VERIFY:
808 #endif
809 		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
810 		    target));
811 		break;
812 
813 	case REQUEST_SENSE:
814 		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
815 		bzero(&sd, sizeof sd);
816 		sd.error_code = 0x70;
817 		sd.segment = 0;
818 		sd.flags = SKEY_NO_SENSE;
819 		*(u_int32_t*)sd.info = htole32(0);
820 		sd.extra_len = 0;
821 		twe_copy_internal_data(xs, &sd, sizeof sd);
822 		break;
823 
824 	case INQUIRY:
825 		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
826 		    sc->sc_hdr[target].hd_devtype));
827 		bzero(&inq, sizeof inq);
828 		inq.device =
829 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
830 		inq.dev_qual2 =
831 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
832 		inq.version = 2;
833 		inq.response_format = 2;
834 		inq.additional_length = 32;
835 		strlcpy(inq.vendor, "3WARE  ", sizeof inq.vendor);
836 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
837 		    target);
838 		strlcpy(inq.revision, "   ", sizeof inq.revision);
839 		twe_copy_internal_data(xs, &inq, sizeof inq);
840 		break;
841 
842 	case READ_CAPACITY:
843 		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
844 		bzero(&rcd, sizeof rcd);
845 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
846 		_lto4b(TWE_SECTOR_SIZE, rcd.length);
847 		twe_copy_internal_data(xs, &rcd, sizeof rcd);
848 		break;
849 
850 	case PREVENT_ALLOW:
851 		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
852 		return (COMPLETE);
853 
854 	case READ_COMMAND:
855 	case READ_BIG:
856 	case WRITE_COMMAND:
857 	case WRITE_BIG:
858 	case SYNCHRONIZE_CACHE:
859 		lock = TWE_LOCK(sc);
860 
861 		flags = 0;
862 		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
863 			/* A read or write operation. */
864 			if (xs->cmdlen == 6) {
865 				rw = (struct scsi_rw *)xs->cmd;
866 				blockno = _3btol(rw->addr) &
867 				    (SRW_TOPADDR << 16 | 0xffff);
868 				blockcnt = rw->length ? rw->length : 0x100;
869 			} else {
870 				rwb = (struct scsi_rw_big *)xs->cmd;
871 				blockno = _4btol(rwb->addr);
872 				blockcnt = _2btol(rwb->length);
873 				/* reflect DPO & FUA flags */
874 				if (xs->cmd->opcode == WRITE_BIG &&
875 				    rwb->byte2 & 0x18)
876 					flags = TWE_FLAGS_CACHEDISABLE;
877 			}
878 			if (blockno >= sc->sc_hdr[target].hd_size ||
879 			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
880 				printf("%s: out of bounds %u-%u >= %u\n",
881 				    sc->sc_dev.dv_xname, blockno, blockcnt,
882 				    sc->sc_hdr[target].hd_size);
883 				xs->error = XS_DRIVER_STUFFUP;
884 				scsi_done(xs);
885 				TWE_UNLOCK(sc, lock);
886 				return (COMPLETE);
887 			}
888 		}
889 
890 		switch (xs->cmd->opcode) {
891 		case READ_COMMAND:	op = TWE_CMD_READ;	break;
892 		case READ_BIG:		op = TWE_CMD_READ;	break;
893 		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
894 		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
895 		default:		op = TWE_CMD_NOP;	break;
896 		}
897 
898 		if ((ccb = twe_get_ccb(sc)) == NULL) {
899 			xs->error = XS_DRIVER_STUFFUP;
900 			scsi_done(xs);
901 			TWE_UNLOCK(sc, lock);
902 			return (COMPLETE);
903 		}
904 
905 		ccb->ccb_xs = xs;
906 		ccb->ccb_data = xs->data;
907 		ccb->ccb_length = xs->datalen;
908 		ccb->ccb_state = TWE_CCB_READY;
909 		cmd = ccb->ccb_cmd;
910 		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
911 		cmd->cmd_op = op;
912 		cmd->cmd_flags = flags;
913 		cmd->cmd_io.count = htole16(blockcnt);
914 		cmd->cmd_io.lba = htole32(blockno);
915 		wait = xs->flags & SCSI_POLL;
916 		if (!sc->sc_thread_on)
917 			wait |= SCSI_POLL;
918 
919 		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
920 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {
921 
922 			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
923 			if (xs->flags & SCSI_POLL) {
924 				TWE_UNLOCK(sc, lock);
925 				return (TRY_AGAIN_LATER);
926 			} else {
927 				xs->error = XS_DRIVER_STUFFUP;
928 				scsi_done(xs);
929 				TWE_UNLOCK(sc, lock);
930 				return (COMPLETE);
931 			}
932 		}
933 
934 		TWE_UNLOCK(sc, lock);
935 
936 		if (wait & SCSI_POLL)
937 			return (COMPLETE);
938 		else
939 			return (SUCCESSFULLY_QUEUED);
940 
941 	default:
942 		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
943 		    xs->cmd->opcode, target));
944 		xs->error = XS_DRIVER_STUFFUP;
945 	}
946 
947 	return (COMPLETE);
948 }
949 
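/*
 * interrupt handler: move completed ccbs from the controller's ready
 * queue onto sc_done_ccb, wake the worker thread and drain the AEN
 * queue when the card signals an attention condition.
 */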
950 int
951 twe_intr(v)
952 	void *v;
953 {
954 	struct twe_softc *sc = v;
955 	struct twe_ccb	*ccb;
956 	struct twe_cmd	*cmd;
957 	u_int32_t	status;
958 	twe_lock_t	lock;
959 	int		rv = 0;
960 
961 	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
962 	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
963 	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
964 #if 0
965 	if (status & TWE_STAT_HOSTI) {
966 
967 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
968 		    TWE_CTRL_CHOSTI);
969 	}
970 #endif
971 
972 	if (status & TWE_STAT_RDYI) {
973 
974 		while (!(status & TWE_STAT_RQE)) {
975 
976 			u_int32_t ready;
977 
978 			/*
979 			 * each word read from the ready queue seems to carry
980 			 * the status bits as well; it is unclear whether those
981 			 * can be trusted, which would let us avoid the extra
982 			 * status read below.
983 			 */
984 			ready = bus_space_read_4(sc->iot, sc->ioh,
985 			    TWE_READYQUEUE);
986 
987 			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
988 			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
989 			ccb->ccb_state = TWE_CCB_DONE;
990 			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
991 			rv++;
992 
993 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
994 			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
995 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
996 		}
997 	}
998 
999 	if (status & TWE_STAT_CMDI) {
1000 		rv++;
1001 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
1002 		    TWE_CTRL_MCMDI);
1003 	}
1004 
1005 	if (rv)
1006 		wakeup(sc);
1007 
1008 	if (status & TWE_STAT_ATTNI) {
1009 		u_int16_t aen;
1010 
1011 		/*
1012 		 * no attention codes are acted upon yet; mirror degradation
1013 		 * would be one of interest.  the full set of AEN codes is
1014 		 * not known here, so the queue is simply drained and the
1015 		 * codes are only reported under TWE_D_AEN debugging.
1016 		 */
1017 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
1018 		    TWE_CTRL_CATTNI);
1019 
1020 		lock = TWE_LOCK(sc);
1021 		for (aen = -1; aen != TWE_AEN_QEMPTY; ) {
1022 			u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
1023 			struct twe_param *pb = (void *) (((u_long)param_buf +
1024 			    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
1025 
1026 			if ((ccb = twe_get_ccb(sc)) == NULL)
1027 				break;
1028 
1029 			ccb->ccb_xs = NULL;
1030 			ccb->ccb_data = pb;
1031 			ccb->ccb_length = TWE_SECTOR_SIZE;
1032 			ccb->ccb_state = TWE_CCB_READY;
1033 			cmd = ccb->ccb_cmd;
1034 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
1035 			cmd->cmd_op = TWE_CMD_GPARAM;
1036 			cmd->cmd_flags = 0;
1037 			cmd->cmd_param.count = 1;
1038 
1039 			pb->table_id = TWE_PARAM_AEN;
1040 			pb->param_id = 2;
1041 			pb->param_size = 2;
1042 			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
1043 				printf(": error draining attention queue\n");
1044 				break;
1045 			}
1046 			aen = *(u_int16_t *)pb->data;
1047 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
1048 		}
1049 		TWE_UNLOCK(sc, lock);
1050 	}
1051 
1052 	return rv;
1053 }
1054