xref: /openbsd-src/sys/dev/ic/twe.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: twe.c,v 1.51 2020/02/15 18:02:00 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* #define	TWE_DEBUG */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/buf.h>
36 #include <sys/device.h>
37 #include <sys/malloc.h>
38 #include <sys/kthread.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45 
46 #include <dev/ic/twereg.h>
47 #include <dev/ic/twevar.h>
48 
49 #ifdef TWE_DEBUG
50 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
51 #define	TWE_D_CMD	0x0001
52 #define	TWE_D_INTR	0x0002
53 #define	TWE_D_MISC	0x0004
54 #define	TWE_D_DMA	0x0008
55 #define	TWE_D_AEN	0x0010
56 int twe_debug = 0;
57 #else
58 #define	TWE_DPRINTF(m,a)	/* m, a */
59 #endif
60 
/* Autoconf glue: device class entry for twe(4). */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};
64 
65 void	twe_scsi_cmd(struct scsi_xfer *);
66 
/* SCSI midlayer entry points; only command submission is implemented. */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, NULL, NULL, NULL, NULL
};
70 
71 void *twe_get_ccb(void *);
72 void twe_put_ccb(void *, void *);
73 void twe_dispose(struct twe_softc *sc);
74 int  twe_cmd(struct twe_ccb *ccb, int flags, int wait);
75 int  twe_start(struct twe_ccb *ccb, int wait);
76 int  twe_complete(struct twe_ccb *ccb);
77 int  twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
78 void twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
79 void twe_thread_create(void *v);
80 void twe_thread(void *v);
81 void twe_aen(void *, void *);
82 
83 void *
84 twe_get_ccb(void *xsc)
85 {
86 	struct twe_softc *sc = xsc;
87 	struct twe_ccb *ccb;
88 
89 	mtx_enter(&sc->sc_ccb_mtx);
90 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
91 	if (ccb != NULL)
92 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
93 	mtx_leave(&sc->sc_ccb_mtx);
94 
95 	return (ccb);
96 }
97 
98 void
99 twe_put_ccb(void *xsc, void *xccb)
100 {
101 	struct twe_softc *sc = xsc;
102 	struct twe_ccb *ccb = xccb;
103 
104 	ccb->ccb_state = TWE_CCB_FREE;
105 	mtx_enter(&sc->sc_ccb_mtx);
106 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
107 	mtx_leave(&sc->sc_ccb_mtx);
108 }
109 
110 void
111 twe_dispose(sc)
112 	struct twe_softc *sc;
113 {
114 	register struct twe_ccb *ccb;
115 	if (sc->sc_cmdmap != NULL) {
116 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
117 		/* traverse the ccbs and destroy the maps */
118 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
119 			if (ccb->ccb_dmamap)
120 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
121 	}
122 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
123 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
124 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
125 }
126 
127 int
128 twe_attach(sc)
129 	struct twe_softc *sc;
130 {
131 	struct scsibus_attach_args saa;
132 	/* this includes a buffer for drive config req, and a capacity req */
133 	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
134 	struct twe_param *pb = (void *)
135 	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
136 	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
137 	struct twe_ccb	*ccb;
138 	struct twe_cmd	*cmd;
139 	u_int32_t	status;
140 	int		error, i, retry, nunits, nseg;
141 	const char	*errstr;
142 	twe_lock_t	lock;
143 	paddr_t		pa;
144 
145 	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
146 	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
147 	if (error) {
148 		printf(": cannot allocate commands (%d)\n", error);
149 		return (1);
150 	}
151 
152 	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
153 	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
154 	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
155 	if (error) {
156 		printf(": cannot map commands (%d)\n", error);
157 		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
158 		return (1);
159 	}
160 
161 	error = bus_dmamap_create(sc->dmat,
162 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
163 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
164 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
165 	if (error) {
166 		printf(": cannot create ccb cmd dmamap (%d)\n", error);
167 		twe_dispose(sc);
168 		return (1);
169 	}
170 	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
171 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
172 	if (error) {
173 		printf(": cannot load command dma map (%d)\n", error);
174 		twe_dispose(sc);
175 		return (1);
176 	}
177 
178 	TAILQ_INIT(&sc->sc_ccb2q);
179 	TAILQ_INIT(&sc->sc_ccbq);
180 	TAILQ_INIT(&sc->sc_free_ccb);
181 	TAILQ_INIT(&sc->sc_done_ccb);
182 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
183 	scsi_iopool_init(&sc->sc_iopool, sc, twe_get_ccb, twe_put_ccb);
184 
185 	scsi_ioh_set(&sc->sc_aen, &sc->sc_iopool, twe_aen, sc);
186 
187 	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
188 	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
189 	for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
190 	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
191 
192 		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
193 		ccb = &sc->sc_ccbs[cmd->cmd_index];
194 		error = bus_dmamap_create(sc->dmat,
195 		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
196 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
197 		if (error) {
198 			printf(": cannot create ccb dmamap (%d)\n", error);
199 			twe_dispose(sc);
200 			return (1);
201 		}
202 		ccb->ccb_sc = sc;
203 		ccb->ccb_cmd = cmd;
204 		ccb->ccb_cmdpa = pa;
205 		ccb->ccb_state = TWE_CCB_FREE;
206 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
207 	}
208 
209 	for (errstr = NULL, retry = 3; retry--; ) {
210 		int		veseen_srst;
211 		u_int16_t	aen;
212 
213 		if (errstr)
214 			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
215 
216 		for (i = 350000; i--; DELAY(100)) {
217 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
218 			if (status & TWE_STAT_CPURDY)
219 				break;
220 		}
221 
222 		if (!(status & TWE_STAT_CPURDY)) {
223 			errstr = ": card CPU is not ready\n";
224 			continue;
225 		}
226 
227 		/* soft reset, disable ints */
228 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
229 		    TWE_CTRL_SRST |
230 		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
231 		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
232 		    TWE_CTRL_MINT);
233 
234 		for (i = 350000; i--; DELAY(100)) {
235 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
236 			if (status & TWE_STAT_ATTNI)
237 				break;
238 		}
239 
240 		if (!(status & TWE_STAT_ATTNI)) {
241 			errstr = ": cannot get card's attention\n";
242 			continue;
243 		}
244 
245 		/* drain aen queue */
246 		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
247 
248 			ccb = scsi_io_get(&sc->sc_iopool, 0);
249 			if (ccb == NULL) {
250 				errstr = ": out of ccbs\n";
251 				break;
252 			}
253 
254 			ccb->ccb_xs = NULL;
255 			ccb->ccb_data = pb;
256 			ccb->ccb_length = TWE_SECTOR_SIZE;
257 			ccb->ccb_state = TWE_CCB_READY;
258 			cmd = ccb->ccb_cmd;
259 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
260 			cmd->cmd_op = TWE_CMD_GPARAM;
261 			cmd->cmd_param.count = 1;
262 
263 			pb->table_id = TWE_PARAM_AEN;
264 			pb->param_id = 2;
265 			pb->param_size = 2;
266 
267 			error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
268 			scsi_io_put(&sc->sc_iopool, ccb);
269 			if (error) {
270 				errstr = ": error draining attention queue\n";
271 				break;
272 			}
273 
274 			aen = *(u_int16_t *)pb->data;
275 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
276 			if (aen == TWE_AEN_SRST)
277 				veseen_srst++;
278 		}
279 
280 		if (!veseen_srst) {
281 			errstr = ": we don't get it\n";
282 			continue;
283 		}
284 
285 		if (status & TWE_STAT_CPUERR) {
286 			errstr = ": card CPU error detected\n";
287 			continue;
288 		}
289 
290 		if (status & TWE_STAT_PCIPAR) {
291 			errstr = ": PCI parity error detected\n";
292 			continue;
293 		}
294 
295 		if (status & TWE_STAT_QUEUEE ) {
296 			errstr = ": queuing error detected\n";
297 			continue;
298 		}
299 
300 		if (status & TWE_STAT_PCIABR) {
301 			errstr = ": PCI abort\n";
302 			continue;
303 		}
304 
305 		while (!(status & TWE_STAT_RQE)) {
306 			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
307 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
308 		}
309 
310 		break;
311 	}
312 
313 	if (retry < 0) {
314 		printf("%s", errstr);
315 		twe_dispose(sc);
316 		return 1;
317 	}
318 
319 	ccb = scsi_io_get(&sc->sc_iopool, 0);
320 	if (ccb == NULL) {
321 		printf(": out of ccbs\n");
322 		twe_dispose(sc);
323 		return 1;
324 	}
325 
326 	ccb->ccb_xs = NULL;
327 	ccb->ccb_data = pb;
328 	ccb->ccb_length = TWE_SECTOR_SIZE;
329 	ccb->ccb_state = TWE_CCB_READY;
330 	cmd = ccb->ccb_cmd;
331 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
332 	cmd->cmd_op = TWE_CMD_GPARAM;
333 	cmd->cmd_param.count = 1;
334 
335 	pb->table_id = TWE_PARAM_UC;
336 	pb->param_id = TWE_PARAM_UC;
337 	pb->param_size = TWE_MAX_UNITS;
338 
339 	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
340 	scsi_io_put(&sc->sc_iopool, ccb);
341 	if (error) {
342 		printf(": failed to fetch unit parameters\n");
343 		twe_dispose(sc);
344 		return 1;
345 	}
346 
347 	/* we are assuming last read status was good */
348 	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
349 
350 	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
351 		if (pb->data[i] == 0)
352 			continue;
353 
354 		ccb = scsi_io_get(&sc->sc_iopool, 0);
355 		if (ccb == NULL) {
356 			printf(": out of ccbs\n");
357 			twe_dispose(sc);
358 			return 1;
359 		}
360 
361 		ccb->ccb_xs = NULL;
362 		ccb->ccb_data = cap;
363 		ccb->ccb_length = TWE_SECTOR_SIZE;
364 		ccb->ccb_state = TWE_CCB_READY;
365 		cmd = ccb->ccb_cmd;
366 		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
367 		cmd->cmd_op = TWE_CMD_GPARAM;
368 		cmd->cmd_param.count = 1;
369 
370 		cap->table_id = TWE_PARAM_UI + i;
371 		cap->param_id = 4;
372 		cap->param_size = 4;	/* 4 bytes */
373 
374 		lock = TWE_LOCK(sc);
375 		twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
376 		TWE_UNLOCK(sc, lock);
377 		scsi_io_put(&sc->sc_iopool, ccb);
378 		if (error) {
379 			printf("%s: error fetching capacity for unit %d\n",
380 			    sc->sc_dev.dv_xname, i);
381 			continue;
382 		}
383 
384 		nunits++;
385 		sc->sc_hdr[i].hd_present = 1;
386 		sc->sc_hdr[i].hd_devtype = 0;
387 		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
388 		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
389 		    i, sc->sc_hdr[i].hd_size));
390 	}
391 
392 	if (!nunits)
393 		nunits++;
394 
395 	/* TODO: fetch & print cache params? */
396 
397 	sc->sc_link.adapter_softc = sc;
398 	sc->sc_link.adapter = &twe_switch;
399 	sc->sc_link.adapter_target = TWE_MAX_UNITS;
400 	sc->sc_link.openings = TWE_MAXCMDS / nunits;
401 	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
402 	sc->sc_link.pool = &sc->sc_iopool;
403 
404 	bzero(&saa, sizeof(saa));
405 	saa.saa_sc_link = &sc->sc_link;
406 
407 	config_found(&sc->sc_dev, &saa, scsiprint);
408 
409 	kthread_create_deferred(twe_thread_create, sc);
410 
411 	return (0);
412 }
413 
/*
 * twe_thread_create: deferred kthread-creation callback.  Starts the
 * per-controller worker thread; once it exists, acks pending status
 * bits on the card and enables interrupts.
 */
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}
443 
/*
 * twe_thread: per-controller worker thread.  Completes ccbs that
 * twe_intr() moved onto the done queue, then feeds prequeued ccbs
 * (sc_ccb2q) into the controller's command queue while it has room;
 * if the command queue fills up, asks for a command-interrupt so the
 * remainder gets pushed later.  Sleeps until woken by twe_intr() or
 * twe_start().  Never returns.
 */
void
twe_thread(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	for (;;) {
		lock = TWE_LOCK(sc);

		/* post-process everything the interrupt handler finished */
		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		/* submit prequeued commands while the hw queue is not full */
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* queue filled up: get interrupted when there is room again */
		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep_nsec(sc, PWAIT, "twespank", INFSLP);
	}
}
493 
/*
 * twe_cmd: prepare a ccb's DMA state and submit it via twe_start().
 * If the data buffer is not TWE_ALIGN-aligned, a bounce buffer is
 * allocated and the data copied into it (copied back in twe_done()).
 * The scatter/gather list is written into the command and the segment
 * count folded into the upper byte of cmd_op.  If 'wait' is set the
 * command is polled to completion via twe_complete().
 * Returns 0 on success or an errno; on failure all DMA resources
 * acquired here are released.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* hardware wants TWE_ALIGN-aligned buffers; bounce if misaligned */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			return (ENOMEM);
		}
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			/* release the bounce buffer before bailing out */
			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* the command itself is DMAed to the card as well */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
602 
603 int
604 twe_start(ccb, wait)
605 	struct twe_ccb *ccb;
606 	int wait;
607 {
608 	struct twe_softc*sc = ccb->ccb_sc;
609 	struct twe_cmd	*cmd = ccb->ccb_cmd;
610 	u_int32_t	status;
611 	int i;
612 
613 	cmd->cmd_op = htole16(cmd->cmd_op);
614 
615 	if (!wait) {
616 
617 		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
618 		ccb->ccb_state = TWE_CCB_PREQUEUED;
619 		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
620 		wakeup(sc);
621 		return 0;
622 	}
623 
624 	for (i = 1000; i--; DELAY(10)) {
625 
626 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
627 		if (!(status & TWE_STAT_CQF))
628 			break;
629 		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
630 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
631 	}
632 
633 	if (!(status & TWE_STAT_CQF)) {
634 		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
635 		    ccb->ccb_cmdpa);
636 
637 		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
638 		ccb->ccb_state = TWE_CCB_QUEUED;
639 		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
640 		return 0;
641 
642 	} else {
643 
644 		printf("%s: twe_start(%d) timed out\n",
645 		    sc->sc_dev.dv_xname, cmd->cmd_index);
646 
647 		return EPERM;
648 	}
649 }
650 
/*
 * twe_complete: poll the ready queue until the given ccb completes or
 * the timeout (derived from the scsi_xfer's timeout, else ~35s) runs
 * out.  Other ccbs popping off the ready queue meanwhile are finished
 * via twe_done() too.  Returns 0 on completion, 1 on timeout.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* drain everything currently on the ready queue */
		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
690 
/*
 * twe_done: post-process a completed ccb: sync and unload its data
 * DMA map (direction chosen by opcode, or by xs->flags for SCSI
 * transfers), copy bounce-buffer contents back to the caller's
 * buffer, and call scsi_done() under the driver lock if the ccb
 * carries a scsi_xfer.  Returns 0 on success, 1 if the ccb was not
 * in the DONE state.
 */
int
twe_done(sc, ccb)
	struct twe_softc *sc;
	struct twe_ccb *ccb;
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		     sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* these two opcodes are emulated and carry no data */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: pick sync direction from the opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* misaligned transfer went through a bounce buffer; copy it back */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);

	if (xs) {
		xs->resid = 0;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}
754 
755 void
756 twe_copy_internal_data(xs, v, size)
757 	struct scsi_xfer *xs;
758 	void *v;
759 	size_t size;
760 {
761 	size_t copy_cnt;
762 
763 	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
764 
765 	if (!xs->datalen)
766 		printf("uio move is not yet supported\n");
767 	else {
768 		copy_cnt = MIN(size, xs->datalen);
769 		bcopy(v, xs->data, copy_cnt);
770 	}
771 }
772 
/*
 * twe_scsi_cmd: SCSI midlayer entry point.  Emulates the mandatory
 * SCSI commands (TEST UNIT READY, REQUEST SENSE, INQUIRY, READ
 * CAPACITY, PREVENT/ALLOW) in software, and translates reads, writes
 * and SYNCHRONIZE CACHE into controller commands issued through
 * twe_cmd().  Completion is signalled via scsi_done() either here
 * (emulated/failed commands) or from twe_done().
 */
void
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb = xs->io;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags, wait;
	twe_lock_t lock;


	/* reject targets with no configured unit behind them */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* emulated: always report "no sense" */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = SSD_ERRCODE_CURRENT;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* emulated: synthesize inquiry data from the unit table */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "3WARE  ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
		    target);
		strlcpy(inq.revision, "   ", sizeof inq.revision);
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case READ_CAPACITY:
		/* emulated: answer from the cached unit size */
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		scsi_done(xs);
		return;

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
			blockno = blockcnt = 0;
		} else {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			/* clip the request against the unit's capacity */
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return;
			}
		}

		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		/* before the worker thread exists everything must be polled */
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}

		TWE_UNLOCK(sc, lock);
		return;

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd->opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
934 
/*
 * twe_intr: interrupt handler.  Moves completed ccbs from the card's
 * ready queue onto the done queue (finished later by twe_thread()),
 * masks command interrupts once seen, and schedules twe_aen() when the
 * card raises an attention.  Returns nonzero if the interrupt was ours.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb	*ccb;
	u_int32_t	status;
	int		rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		/* drain the ready queue until it reports empty */
		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_CMDI) {
		rv++;
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	/* let the worker thread finish the ccbs / push pending commands */
	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		scsi_ioh_add(&sc->sc_aen);
	}

	return rv;
}
1006 
/*
 * twe_aen: scsi_ioh callback that drains one AEN (asynchronous event
 * notification) from the controller with a GPARAM command, using an
 * on-stack TWE_ALIGN-aligned bounce buffer; reschedules itself until
 * the controller reports an empty AEN queue.
 */
void
twe_aen(void *cookie, void *io)
{
	struct twe_softc *sc = cookie;
	struct twe_ccb *ccb = io;
	struct twe_cmd *cmd = ccb->ccb_cmd;

	/* align the on-stack parameter buffer to TWE_ALIGN by hand */
	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	struct twe_param *pb = (void *) (((u_long)param_buf +
	    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	u_int16_t aen;

	twe_lock_t lock;
	int error;

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_flags = 0;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_AEN;
	pb->param_id = 2;
	pb->param_size = 2;

	lock = TWE_LOCK(sc);
	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
	TWE_UNLOCK(sc, lock);
	scsi_io_put(&sc->sc_iopool, ccb);

	if (error) {
		printf("%s: error draining attention queue\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	aen = *(u_int16_t *)pb->data;
	if (aen != TWE_AEN_QEMPTY)
		scsi_ioh_add(&sc->sc_aen);
}
1050