xref: /openbsd-src/sys/dev/ic/adv.c (revision b2ea75c1b17e1a9a339660e7ed45cd24946b230e)
1 /*	$OpenBSD: adv.c,v 1.8 2001/08/12 20:33:50 mickey Exp $	*/
2 /*	$NetBSD: adv.c,v 1.6 1998/10/28 20:39:45 dante Exp $	*/
3 
4 /*
5  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
6  *
7  * Copyright (c) 1998 The NetBSD Foundation, Inc.
8  * All rights reserved.
9  *
10  * Author: Baldassare Dante Profeta <dante@mclink.it>
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *        This product includes software developed by the NetBSD
23  *        Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55 
56 #include <vm/vm.h>
57 
58 #include <scsi/scsi_all.h>
59 #include <scsi/scsiconf.h>
60 
61 #include <dev/ic/adv.h>
62 #include <dev/ic/advlib.h>
63 
64 #ifndef DDB
65 #define	Debugger()	panic("should call debugger here (adv.c)")
66 #endif /* ! DDB */
67 
68 
69 /* #define ASC_DEBUG */
70 
71 /******************************************************************************/
72 
73 
74 static void adv_enqueue __P((ASC_SOFTC *, struct scsi_xfer *, int));
75 static struct scsi_xfer *adv_dequeue __P((ASC_SOFTC *));
76 
77 static int adv_alloc_ccbs __P((ASC_SOFTC *));
78 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
79 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
80 static void adv_reset_ccb __P((ADV_CCB *));
81 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
82 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
83 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
84 static void adv_start_ccbs __P((ASC_SOFTC *));
85 
86 static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));
87 
88 static int adv_scsi_cmd __P((struct scsi_xfer *));
89 static void advminphys __P((struct buf *));
90 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
91 
92 static int adv_poll __P((ASC_SOFTC *, struct scsi_xfer *, int));
93 static void adv_timeout __P((void *));
94 static void adv_watchdog __P((void *));
95 
96 
97 /******************************************************************************/
98 
99 
/*
 * Autoconf glue: driver class for "adv" units.  DV_DULL = generic
 * device (not a disk, network or tty class device).
 */
struct cfdriver adv_cd = {
	NULL, "adv", DV_DULL
};
103 
104 
/*
 * Entry points handed to the SCSI midlayer for every bus attached by
 * this driver (see adv_attach() where sc_link.adapter points here).
 */
struct scsi_adapter adv_switch =
{
	adv_scsi_cmd,		/* called to start/enqueue a SCSI command */
	advminphys,		/* to limit the transfer to max device can do */
	0,			/* IT SEEMS IT IS NOT USED YET */
	0,			/* as above... */
};
112 
113 
114 /* the below structure is so we have a default dev struct for out link struct */
struct scsi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
122 
123 
124 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
125 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
126 
127 
128 /******************************************************************************/
129 /*                            scsi_xfer queue routines                      */
130 /******************************************************************************/
131 
132 
/*
 * Insert a scsi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
138 static void
139 adv_enqueue(sc, xs, infront)
140 	ASC_SOFTC      *sc;
141 	struct scsi_xfer *xs;
142 	int             infront;
143 {
144 
145 	if (infront || sc->sc_queue.lh_first == NULL) {
146 		if (sc->sc_queue.lh_first == NULL)
147 			sc->sc_queuelast = xs;
148 		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
149 		return;
150 	}
151 	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
152 	sc->sc_queuelast = xs;
153 }
154 
155 
156 /*
157  * Pull a scsi_xfer off the front of the software queue.
158  */
159 static struct scsi_xfer *
160 adv_dequeue(sc)
161 	ASC_SOFTC      *sc;
162 {
163 	struct scsi_xfer *xs;
164 
165 	xs = sc->sc_queue.lh_first;
166 	LIST_REMOVE(xs, free_list);
167 
168 	if (sc->sc_queue.lh_first == NULL)
169 		sc->sc_queuelast = NULL;
170 
171 	return (xs);
172 }
173 
174 
175 /******************************************************************************/
176 /*                             Control Blocks routines                        */
177 /******************************************************************************/
178 
179 
180 static int
181 adv_alloc_ccbs(sc)
182 	ASC_SOFTC      *sc;
183 {
184 	bus_dma_segment_t seg;
185 	int             error, rseg;
186 
187 	/*
188          * Allocate the control blocks.
189          */
190 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
191 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
192 		printf("%s: unable to allocate control structures,"
193 		       " error = %d\n", sc->sc_dev.dv_xname, error);
194 		return (error);
195 	}
196 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
197 		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
198 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
199 		printf("%s: unable to map control structures, error = %d\n",
200 		       sc->sc_dev.dv_xname, error);
201 		return (error);
202 	}
203 	/*
204          * Create and load the DMA map used for the control blocks.
205          */
206 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
207 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
208 				       &sc->sc_dmamap_control)) != 0) {
209 		printf("%s: unable to create control DMA map, error = %d\n",
210 		       sc->sc_dev.dv_xname, error);
211 		return (error);
212 	}
213 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
214 			   sc->sc_control, sizeof(struct adv_control), NULL,
215 				     BUS_DMA_NOWAIT)) != 0) {
216 		printf("%s: unable to load control DMA map, error = %d\n",
217 		       sc->sc_dev.dv_xname, error);
218 		return (error);
219 	}
220 	return (0);
221 }
222 
223 
224 /*
225  * Create a set of ccbs and add them to the free list.  Called once
226  * by adv_init().  We return the number of CCBs successfully created.
227  */
228 static int
229 adv_create_ccbs(sc, ccbstore, count)
230 	ASC_SOFTC      *sc;
231 	ADV_CCB        *ccbstore;
232 	int             count;
233 {
234 	ADV_CCB        *ccb;
235 	int             i, error;
236 
237 	bzero(ccbstore, sizeof(ADV_CCB) * count);
238 	for (i = 0; i < count; i++) {
239 		ccb = &ccbstore[i];
240 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
241 			printf("%s: unable to initialize ccb, error = %d\n",
242 			       sc->sc_dev.dv_xname, error);
243 			return (i);
244 		}
245 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
246 	}
247 
248 	return (i);
249 }
250 
251 
252 /*
253  * A ccb is put onto the free list.
254  */
/*
 * Return a CCB to the free list and wake any thread sleeping in
 * adv_get_ccb() waiting for one.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
         * If there were none, wake anybody waiting for one to come free,
         * starting with queued entries.
         */
	/*
	 * Since the CCB was just inserted at the head, tqe_next == 0
	 * means it is the only entry, i.e. the list was empty before
	 * the insert -- exactly the case in which sleepers may exist.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}
276 
277 
/*
 * Reset the software state of a CCB so it may be reused
 * (clears CCB_ALLOC/CCB_ABORT/CCB_WATCHDOG).
 */
static void
adv_reset_ccb(ccb)
	ADV_CCB        *ccb;
{

	ccb->flags = 0;
}
285 
286 
287 static int
288 adv_init_ccb(sc, ccb)
289 	ASC_SOFTC      *sc;
290 	ADV_CCB        *ccb;
291 {
292 	int             error;
293 
294 	/*
295          * Create the DMA map for this CCB.
296          */
297 	error = bus_dmamap_create(sc->sc_dmat,
298 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
299 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
300 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
301 	if (error) {
302 		printf("%s: unable to create DMA map, error = %d\n",
303 		       sc->sc_dev.dv_xname, error);
304 		return (error);
305 	}
306 	adv_reset_ccb(ccb);
307 	return (0);
308 }
309 
310 
311 /*
312  * Get a free ccb
313  *
314  * If there are none, see if we can allocate a new one
315  */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC      *sc;
	int             flags;
{
	ADV_CCB        *ccb = 0;
	int             s;

	s = splbio();

	/*
         * If we can and have to, sleep waiting for one to come free
         * but only if we can't allocate a new one.
         */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		/* Caller may not sleep: give up and return NULL. */
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		/* Woken by adv_free_ccb() when a CCB is returned. */
		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	/* Mark in-use; checked by adv_narrow_isr_callback(). */
	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}
348 
349 
350 /*
351  * Queue a CCB to be sent to the controller, and send it if possible.
352  */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{

	/*
	 * Point the xfer's timeout at adv_timeout() for this CCB (it
	 * is armed later, in adv_start_ccbs()), then put the CCB at
	 * the tail of the waiting queue and try to start it.
	 */
	timeout_set(&ccb->xs->stimeout, adv_timeout, ccb);
	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}
364 
365 
/*
 * Feed waiting CCBs to the controller.  If the board cannot accept
 * one (ASC_BUSY), leave it at the head of the queue and schedule
 * adv_watchdog() to retry; otherwise arm the real command timeout
 * (unless the command is being polled, see adv_poll()).
 */
static void
adv_start_ccbs(sc)
	ASC_SOFTC      *sc;
{
	ADV_CCB        *ccb;
	struct scsi_xfer *xs;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		xs = ccb->xs;
		/* A pending watchdog retry is being serviced now. */
		if (ccb->flags & CCB_WATCHDOG)
			timeout_del(&xs->stimeout);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			/* Board full: have adv_watchdog() retry later. */
			ccb->flags |= CCB_WATCHDOG;
			timeout_set(&xs->stimeout, adv_watchdog, ccb);
			timeout_add(&xs->stimeout,
				(ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		/* Polled commands are watched by adv_poll() instead. */
		if ((ccb->xs->flags & SCSI_POLL) == 0) {
			timeout_set(&xs->stimeout, adv_timeout, ccb);
			timeout_add(&xs->stimeout, (ccb->timeout * hz) / 1000);
		}
	}
}
394 
395 
396 /******************************************************************************/
397 /*                      DMA able memory allocation routines                   */
398 /******************************************************************************/
399 
400 
401 /*
402  * Allocate a DMA able memory for overrun_buffer.
403  * This memory can be safely shared among all the AdvanSys boards.
404  */
405 u_int8_t       *
406 adv_alloc_overrunbuf(dvname, dmat)
407 	char           *dvname;
408 	bus_dma_tag_t   dmat;
409 {
410 	static u_int8_t *overrunbuf = NULL;
411 
412 	bus_dmamap_t    ovrbuf_dmamap;
413 	bus_dma_segment_t seg;
414 	int             rseg, error;
415 
416 
417 	/*
418          * if an overrun buffer has been already allocated don't allocate it
419          * again. Instead return the address of the allocated buffer.
420          */
421 	if (overrunbuf)
422 		return (overrunbuf);
423 
424 
425 	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
426 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
427 		printf("%s: unable to allocate overrun buffer, error = %d\n",
428 		       dvname, error);
429 		return (0);
430 	}
431 	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
432 	(caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
433 		printf("%s: unable to map overrun buffer, error = %d\n",
434 		       dvname, error);
435 
436 		bus_dmamem_free(dmat, &seg, 1);
437 		return (0);
438 	}
439 	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
440 	      ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
441 		printf("%s: unable to create overrun buffer DMA map,"
442 		       " error = %d\n", dvname, error);
443 
444 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
445 		bus_dmamem_free(dmat, &seg, 1);
446 		return (0);
447 	}
448 	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
449 			   ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
450 		printf("%s: unable to load overrun buffer DMA map,"
451 		       " error = %d\n", dvname, error);
452 
453 		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
454 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
455 		bus_dmamem_free(dmat, &seg, 1);
456 		return (0);
457 	}
458 	return (overrunbuf);
459 }
460 
461 
462 /******************************************************************************/
463 /*                         SCSI layer interfacing routines                    */
464 /******************************************************************************/
465 
466 
/*
 * Board initialization: verify the chip signature, read the board
 * configuration from the EEPROM (printing a warning for each anomaly
 * reported), write the possibly-adjusted configuration back, install
 * the narrow-board interrupt callback, and allocate the shared
 * overrun buffer.  Returns 0 on success, 1 if the overrun buffer
 * could not be allocated.
 */
int
adv_init(sc)
	ASC_SOFTC      *sc;
{
	int             warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
		panic("adv_init: adv_find_signature failed");

	/*
         * Read the board configuration
         */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			       " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	/* Clamp the EEPROM-supplied reset wait to the library maximum. */
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
         * Modify the board configuration
         */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	/* AscISR() calls back through this pointer on command completion. */
	sc->isr_callback = (ulong) adv_narrow_isr_callback;

	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
						     sc->sc_dmat))) {
		return (1);
	}

	return (0);
}
548 
549 
550 void
551 adv_attach(sc)
552 	ASC_SOFTC      *sc;
553 {
554 	int             i, error;
555 
556 	/*
557          * Initialize board RISC chip and enable interrupts.
558          */
559 	switch (AscInitDriver(sc)) {
560 	case 0:
561 		/* AllOK */
562 		break;
563 
564 	case 1:
565 		panic("%s: bad signature", sc->sc_dev.dv_xname);
566 		break;
567 
568 	case 2:
569 		panic("%s: unable to load MicroCode",
570 		      sc->sc_dev.dv_xname);
571 		break;
572 
573 	case 3:
574 		panic("%s: unable to initialize MicroCode",
575 		      sc->sc_dev.dv_xname);
576 		break;
577 
578 	default:
579 		panic("%s: unable to initialize board RISC chip",
580 		      sc->sc_dev.dv_xname);
581 	}
582 
583 
584 	/*
585          * fill in the prototype scsi_link.
586          */
587 	sc->sc_link.adapter_softc = sc;
588 	sc->sc_link.adapter_target = sc->chip_scsi_id;
589 	sc->sc_link.adapter = &adv_switch;
590 	sc->sc_link.device = &adv_dev;
591 	sc->sc_link.openings = 4;
592 	sc->sc_link.adapter_buswidth = 7;
593 
594 
595 	TAILQ_INIT(&sc->sc_free_ccb);
596 	TAILQ_INIT(&sc->sc_waiting_ccb);
597 	LIST_INIT(&sc->sc_queue);
598 
599 
600 	/*
601          * Allocate the Control Blocks.
602          */
603 	error = adv_alloc_ccbs(sc);
604 	if (error)
605 		return; /* (error) */ ;
606 
607 	/*
608          * Create and initialize the Control Blocks.
609          */
610 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
611 	if (i == 0) {
612 		printf("%s: unable to create control blocks\n",
613 		       sc->sc_dev.dv_xname);
614 		return; /* (ENOMEM) */ ;
615 	} else if (i != ADV_MAX_CCB) {
616 		printf("%s: WARNING: only %d of %d control blocks created\n",
617 		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
618 	}
619 	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
620 }
621 
622 
623 static void
624 advminphys(bp)
625 	struct buf     *bp;
626 {
627 
628 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
629 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
630 	minphys(bp);
631 }
632 
633 
634 /*
635  * start a scsi operation given the command and the data address.  Also needs
636  * the unit, target and lu.
637  */
638 static int
639 adv_scsi_cmd(xs)
640 	struct scsi_xfer *xs;
641 {
642 	struct scsi_link *sc_link = xs->sc_link;
643 	ASC_SOFTC      *sc = sc_link->adapter_softc;
644 	bus_dma_tag_t   dmat = sc->sc_dmat;
645 	ADV_CCB        *ccb;
646 	int             s, flags, error, nsegs;
647 	int             fromqueue = 1, dontqueue = 0;
648 
649 
650 	s = splbio();		/* protect the queue */
651 
652 	/*
653          * If we're running the queue from adv_done(), we've been
654          * called with the first queue entry as our argument.
655          */
656 	if (xs == sc->sc_queue.lh_first) {
657 		xs = adv_dequeue(sc);
658 		fromqueue = 1;
659 	} else {
660 
661 		/* Polled requests can't be queued for later. */
662 		dontqueue = xs->flags & SCSI_POLL;
663 
664 		/*
665                  * If there are jobs in the queue, run them first.
666                  */
667 		if (sc->sc_queue.lh_first != NULL) {
668 			/*
669                          * If we can't queue, we have to abort, since
670                          * we have to preserve order.
671                          */
672 			if (dontqueue) {
673 				splx(s);
674 				xs->error = XS_DRIVER_STUFFUP;
675 				return (TRY_AGAIN_LATER);
676 			}
677 			/*
678                          * Swap with the first queue entry.
679                          */
680 			adv_enqueue(sc, xs, 0);
681 			xs = adv_dequeue(sc);
682 			fromqueue = 1;
683 		}
684 	}
685 
686 
687 	/*
688          * get a ccb to use. If the transfer
689          * is from a buf (possibly from interrupt time)
690          * then we can't allow it to sleep
691          */
692 
693 	flags = xs->flags;
694 	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
695 		/*
696                  * If we can't queue, we lose.
697                  */
698 		if (dontqueue) {
699 			splx(s);
700 			xs->error = XS_DRIVER_STUFFUP;
701 			return (TRY_AGAIN_LATER);
702 		}
703 		/*
704                  * Stuff ourselves into the queue, in front
705                  * if we came off in the first place.
706                  */
707 		adv_enqueue(sc, xs, fromqueue);
708 		splx(s);
709 		return (SUCCESSFULLY_QUEUED);
710 	}
711 	splx(s);		/* done playing with the queue */
712 
713 	ccb->xs = xs;
714 	ccb->timeout = xs->timeout;
715 
716 	/*
717          * Build up the request
718          */
719 	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
720 
721 	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;
722 
723 	ccb->scsiq.cdbptr = &xs->cmd->opcode;
724 	ccb->scsiq.q2.cdb_len = xs->cmdlen;
725 	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->target);
726 	ccb->scsiq.q1.target_lun = sc_link->lun;
727 	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->target,
728 						   sc_link->lun);
729 	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
730 		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
731 	ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);
732 
733 	/*
734          * If  there  are  any  outstanding  requests  for  the  current target,
735          * then  every  255th request  send an  ORDERED request.  This heuristic
736          * tries  to  retain  the  benefit  of request  sorting while preventing
737          * request starvation. 255 is the max number of tags or pending commands
738          * a device may have outstanding.
739          */
740 	sc->reqcnt[sc_link->target]++;
741 	if ((sc->reqcnt[sc_link->target] > 0) &&
742 	    (sc->reqcnt[sc_link->target] % 255) == 0) {
743 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
744 	} else {
745 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
746 	}
747 
748 
749 	if (xs->datalen) {
750 		/*
751                  * Map the DMA transfer.
752                  */
753 #ifdef TFS
754 		if (flags & SCSI_DATA_UIO) {
755 			error = bus_dmamap_load_uio(dmat,
756 				  ccb->dmamap_xfer, (struct uio *) xs->data,
757 						    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
758 		} else
759 #endif				/* TFS */
760 		{
761 			error = bus_dmamap_load(dmat,
762 			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
763 						(flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
764 		}
765 
766 		if (error) {
767 			if (error == EFBIG) {
768 				printf("%s: adv_scsi_cmd, more than %d dma"
769 				       " segments\n",
770 				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
771 			} else {
772 				printf("%s: adv_scsi_cmd, error %d loading"
773 				       " dma map\n",
774 				       sc->sc_dev.dv_xname, error);
775 			}
776 
777 			xs->error = XS_DRIVER_STUFFUP;
778 			adv_free_ccb(sc, ccb);
779 			return (COMPLETE);
780 		}
781 		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
782 			      (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
783 				BUS_DMASYNC_PREWRITE);
784 
785 
786 		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
787 
788 		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
789 
790 			ccb->sghead.sg_list[nsegs].addr =
791 				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
792 			ccb->sghead.sg_list[nsegs].bytes =
793 				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
794 		}
795 
796 		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
797 			ccb->dmamap_xfer->dm_nsegs;
798 
799 		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
800 		ccb->scsiq.sg_head = &ccb->sghead;
801 		ccb->scsiq.q1.data_addr = 0;
802 		ccb->scsiq.q1.data_cnt = 0;
803 	} else {
804 		/*
805                  * No data xfer, use non S/G values.
806                  */
807 		ccb->scsiq.q1.data_addr = 0;
808 		ccb->scsiq.q1.data_cnt = 0;
809 	}
810 
811 #ifdef ASC_DEBUG
812 	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
813 			sc_link->scsipi_scsi.target,
814 			sc_link->scsipi_scsi.lun, xs->cmd->opcode,
815 			(unsigned long)ccb);
816 #endif
817 	s = splbio();
818 	adv_queue_ccb(sc, ccb);
819 	splx(s);
820 
821 	/*
822          * Usually return SUCCESSFULLY QUEUED
823          */
824 	if ((flags & SCSI_POLL) == 0)
825 		return (SUCCESSFULLY_QUEUED);
826 
827 	/*
828          * If we can't use interrupts, poll on completion
829          */
830 	if (adv_poll(sc, xs, ccb->timeout)) {
831 		adv_timeout(ccb);
832 		if (adv_poll(sc, xs, ccb->timeout))
833 			adv_timeout(ccb);
834 	}
835 	return (COMPLETE);
836 }
837 
838 
/*
 * Hardware interrupt handler.  Runs the chip's interrupt service
 * routine (AscISR(), which completes CCBs through isr_callback) and
 * then restarts the first software-queued scsi_xfer, if any.
 * Always returns 1 (interrupt claimed).
 */
int
adv_intr(arg)
	void           *arg;
{
	ASC_SOFTC      *sc = arg;
	struct scsi_xfer *xs;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if(ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh))
	{
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if(int_pend)
		printf("\n");
#endif

	/*
         * If there are queue entries in the software queue, try to
         * run the first one.  We should be more or less guaranteed
         * to succeed, since we just freed a CCB.
         *
         * NOTE: adv_scsi_cmd() relies on our calling it with
         * the first entry in the queue.
         */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}
874 
875 
876 /*
877  * Poll a particular unit, looking for a particular xs
878  */
879 static int
880 adv_poll(sc, xs, count)
881 	ASC_SOFTC      *sc;
882 	struct scsi_xfer *xs;
883 	int             count;
884 {
885 
886 	/* timeouts are in msec, so we loop in 1000 usec cycles */
887 	while (count) {
888 		adv_intr(sc);
889 		if (xs->flags & ITSDONE)
890 			return (0);
891 		delay(1000);	/* only happens in boot so ok */
892 		count--;
893 	}
894 	return (1);
895 }
896 
897 
/*
 * Command timeout handler.  First pass: abort the timed-out CCB and
 * re-queue it with the short ADV_ABORT_TIMEOUT, marking CCB_ABORT.
 * Second pass (CCB_ABORT already set, i.e. the abort itself timed
 * out): reset the SCSI bus instead.
 */
static void
adv_timeout(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	sc_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
         * If it has been through before, then a previous abort has failed,
         * don't try abort again, reset the bus instead.
         */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			/* Reset failed: give the command one more try. */
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}
937 
938 
939 static void
940 adv_watchdog(arg)
941 	void           *arg;
942 {
943 	ADV_CCB        *ccb = arg;
944 	struct scsi_xfer *xs = ccb->xs;
945 	struct scsi_link *sc_link = xs->sc_link;
946 	ASC_SOFTC      *sc = sc_link->adapter_softc;
947 	int             s;
948 
949 	s = splbio();
950 
951 	ccb->flags &= ~CCB_WATCHDOG;
952 	adv_start_ccbs(sc);
953 
954 	splx(s);
955 }
956 
957 
958 /******************************************************************************/
959 /*                  NARROW and WIDE boards Interrupt callbacks                */
960 /******************************************************************************/
961 
962 
963 /*
964  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
965  *
966  * Interrupt callback function for the Narrow SCSI Asc Library.
967  */
968 static void
969 adv_narrow_isr_callback(sc, qdonep)
970 	ASC_SOFTC      *sc;
971 	ASC_QDONE_INFO *qdonep;
972 {
973 	bus_dma_tag_t   dmat = sc->sc_dmat;
974 	ADV_CCB        *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
975 	struct scsi_xfer *xs = ccb->xs;
976 	struct scsi_sense_data *s1, *s2;
977 
978 
979 #ifdef ASC_DEBUG
980 	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
981 			(unsigned long)ccb,
982 			xs->sc_link->scsipi_scsi.target,
983 			xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
984 #endif
985 	timeout_del(&xs->stimeout);
986 
987 	/*
988          * If we were a data transfer, unload the map that described
989          * the data buffer.
990          */
991 	if (xs->datalen) {
992 		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
993 			 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
994 				BUS_DMASYNC_POSTWRITE);
995 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
996 	}
997 	if ((ccb->flags & CCB_ALLOC) == 0) {
998 		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
999 		Debugger();
1000 		return;
1001 	}
1002 	/*
1003          * 'qdonep' contains the command's ending status.
1004          */
1005 #ifdef ASC_DEBUG
1006 	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
1007 #endif
1008 	switch (qdonep->d3.done_stat) {
1009 	case ASC_QD_NO_ERROR:
1010 		switch (qdonep->d3.host_stat) {
1011 		case ASC_QHSTA_NO_ERROR:
1012 			xs->error = XS_NOERROR;
1013 			xs->resid = 0;
1014 			break;
1015 
1016 		default:
1017 			/* QHSTA error occurred */
1018 			xs->error = XS_DRIVER_STUFFUP;
1019 			break;
1020 		}
1021 
1022 		/*
1023                  * If an INQUIRY command completed successfully, then call
1024                  * the AscInquiryHandling() function to patch bugged boards.
1025                  */
1026 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
1027 		    (xs->sc_link->lun == 0) &&
1028 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
1029 			AscInquiryHandling(sc,
1030 				      xs->sc_link->target & 0x7,
1031 					   (ASC_SCSI_INQUIRY *) xs->data);
1032 		}
1033 		break;
1034 
1035 	case ASC_QD_WITH_ERROR:
1036 		switch (qdonep->d3.host_stat) {
1037 		case ASC_QHSTA_NO_ERROR:
1038 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
1039 				s1 = &ccb->scsi_sense;
1040 				s2 = &xs->sense;
1041 				*s2 = *s1;
1042 				xs->error = XS_SENSE;
1043 			} else {
1044 				xs->error = XS_DRIVER_STUFFUP;
1045 			}
1046 			break;
1047 
1048 		default:
1049 			/* QHSTA error occurred */
1050 			xs->error = XS_DRIVER_STUFFUP;
1051 			break;
1052 		}
1053 		break;
1054 
1055 	case ASC_QD_ABORTED_BY_HOST:
1056 	default:
1057 		xs->error = XS_DRIVER_STUFFUP;
1058 		break;
1059 	}
1060 
1061 
1062 	adv_free_ccb(sc, ccb);
1063 	xs->flags |= ITSDONE;
1064 	scsi_done(xs);
1065 }
1066