1 /*	$NetBSD: adv.c,v 1.11 1999/03/04 20:16:56 dante Exp $	*/
2 
3 /*
4  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
5  *
6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * Author: Baldassare Dante Profeta <dante@mclink.it>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/errno.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/malloc.h>
48 #include <sys/buf.h>
49 #include <sys/proc.h>
50 #include <sys/user.h>
51 
52 #include <machine/bus.h>
53 #include <machine/intr.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58 
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62 
63 #include <dev/ic/advlib.h>
64 #include <dev/ic/adv.h>
65 
66 #ifndef DDB
67 #define	Debugger()	panic("should call debugger here (adv.c)")
68 #endif /* ! DDB */
69 
70 
71 /* #define ASC_DEBUG */
72 
73 /******************************************************************************/
74 
75 
76 static int adv_alloc_ccbs __P((ASC_SOFTC *));
77 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
78 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
79 static void adv_reset_ccb __P((ADV_CCB *));
80 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
81 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
82 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
83 static void adv_start_ccbs __P((ASC_SOFTC *));
84 
85 static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));
86 
87 static int adv_scsi_cmd __P((struct scsipi_xfer *));
88 static void advminphys __P((struct buf *));
89 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
90 
91 static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
92 static void adv_timeout __P((void *));
93 static void adv_watchdog __P((void *));
94 
95 
96 /******************************************************************************/
97 
98 
99 /* the below structure is so we have a default dev struct for our link struct */
100 struct scsipi_device adv_dev =
101 {
102 	NULL,			/* Use default error handler */
103 	NULL,			/* have a queue, served by this */
104 	NULL,			/* have no async handler */
105 	NULL,			/* Use default 'done' routine */
106 };
107 
108 
109 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
110 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
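/*
 * Both timeouts above are expressed in milliseconds; they are converted to
 * clock ticks with (msec * hz) / 1000 before being handed to timeout().
 */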
111 
112 
113 /******************************************************************************/
114 /*                            scsipi_xfer queue routines                      */
115 /******************************************************************************/
116 
117 
118 /******************************************************************************/
119 /*                             Control Blocks routines                        */
120 /******************************************************************************/
121 
122 
123 static int
124 adv_alloc_ccbs(sc)
125 	ASC_SOFTC      *sc;
126 {
127 	bus_dma_segment_t seg;
128 	int             error, rseg;
129 
130 	/*
131          * Allocate the control blocks.
132          */
133 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
134 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
135 		printf("%s: unable to allocate control structures,"
136 		       " error = %d\n", sc->sc_dev.dv_xname, error);
137 		return (error);
138 	}
139 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
140 		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
141 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
142 		printf("%s: unable to map control structures, error = %d\n",
143 		       sc->sc_dev.dv_xname, error);
144 		return (error);
145 	}
146 	/*
147          * Create and load the DMA map used for the control blocks.
148          */
149 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
150 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
151 				       &sc->sc_dmamap_control)) != 0) {
152 		printf("%s: unable to create control DMA map, error = %d\n",
153 		       sc->sc_dev.dv_xname, error);
154 		return (error);
155 	}
156 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
157 			   sc->sc_control, sizeof(struct adv_control), NULL,
158 				     BUS_DMA_NOWAIT)) != 0) {
159 		printf("%s: unable to load control DMA map, error = %d\n",
160 		       sc->sc_dev.dv_xname, error);
161 		return (error);
162 	}
163 	return (0);
164 }
165 
166 
167 /*
168  * Create a set of ccbs and add them to the free list.  Called once
169  * by adv_init().  We return the number of CCBs successfully created.
170  */
171 static int
172 adv_create_ccbs(sc, ccbstore, count)
173 	ASC_SOFTC      *sc;
174 	ADV_CCB        *ccbstore;
175 	int             count;
176 {
177 	ADV_CCB        *ccb;
178 	int             i, error;
179 
180 	bzero(ccbstore, sizeof(ADV_CCB) * count);
181 	for (i = 0; i < count; i++) {
182 		ccb = &ccbstore[i];
183 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
184 			printf("%s: unable to initialize ccb, error = %d\n",
185 			       sc->sc_dev.dv_xname, error);
186 			return (i);
187 		}
188 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
189 	}
190 
191 	return (i);
192 }
193 
194 
195 /*
196  * A ccb is put onto the free list.
197  */
198 static void
199 adv_free_ccb(sc, ccb)
200 	ASC_SOFTC      *sc;
201 	ADV_CCB        *ccb;
202 {
203 	int             s;
204 
205 	s = splbio();
206 
207 	adv_reset_ccb(ccb);
208 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
209 
210 	/*
211          * If the free list was empty before this insertion, wake anybody
212          * sleeping in adv_get_ccb() waiting for a CCB to come free.
213          */
214 	if (ccb->chain.tqe_next == 0)
215 		wakeup(&sc->sc_free_ccb);
216 
217 	splx(s);
218 }
219 
220 
221 static void
222 adv_reset_ccb(ccb)
223 	ADV_CCB        *ccb;
224 {
225 
226 	ccb->flags = 0;
227 }
228 
229 
230 static int
231 adv_init_ccb(sc, ccb)
232 	ASC_SOFTC      *sc;
233 	ADV_CCB        *ccb;
234 {
235 	int	hashnum, error;
236 
237 	/*
238          * Create the DMA map for this CCB.
239          */
240 	error = bus_dmamap_create(sc->sc_dmat,
241 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
242 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
243 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
244 	if (error) {
245 		printf("%s: unable to create DMA map, error = %d\n",
246 		       sc->sc_dev.dv_xname, error);
247 		return (error);
248 	}
249 
250 	/*
251 	 * Put the CCB into the phys-to-kv hash table.
252 	 * It never gets taken out.
253 	 */
254 	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
255 	    ADV_CCB_OFF(ccb);
256 	hashnum = CCB_HASH(ccb->hashkey);
257 	ccb->nexthash = sc->sc_ccbhash[hashnum];
258 	sc->sc_ccbhash[hashnum] = ccb;
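	/*
	 * adv_ccb_phys_kv() walks this hash at interrupt time to turn the
	 * physical CCB address returned in ASC_QDONE_INFO back into a
	 * kernel virtual pointer.
	 */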
259 
260 	adv_reset_ccb(ccb);
261 	return (0);
262 }
263 
264 
265 /*
266  * Get a free ccb.
267  *
268  * If there are none, sleep until one comes free, unless the caller set SCSI_NOSLEEP.
269  */
270 static ADV_CCB *
271 adv_get_ccb(sc, flags)
272 	ASC_SOFTC      *sc;
273 	int             flags;
274 {
275 	ADV_CCB        *ccb = 0;
276 	int             s;
277 
278 	s = splbio();
279 
280 	/*
281          * If the free list is empty, sleep waiting for a CCB to come free,
282          * but only if the caller is allowed to sleep.
283          */
284 	for (;;) {
285 		ccb = sc->sc_free_ccb.tqh_first;
286 		if (ccb) {
287 			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
288 			break;
289 		}
290 		if ((flags & SCSI_NOSLEEP) != 0)
291 			goto out;
292 
293 		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
294 	}
295 
296 	ccb->flags |= CCB_ALLOC;
297 
298 out:
299 	splx(s);
300 	return (ccb);
301 }
302 
303 
304 /*
305  * Given a physical address, find the ccb that it corresponds to.
306  */
307 ADV_CCB *
308 adv_ccb_phys_kv(sc, ccb_phys)
309 	ASC_SOFTC	*sc;
310 	u_long		ccb_phys;
311 {
312 	int hashnum = CCB_HASH(ccb_phys);
313 	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];
314 
315 	while (ccb) {
316 		if (ccb->hashkey == ccb_phys)
317 			break;
318 		ccb = ccb->nexthash;
319 	}
320 	return (ccb);
321 }
322 
323 
324 /*
325  * Queue a CCB to be sent to the controller, and send it if possible.
326  */
327 static void
328 adv_queue_ccb(sc, ccb)
329 	ASC_SOFTC      *sc;
330 	ADV_CCB        *ccb;
331 {
332 
333 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
334 
335 	adv_start_ccbs(sc);
336 }
337 
338 
339 static void
340 adv_start_ccbs(sc)
341 	ASC_SOFTC      *sc;
342 {
343 	ADV_CCB        *ccb;
344 
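	/*
	 * Feed the waiting queue to the chip.  A CCB rejected with ASC_BUSY
	 * stays at the head of the queue and is retried from adv_watchdog()
	 * once its watchdog timer fires.
	 */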
345 	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
346 		if (ccb->flags & CCB_WATCHDOG)
347 			untimeout(adv_watchdog, ccb);
348 
349 		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
350 			ccb->flags |= CCB_WATCHDOG;
351 			timeout(adv_watchdog, ccb,
352 				(ADV_WATCH_TIMEOUT * hz) / 1000);
353 			break;
354 		}
355 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
356 
357 		if ((ccb->xs->flags & SCSI_POLL) == 0)
358 			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
359 	}
360 }
361 
362 
363 /******************************************************************************/
364 /*                      DMA-able memory allocation routines                   */
365 /******************************************************************************/
366 
367 
368 /*
369  * Allocate DMA-able memory for the overrun buffer.
370  * This memory can safely be shared among all the AdvanSys boards.
371  */
372 u_int8_t       *
373 adv_alloc_overrunbuf(dvname, dmat)
374 	char           *dvname;
375 	bus_dma_tag_t   dmat;
376 {
377 	static u_int8_t *overrunbuf = NULL;
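	/*
	 * The buffer is allocated once and kept for the life of the system;
	 * the DMA map and segment handles below are local and are not saved,
	 * only the mapped kernel virtual address is remembered here.
	 */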
378 
379 	bus_dmamap_t    ovrbuf_dmamap;
380 	bus_dma_segment_t seg;
381 	int             rseg, error;
382 
383 
384 	/*
385          * If an overrun buffer has already been allocated, don't allocate
386          * another one; just return the address of the existing buffer.
387          */
388 	if (overrunbuf)
389 		return (overrunbuf);
390 
391 
392 	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
393 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
394 		printf("%s: unable to allocate overrun buffer, error = %d\n",
395 		       dvname, error);
396 		return (0);
397 	}
398 	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
399 	(caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
400 		printf("%s: unable to map overrun buffer, error = %d\n",
401 		       dvname, error);
402 
403 		bus_dmamem_free(dmat, &seg, 1);
404 		return (0);
405 	}
406 	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
407 	      ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
408 		printf("%s: unable to create overrun buffer DMA map,"
409 		       " error = %d\n", dvname, error);
410 
411 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
412 		bus_dmamem_free(dmat, &seg, 1);
413 		return (0);
414 	}
415 	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
416 			   ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
417 		printf("%s: unable to load overrun buffer DMA map,"
418 		       " error = %d\n", dvname, error);
419 
420 		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
421 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
422 		bus_dmamem_free(dmat, &seg, 1);
423 		return (0);
424 	}
425 	return (overrunbuf);
426 }
427 
428 
429 /******************************************************************************/
430 /*                         SCSI layer interfacing routines                    */
431 /******************************************************************************/
432 
433 
434 int
435 adv_init(sc)
436 	ASC_SOFTC      *sc;
437 {
438 	int             warn;
439 
440 	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
441 		panic("adv_init: AscFindSignature failed");
442 
443 	/*
444          * Read the board configuration
445          */
446 	AscInitASC_SOFTC(sc);
447 	warn = AscInitFromEEP(sc);
448 	if (warn) {
449 		printf("%s -get: ", sc->sc_dev.dv_xname);
450 		switch (warn) {
451 		case -1:
452 			printf("Chip is not halted\n");
453 			break;
454 
455 		case -2:
456 			printf("Couldn't get MicroCode Start"
457 			       " address\n");
458 			break;
459 
460 		case ASC_WARN_IO_PORT_ROTATE:
461 			printf("I/O port address modified\n");
462 			break;
463 
464 		case ASC_WARN_AUTO_CONFIG:
465 			printf("I/O port increment switch enabled\n");
466 			break;
467 
468 		case ASC_WARN_EEPROM_CHKSUM:
469 			printf("EEPROM checksum error\n");
470 			break;
471 
472 		case ASC_WARN_IRQ_MODIFIED:
473 			printf("IRQ modified\n");
474 			break;
475 
476 		case ASC_WARN_CMD_QNG_CONFLICT:
477 			printf("tag queuing enabled w/o disconnects\n");
478 			break;
479 
480 		default:
481 			printf("unknown warning %d\n", warn);
482 		}
483 	}
484 	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
485 		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
486 
487 	/*
488          * Modify the board configuration
489          */
490 	warn = AscInitFromASC_SOFTC(sc);
491 	if (warn) {
492 		printf("%s -set: ", sc->sc_dev.dv_xname);
493 		switch (warn) {
494 		case ASC_WARN_CMD_QNG_CONFLICT:
495 			printf("tag queuing enabled w/o disconnects\n");
496 			break;
497 
498 		case ASC_WARN_AUTO_CONFIG:
499 			printf("I/O port increment switch enabled\n");
500 			break;
501 
502 		default:
503 			printf("unknown warning %d\n", warn);
504 		}
505 	}
506 	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;
507 
508 	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
509 						     sc->sc_dmat))) {
510 		return (1);
511 	}
512 
513 	return (0);
514 }
515 
516 
517 void
518 adv_attach(sc)
519 	ASC_SOFTC      *sc;
520 {
521 	int             i, error;
522 
523 	/*
524          * Initialize board RISC chip and enable interrupts.
525          */
526 	switch (AscInitDriver(sc)) {
527 	case 0:
528 		/* AllOK */
529 		break;
530 
531 	case 1:
532 		panic("%s: bad signature", sc->sc_dev.dv_xname);
533 		break;
534 
535 	case 2:
536 		panic("%s: unable to load MicroCode",
537 		      sc->sc_dev.dv_xname);
538 		break;
539 
540 	case 3:
541 		panic("%s: unable to initialize MicroCode",
542 		      sc->sc_dev.dv_xname);
543 		break;
544 
545 	default:
546 		panic("%s: unable to initialize board RISC chip",
547 		      sc->sc_dev.dv_xname);
548 	}
549 
550 	/*
551 	 * Fill in the adapter.
552 	 */
553 	sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
554 	sc->sc_adapter.scsipi_minphys = advminphys;
555 
556 	/*
557          * fill in the prototype scsipi_link.
558          */
559 	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
560 	sc->sc_link.adapter_softc = sc;
561 	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
562 	sc->sc_link.adapter = &sc->sc_adapter;
563 	sc->sc_link.device = &adv_dev;
564 	sc->sc_link.openings = 4;
565 	sc->sc_link.scsipi_scsi.max_target = 7;
566 	sc->sc_link.scsipi_scsi.max_lun = 7;
567 	sc->sc_link.type = BUS_SCSI;
568 
569 
570 	TAILQ_INIT(&sc->sc_free_ccb);
571 	TAILQ_INIT(&sc->sc_waiting_ccb);
572 	TAILQ_INIT(&sc->sc_queue);
573 
574 
575 	/*
576          * Allocate the Control Blocks.
577          */
578 	error = adv_alloc_ccbs(sc);
579 	if (error)
580 		return; /* (error) */
581 
582 	/*
583          * Create and initialize the Control Blocks.
584          */
585 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
586 	if (i == 0) {
587 		printf("%s: unable to create control blocks\n",
588 		       sc->sc_dev.dv_xname);
589 		return; /* (ENOMEM) */
590 	} else if (i != ADV_MAX_CCB) {
591 		printf("%s: WARNING: only %d of %d control blocks created\n",
592 		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
593 	}
594 	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
595 }
596 
597 
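/*
 * Bound a transfer so that, in the worst case of page-crossing buffers, it
 * still fits in the controller's ASC_MAX_SG_LIST scatter/gather entries.
 */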
598 static void
599 advminphys(bp)
600 	struct buf     *bp;
601 {
602 
603 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
604 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
605 	minphys(bp);
606 }
607 
608 
609 /*
610  * Start a SCSI operation given the command and the data address.  Also needs
611  * the unit, target and lun.
612  */
613 static int
614 adv_scsi_cmd(xs)
615 	struct scsipi_xfer *xs;
616 {
617 	struct scsipi_link *sc_link = xs->sc_link;
618 	ASC_SOFTC      *sc = sc_link->adapter_softc;
619 	bus_dma_tag_t   dmat = sc->sc_dmat;
620 	ADV_CCB        *ccb;
621 	int             s, flags, error, nsegs;
622 	int             fromqueue = 0, dontqueue = 0;
623 
624 
625 	s = splbio();		/* protect the queue */
626 
627 	/*
628          * If we're running the queue from adv_intr(), we've been
629          * called with the first queue entry as our argument.
630          */
631 	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
632 		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
633 		fromqueue = 1;
634 	} else {
635 
636 		/* Polled requests can't be queued for later. */
637 		dontqueue = xs->flags & SCSI_POLL;
638 
639 		/*
640                  * If there are jobs in the queue, run them first.
641                  */
642 		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
643 			/*
644                          * If we can't queue, we have to abort, since
645                          * we have to preserve order.
646                          */
647 			if (dontqueue) {
648 				splx(s);
649 				xs->error = XS_DRIVER_STUFFUP;
650 				return (TRY_AGAIN_LATER);
651 			}
652 			/*
653                          * Swap with the first queue entry.
654                          */
655 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
656 			xs = TAILQ_FIRST(&sc->sc_queue);
657 			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
658 			fromqueue = 1;
659 		}
660 	}
661 
662 
663 	/*
664          * Get a CCB to use.  If the transfer
665          * is from a buf (possibly at interrupt time),
666          * then we can't allow it to sleep.
667          */
668 
669 	flags = xs->flags;
670 	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
671 		/*
672                  * If we can't queue, we lose.
673                  */
674 		if (dontqueue) {
675 			splx(s);
676 			xs->error = XS_DRIVER_STUFFUP;
677 			return (TRY_AGAIN_LATER);
678 		}
679 		/*
680                  * Stuff ourselves into the queue, at the head
681                  * if that is where we came from in the first place.
682                  */
683 		if (fromqueue)
684 			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
685 		else
686 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
687 		splx(s);
688 		return (SUCCESSFULLY_QUEUED);
689 	}
690 	splx(s);		/* done playing with the queue */
691 
692 	ccb->xs = xs;
693 	ccb->timeout = xs->timeout;
694 
695 	/*
696          * Build up the request
697          */
698 	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
699 
700 	ccb->scsiq.q2.ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
701 		    ADV_CCB_OFF(ccb);
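	/*
	 * This physical pointer is handed back in ASC_QDONE_INFO.d2.ccb_ptr,
	 * and adv_narrow_isr_callback() uses adv_ccb_phys_kv() to recover
	 * the CCB from it.
	 */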
702 
703 	ccb->scsiq.cdbptr = &xs->cmd->opcode;
704 	ccb->scsiq.q2.cdb_len = xs->cmdlen;
705 	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
706 	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
707 	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
708 						   sc_link->scsipi_scsi.lun);
709 	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
710 		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
711 	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);
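	/*
	 * The sense buffer lives inside the CCB itself, so its bus address
	 * is computed from the control structure's DMA segment plus the
	 * offset of the scsi_sense member.
	 */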
712 
713 	/*
714          * If there are any outstanding requests for the current target, then
715          * send every 255th request as an ORDERED request.  This heuristic tries
716          * to retain the benefit of request sorting while preventing request
717          * starvation.  255 is the maximum number of tags or pending commands a
718          * device may have outstanding.
719          */
720 	sc->reqcnt[sc_link->scsipi_scsi.target]++;
721 	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
722 	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
723 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
724 	} else {
725 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
726 	}
727 
728 
729 	if (xs->datalen) {
730 		/*
731                  * Map the DMA transfer.
732                  */
733 #ifdef TFS
734 		if (flags & SCSI_DATA_UIO) {
735 			error = bus_dmamap_load_uio(dmat,
736 				  ccb->dmamap_xfer, (struct uio *) xs->data,
737 						    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
738 		} else
739 #endif				/* TFS */
740 		{
741 			error = bus_dmamap_load(dmat,
742 			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
743 						(flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
744 		}
745 
746 		if (error) {
747 			if (error == EFBIG) {
748 				printf("%s: adv_scsi_cmd, more than %d dma"
749 				       " segments\n",
750 				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
751 			} else {
752 				printf("%s: adv_scsi_cmd, error %d loading"
753 				       " dma map\n",
754 				       sc->sc_dev.dv_xname, error);
755 			}
756 
757 			xs->error = XS_DRIVER_STUFFUP;
758 			adv_free_ccb(sc, ccb);
759 			return (COMPLETE);
760 		}
761 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
762 				ccb->dmamap_xfer->dm_mapsize,
763 			      (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
764 				BUS_DMASYNC_PREWRITE);
765 
766 
767 		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
768 
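		/*
		 * Copy the bus_dma segment list into the ASC_SG_HEAD that is
		 * handed to the chip along with the request.
		 */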
769 		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
770 
771 			ccb->sghead.sg_list[nsegs].addr =
772 				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
773 			ccb->sghead.sg_list[nsegs].bytes =
774 				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
775 		}
776 
777 		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
778 			ccb->dmamap_xfer->dm_nsegs;
779 
780 		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
781 		ccb->scsiq.sg_head = &ccb->sghead;
782 		ccb->scsiq.q1.data_addr = 0;
783 		ccb->scsiq.q1.data_cnt = 0;
784 	} else {
785 		/*
786                  * No data xfer, use non S/G values.
787                  */
788 		ccb->scsiq.q1.data_addr = 0;
789 		ccb->scsiq.q1.data_cnt = 0;
790 	}
791 
792 #ifdef ASC_DEBUG
793 	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
794 			sc_link->scsipi_scsi.target,
795 			sc_link->scsipi_scsi.lun, xs->cmd->opcode,
796 			(unsigned long)ccb);
797 #endif
798 	s = splbio();
799 	adv_queue_ccb(sc, ccb);
800 	splx(s);
801 
802 	/*
803          * Usually return SUCCESSFULLY_QUEUED.
804          */
805 	if ((flags & SCSI_POLL) == 0)
806 		return (SUCCESSFULLY_QUEUED);
807 
808 	/*
809          * If we can't use interrupts, poll on completion
810          */
811 	if (adv_poll(sc, xs, ccb->timeout)) {
812 		adv_timeout(ccb);
813 		if (adv_poll(sc, xs, ccb->timeout))
814 			adv_timeout(ccb);
815 	}
816 	return (COMPLETE);
817 }
818 
819 
820 int
821 adv_intr(arg)
822 	void           *arg;
823 {
824 	ASC_SOFTC      *sc = arg;
825 	struct scsipi_xfer *xs;
826 
827 #ifdef ASC_DEBUG
828 	int int_pend = FALSE;
829 
830 	if(ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh))
831 	{
832 		int_pend = TRUE;
833 		printf("ISR - ");
834 	}
835 #endif
836 	AscISR(sc);
837 #ifdef ASC_DEBUG
838 	if(int_pend)
839 		printf("\n");
840 #endif
841 
842 	/*
843          * If there are queue entries in the software queue, try to
844          * run the first one.  We should be more or less guaranteed
845          * to succeed, since we just freed a CCB.
846          *
847          * NOTE: adv_scsi_cmd() relies on our calling it with
848          * the first entry in the queue.
849          */
850 	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
851 		(void) adv_scsi_cmd(xs);
852 
853 	return (1);
854 }
855 
856 
857 /*
858  * Poll a particular unit, looking for a particular xs
859  */
860 static int
861 adv_poll(sc, xs, count)
862 	ASC_SOFTC      *sc;
863 	struct scsipi_xfer *xs;
864 	int             count;
865 {
866 
867 	/* timeouts are in msec, so we loop in 1000 usec cycles */
868 	while (count) {
869 		adv_intr(sc);
870 		if (xs->flags & ITSDONE)
871 			return (0);
872 		delay(1000);	/* only happens in boot so ok */
873 		count--;
874 	}
875 	return (1);
876 }
877 
878 
879 static void
880 adv_timeout(arg)
881 	void           *arg;
882 {
883 	ADV_CCB        *ccb = arg;
884 	struct scsipi_xfer *xs = ccb->xs;
885 	struct scsipi_link *sc_link = xs->sc_link;
886 	ASC_SOFTC      *sc = sc_link->adapter_softc;
887 	int             s;
888 
889 	scsi_print_addr(sc_link);
890 	printf("timed out");
891 
892 	s = splbio();
893 
894 	/*
895          * If this CCB has been through here before, a previous abort has
896          * failed; don't try to abort again, reset the bus instead.
897          */
898 	if (ccb->flags & CCB_ABORT) {
899 		/* abort timed out */
900 		printf(" AGAIN. Resetting Bus\n");
901 		/* Let's try resetting the bus! */
902 		if (AscResetBus(sc) == ASC_ERROR) {
903 			ccb->timeout = sc->scsi_reset_wait;
904 			adv_queue_ccb(sc, ccb);
905 		}
906 	} else {
907 		/* abort the operation that has timed out */
908 		printf("\n");
909 		AscAbortCCB(sc, ccb);
910 		ccb->xs->error = XS_TIMEOUT;
911 		ccb->timeout = ADV_ABORT_TIMEOUT;
912 		ccb->flags |= CCB_ABORT;
913 		adv_queue_ccb(sc, ccb);
914 	}
915 
916 	splx(s);
917 }
918 
919 
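/*
 * adv_watchdog() - retry CCBs that the chip refused with ASC_BUSY.
 *
 * Clears the CCB_WATCHDOG flag set by adv_start_ccbs() and tries to feed
 * the waiting queue to the chip again.
 */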
920 static void
921 adv_watchdog(arg)
922 	void           *arg;
923 {
924 	ADV_CCB        *ccb = arg;
925 	struct scsipi_xfer *xs = ccb->xs;
926 	struct scsipi_link *sc_link = xs->sc_link;
927 	ASC_SOFTC      *sc = sc_link->adapter_softc;
928 	int             s;
929 
930 	s = splbio();
931 
932 	ccb->flags &= ~CCB_WATCHDOG;
933 	adv_start_ccbs(sc);
934 
935 	splx(s);
936 }
937 
938 
939 /******************************************************************************/
940 /*                      NARROW boards Interrupt callbacks                     */
941 /******************************************************************************/
942 
943 
944 /*
945  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
946  *
947  * Interrupt callback function for the Narrow SCSI Asc Library.
948  */
949 static void
950 adv_narrow_isr_callback(sc, qdonep)
951 	ASC_SOFTC      *sc;
952 	ASC_QDONE_INFO *qdonep;
953 {
954 	bus_dma_tag_t   dmat = sc->sc_dmat;
955 	ADV_CCB        *ccb;
956 	struct scsipi_xfer *xs;
957 	struct scsipi_sense_data *s1, *s2;
958 
959 
960 	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
961 	xs = ccb->xs;
962 
963 #ifdef ASC_DEBUG
964 	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
965 			(unsigned long)ccb,
966 			xs->sc_link->scsipi_scsi.target,
967 			xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
968 #endif
969 	untimeout(adv_timeout, ccb);
970 
971 	/*
972          * If this was a data transfer, unload the map that described
973          * the data buffer.
974          */
975 	if (xs->datalen) {
976 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
977 				ccb->dmamap_xfer->dm_mapsize,
978 			 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
979 				BUS_DMASYNC_POSTWRITE);
980 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
981 	}
982 	if ((ccb->flags & CCB_ALLOC) == 0) {
983 		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
984 		Debugger();
985 		return;
986 	}
987 	/*
988          * 'qdonep' contains the command's ending status.
989          */
990 #ifdef ASC_DEBUG
991 	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
992 #endif
993 	switch (qdonep->d3.done_stat) {
994 	case ASC_QD_NO_ERROR:
995 		switch (qdonep->d3.host_stat) {
996 		case ASC_QHSTA_NO_ERROR:
997 			xs->error = XS_NOERROR;
998 			xs->resid = 0;
999 			break;
1000 
1001 		default:
1002 			/* QHSTA error occurred */
1003 			xs->error = XS_DRIVER_STUFFUP;
1004 			break;
1005 		}
1006 
1007 		/*
1008                  * If an INQUIRY command completed successfully, then call
1009                  * the AscInquiryHandling() function to work around buggy boards.
1010                  */
1011 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
1012 		    (xs->sc_link->scsipi_scsi.lun == 0) &&
1013 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
1014 			AscInquiryHandling(sc,
1015 				      xs->sc_link->scsipi_scsi.target & 0x7,
1016 					   (ASC_SCSI_INQUIRY *) xs->data);
1017 		}
1018 		break;
1019 
1020 	case ASC_QD_WITH_ERROR:
1021 		switch (qdonep->d3.host_stat) {
1022 		case ASC_QHSTA_NO_ERROR:
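			/*
			 * sense_addr in adv_scsi_cmd() pointed the chip at
			 * the CCB's embedded scsi_sense buffer, so on a
			 * CHECK CONDITION the sense data is copied from
			 * there out to the SCSI layer.
			 */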
1023 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
1024 				s1 = &ccb->scsi_sense;
1025 				s2 = &xs->sense.scsi_sense;
1026 				*s2 = *s1;
1027 				xs->error = XS_SENSE;
1028 			} else {
1029 				xs->error = XS_DRIVER_STUFFUP;
1030 			}
1031 			break;
1032 
1033 		default:
1034 			/* QHSTA error occurred */
1035 			xs->error = XS_DRIVER_STUFFUP;
1036 			break;
1037 		}
1038 		break;
1039 
1040 	case ASC_QD_ABORTED_BY_HOST:
1041 	default:
1042 		xs->error = XS_DRIVER_STUFFUP;
1043 		break;
1044 	}
1045 
1046 
1047 	adv_free_ccb(sc, ccb);
1048 	xs->flags |= ITSDONE;
1049 	scsipi_done(xs);
1050 }
1051