xref: /netbsd-src/sys/dev/ic/adv.c (revision 4472dbe5e3bd91ef2540bada7a7ca7384627ff9b)
1 /*	$NetBSD: adv.c,v 1.16 2000/03/23 07:01:28 thorpej Exp $	*/
2 
3 /*
4  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
5  *
6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * Author: Baldassare Dante Profeta <dante@mclink.it>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/kernel.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vm_param.h>
58 #include <vm/pmap.h>
59 
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsiconf.h>
63 
64 #include <dev/ic/advlib.h>
65 #include <dev/ic/adv.h>
66 
67 #ifndef DDB
68 #define	Debugger()	panic("should call debugger here (adv.c)")
69 #endif /* ! DDB */
70 
71 
72 /* #define ASC_DEBUG */
73 
74 /******************************************************************************/
75 
76 
77 static int adv_alloc_control_data __P((ASC_SOFTC *));
78 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
79 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
80 static void adv_reset_ccb __P((ADV_CCB *));
81 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
82 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
83 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
84 static void adv_start_ccbs __P((ASC_SOFTC *));
85 
86 
87 static int adv_scsi_cmd __P((struct scsipi_xfer *));
88 static void advminphys __P((struct buf *));
89 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
90 
91 static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
92 static void adv_timeout __P((void *));
93 static void adv_watchdog __P((void *));
94 
95 
96 /******************************************************************************/
97 
98 
/*
 * Default scsipi_device for our scsipi_link: every hook is NULL, so the
 * scsipi midlayer supplies its default error-handling, start and
 * completion routines.
 */
struct scsipi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
107 
108 
109 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
110 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
111 
112 
113 /******************************************************************************/
114 /*                             Control Blocks routines                        */
115 /******************************************************************************/
116 
117 
118 static int
119 adv_alloc_control_data(sc)
120 	ASC_SOFTC      *sc;
121 {
122 	bus_dma_segment_t seg;
123 	int             error, rseg;
124 
125 	/*
126          * Allocate the control blocks.
127          */
128 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
129 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
130 		printf("%s: unable to allocate control structures,"
131 		       " error = %d\n", sc->sc_dev.dv_xname, error);
132 		return (error);
133 	}
134 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
135 		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
136 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
137 		printf("%s: unable to map control structures, error = %d\n",
138 		       sc->sc_dev.dv_xname, error);
139 		return (error);
140 	}
141 	/*
142          * Create and load the DMA map used for the control blocks.
143          */
144 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
145 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
146 				       &sc->sc_dmamap_control)) != 0) {
147 		printf("%s: unable to create control DMA map, error = %d\n",
148 		       sc->sc_dev.dv_xname, error);
149 		return (error);
150 	}
151 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
152 			   sc->sc_control, sizeof(struct adv_control), NULL,
153 				     BUS_DMA_NOWAIT)) != 0) {
154 		printf("%s: unable to load control DMA map, error = %d\n",
155 		       sc->sc_dev.dv_xname, error);
156 		return (error);
157 	}
158 
159 	/*
160 	 * Initialize the overrun_buf address.
161 	 */
162 	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
163 	    offsetof(struct adv_control, overrun_buf);
164 
165 	return (0);
166 }
167 
168 
169 /*
170  * Create a set of ccbs and add them to the free list.  Called once
171  * by adv_init().  We return the number of CCBs successfully created.
172  */
173 static int
174 adv_create_ccbs(sc, ccbstore, count)
175 	ASC_SOFTC      *sc;
176 	ADV_CCB        *ccbstore;
177 	int             count;
178 {
179 	ADV_CCB        *ccb;
180 	int             i, error;
181 
182 	bzero(ccbstore, sizeof(ADV_CCB) * count);
183 	for (i = 0; i < count; i++) {
184 		ccb = &ccbstore[i];
185 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
186 			printf("%s: unable to initialize ccb, error = %d\n",
187 			       sc->sc_dev.dv_xname, error);
188 			return (i);
189 		}
190 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
191 	}
192 
193 	return (i);
194 }
195 
196 
/*
 * A ccb is put back onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             s;

	s = splbio();		/* free list is also touched at interrupt time */

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If the entry just inserted at the head has no successor, the
	 * list was empty before; wake anybody sleeping in adv_get_ccb()
	 * waiting for a ccb to come free.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}
221 
222 
/*
 * Reset per-ccb state for reuse: clears the flag bits
 * (CCB_ALLOC, CCB_ABORT, CCB_WATCHDOG).
 */
static void
adv_reset_ccb(ccb)
	ADV_CCB        *ccb;
{

	ccb->flags = 0;
}
230 
231 
/*
 * One-time initialization of a ccb: create its data-transfer DMA map
 * and enter the ccb into the phys-to-kv hash table, so the interrupt
 * callback can translate the physical ccb address reported by the chip
 * back into a kernel pointer (see adv_ccb_phys_kv()).
 *
 * Returns 0 on success or the bus_dmamap_create() error number.
 */
static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int	hashnum, error;

	callout_init(&ccb->ccb_watchdog);

	/*
	 * Create the DMA map for this CCB.  It can address up to
	 * (ASC_MAX_SG_LIST - 1) pages of data in at most
	 * ASC_MAX_SG_LIST segments.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Put the ccb in the phystokv hash table.
	 * It never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}
267 
268 
/*
 * Get a free ccb.
 *
 * If the free list is empty and XS_CTL_NOSLEEP is not set in 'flags',
 * sleep until adv_free_ccb() releases one; otherwise return NULL.
 * The returned ccb has CCB_ALLOC set.
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC      *sc;
	int             flags;
{
	ADV_CCB        *ccb = 0;
	int             s;

	s = splbio();

	/*
	 * Grab the first free ccb; if there is none, either fail
	 * immediately (NOSLEEP) or sleep until one is freed.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & XS_CTL_NOSLEEP) != 0)
			goto out;	/* returns NULL */

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}
306 
307 
308 /*
309  * Given a physical address, find the ccb that it corresponds to.
310  */
311 ADV_CCB *
312 adv_ccb_phys_kv(sc, ccb_phys)
313 	ASC_SOFTC	*sc;
314 	u_long		ccb_phys;
315 {
316 	int hashnum = CCB_HASH(ccb_phys);
317 	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];
318 
319 	while (ccb) {
320 		if (ccb->hashkey == ccb_phys)
321 			break;
322 		ccb = ccb->nexthash;
323 	}
324 	return (ccb);
325 }
326 
327 
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 * Caller is expected to hold splbio (see adv_scsi_cmd()/adv_timeout()).
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}
341 
342 
/*
 * Hand as many waiting ccbs as possible to the chip, in queue order.
 * If the chip reports ASC_BUSY the ccb stays at the head of the waiting
 * queue and a watchdog callout is armed to retry via adv_watchdog().
 */
static void
adv_start_ccbs(sc)
	ASC_SOFTC      *sc;
{
	ADV_CCB        *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		/* A pending retry timer is superseded by this attempt. */
		if (ccb->flags & CCB_WATCHDOG)
			callout_stop(&ccb->ccb_watchdog);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			callout_reset(&ccb->ccb_watchdog,
			    (ADV_WATCH_TIMEOUT * hz) / 1000,
			    adv_watchdog, ccb);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		/* Arm the command timeout unless the caller will poll. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000,
			    adv_timeout, ccb);
	}
}
368 
369 
370 /******************************************************************************/
371 /*                         SCSI layer interfacing routines                    */
372 /******************************************************************************/
373 
374 
/*
 * Find the adapter signature, read its configuration from the EEPROM,
 * sanity-check it and write the (possibly adjusted) configuration back.
 * Library warnings are printed but are not fatal.
 *
 * Returns 0 on success, 1 if no board signature was found.
 */
int
adv_init(sc)
	ASC_SOFTC      *sc;
{
	int             warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		printf("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			       " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	/* Clamp the EEPROM-supplied reset wait to the library maximum. */
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	/* Second-level interrupt handler, called from AscISR(). */
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	return (0);
}
453 
454 
/*
 * Finish attaching the adapter: start the RISC chip, fill in the
 * scsipi adapter/link structures, allocate and initialize the control
 * blocks, and attach the SCSI bus.  Fatal chip-initialization errors
 * panic; allocation failures print a message and abort the attach.
 */
void
adv_attach(sc)
	ASC_SOFTC      *sc;
{
	int             i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = advminphys;

	/*
	 * Fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = 7;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
	/* Attach the SCSI bus; child devices are found by scsiprint(). */
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
534 
535 
536 static void
537 advminphys(bp)
538 	struct buf     *bp;
539 {
540 
541 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
542 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
543 	minphys(bp);
544 }
545 
546 
/*
 * Start a scsi operation given the command and the data address.  Also
 * needs the unit, target and lu.  Returns SUCCESSFULLY_QUEUED,
 * TRY_AGAIN_LATER or COMPLETE per the scsipi adapter contract.
 */
static int
adv_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	int             s, flags, error, nsegs;
	int             fromqueue = 0, dontqueue = 0, nowait = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the software queue (see adv_intr()), we've
	 * been called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;	/* may be running at interrupt time */
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->xs_control & XS_CTL_POLL;

		/*
		 * If there are jobs in the queue, run them first
		 * to preserve ordering.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry: append
			 * ourselves and take over the head entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * Get a ccb to use.  If the transfer is from a buf (possibly
	 * from interrupt time) then we can't allow it to sleep.
	 */

	flags = xs->xs_control;
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off it in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	/* Bus address of this ccb, echoed back by the chip at completion. */
	ccb->scsiq.q2.ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb);

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
						   sc_link->scsipi_scsi.lun);
	/* Sense data is DMAed straight into the ccb's scsi_sense area. */
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request.  This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation. 255 is the max number of tags or pending
	 * commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->scsipi_scsi.target]++;
	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
				  ccb->dmamap_xfer, (struct uio *) xs->data,
						    (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
						(flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				       " segments\n",
				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				       " dma map\n",
				       sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		/* Make the mapped buffer visible to the device. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			      (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
				BUS_DMASYNC_PREWRITE);


		/* Build the scatter/gather list from the loaded DMA map. */
		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
			sc_link->scsipi_scsi.target,
			sc_link->scsipi_scsi.lun, xs->cmd->opcode,
			(unsigned long)ccb);
#endif
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & XS_CTL_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion.  On the first
	 * timeout an abort is attempted (adv_timeout()); on the second
	 * the bus is reset.
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}
759 
760 
/*
 * Hardware interrupt handler: let the library service the chip, then
 * kick the software queue.  Always claims the interrupt (returns 1).
 */
int
adv_intr(arg)
	void           *arg;
{
	ASC_SOFTC      *sc = arg;
	struct scsipi_xfer *xs;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if(ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh))
	{
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if(int_pend)
		printf("\n");
#endif

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adv_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}
796 
797 
798 /*
799  * Poll a particular unit, looking for a particular xs
800  */
801 static int
802 adv_poll(sc, xs, count)
803 	ASC_SOFTC      *sc;
804 	struct scsipi_xfer *xs;
805 	int             count;
806 {
807 
808 	/* timeouts are in msec, so we loop in 1000 usec cycles */
809 	while (count) {
810 		adv_intr(sc);
811 		if (xs->xs_status & XS_STS_DONE)
812 			return (0);
813 		delay(1000);	/* only happens in boot so ok */
814 		count--;
815 	}
816 	return (1);
817 }
818 
819 
/*
 * Command timeout handler (armed in adv_start_ccbs()).  First timeout:
 * abort the command and re-queue the ccb with a short abort timeout.
 * Second timeout (CCB_ABORT already set): the abort failed too, so
 * reset the SCSI bus instead.
 */
static void
adv_timeout(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}
859 
860 
/*
 * Watchdog callout for a ccb the chip refused with ASC_BUSY (see
 * adv_start_ccbs()): clear the watchdog flag and retry starting the
 * waiting queue.
 */
static void
adv_watchdog(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}
878 
879 
880 /******************************************************************************/
881 /*                      NARROW boards Interrupt callbacks                     */
882 /******************************************************************************/
883 
884 
/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 * 'qdonep' describes a completed request: translate the chip's status
 * into scsipi error codes, unload the data DMA map, copy sense data if
 * present, and hand the xfer back to the midlayer.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC      *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	/* The chip reports the bus address of the ccb; map it back. */
	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
			(unsigned long)ccb,
			xs->sc_link->scsipi_scsi.target,
			xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
#endif
	/* Cancel the command timeout armed in adv_start_ccbs(). */
	callout_stop(&ccb->xs->xs_callout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			 (xs->xs_control & XS_CTL_DATA_IN) ?
			 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->scsipi_scsi.lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
				      xs->sc_link->scsipi_scsi.target & 0x7,
					   (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			/* Check condition: copy the DMAed sense data out. */
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}
992