1 /*	$NetBSD: adv.c,v 1.9 1998/12/09 08:47:17 thorpej Exp $	*/
2 
3 /*
4  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
5  *
6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * Author: Baldassare Dante Profeta <dante@mclink.it>
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/errno.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/malloc.h>
48 #include <sys/buf.h>
49 #include <sys/proc.h>
50 #include <sys/user.h>
51 
52 #include <machine/bus.h>
53 #include <machine/intr.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58 
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62 
63 #include <dev/ic/adv.h>
64 #include <dev/ic/advlib.h>
65 
66 #ifndef DDB
67 #define	Debugger()	panic("should call debugger here (adv.c)")
68 #endif /* ! DDB */
69 
70 
71 /* #define ASC_DEBUG */
72 
73 /******************************************************************************/
74 
75 
76 static int adv_alloc_ccbs __P((ASC_SOFTC *));
77 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
78 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
79 static void adv_reset_ccb __P((ADV_CCB *));
80 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
81 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
82 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
83 static void adv_start_ccbs __P((ASC_SOFTC *));
84 
85 static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));
86 
87 static int adv_scsi_cmd __P((struct scsipi_xfer *));
88 static void advminphys __P((struct buf *));
89 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
90 
91 static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
92 static void adv_timeout __P((void *));
93 static void adv_watchdog __P((void *));
94 
95 
96 /******************************************************************************/
97 
98 
99 /* the below structure is so we have a default dev struct for our link struct */
100 struct scsipi_device adv_dev =
101 {
102 	NULL,			/* Use default error handler */
103 	NULL,			/* have a queue, served by this */
104 	NULL,			/* have no async handler */
105 	NULL,			/* Use default 'done' routine */
106 };
107 
108 
109 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
110 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
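/*
 * Both timeouts are in milliseconds; they are converted to clock ticks
 * with (msec * hz) / 1000 before being handed to timeout(9).
 */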
111 
112 
113 /******************************************************************************/
114 /*                            scsipi_xfer queue routines                      */
115 /******************************************************************************/
116 
117 
118 /******************************************************************************/
119 /*                             Control Blocks routines                        */
120 /******************************************************************************/
121 
122 
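/*
 * Allocate, map and load the DMA-safe memory that holds the shared
 * control structure (struct adv_control).  A single contiguous segment
 * is requested, and the map is kept in sc->sc_dmamap_control so the
 * per-CCB sense buffers inside it can later be addressed by bus address.
 */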
123 static int
124 adv_alloc_ccbs(sc)
125 	ASC_SOFTC      *sc;
126 {
127 	bus_dma_segment_t seg;
128 	int             error, rseg;
129 
130 	/*
131          * Allocate the control blocks.
132          */
133 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
134 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
135 		printf("%s: unable to allocate control structures,"
136 		       " error = %d\n", sc->sc_dev.dv_xname, error);
137 		return (error);
138 	}
139 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
140 		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
141 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
142 		printf("%s: unable to map control structures, error = %d\n",
143 		       sc->sc_dev.dv_xname, error);
144 		return (error);
145 	}
146 	/*
147          * Create and load the DMA map used for the control blocks.
148          */
149 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
150 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
151 				       &sc->sc_dmamap_control)) != 0) {
152 		printf("%s: unable to create control DMA map, error = %d\n",
153 		       sc->sc_dev.dv_xname, error);
154 		return (error);
155 	}
156 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
157 			   sc->sc_control, sizeof(struct adv_control), NULL,
158 				     BUS_DMA_NOWAIT)) != 0) {
159 		printf("%s: unable to load control DMA map, error = %d\n",
160 		       sc->sc_dev.dv_xname, error);
161 		return (error);
162 	}
163 	return (0);
164 }
165 
166 
167 /*
168  * Create a set of ccbs and add them to the free list.  Called once
169  * by adv_init().  We return the number of CCBs successfully created.
170  */
171 static int
172 adv_create_ccbs(sc, ccbstore, count)
173 	ASC_SOFTC      *sc;
174 	ADV_CCB        *ccbstore;
175 	int             count;
176 {
177 	ADV_CCB        *ccb;
178 	int             i, error;
179 
180 	bzero(ccbstore, sizeof(ADV_CCB) * count);
181 	for (i = 0; i < count; i++) {
182 		ccb = &ccbstore[i];
183 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
184 			printf("%s: unable to initialize ccb, error = %d\n",
185 			       sc->sc_dev.dv_xname, error);
186 			return (i);
187 		}
188 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
189 	}
190 
191 	return (i);
192 }
193 
194 
195 /*
196  * A ccb is put onto the free list.
197  */
198 static void
199 adv_free_ccb(sc, ccb)
200 	ASC_SOFTC      *sc;
201 	ADV_CCB        *ccb;
202 {
203 	int             s;
204 
205 	s = splbio();
206 
207 	adv_reset_ccb(ccb);
208 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
209 
210 	/*
211          * If there were none, wake anybody waiting for one to come free,
212          * starting with queued entries.
213          */
214 	if (ccb->chain.tqe_next == 0)
215 		wakeup(&sc->sc_free_ccb);
216 
217 	splx(s);
218 }
219 
220 
221 static void
222 adv_reset_ccb(ccb)
223 	ADV_CCB        *ccb;
224 {
225 
226 	ccb->flags = 0;
227 }
228 
229 
230 static int
231 adv_init_ccb(sc, ccb)
232 	ASC_SOFTC      *sc;
233 	ADV_CCB        *ccb;
234 {
235 	int             error;
236 
237 	/*
238          * Create the DMA map for this CCB.
239          */
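	/*
	 * The map is sized to the limit advminphys() enforces: up to
	 * ASC_MAX_SG_LIST segments covering at most
	 * (ASC_MAX_SG_LIST - 1) * PAGE_SIZE bytes; a transfer of that
	 * size can touch at most ASC_MAX_SG_LIST pages when it is not
	 * page aligned.
	 */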
240 	error = bus_dmamap_create(sc->sc_dmat,
241 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
242 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
243 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
244 	if (error) {
245 		printf("%s: unable to create DMA map, error = %d\n",
246 		       sc->sc_dev.dv_xname, error);
247 		return (error);
248 	}
249 	adv_reset_ccb(ccb);
250 	return (0);
251 }
252 
253 
254 /*
255  * Get a free ccb
256  *
257  * If there are none, see if we can allocate a new one
258  */
259 static ADV_CCB *
260 adv_get_ccb(sc, flags)
261 	ASC_SOFTC      *sc;
262 	int             flags;
263 {
264 	ADV_CCB        *ccb = 0;
265 	int             s;
266 
267 	s = splbio();
268 
269 	/*
270          * If we can and have to, sleep waiting for one to come free
271          * but only if we can't allocate a new one.
272          */
273 	for (;;) {
274 		ccb = sc->sc_free_ccb.tqh_first;
275 		if (ccb) {
276 			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
277 			break;
278 		}
279 		if ((flags & SCSI_NOSLEEP) != 0)
280 			goto out;
281 
282 		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
283 	}
284 
285 	ccb->flags |= CCB_ALLOC;
286 
287 out:
288 	splx(s);
289 	return (ccb);
290 }
291 
292 
293 /*
294  * Queue a CCB to be sent to the controller, and send it if possible.
295  */
296 static void
297 adv_queue_ccb(sc, ccb)
298 	ASC_SOFTC      *sc;
299 	ADV_CCB        *ccb;
300 {
301 
302 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
303 
304 	adv_start_ccbs(sc);
305 }
306 
307 
308 static void
309 adv_start_ccbs(sc)
310 	ASC_SOFTC      *sc;
311 {
312 	ADV_CCB        *ccb;
313 
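	/*
	 * Feed waiting CCBs to the firmware.  If AscExeScsiQueue() reports
	 * ASC_BUSY, the CCB is left at the head of the list and a watchdog
	 * timer is armed to retry; otherwise the CCB is dequeued and, for
	 * non-polled requests, a per-command timeout is started.
	 */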
314 	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
315 		if (ccb->flags & CCB_WATCHDOG)
316 			untimeout(adv_watchdog, ccb);
317 
318 		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
319 			ccb->flags |= CCB_WATCHDOG;
320 			timeout(adv_watchdog, ccb,
321 				(ADV_WATCH_TIMEOUT * hz) / 1000);
322 			break;
323 		}
324 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
325 
326 		if ((ccb->xs->flags & SCSI_POLL) == 0)
327 			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
328 	}
329 }
330 
331 
332 /******************************************************************************/
333 /*                      DMA able memory allocation routines                   */
334 /******************************************************************************/
335 
336 
337 /*
338  * Allocate DMA-able memory for the overrun_buffer.
339  * This memory can be safely shared among all the AdvanSys boards.
340  */
341 u_int8_t       *
342 adv_alloc_overrunbuf(dvname, dmat)
343 	char           *dvname;
344 	bus_dma_tag_t   dmat;
345 {
346 	static u_int8_t *overrunbuf = NULL;
347 
348 	bus_dmamap_t    ovrbuf_dmamap;
349 	bus_dma_segment_t seg;
350 	int             rseg, error;
351 
352 
353 	/*
354          * If an overrun buffer has already been allocated, don't allocate it
355          * again; instead return the address of the existing buffer.
356          */
357 	if (overrunbuf)
358 		return (overrunbuf);
359 
360 
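	/*
	 * Note that ovrbuf_dmamap is local and is never stored or destroyed
	 * on success: the buffer stays allocated, mapped and loaded for the
	 * lifetime of the system, presumably intentional since it is shared
	 * by every board (see the static overrunbuf above).
	 */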
361 	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
362 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
363 		printf("%s: unable to allocate overrun buffer, error = %d\n",
364 		       dvname, error);
365 		return (0);
366 	}
367 	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
368 	(caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
369 		printf("%s: unable to map overrun buffer, error = %d\n",
370 		       dvname, error);
371 
372 		bus_dmamem_free(dmat, &seg, 1);
373 		return (0);
374 	}
375 	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
376 	      ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
377 		printf("%s: unable to create overrun buffer DMA map,"
378 		       " error = %d\n", dvname, error);
379 
380 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
381 		bus_dmamem_free(dmat, &seg, 1);
382 		return (0);
383 	}
384 	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
385 			   ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
386 		printf("%s: unable to load overrun buffer DMA map,"
387 		       " error = %d\n", dvname, error);
388 
389 		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
390 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
391 		bus_dmamem_free(dmat, &seg, 1);
392 		return (0);
393 	}
394 	return (overrunbuf);
395 }
396 
397 
398 /******************************************************************************/
399 /*                         SCSI layer interfacing routines                    */
400 /******************************************************************************/
401 
402 
403 int
404 adv_init(sc)
405 	ASC_SOFTC      *sc;
406 {
407 	int             warn;
408 
409 	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
410 		panic("adv_init: adv_find_signature failed");
411 
412 	/*
413          * Read the board configuration
414          */
415 	AscInitASC_SOFTC(sc);
416 	warn = AscInitFromEEP(sc);
417 	if (warn) {
418 		printf("%s -get: ", sc->sc_dev.dv_xname);
419 		switch (warn) {
420 		case -1:
421 			printf("Chip is not halted\n");
422 			break;
423 
424 		case -2:
425 			printf("Couldn't get MicroCode Start"
426 			       " address\n");
427 			break;
428 
429 		case ASC_WARN_IO_PORT_ROTATE:
430 			printf("I/O port address modified\n");
431 			break;
432 
433 		case ASC_WARN_AUTO_CONFIG:
434 			printf("I/O port increment switch enabled\n");
435 			break;
436 
437 		case ASC_WARN_EEPROM_CHKSUM:
438 			printf("EEPROM checksum error\n");
439 			break;
440 
441 		case ASC_WARN_IRQ_MODIFIED:
442 			printf("IRQ modified\n");
443 			break;
444 
445 		case ASC_WARN_CMD_QNG_CONFLICT:
446 			printf("tag queuing enabled w/o disconnects\n");
447 			break;
448 
449 		default:
450 			printf("unknown warning %d\n", warn);
451 		}
452 	}
453 	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
454 		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
455 
456 	/*
457          * Modify the board configuration
458          */
459 	warn = AscInitFromASC_SOFTC(sc);
460 	if (warn) {
461 		printf("%s -set: ", sc->sc_dev.dv_xname);
462 		switch (warn) {
463 		case ASC_WARN_CMD_QNG_CONFLICT:
464 			printf("tag queuing enabled w/o disconnects\n");
465 			break;
466 
467 		case ASC_WARN_AUTO_CONFIG:
468 			printf("I/O port increment switch enabled\n");
469 			break;
470 
471 		default:
472 			printf("unknown warning %d\n", warn);
473 		}
474 	}
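	/*
	 * Hook up the completion callback that AscISR() invokes as each
	 * request finishes (see adv_narrow_isr_callback() below).
	 */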
475 	sc->isr_callback = (ulong) adv_narrow_isr_callback;
476 
477 	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
478 						     sc->sc_dmat))) {
479 		return (1);
480 	}
481 
482 	return (0);
483 }
484 
485 
486 void
487 adv_attach(sc)
488 	ASC_SOFTC      *sc;
489 {
490 	int             i, error;
491 
492 	/*
493          * Initialize board RISC chip and enable interrupts.
494          */
495 	switch (AscInitDriver(sc)) {
496 	case 0:
497 		/* AllOK */
498 		break;
499 
500 	case 1:
501 		panic("%s: bad signature", sc->sc_dev.dv_xname);
502 		break;
503 
504 	case 2:
505 		panic("%s: unable to load MicroCode",
506 		      sc->sc_dev.dv_xname);
507 		break;
508 
509 	case 3:
510 		panic("%s: unable to initialize MicroCode",
511 		      sc->sc_dev.dv_xname);
512 		break;
513 
514 	default:
515 		panic("%s: unable to initialize board RISC chip",
516 		      sc->sc_dev.dv_xname);
517 	}
518 
519 	/*
520 	 * Fill in the adapter.
521 	 */
522 	sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
523 	sc->sc_adapter.scsipi_minphys = advminphys;
524 
525 	/*
526          * fill in the prototype scsipi_link.
527          */
528 	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
529 	sc->sc_link.adapter_softc = sc;
530 	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
531 	sc->sc_link.adapter = &sc->sc_adapter;
532 	sc->sc_link.device = &adv_dev;
533 	sc->sc_link.openings = 4;
534 	sc->sc_link.scsipi_scsi.max_target = 7;
535 	sc->sc_link.scsipi_scsi.max_lun = 7;
536 	sc->sc_link.type = BUS_SCSI;
537 
538 
539 	TAILQ_INIT(&sc->sc_free_ccb);
540 	TAILQ_INIT(&sc->sc_waiting_ccb);
541 	TAILQ_INIT(&sc->sc_queue);
542 
543 
544 	/*
545          * Allocate the Control Blocks.
546          */
547 	error = adv_alloc_ccbs(sc);
548 	if (error)
549 		return; /* (error) */
550 
551 	/*
552          * Create and initialize the Control Blocks.
553          */
554 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
555 	if (i == 0) {
556 		printf("%s: unable to create control blocks\n",
557 		       sc->sc_dev.dv_xname);
558 		return; /* (ENOMEM) */
559 	} else if (i != ADV_MAX_CCB) {
560 		printf("%s: WARNING: only %d of %d control blocks created\n",
561 		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
562 	}
563 	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
564 }
565 
566 
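/*
 * Clamp a transfer to what a single CCB's S/G map can describe, then
 * apply the machine-dependent minphys() limit as well.
 */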
567 static void
568 advminphys(bp)
569 	struct buf     *bp;
570 {
571 
572 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
573 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
574 	minphys(bp);
575 }
576 
577 
578 /*
579  * Start a SCSI operation given the command and the data address.  Also needs
580  * the unit, target and LUN.
581  */
582 static int
583 adv_scsi_cmd(xs)
584 	struct scsipi_xfer *xs;
585 {
586 	struct scsipi_link *sc_link = xs->sc_link;
587 	ASC_SOFTC      *sc = sc_link->adapter_softc;
588 	bus_dma_tag_t   dmat = sc->sc_dmat;
589 	ADV_CCB        *ccb;
590 	int             s, flags, error, nsegs;
591 	int             fromqueue = 0, dontqueue = 0;
592 
593 
594 	s = splbio();		/* protect the queue */
595 
596 	/*
597          * If we're running the queue from adv_intr(), we've been
598          * called with the first queue entry as our argument.
599          */
600 	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
601 		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
602 		fromqueue = 1;
603 	} else {
604 
605 		/* Polled requests can't be queued for later. */
606 		dontqueue = xs->flags & SCSI_POLL;
607 
608 		/*
609                  * If there are jobs in the queue, run them first.
610                  */
611 		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
612 			/*
613                          * If we can't queue, we have to abort, since
614                          * we have to preserve order.
615                          */
616 			if (dontqueue) {
617 				splx(s);
618 				xs->error = XS_DRIVER_STUFFUP;
619 				return (TRY_AGAIN_LATER);
620 			}
621 			/*
622                          * Swap with the first queue entry.
623                          */
624 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
625 			xs = TAILQ_FIRST(&sc->sc_queue);
626 			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
627 			fromqueue = 1;
628 		}
629 	}
630 
631 
632 	/*
633          * Get a CCB to use. If the transfer is from a buf
634          * (possibly from interrupt time), then we can't allow
635          * it to sleep.
636          */
637 
638 	flags = xs->flags;
639 	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
640 		/*
641                  * If we can't queue, we lose.
642                  */
643 		if (dontqueue) {
644 			splx(s);
645 			xs->error = XS_DRIVER_STUFFUP;
646 			return (TRY_AGAIN_LATER);
647 		}
648 		/*
649                  * Stuff ourselves into the queue, in front
650                  * if we came off in the first place.
651                  */
652 		if (fromqueue)
653 			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
654 		else
655 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
656 		splx(s);
657 		return (SUCCESSFULLY_QUEUED);
658 	}
659 	splx(s);		/* done playing with the queue */
660 
661 	ccb->xs = xs;
662 	ccb->timeout = xs->timeout;
663 
664 	/*
665          * Build up the request
666          */
667 	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
668 
669 	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;
670 
671 	ccb->scsiq.cdbptr = &xs->cmd->opcode;
672 	ccb->scsiq.q2.cdb_len = xs->cmdlen;
673 	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
674 	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
675 	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
676 						   sc_link->scsipi_scsi.lun);
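	/*
	 * Point the board at the per-CCB sense buffer that lives inside the
	 * control structure: bus address of the control DMA segment plus
	 * this CCB's offset plus the offset of scsi_sense within the CCB.
	 * Sense data is DMAed straight into ccb->scsi_sense.
	 */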
677 	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
678 		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
679 	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);
680 
681 	/*
682          * If there are any outstanding requests for the current target,
683          * then send an ORDERED request every 255th request.  This heuristic
684          * tries to retain the benefit of request sorting while preventing
685          * request starvation.  255 is the maximum number of tags or pending
686          * commands a device may have outstanding.
687          */
688 	sc->reqcnt[sc_link->scsipi_scsi.target]++;
689 	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
690 	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
691 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
692 	} else {
693 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
694 	}
695 
696 
697 	if (xs->datalen) {
698 		/*
699                  * Map the DMA transfer.
700                  */
701 #ifdef TFS
702 		if (flags & SCSI_DATA_UIO) {
703 			error = bus_dmamap_load_uio(dmat,
704 				  ccb->dmamap_xfer, (struct uio *) xs->data,
705 						    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
706 		} else
707 #endif				/* TFS */
708 		{
709 			error = bus_dmamap_load(dmat,
710 			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
711 						(flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
712 		}
713 
714 		if (error) {
715 			if (error == EFBIG) {
716 				printf("%s: adv_scsi_cmd, more than %d dma"
717 				       " segments\n",
718 				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
719 			} else {
720 				printf("%s: adv_scsi_cmd, error %d loading"
721 				       " dma map\n",
722 				       sc->sc_dev.dv_xname, error);
723 			}
724 
725 			xs->error = XS_DRIVER_STUFFUP;
726 			adv_free_ccb(sc, ccb);
727 			return (COMPLETE);
728 		}
729 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
730 				ccb->dmamap_xfer->dm_mapsize,
731 			      (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
732 				BUS_DMASYNC_PREWRITE);
733 
734 
735 		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
736 
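		/*
		 * Copy the bus_dma segment list into the AdvanSys S/G
		 * header; the firmware is handed ccb->sghead instead of a
		 * single data_addr/data_cnt pair (ASC_QC_SG_HEAD below).
		 */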
737 		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
738 
739 			ccb->sghead.sg_list[nsegs].addr =
740 				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
741 			ccb->sghead.sg_list[nsegs].bytes =
742 				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
743 		}
744 
745 		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
746 			ccb->dmamap_xfer->dm_nsegs;
747 
748 		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
749 		ccb->scsiq.sg_head = &ccb->sghead;
750 		ccb->scsiq.q1.data_addr = 0;
751 		ccb->scsiq.q1.data_cnt = 0;
752 	} else {
753 		/*
754                  * No data xfer, use non S/G values.
755                  */
756 		ccb->scsiq.q1.data_addr = 0;
757 		ccb->scsiq.q1.data_cnt = 0;
758 	}
759 
760 #ifdef ASC_DEBUG
761 	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
762 			sc_link->scsipi_scsi.target,
763 			sc_link->scsipi_scsi.lun, xs->cmd->opcode,
764 			(unsigned long)ccb);
765 #endif
766 	s = splbio();
767 	adv_queue_ccb(sc, ccb);
768 	splx(s);
769 
770 	/*
771          * Usually return SUCCESSFULLY QUEUED
772          */
773 	if ((flags & SCSI_POLL) == 0)
774 		return (SUCCESSFULLY_QUEUED);
775 
776 	/*
777          * If we can't use interrupts, poll on completion
778          */
779 	if (adv_poll(sc, xs, ccb->timeout)) {
780 		adv_timeout(ccb);
781 		if (adv_poll(sc, xs, ccb->timeout))
782 			adv_timeout(ccb);
783 	}
784 	return (COMPLETE);
785 }
786 
787 
788 int
789 adv_intr(arg)
790 	void           *arg;
791 {
792 	ASC_SOFTC      *sc = arg;
793 	struct scsipi_xfer *xs;
794 
795 #ifdef ASC_DEBUG
796 	int int_pend = FALSE;
797 
798 	if(ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh))
799 	{
800 		int_pend = TRUE;
801 		printf("ISR - ");
802 	}
803 #endif
804 	AscISR(sc);
805 #ifdef ASC_DEBUG
806 	if(int_pend)
807 		printf("\n");
808 #endif
809 
810 	/*
811          * If there are queue entries in the software queue, try to
812          * run the first one.  We should be more or less guaranteed
813          * to succeed, since we just freed a CCB.
814          *
815          * NOTE: adv_scsi_cmd() relies on our calling it with
816          * the first entry in the queue.
817          */
818 	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
819 		(void) adv_scsi_cmd(xs);
820 
821 	return (1);
822 }
823 
824 
825 /*
826  * Poll a particular unit, looking for a particular xs
827  */
828 static int
829 adv_poll(sc, xs, count)
830 	ASC_SOFTC      *sc;
831 	struct scsipi_xfer *xs;
832 	int             count;
833 {
834 
835 	/* timeouts are in msec, so we loop in 1000 usec cycles */
836 	while (count) {
837 		adv_intr(sc);
838 		if (xs->flags & ITSDONE)
839 			return (0);
840 		delay(1000);	/* only happens in boot so ok */
841 		count--;
842 	}
843 	return (1);
844 }
845 
846 
847 static void
848 adv_timeout(arg)
849 	void           *arg;
850 {
851 	ADV_CCB        *ccb = arg;
852 	struct scsipi_xfer *xs = ccb->xs;
853 	struct scsipi_link *sc_link = xs->sc_link;
854 	ASC_SOFTC      *sc = sc_link->adapter_softc;
855 	int             s;
856 
857 	scsi_print_addr(sc_link);
858 	printf("timed out");
859 
860 	s = splbio();
861 
862 	/*
863          * If it has been through here before, then a previous abort has
864          * failed; don't try to abort again, reset the bus instead.
865          */
866 	if (ccb->flags & CCB_ABORT) {
867 		/* abort timed out */
868 		printf(" AGAIN. Resetting Bus\n");
869 		/* Let's try resetting the bus! */
870 		if (AscResetBus(sc) == ASC_ERROR) {
871 			ccb->timeout = sc->scsi_reset_wait;
872 			adv_queue_ccb(sc, ccb);
873 		}
874 	} else {
875 		/* abort the operation that has timed out */
876 		printf("\n");
877 		AscAbortCCB(sc, (u_int32_t) ccb);
878 		ccb->xs->error = XS_TIMEOUT;
879 		ccb->timeout = ADV_ABORT_TIMEOUT;
880 		ccb->flags |= CCB_ABORT;
881 		adv_queue_ccb(sc, ccb);
882 	}
883 
884 	splx(s);
885 }
886 
887 
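/*
 * Watchdog fired ADV_WATCH_TIMEOUT ms after AscExeScsiQueue() returned
 * ASC_BUSY (see adv_start_ccbs()): clear the flag and try again to push
 * the waiting CCBs to the board.
 */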
888 static void
889 adv_watchdog(arg)
890 	void           *arg;
891 {
892 	ADV_CCB        *ccb = arg;
893 	struct scsipi_xfer *xs = ccb->xs;
894 	struct scsipi_link *sc_link = xs->sc_link;
895 	ASC_SOFTC      *sc = sc_link->adapter_softc;
896 	int             s;
897 
898 	s = splbio();
899 
900 	ccb->flags &= ~CCB_WATCHDOG;
901 	adv_start_ccbs(sc);
902 
903 	splx(s);
904 }
905 
906 
907 /******************************************************************************/
908 /*                  NARROW and WIDE boards Interrupt callbacks                */
909 /******************************************************************************/
910 
911 
912 /*
913  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
914  *
915  * Interrupt callback function for the Narrow SCSI Asc Library.
916  */
917 static void
918 adv_narrow_isr_callback(sc, qdonep)
919 	ASC_SOFTC      *sc;
920 	ASC_QDONE_INFO *qdonep;
921 {
922 	bus_dma_tag_t   dmat = sc->sc_dmat;
923 	ADV_CCB        *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
924 	struct scsipi_xfer *xs = ccb->xs;
925 	struct scsipi_sense_data *s1, *s2;
926 
927 
928 #ifdef ASC_DEBUG
929 	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
930 			(unsigned long)ccb,
931 			xs->sc_link->scsipi_scsi.target,
932 			xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
933 #endif
934 	untimeout(adv_timeout, ccb);
935 
936 	/*
937          * If this was a data transfer, unload the map that described
938          * the data buffer.
939          */
940 	if (xs->datalen) {
941 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
942 				ccb->dmamap_xfer->dm_mapsize,
943 			 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
944 				BUS_DMASYNC_POSTWRITE);
945 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
946 	}
947 	if ((ccb->flags & CCB_ALLOC) == 0) {
948 		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
949 		Debugger();
950 		return;
951 	}
952 	/*
953          * 'qdonep' contains the command's ending status.
954          */
955 #ifdef ASC_DEBUG
956 	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
957 #endif
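	/*
	 * Translate the library's done_stat/host_stat pair into a scsipi
	 * error code; on a check condition, the sense bytes already DMAed
	 * into ccb->scsi_sense are copied out to the scsipi_xfer.
	 */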
958 	switch (qdonep->d3.done_stat) {
959 	case ASC_QD_NO_ERROR:
960 		switch (qdonep->d3.host_stat) {
961 		case ASC_QHSTA_NO_ERROR:
962 			xs->error = XS_NOERROR;
963 			xs->resid = 0;
964 			break;
965 
966 		default:
967 			/* QHSTA error occurred */
968 			xs->error = XS_DRIVER_STUFFUP;
969 			break;
970 		}
971 
972 		/*
973                  * If an INQUIRY command completed successfully, then call
974          * the AscInquiryHandling() function to work around buggy boards.
975                  */
976 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
977 		    (xs->sc_link->scsipi_scsi.lun == 0) &&
978 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
979 			AscInquiryHandling(sc,
980 				      xs->sc_link->scsipi_scsi.target & 0x7,
981 					   (ASC_SCSI_INQUIRY *) xs->data);
982 		}
983 		break;
984 
985 	case ASC_QD_WITH_ERROR:
986 		switch (qdonep->d3.host_stat) {
987 		case ASC_QHSTA_NO_ERROR:
988 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
989 				s1 = &ccb->scsi_sense;
990 				s2 = &xs->sense.scsi_sense;
991 				*s2 = *s1;
992 				xs->error = XS_SENSE;
993 			} else {
994 				xs->error = XS_DRIVER_STUFFUP;
995 			}
996 			break;
997 
998 		default:
999 			/* QHSTA error occurred */
1000 			xs->error = XS_DRIVER_STUFFUP;
1001 			break;
1002 		}
1003 		break;
1004 
1005 	case ASC_QD_ABORTED_BY_HOST:
1006 	default:
1007 		xs->error = XS_DRIVER_STUFFUP;
1008 		break;
1009 	}
1010 
1011 
1012 	adv_free_ccb(sc, ccb);
1013 	xs->flags |= ITSDONE;
1014 	scsipi_done(xs);
1015 }
1016