/*	$NetBSD: adv.c,v 1.50 2019/12/15 16:48:27 tsutsui Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: adv.c,v 1.50 2019/12/15 16:48:27 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data(ASC_SOFTC *);
static void adv_free_control_data(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
static ADV_CCB *adv_get_ccb(ASC_SOFTC *);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);


static void adv_scsipi_request(struct scsipi_channel *,
	scsipi_adapter_req_t, void *);
static void advminphys(struct buf *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsipi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/

#define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
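
/*
 * Both timeouts are in milliseconds and are converted to clock ticks when
 * a callout is armed, e.g. (ADV_WATCH_TIMEOUT * hz) / 1000 in
 * adv_start_ccbs() and mstohz(ccb->timeout) for the per-command timeout.
 */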

/******************************************************************************/
/*                             Control Blocks routines                        */
/******************************************************************************/


static int
adv_alloc_control_data(ASC_SOFTC *sc)
{
	int error;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
			   PAGE_SIZE, 0, &sc->sc_control_seg, 1,
			   &sc->sc_control_nsegs, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate control "
		    "structures, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_control_seg,
			   sc->sc_control_nsegs, sizeof(struct adv_control),
			   (void **) & sc->sc_control,
			   BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control structures, error = %d\n", error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
				       &sc->sc_dmamap_control)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control DMA map, error = %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
			   sc->sc_control, sizeof(struct adv_control), NULL,
				     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control DMA map, error = %d\n", error);
		return (error);
	}

	/*
	 * Initialize the overrun_buf address.
	 */
	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    offsetof(struct adv_control, overrun_buf);

	return (0);
}
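
/*
 * The CCB array and the overrun buffer are carved out of the single
 * struct adv_control allocation mapped by sc_dmamap_control, so the bus
 * address of any control-block member is simply
 * sc->sc_dmamap_control->dm_segs[0].ds_addr plus its offset; this is what
 * ADV_CCB_OFF() and the overrun_buf computation above rely on.
 */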

static void
adv_free_control_data(ASC_SOFTC *sc)
{

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_control);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_control);
	sc->sc_dmamap_control = NULL;

	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
	    sizeof(struct adv_control));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_control_seg,
	    sc->sc_control_nsegs);
}

/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(ASC_SOFTC *sc, ADV_CCB *ccbstore, int count)
{
	ADV_CCB        *ccb;
	int             i, error;

	memset(ccbstore, 0, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to initialize ccb, error = %d\n", error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}
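
/*
 * adv_create_ccbs() may succeed only partially; adv_attach() copes with
 * that by warning and by sizing adapt_openings to the number of CCBs that
 * were actually created, so a short allocation merely reduces concurrency.
 */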


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{
	int             s;

	s = splbio();
	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}


static void
adv_reset_ccb(ADV_CCB *ccb)
{

	ccb->flags = 0;
}


static int
adv_init_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{
	int	hashnum, error;

	callout_init(&ccb->ccb_watchdog, 0);

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create DMA map, error = %d\n", error);
		return (error);
	}

	/*
	 * Put it in the phys-to-kv hash table.  It never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb from the free list.
 *
 * Returns NULL if none are available.
 */
static ADV_CCB *
adv_get_ccb(ASC_SOFTC *sc)
{
	ADV_CCB        *ccb = NULL;
	int             s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}
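
/*
 * The free list is protected with splbio() because CCBs are returned from
 * interrupt context: adv_narrow_isr_callback() calls adv_free_ccb() when a
 * command completes.
 */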


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(ASC_SOFTC *sc, u_long ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}
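
/*
 * This lookup exists because the chip reports completions by bus address
 * only (qdonep->d2.ccb_ptr); adv_narrow_isr_callback() uses it to recover
 * the kernel-virtual CCB for the finished command.
 */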


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(ASC_SOFTC *sc)
{
	ADV_CCB        *ccb;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			callout_stop(&ccb->ccb_watchdog);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			callout_reset(&ccb->ccb_watchdog,
			    (ADV_WATCH_TIMEOUT * hz) / 1000,
			    adv_watchdog, ccb);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adv_timeout, ccb);
	}
}
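
/*
 * When AscExeScsiQueue() returns ASC_BUSY the CCB stays at the head of the
 * waiting queue and adv_watchdog() simply re-runs adv_start_ccbs() after
 * ADV_WATCH_TIMEOUT ms, so queued commands trickle out as the chip drains
 * its internal queue instead of being failed outright.
 */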


/******************************************************************************/
/*                         SCSI layer interfacing routines                    */
/******************************************************************************/


int
adv_init(ASC_SOFTC *sc)
{
	int             warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		aprint_error("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		aprint_error_dev(sc->sc_dev, "-get: ");
		switch (warn) {
		case -1:
			aprint_normal("Chip is not halted\n");
			break;

		case -2:
			aprint_normal("Couldn't get MicroCode Start"
			       " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			aprint_normal("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			aprint_normal("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			aprint_normal("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			aprint_normal("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			aprint_normal("tag queuing enabled w/o disconnects\n");
			break;

		default:
			aprint_normal("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		aprint_error_dev(sc->sc_dev, "-set: ");
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			aprint_normal("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			aprint_normal("I/O port increment switch enabled\n");
			break;

		default:
			aprint_normal("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = adv_narrow_isr_callback;

	return (0);
}


void
adv_attach(ASC_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int             i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* All OK */
		break;

	case 1:
		panic("%s: bad signature", device_xname(sc->sc_dev));
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		      device_xname(sc->sc_dev));
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		      device_xname(sc->sc_dev));
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		      device_xname(sc->sc_dev));
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	/* adapt_max_periph initialized below */
	adapt->adapt_request = adv_scsipi_request;
	adapt->adapt_minphys = advminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = 8;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);

	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control blocks\n");
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		aprint_error_dev(sc->sc_dev,
		    "WARNING: only %d of %d control blocks created\n",
		    i, ADV_MAX_CCB);
	}

	adapt->adapt_openings = i;
	adapt->adapt_max_periph = adapt->adapt_openings;
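	/*
	 * adapt_openings is the number of CCBs actually created, so the
	 * scsipi mid-layer should never hand us more concurrent commands
	 * than we have CCBs; adv_get_ccb() returning NULL is therefore
	 * treated as a DIAGNOSTIC-only panic in adv_scsipi_request().
	 */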

	sc->sc_child = config_found(sc->sc_dev, chan, scsiprint);
}

int
adv_detach(ASC_SOFTC *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	adv_free_control_data(sc);

	return (rv);
}

static void
advminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}
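
/*
 * The bound matches the (ASC_MAX_SG_LIST - 1) * PAGE_SIZE dmamap_xfer
 * created in adv_init_ccb(): a transfer of at most that many bytes touches
 * at most ASC_MAX_SG_LIST pages even when it is not page aligned, so it
 * always fits in the chip's scatter/gather list.
 */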


/*
 * Start a SCSI operation given the command and the data address.  Also
 * needs the unit, target and LUN.
 */

static void
adv_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	ASC_SOFTC      *sc = device_private(chan->chan_adapter->adapt_dev);
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	int             s, flags, error, nsegs;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		/*
		 * Get a CCB to use.
		 */
		ccb = adv_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("adv_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Build up the request
		 */
		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

		ccb->scsiq.q2.ccb_ptr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb);

		ccb->scsiq.cdbptr = &xs->cmd->opcode;
		ccb->scsiq.q2.cdb_len = xs->cmdlen;
		ccb->scsiq.q1.target_id =
		    ASC_TID_TO_TARGET_ID(periph->periph_target);
		ccb->scsiq.q1.target_lun = periph->periph_lun;
		ccb->scsiq.q2.target_ix =
		    ASC_TIDLUN_TO_IX(periph->periph_target,
		    periph->periph_lun);
		ccb->scsiq.q1.sense_addr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
		ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

		/*
		 * If there are any outstanding requests for the current
		 * target, then every 255th request send an ORDERED request.
		 * This heuristic tries to retain the benefit of request
		 * sorting while preventing request starvation. 255 is the
		 * max number of tags or pending commands a device may have
		 * outstanding.
		 */
		sc->reqcnt[periph->periph_target]++;
		if (((sc->reqcnt[periph->periph_target] > 0) &&
		    (sc->reqcnt[periph->periph_target] % 255) == 0) ||
		    xs->bp == NULL || (xs->bp->b_flags & B_ASYNC) == 0) {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
		} else {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & SCSI_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *) xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				      BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
				    xs->data, xs->datalen, NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				      BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				if (error == EFBIG) {
					aprint_error_dev(sc->sc_dev,
					    "adv_scsi_cmd, more than %d"
					    " DMA segments\n",
					    ASC_MAX_SG_LIST);
				} else {
					aprint_error_dev(sc->sc_dev,
					    "adv_scsi_cmd, error %d"
					    " loading DMA map\n", error);
				}

out_bad:
				adv_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

			for (nsegs = 0;
			     nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
				ccb->sghead.sg_list[nsegs].addr =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
				ccb->sghead.sg_list[nsegs].bytes =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
			}

			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			    ccb->dmamap_xfer->dm_nsegs;

			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
			ccb->scsiq.sg_head = &ccb->sghead;
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		}

#ifdef ASC_DEBUG
		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX\n",
		    periph->periph_target,
		    periph->periph_lun, xs->cmd->opcode,
		    (unsigned long)ccb);
#endif
		s = splbio();
		adv_queue_ccb(sc, ccb);
		splx(s);

		if ((flags & XS_CTL_POLL) == 0)
			return;

		/* Not allowed to use interrupts, poll for completion. */
		if (adv_poll(sc, xs, ccb->timeout)) {
			adv_timeout(ccb);
			if (adv_poll(sc, xs, ccb->timeout))
				adv_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		/*
		 * We can't really set the mode, but we know how to
		 * query what the firmware negotiated.
		 */
		struct scsipi_xfer_mode *xm = arg;
		u_int8_t sdtr_data;
		ASC_SCSI_BIT_ID_TYPE tid_bit;

		tid_bit = ASC_TIX_TO_TARGET_ID(xm->xm_target);

		xm->xm_mode = 0;
		xm->xm_period = 0;
		xm->xm_offset = 0;

		if (sc->init_sdtr & tid_bit) {
			xm->xm_mode |= PERIPH_CAP_SYNC;
			sdtr_data = sc->sdtr_data[xm->xm_target];
			xm->xm_period =
			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
			    (sc->max_sdtr_index - 1)];
			xm->xm_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		}

		if (sc->use_tagged_qng & tid_bit)
			xm->xm_mode |= PERIPH_CAP_TQING;

		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	    }
	}
}

int
adv_intr(void *arg)
{
	ASC_SOFTC      *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}
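
/*
 * adv_intr() unconditionally claims the interrupt and leaves it to AscISR()
 * to work out whether the chip actually had anything pending; on a shared
 * interrupt line this is conservative but safe.
 */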


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(ASC_SOFTC *sc, struct scsipi_xfer *xs, int count)
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(void *arg)
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC      *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int             s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through here before, a previous abort has failed;
	 * don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}
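
/*
 * Recovery is therefore two-staged: the first timeout issues AscAbortCCB()
 * and re-queues the CCB with the shorter ADV_ABORT_TIMEOUT, and only a
 * second timeout on the same CCB escalates to a bus reset.  In the polled
 * case adv_scsipi_request() drives the same sequence by calling adv_poll()
 * and adv_timeout() twice in a row.
 */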


static void
adv_watchdog(void *arg)
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC      *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int             s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                      NARROW boards Interrupt callbacks                     */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(ASC_SOFTC *sc, ASC_QDONE_INFO *qdonep)
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;

	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
			(unsigned long)ccb,
			xs->xs_periph->periph_target,
			xs->xs_periph->periph_lun, xs->cmd->opcode);
#endif
	callout_stop(&ccb->xs->xs_callout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			 (xs->xs_control & XS_CTL_DATA_IN) ?
			 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			/*
			 * XXX
			 * According to the original Linux driver, xs->resid
			 * should be qdonep->remain_bytes. However, its value
			 * is bogus, which seems like a H/W bug. The best thing
			 * we can do would be to ignore it, assuming that all
			 * data has been successfully transferred...
			 */
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, call
		 * AscInquiryHandling() to work around bugs on certain boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->xs_periph->periph_lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
				      xs->xs_periph->periph_target & 0x7,
					   (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		case ASC_QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adv_free_ccb(sc, ccb);
	scsipi_done(xs);
}
958