1 /*	$NetBSD: bha.c,v 1.75 2012/10/27 17:18:19 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Originally written by Julian Elischer (julian@tfs.com)
35  * for TRW Financial Systems for use under the MACH(2.5) operating system.
36  *
37  * TRW Financial Systems, in accordance with their agreement with Carnegie
38  * Mellon University, makes this software available to CMU to distribute
39  * or use in any manner that they see fit as long as this message is kept with
40  * the software. For this reason TFS also grants any other persons or
41  * organisations permission to use or modify this software.
42  *
43  * TFS supplies this software to be publicly redistributed
44  * on the understanding that TFS is not responsible for the correct
45  * functioning of this software in any circumstances.
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.75 2012/10/27 17:18:19 chs Exp $");
50 
51 #include "opt_ddb.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/callout.h>
56 #include <sys/kernel.h>
57 #include <sys/errno.h>
58 #include <sys/ioctl.h>
59 #include <sys/device.h>
60 #include <sys/malloc.h>
61 #include <sys/buf.h>
62 #include <sys/proc.h>
63 
64 #include <sys/bus.h>
65 #include <sys/intr.h>
66 
67 #include <dev/scsipi/scsi_all.h>
68 #include <dev/scsipi/scsipi_all.h>
69 #include <dev/scsipi/scsiconf.h>
70 
71 #include <dev/ic/bhareg.h>
72 #include <dev/ic/bhavar.h>
73 
74 #ifndef DDB
75 #define Debugger() panic("should call debugger here (bha.c)")
76 #endif /* ! DDB */
77 
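/*
 * Cap transfers at (BHA_NSEG - 1) pages: a buffer of that size that is
 * not page-aligned still spans at most BHA_NSEG pages, so it always
 * fits in the hardware scatter/gather list.
 */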
78 #define	BHA_MAXXFER	((BHA_NSEG - 1) << PGSHIFT)
79 
80 #ifdef BHADEBUG
81 int     bha_debug = 0;
82 #endif /* BHADEBUG */
83 
84 static int	bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int,
85 			u_char *, int, u_char *);
86 
87 static void	bha_scsipi_request(struct scsipi_channel *,
88 				   scsipi_adapter_req_t, void *);
89 static void	bha_minphys(struct buf *);
90 
91 static void	bha_get_xfer_mode(struct bha_softc *,
92 				  struct scsipi_xfer_mode *);
93 
94 static void	bha_done(struct bha_softc *, struct bha_ccb *);
95 static int	bha_poll(struct bha_softc *, struct scsipi_xfer *, int);
96 static void	bha_timeout(void *arg);
97 
98 static int	bha_init(struct bha_softc *);
99 
100 static int	bha_create_mailbox(struct bha_softc *);
101 static void	bha_collect_mbo(struct bha_softc *);
102 
103 static void	bha_queue_ccb(struct bha_softc *, struct bha_ccb *);
104 static void	bha_start_ccbs(struct bha_softc *);
105 static void	bha_finish_ccbs(struct bha_softc *);
106 
107 static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t);
108 static void	bha_create_ccbs(struct bha_softc *, int);
109 static int	bha_init_ccb(struct bha_softc *, struct bha_ccb *);
110 static struct bha_ccb *bha_get_ccb(struct bha_softc *);
111 static void	bha_free_ccb(struct bha_softc *, struct bha_ccb *);
112 
113 #define BHA_RESET_TIMEOUT	2000	/* time to wait for reset (mSec) */
114 #define	BHA_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
115 
116 /*
117  * Number of CCBs in an allocation group; must be computed at run-time.
118  */
119 static int	bha_ccbs_per_group;
120 
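/*
 * The outgoing and incoming mailboxes are used as circular rings; these
 * helpers advance a mailbox pointer with wrap-around.
 */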
121 static inline struct bha_mbx_out *
122 bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
123 {
124 
125 	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
126 		return (&sc->sc_mbo[0]);
127 	return (mbo + 1);
128 }
129 
130 static inline struct bha_mbx_in *
131 bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
132 {
133 	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
134 		return (&sc->sc_mbi[0]);
135 	return (mbi + 1);
136 }
137 
138 /*
139  * bha_attach:
140  *
141  *	Finish attaching a Buslogic controller, and configure children.
142  */
143 void
144 bha_attach(struct bha_softc *sc)
145 {
146 	struct scsipi_adapter *adapt = &sc->sc_adapter;
147 	struct scsipi_channel *chan = &sc->sc_channel;
148 	int initial_ccbs;
149 
150 	/*
151 	 * Initialize the number of CCBs per group.
152 	 */
153 	if (bha_ccbs_per_group == 0)
154 		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;
155 
156 	initial_ccbs = bha_info(sc);
157 	if (initial_ccbs == 0) {
158 		aprint_error_dev(sc->sc_dev, "unable to get adapter info\n");
159 		return;
160 	}
161 
162 	/*
163 	 * Fill in the scsipi_adapter.
164 	 */
165 	memset(adapt, 0, sizeof(*adapt));
166 	adapt->adapt_dev = sc->sc_dev;
167 	adapt->adapt_nchannels = 1;
168 	/* adapt_openings initialized below */
169 	adapt->adapt_max_periph = sc->sc_mbox_count;
170 	adapt->adapt_request = bha_scsipi_request;
171 	adapt->adapt_minphys = bha_minphys;
172 
173 	/*
174 	 * Fill in the scsipi_channel.
175 	 */
176 	memset(chan, 0, sizeof(*chan));
177 	chan->chan_adapter = adapt;
178 	chan->chan_bustype = &scsi_bustype;
179 	chan->chan_channel = 0;
180 	chan->chan_flags = SCSIPI_CHAN_CANGROW;
181 	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
182 	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
183 	chan->chan_id = sc->sc_scsi_id;
184 
185 	TAILQ_INIT(&sc->sc_free_ccb);
186 	TAILQ_INIT(&sc->sc_waiting_ccb);
187 	TAILQ_INIT(&sc->sc_allocating_ccbs);
188 
189 	if (bha_create_mailbox(sc) != 0)
190 		return;
191 
192 	bha_create_ccbs(sc, initial_ccbs);
193 	if (sc->sc_cur_ccbs < 2) {
194 		aprint_error_dev(sc->sc_dev, "not enough CCBs to run\n");
195 		return;
196 	}
197 
198 	adapt->adapt_openings = sc->sc_cur_ccbs;
199 
200 	if (bha_init(sc) != 0)
201 		return;
202 
203 	(void) config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
204 }
205 
206 /*
207  * bha_intr:
208  *
209  *	Interrupt service routine.
210  */
211 int
212 bha_intr(void *arg)
213 {
214 	struct bha_softc *sc = arg;
215 	bus_space_tag_t iot = sc->sc_iot;
216 	bus_space_handle_t ioh = sc->sc_ioh;
217 	u_char sts;
218 
219 #ifdef BHADEBUG
220 	printf("%s: bha_intr ", device_xname(sc->sc_dev));
221 #endif /* BHADEBUG */
222 
223 	/*
224 	 * First acknowledge the interrupt.  Then, if it's not telling us
225 	 * about a completed operation, just return.
226 	 */
227 	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
228 	if ((sts & BHA_INTR_ANYINTR) == 0)
229 		return (0);
230 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
231 
232 #ifdef BHADIAG
233 	/* Make sure we clear CCB_SENDING before finishing a CCB. */
234 	bha_collect_mbo(sc);
235 #endif
236 
237 	/* Mail box out empty? */
238 	if (sts & BHA_INTR_MBOA) {
239 		struct bha_toggle toggle;
240 
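		/*
		 * The controller has freed an outgoing mailbox; turn the
		 * mailbox-available interrupt back off and queue any
		 * waiting CCBs.
		 */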
241 		toggle.cmd.opcode = BHA_MBO_INTR_EN;
242 		toggle.cmd.enable = 0;
243 		bha_cmd(iot, ioh, device_xname(sc->sc_dev),
244 		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
245 		    0, (u_char *)0);
246 		bha_start_ccbs(sc);
247 	}
248 
249 	/* Mail box in full? */
250 	if (sts & BHA_INTR_MBIF)
251 		bha_finish_ccbs(sc);
252 
253 	return (1);
254 }
255 
256 /*****************************************************************************
257  * SCSI interface routines
258  *****************************************************************************/
259 
260 /*
261  * bha_scsipi_request:
262  *
263  *	Perform a request for the SCSIPI layer.
264  */
265 static void
266 bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
267     void *arg)
268 {
269 	struct scsipi_adapter *adapt = chan->chan_adapter;
270 	struct bha_softc *sc = device_private(adapt->adapt_dev);
271 	struct scsipi_xfer *xs;
272 	struct scsipi_periph *periph;
273 	bus_dma_tag_t dmat = sc->sc_dmat;
274 	struct bha_ccb *ccb;
275 	int error, seg, flags, s;
276 
277 	switch (req) {
278 	case ADAPTER_REQ_RUN_XFER:
279 		xs = arg;
280 		periph = xs->xs_periph;
281 		flags = xs->xs_control;
282 
283 		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));
284 
285 		/* Get a CCB to use. */
286 		ccb = bha_get_ccb(sc);
287 #ifdef DIAGNOSTIC
288 		/*
289 		 * This should never happen as we track the resources
290 		 * in the mid-layer.
291 		 */
292 		if (ccb == NULL) {
293 			scsipi_printaddr(periph);
294 			printf("unable to allocate ccb\n");
295 			panic("bha_scsipi_request");
296 		}
297 #endif
298 
299 		ccb->xs = xs;
300 		ccb->timeout = xs->timeout;
301 
302 		/*
303 		 * Put all the arguments for the xfer in the ccb
304 		 */
305 		if (flags & XS_CTL_RESET) {
306 			ccb->opcode = BHA_RESET_CCB;
307 			ccb->scsi_cmd_length = 0;
308 		} else {
309 			/* can't use S/G if zero length */
310 			if (xs->cmdlen > sizeof(ccb->scsi_cmd)) {
311 				printf("%s: cmdlen %d too large for CCB\n",
312 				    device_xname(sc->sc_dev), xs->cmdlen);
313 				xs->error = XS_DRIVER_STUFFUP;
314 				goto out_bad;
315 			}
316 			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
317 						   : BHA_INITIATOR_CCB);
318 			memcpy(&ccb->scsi_cmd, xs->cmd,
319 			    ccb->scsi_cmd_length = xs->cmdlen);
320 		}
321 
322 		if (xs->datalen) {
323 			/*
324 			 * Map the DMA transfer.
325 			 */
326 #ifdef TFS
327 			if (flags & XS_CTL_DATA_UIO) {
328 				error = bus_dmamap_load_uio(dmat,
329 				    ccb->dmamap_xfer, (struct uio *)xs->data,
330 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
331 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
332 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
333 				      BUS_DMA_WRITE));
334 			} else
335 #endif /* TFS */
336 			{
337 				error = bus_dmamap_load(dmat,
338 				    ccb->dmamap_xfer, xs->data, xs->datalen,
339 				    NULL,
340 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
341 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
342 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
343 				      BUS_DMA_WRITE));
344 			}
345 
346 			switch (error) {
347 			case 0:
348 				break;
349 
350 			case ENOMEM:
351 			case EAGAIN:
352 				xs->error = XS_RESOURCE_SHORTAGE;
353 				goto out_bad;
354 
355 			default:
356 				xs->error = XS_DRIVER_STUFFUP;
357 				aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n", error);
358  out_bad:
359 				bha_free_ccb(sc, ccb);
360 				scsipi_done(xs);
361 				return;
362 			}
363 
364 			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
365 			    ccb->dmamap_xfer->dm_mapsize,
366 			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
367 			    BUS_DMASYNC_PREWRITE);
368 
369 			/*
370 			 * Load the hardware scatter/gather map with the
371 			 * contents of the DMA map.
372 			 */
373 			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
374 				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
375 				    ccb->scat_gath[seg].seg_addr);
376 				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
377 				    ccb->scat_gath[seg].seg_len);
378 			}
379 
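			/*
			 * ccb->hashkey is the CCB's bus (DMA) address, so
			 * adding the member offset yields the device-visible
			 * address of the embedded scatter/gather list.
			 */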
380 			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
381 			    scat_gath), ccb->data_addr);
382 			ltophys(ccb->dmamap_xfer->dm_nsegs *
383 			    sizeof(struct bha_scat_gath), ccb->data_length);
384 		} else {
385 			/*
386 			 * No data xfer, use non S/G values.
387 			 */
388 			ltophys(0, ccb->data_addr);
389 			ltophys(0, ccb->data_length);
390 		}
391 
392 		if (XS_CTL_TAGTYPE(xs) != 0) {
393 			ccb->tag_enable = 1;
394 			ccb->tag_type = xs->xs_tag_type & 0x03;
395 		} else {
396 			ccb->tag_enable = 0;
397 			ccb->tag_type = 0;
398 		}
399 
400 		ccb->data_out = 0;
401 		ccb->data_in = 0;
402 		ccb->target = periph->periph_target;
403 		ccb->lun = periph->periph_lun;
404 		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
405 		    ccb->sense_ptr);
406 		ccb->req_sense_length = sizeof(ccb->scsi_sense);
407 		ccb->host_stat = 0x00;
408 		ccb->target_stat = 0x00;
409 		ccb->link_id = 0;
410 		ltophys(0, ccb->link_addr);
411 
412 		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
413 
414 		s = splbio();
415 		bha_queue_ccb(sc, ccb);
416 		splx(s);
417 
418 		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
419 		if ((flags & XS_CTL_POLL) == 0)
420 			return;
421 
422 		/*
423 		 * If we can't use interrupts, poll on completion
424 		 */
425 		if (bha_poll(sc, xs, ccb->timeout)) {
426 			bha_timeout(ccb);
427 			if (bha_poll(sc, xs, ccb->timeout))
428 				bha_timeout(ccb);
429 		}
430 		return;
431 
432 	case ADAPTER_REQ_GROW_RESOURCES:
433 		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
434 			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
435 			return;
436 		}
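		/* "seg" is reused here just to remember the old CCB count. */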
437 		seg = sc->sc_cur_ccbs;
438 		bha_create_ccbs(sc, bha_ccbs_per_group);
439 		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
440 		return;
441 
442 	case ADAPTER_REQ_SET_XFER_MODE:
443 		/*
444 		 * Can't really do this on the Buslogic.  It has its
445 		 * own setup info.  But we do know how to query what
446 		 * the settings are.
447 		 */
448 		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
449 		return;
450 	}
451 }
452 
453 /*
454  * bha_minphys:
455  *
456  *	Limit a transfer to our maximum transfer size.
457  */
458 void
459 bha_minphys(struct buf *bp)
460 {
461 
462 	if (bp->b_bcount > BHA_MAXXFER)
463 		bp->b_bcount = BHA_MAXXFER;
464 	minphys(bp);
465 }
466 
467 /*****************************************************************************
468  * SCSI job execution helper routines
469  *****************************************************************************/
470 
471 /*
472  * bha_get_xfer_mode:
473  *
474  *	Query the xfer mode in effect for the specified periph, and
475  *	report it back to the midlayer.
476  *
477  *	NOTE: we must be called at splbio().
478  */
479 static void
480 bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
481 {
482 	struct bha_setup hwsetup;
483 	struct bha_period hwperiod;
484 	struct bha_sync *bs;
485 	int toff = xm->xm_target & 7, tmask = (1 << toff);
486 	int wide, period, offset, rlen;
487 
488 	/*
489 	 * Issue an Inquire Setup Information.  We can extract
490 	 * sync and wide information from here.
491 	 */
492 	rlen = sizeof(hwsetup.reply) +
493 	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
494 	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
495 	hwsetup.cmd.len = rlen;
496 	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(sc->sc_dev),
497 	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
498 	    rlen, (u_char *)&hwsetup.reply);
499 
500 	xm->xm_mode = 0;
501 	xm->xm_period = 0;
502 	xm->xm_offset = 0;
503 
504 	/*
505 	 * First check for wide.  On later boards, we can check
506 	 * directly in the setup info if wide is currently active.
507 	 *
508 	 * On earlier boards, we have to make an educated guess.
509 	 */
510 	if (sc->sc_flags & BHAF_WIDE) {
511 		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
512 			if (xm->xm_target > 7) {
513 				wide =
514 				    hwsetup.reply_w.high_wide_active & tmask;
515 			} else {
516 				wide =
517 				    hwsetup.reply_w.low_wide_active & tmask;
518 			}
519 			if (wide)
520 				xm->xm_mode |= PERIPH_CAP_WIDE16;
521 		} else {
522 			/* XXX Check `wide permitted' in the config info. */
523 			xm->xm_mode |= PERIPH_CAP_WIDE16;
524 		}
525 	}
526 
527 	/*
528 	 * Now get basic sync info.
529 	 */
530 	bs = (xm->xm_target > 7) ?
531 	     &hwsetup.reply_w.sync_high[toff] :
532 	     &hwsetup.reply.sync_low[toff];
533 
534 	if (bs->valid) {
535 		xm->xm_mode |= PERIPH_CAP_SYNC;
536 		period = (bs->period * 50) + 20;
537 		offset = bs->offset;
538 
539 		/*
540 		 * On boards that can do Fast and Ultra, use the Inquire Period
541 		 * command to get the period.
542 		 */
543 		if (sc->sc_firmware[0] >= '3') {
544 			rlen = sizeof(hwperiod.reply) +
545 			    ((sc->sc_flags & BHAF_WIDE) ?
546 			      sizeof(hwperiod.reply_w) : 0);
547 			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
548 			hwperiod.cmd.len = rlen;
549 			bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(sc->sc_dev),
550 			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
551 			    rlen, (u_char *)&hwperiod.reply);
552 
553 			if (xm->xm_target > 7)
554 				period = hwperiod.reply_w.period[toff];
555 			else
556 				period = hwperiod.reply.period[toff];
557 
558 			period *= 10;
559 		}
560 
561 		xm->xm_period =
562 		    scsipi_sync_period_to_factor(period * 100);
563 		xm->xm_offset = offset;
564 	}
565 
566 	/*
567 	 * Now check for tagged queueing support.
568 	 *
569 	 * XXX Check `tags permitted' in the config info.
570 	 */
571 	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
572 		xm->xm_mode |= PERIPH_CAP_TQING;
573 
574 	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
575 }
576 
577 /*
578  * bha_done:
579  *
580  *	A CCB has completed execution.  Pass the status back to the
581  *	upper layer.
582  */
583 static void
584 bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
585 {
586 	bus_dma_tag_t dmat = sc->sc_dmat;
587 	struct scsipi_xfer *xs = ccb->xs;
588 
589 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));
590 
591 #ifdef BHADIAG
592 	if (ccb->flags & CCB_SENDING) {
593 		printf("%s: exiting ccb still in transit!\n",
594 		    device_xname(sc->sc_dev));
595 		Debugger();
596 		return;
597 	}
598 #endif
599 	if ((ccb->flags & CCB_ALLOC) == 0) {
600 		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
601 		Debugger();
602 		return;
603 	}
604 
605 	/*
606 	 * If this was a data transfer, unload the map that described
607 	 * the data buffer.
608 	 */
609 	if (xs->datalen) {
610 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
611 		    ccb->dmamap_xfer->dm_mapsize,
612 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
613 		    BUS_DMASYNC_POSTWRITE);
614 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
615 	}
616 
617 	if (xs->error == XS_NOERROR) {
618 		if (ccb->host_stat != BHA_OK) {
619 			switch (ccb->host_stat) {
620 			case BHA_SEL_TIMEOUT:	/* No response */
621 				xs->error = XS_SELTIMEOUT;
622 				break;
623 			default:	/* Other scsi protocol messes */
624 				printf("%s: host_stat %x\n",
625 				    device_xname(sc->sc_dev), ccb->host_stat);
626 				xs->error = XS_DRIVER_STUFFUP;
627 				break;
628 			}
629 		} else if (ccb->target_stat != SCSI_OK) {
630 			switch (ccb->target_stat) {
631 			case SCSI_CHECK:
632 				memcpy(&xs->sense.scsi_sense,
633 				    &ccb->scsi_sense,
634 				    sizeof(xs->sense.scsi_sense));
635 				xs->error = XS_SENSE;
636 				break;
637 			case SCSI_BUSY:
638 				xs->error = XS_BUSY;
639 				break;
640 			default:
641 				printf("%s: target_stat %x\n",
642 				    device_xname(sc->sc_dev), ccb->target_stat);
643 				xs->error = XS_DRIVER_STUFFUP;
644 				break;
645 			}
646 		} else
647 			xs->resid = 0;
648 	}
649 
650 	bha_free_ccb(sc, ccb);
651 	scsipi_done(xs);
652 }
653 
654 /*
655  * bha_poll:
656  *
657  *	Poll for completion of the specified job.
658  */
659 static int
660 bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
661 {
662 	bus_space_tag_t iot = sc->sc_iot;
663 	bus_space_handle_t ioh = sc->sc_ioh;
664 
665 	/* timeouts are in msec, so we loop in 1000 usec cycles */
666 	while (count) {
667 		/*
668 		 * If we had interrupts enabled, would we
669 		 * have got an interrupt?
670 		 */
671 		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
672 		    BHA_INTR_ANYINTR)
673 			bha_intr(sc);
674 		if (xs->xs_status & XS_STS_DONE)
675 			return (0);
676 		delay(1000);	/* only happens in boot so ok */
677 		count--;
678 	}
679 	return (1);
680 }
681 
682 /*
683  * bha_timeout:
684  *
685  *	CCB timeout handler.
686  */
687 static void
688 bha_timeout(void *arg)
689 {
690 	struct bha_ccb *ccb = arg;
691 	struct scsipi_xfer *xs = ccb->xs;
692 	struct scsipi_periph *periph = xs->xs_periph;
693 	struct bha_softc *sc =
694 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
695 	int s;
696 
697 	scsipi_printaddr(periph);
698 	printf("timed out");
699 
700 	s = splbio();
701 
702 #ifdef BHADIAG
703 	/*
704 	 * If the CCB's mailbox is not free, the board has stopped responding.
705 	 */
706 	bha_collect_mbo(sc);
707 	if (ccb->flags & CCB_SENDING) {
708 		aprint_error_dev(sc->sc_dev, "not taking commands!\n");
709 		Debugger();
710 	}
711 #endif
712 
713 	/*
714 	 * If this CCB has been through here before, then a
715 	 * previous abort has already failed; don't try to
716 	 * abort it again.
717 	 */
718 	if (ccb->flags & CCB_ABORT) {
719 		/* abort timed out */
720 		printf(" AGAIN\n");
721 		/* XXX Must reset! */
722 	} else {
723 		/* abort the operation that has timed out */
724 		printf("\n");
725 		ccb->xs->error = XS_TIMEOUT;
726 		ccb->timeout = BHA_ABORT_TIMEOUT;
727 		ccb->flags |= CCB_ABORT;
728 		bha_queue_ccb(sc, ccb);
729 	}
730 
731 	splx(s);
732 }
733 
734 /*****************************************************************************
735  * Misc. subroutines.
736  *****************************************************************************/
737 
738 /*
739  * bha_cmd:
740  *
741  *	Send a command to the BusLogic controller.
742  */
743 static int
744 bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name, int icnt,
745     u_char *ibuf, int ocnt, u_char *obuf)
746 {
747 	int i;
748 	int wait;
749 	u_char sts;
750 	u_char opcode = ibuf[0];
751 
752 	/*
753 	 * Calculate a reasonable timeout for the command.
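	 * The value is the iteration count for the 50-microsecond polling
	 * loops below, so 20000 corresponds to roughly one second.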
754 	 */
755 	switch (opcode) {
756 	case BHA_INQUIRE_DEVICES:
757 	case BHA_INQUIRE_DEVICES_2:
758 		wait = 90 * 20000;
759 		break;
760 	default:
761 		wait = 1 * 20000;
762 		break;
763 	}
764 
765 	/*
766 	 * Wait for the adapter to go idle, unless it's one of
767 	 * the commands that don't need this.
768 	 */
769 	if (opcode != BHA_MBO_INTR_EN) {
770 		for (i = 20000; i; i--) {	/* 1 sec? */
771 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
772 			if (sts & BHA_STAT_IDLE)
773 				break;
774 			delay(50);
775 		}
776 		if (!i) {
777 			printf("%s: bha_cmd, host not idle(0x%x)\n",
778 			    name, sts);
779 			return (1);
780 		}
781 	}
782 
783 	/*
784 	 * Now that it is idle, if we expect output, pre-flush the
785 	 * data queue feeding us.
786 	 */
787 	if (ocnt) {
788 		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
789 		    BHA_STAT_DF)
790 			(void)bus_space_read_1(iot, ioh, BHA_DATA_PORT);
791 	}
792 
793 	/*
794 	 * Output the command and its arguments one byte at a time; before
795 	 * each byte, first check that the command/data port is empty.
796 	 */
797 	while (icnt--) {
798 		for (i = wait; i; i--) {
799 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
800 			if (!(sts & BHA_STAT_CDF))
801 				break;
802 			delay(50);
803 		}
804 		if (!i) {
805 			if (opcode != BHA_INQUIRE_REVISION)
806 				printf("%s: bha_cmd, cmd/data port full\n",
807 				    name);
808 			goto bad;
809 		}
810 		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
811 	}
812 
813 	/*
814 	 * If we expect input, loop that many times, each time
815 	 * waiting for the data register to contain valid data.
816 	 */
817 	while (ocnt--) {
818 		for (i = wait; i; i--) {
819 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
820 			if (sts & BHA_STAT_DF)
821 				break;
822 			delay(50);
823 		}
824 		if (!i) {
825 #ifdef BHADEBUG
826 			if (opcode != BHA_INQUIRE_REVISION)
827 				printf("%s: bha_cmd, cmd/data port empty %d\n",
828 				    name, ocnt);
829 #endif /* BHADEBUG */
830 			goto bad;
831 		}
832 		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
833 	}
834 
835 	/*
836 	 * Wait for the board to report a finished instruction.
837 	 * We may get an extra interrupt for the HACC signal, but this is
838 	 * unimportant.
839 	 */
840 	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
841 		for (i = 20000; i; i--) {	/* 1 sec? */
842 			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
843 			/* XXX Need to save this in the interrupt handler? */
844 			if (sts & BHA_INTR_HACC)
845 				break;
846 			delay(50);
847 		}
848 		if (!i) {
849 			printf("%s: bha_cmd, host not finished(0x%x)\n",
850 			    name, sts);
851 			return (1);
852 		}
853 	}
854 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
855 	return (0);
856 
857 bad:
858 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
859 	return (1);
860 }
861 
862 /*
863  * bha_find:
864  *
865  *	Find the board.
866  */
867 int
868 bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
869 {
870 	int i;
871 	u_char sts;
872 	struct bha_extended_inquire inquire;
873 
874 	/* Check that something is at the ports we need to access */
875 	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
876 	if (sts == 0xFF)
877 		return (0);
878 
879 	/*
880 	 * Reset the board.  If it doesn't respond, assume
881 	 * that it's not there; good enough for the probe.
882 	 */
883 
884 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
885 	    BHA_CTRL_HRST | BHA_CTRL_SRST);
886 
887 	delay(100);
888 	for (i = BHA_RESET_TIMEOUT; i; i--) {
889 		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
890 		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
891 			break;
892 		delay(1000);
893 	}
894 	if (!i) {
895 #ifdef BHADEBUG
896 		if (bha_debug)
897 			printf("bha_find: No answer from buslogic board\n");
898 #endif /* BHADEBUG */
899 		return (0);
900 	}
901 
902 	/*
903 	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
904 	 * interface. The native bha interface is not compatible with
905 	 * an aha 1542. We need to ensure that we never match an
906 	 * Adaptec 1542. We must also avoid sending Adaptec-compatible
907 	 * commands to a real bha, lest it go into 1542 emulation mode.
908 	 * (On an indirect bus like ISA, we should always probe for BusLogic
909 	 * interfaces before Adaptec interfaces).
910 	 */
911 
912 	/*
913 	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
914 	 * for an extended-geometry register.  The 1542[AB] don't have one.
915 	 */
916 	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
917 	if (sts == 0xFF)
918 		return (0);
919 
920 	/*
921 	 * Check that we actually know how to use this board.
922 	 */
923 	delay(1000);
924 	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
925 	inquire.cmd.len = sizeof(inquire.reply);
926 	i = bha_cmd(iot, ioh, "(bha_find)",
927 	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
928 	    sizeof(inquire.reply), (u_char *)&inquire.reply);
929 
930 	/*
931 	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
932 	 * have the extended-geometry register and also respond to
933 	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards by
934 	 * checking that the reply size is what a BusLogic card returns.
935 	 */
936 	if (i) {
937 #ifdef BHADEBUG
938 		printf("bha_find: board returned %d instead of %zu to %s\n",
939 		       i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
940 #endif
941 		return (0);
942 	}
943 
944 	/* OK, we know we've found a BusLogic adapter. */
945 
946 	switch (inquire.reply.bus_type) {
947 	case BHA_BUS_TYPE_24BIT:
948 	case BHA_BUS_TYPE_32BIT:
949 		break;
950 	case BHA_BUS_TYPE_MCA:
951 		/* We don't grok MicroChannel (yet). */
952 		return (0);
953 	default:
954 		printf("bha_find: illegal bus type %c\n",
955 		    inquire.reply.bus_type);
956 		return (0);
957 	}
958 
959 	return (1);
960 }
961 
962 
963 /*
964  * bha_inquire_config:
965  *
966  *	Determine irq/drq.
967  */
968 int
969 bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
970 	    struct bha_probe_data *sc)
971 {
972 	int irq, drq;
973 	struct bha_config config;
974 
975 	/*
976 	 * Assume we have a board at this stage; set up the DMA channel
977 	 * from the jumpers and save the interrupt level.
978 	 */
979 	delay(1000);
980 	config.cmd.opcode = BHA_INQUIRE_CONFIG;
981 	bha_cmd(iot, ioh, "(bha_inquire_config)",
982 	    sizeof(config.cmd), (u_char *)&config.cmd,
983 	    sizeof(config.reply), (u_char *)&config.reply);
984 	switch (config.reply.chan) {
985 	case EISADMA:
986 		drq = -1;
987 		break;
988 	case CHAN0:
989 		drq = 0;
990 		break;
991 	case CHAN5:
992 		drq = 5;
993 		break;
994 	case CHAN6:
995 		drq = 6;
996 		break;
997 	case CHAN7:
998 		drq = 7;
999 		break;
1000 	default:
1001 		printf("bha: illegal drq setting %x\n",
1002 		    config.reply.chan);
1003 		return (0);
1004 	}
1005 
1006 	switch (config.reply.intr) {
1007 	case INT9:
1008 		irq = 9;
1009 		break;
1010 	case INT10:
1011 		irq = 10;
1012 		break;
1013 	case INT11:
1014 		irq = 11;
1015 		break;
1016 	case INT12:
1017 		irq = 12;
1018 		break;
1019 	case INT14:
1020 		irq = 14;
1021 		break;
1022 	case INT15:
1023 		irq = 15;
1024 		break;
1025 	default:
1026 		printf("bha: illegal irq setting %x\n",
1027 		    config.reply.intr);
1028 		return (0);
1029 	}
1030 
1031 	/* if we want to fill in softc, do so now */
1032 	if (sc != NULL) {
1033 		sc->sc_irq = irq;
1034 		sc->sc_drq = drq;
1035 	}
1036 
1037 	return (1);
1038 }
1039 
1040 int
1041 bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
1042     struct bha_probe_data *bpd)
1043 {
1044 	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
1045 }
1046 
1047 /*
1048  * bha_disable_isacompat:
1049  *
1050  *	Disable the ISA-compatibility ioports on PCI bha devices,
1051  *	to ensure they're not autoconfigured a second time as an ISA bha.
1052  */
1053 int
1054 bha_disable_isacompat(struct bha_softc *sc)
1055 {
1056 	struct bha_isadisable isa_disable;
1057 
1058 	isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
1059 	isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
1060 	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(sc->sc_dev),
1061 	    sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
1062 	    0, (u_char *)0);
1063 	return (0);
1064 }
1065 
1066 /*
1067  * bha_info:
1068  *
1069  *	Get information about the board, and report it.  We
1070  *	return the initial number of CCBs, or 0 if we failed.
1071  */
1072 int
1073 bha_info(struct bha_softc *sc)
1074 {
1075 	bus_space_tag_t iot = sc->sc_iot;
1076 	bus_space_handle_t ioh = sc->sc_ioh;
1077 	struct bha_extended_inquire inquire;
1078 	struct bha_config config;
1079 	struct bha_devices devices;
1080 	struct bha_setup setup;
1081 	struct bha_model model;
1082 	struct bha_revision revision;
1083 	struct bha_digit digit;
1084 	int i, j, initial_ccbs, rlen;
1085 	const char *name = device_xname(sc->sc_dev);
1086 	char *p;
1087 
1088 	/*
1089 	 * Fetch the extended inquire information.
1090 	 */
1091 	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
1092 	inquire.cmd.len = sizeof(inquire.reply);
1093 	bha_cmd(iot, ioh, name,
1094 	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
1095 	    sizeof(inquire.reply), (u_char *)&inquire.reply);
1096 
1097 	/*
1098 	 * Fetch the configuration information.
1099 	 */
1100 	config.cmd.opcode = BHA_INQUIRE_CONFIG;
1101 	bha_cmd(iot, ioh, name,
1102 	    sizeof(config.cmd), (u_char *)&config.cmd,
1103 	    sizeof(config.reply), (u_char *)&config.reply);
1104 
1105 	sc->sc_scsi_id = config.reply.scsi_dev;
1106 
1107 	/*
1108 	 * Get the firmware revision.
1109 	 */
1110 	p = sc->sc_firmware;
1111 	revision.cmd.opcode = BHA_INQUIRE_REVISION;
1112 	bha_cmd(iot, ioh, name,
1113 	    sizeof(revision.cmd), (u_char *)&revision.cmd,
1114 	    sizeof(revision.reply), (u_char *)&revision.reply);
1115 	*p++ = revision.reply.firm_revision;
1116 	*p++ = '.';
1117 	*p++ = revision.reply.firm_version;
1118 	digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
1119 	bha_cmd(iot, ioh, name,
1120 	    sizeof(digit.cmd), (u_char *)&digit.cmd,
1121 	    sizeof(digit.reply), (u_char *)&digit.reply);
1122 	*p++ = digit.reply.digit;
1123 	if (revision.reply.firm_revision >= '3' ||
1124 	    (revision.reply.firm_revision == '3' &&
1125 	     revision.reply.firm_version >= '3')) {
1126 		digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
1127 		bha_cmd(iot, ioh, name,
1128 		    sizeof(digit.cmd), (u_char *)&digit.cmd,
1129 		    sizeof(digit.reply), (u_char *)&digit.reply);
1130 		*p++ = digit.reply.digit;
1131 	}
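	/* Trim trailing blanks and NULs, then terminate the string. */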
1132 	while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
1133 		p--;
1134 	*p = '\0';
1135 
1136 	/*
1137 	 * Get the model number.
1138 	 *
1139 	 * Some boards do not handle the Inquire Board Model Number
1140 	 * command correctly, or don't give correct information.
1141 	 *
1142 	 * So, we use the Firmware Revision and Extended Setup
1143 	 * information to fixup the model number in these cases.
1144 	 *
1145 	 * The firmware version indicates:
1146 	 *
1147 	 *	5.xx	BusLogic "W" Series Host Adapters
1148 	 *		BT-948/958/958D
1149 	 *
1150 	 *	4.xx	BusLogic "C" Series Host Adapters
1151 	 *		BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
1152 	 *
1153 	 *	3.xx	BusLogic "S" Series Host Adapters
1154 	 *		BT-747S/747D/757S/757D/445S/545S/542D
1155 	 *		BT-542B/742A (revision H)
1156 	 *
1157 	 *	2.xx	BusLogic "A" Series Host Adapters
1158 	 *		BT-542B/742A (revision G and below)
1159 	 *
1160 	 *	0.xx	AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
1161 	 */
1162 	if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
1163 	    sc->sc_firmware[0] < '3')
1164 		snprintf(sc->sc_model, sizeof(sc->sc_model), "542B");
1165 	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1166 	    sc->sc_firmware[0] == '2' &&
1167 	    (sc->sc_firmware[2] == '1' ||
1168 	     (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0')))
1169 		snprintf(sc->sc_model, sizeof(sc->sc_model), "742A");
1170 	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1171 	    sc->sc_firmware[0] == '0')
1172 		snprintf(sc->sc_model, sizeof(sc->sc_model), "747A");
1173 	else {
1174 		p = sc->sc_model;
1175 		model.cmd.opcode = BHA_INQUIRE_MODEL;
1176 		model.cmd.len = sizeof(model.reply);
1177 		bha_cmd(iot, ioh, name,
1178 		    sizeof(model.cmd), (u_char *)&model.cmd,
1179 		    sizeof(model.reply), (u_char *)&model.reply);
1180 		*p++ = model.reply.id[0];
1181 		*p++ = model.reply.id[1];
1182 		*p++ = model.reply.id[2];
1183 		*p++ = model.reply.id[3];
1184 		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1185 			p--;
1186 		*p++ = model.reply.version[0];
1187 		*p++ = model.reply.version[1];
1188 		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1189 			p--;
1190 		*p = '\0';
1191 	}
1192 
1193 	/* Enable round-robin scheme - appeared at firmware rev. 3.31. */
1194 	if (strcmp(sc->sc_firmware, "3.31") >= 0)
1195 		sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;
1196 
1197 	/*
1198 	 * Determine some characteristics about our bus.
1199 	 */
1200 	if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
1201 		sc->sc_flags |= BHAF_WIDE;
1202 	if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
1203 		sc->sc_flags |= BHAF_DIFFERENTIAL;
1204 	if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
1205 		sc->sc_flags |= BHAF_ULTRA;
1206 
1207 	/*
1208 	 * Determine some characteristics of the board.
1209 	 */
1210 	sc->sc_max_dmaseg = inquire.reply.sg_limit;
1211 
1212 	/*
1213 	 * Determine the maximum CCB count and whether or not
1214 	 * tagged queueing is available on this host adapter.
1215 	 *
1216 	 * Tagged queueing works on:
1217 	 *
1218 	 *	"W" Series adapters
1219 	 *	"C" Series adapters with firmware >= 4.22
1220 	 *	"S" Series adapters with firmware >= 3.35
1221 	 *
1222 	 * The internal CCB counts are:
1223 	 *
1224 	 *	192	BT-948/958/958D
1225 	 *	100	BT-946C/956C/956CD/747C/757C/757CD/445C
1226 	 *	50	BT-545C/540CF
1227 	 *	30	BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
1228 	 */
1229 	switch (sc->sc_firmware[0]) {
1230 	case '5':
1231 		sc->sc_max_ccbs = 192;
1232 		sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1233 		break;
1234 
1235 	case '4':
1236 		if (sc->sc_model[0] == '5')
1237 			sc->sc_max_ccbs = 50;
1238 		else
1239 			sc->sc_max_ccbs = 100;
1240 		if (strcmp(sc->sc_firmware, "4.22") >= 0)
1241 			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1242 		break;
1243 
1244 	case '3':
1245 		if (strcmp(sc->sc_firmware, "3.35") >= 0)
1246 			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1247 		/* FALLTHROUGH */
1248 
1249 	default:
1250 		sc->sc_max_ccbs = 30;
1251 	}
1252 
1253 	/*
1254 	 * Set the mailbox count to precisely the number of HW CCBs
1255 	 * available.  A mailbox isn't required while a CCB is executing,
1256 	 * but this allows us to actually enqueue up to our resource
1257 	 * limit.
1258 	 *
1259 	 * This will keep the mailbox count small on boards which don't
1260 	 * have strict round-robin (they have to scan the entire set of
1261 	 * mailboxes each time they run a command).
1262 	 */
1263 	sc->sc_mbox_count = sc->sc_max_ccbs;
1264 
1265 	/*
1266 	 * Obtain setup information.
1267 	 */
1268 	rlen = sizeof(setup.reply) +
1269 	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
1270 	setup.cmd.opcode = BHA_INQUIRE_SETUP;
1271 	setup.cmd.len = rlen;
1272 	bha_cmd(iot, ioh, name,
1273 	    sizeof(setup.cmd), (u_char *)&setup.cmd,
1274 	    rlen, (u_char *)&setup.reply);
1275 
1276 	aprint_normal_dev(sc->sc_dev, "model BT-%s, firmware %s\n",
1277 	    sc->sc_model, sc->sc_firmware);
1278 
1279 	aprint_normal_dev(sc->sc_dev, "%d H/W CCBs", sc->sc_max_ccbs);
1280 	if (setup.reply.sync_neg)
1281 		aprint_normal(", sync");
1282 	if (setup.reply.parity)
1283 		aprint_normal(", parity");
1284 	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
1285 		aprint_normal(", tagged queueing");
1286 	if (sc->sc_flags & BHAF_WIDE_LUN)
1287 		aprint_normal(", wide LUN support");
1288 	aprint_normal("\n");
1289 
1290 	/*
1291 	 * Poll targets 0 - 7.
1292 	 */
1293 	devices.cmd.opcode = BHA_INQUIRE_DEVICES;
1294 	bha_cmd(iot, ioh, name,
1295 	    sizeof(devices.cmd), (u_char *)&devices.cmd,
1296 	    sizeof(devices.reply), (u_char *)&devices.reply);
1297 
1298 	/* Count installed units. */
1299 	initial_ccbs = 0;
1300 	for (i = 0; i < 8; i++) {
1301 		for (j = 0; j < 8; j++) {
1302 			if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1303 				initial_ccbs++;
1304 		}
1305 	}
1306 
1307 	/*
1308 	 * Poll targets 8 - 15 if we have a wide bus.
1309 	 */
1310 	if (sc->sc_flags & BHAF_WIDE) {
1311 		devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
1312 		bha_cmd(iot, ioh, name,
1313 		    sizeof(devices.cmd), (u_char *)&devices.cmd,
1314 		    sizeof(devices.reply), (u_char *)&devices.reply);
1315 
1316 		for (i = 0; i < 8; i++) {
1317 			for (j = 0; j < 8; j++) {
1318 				if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1319 					initial_ccbs++;
1320 			}
1321 		}
1322 	}
1323 
1324 	/*
1325 	 * Double the initial CCB count, for good measure.
1326 	 */
1327 	initial_ccbs *= 2;
1328 
1329 	/*
1330 	 * Sanity check the initial CCB count; don't create more than
1331 	 * we can enqueue (sc_max_ccbs), and make sure there are some
1332 	 * at all.
1333 	 */
1334 	if (initial_ccbs > sc->sc_max_ccbs)
1335 		initial_ccbs = sc->sc_max_ccbs;
1336 	if (initial_ccbs == 0)
1337 		initial_ccbs = 2;
1338 
1339 	return (initial_ccbs);
1340 }
1341 
1342 /*
1343  * bha_init:
1344  *
1345  *	Initialize the board.
1346  */
1347 static int
1348 bha_init(struct bha_softc *sc)
1349 {
1350 	const char *name = device_xname(sc->sc_dev);
1351 	struct bha_toggle toggle;
1352 	struct bha_mailbox mailbox;
1353 	struct bha_mbx_out *mbo;
1354 	struct bha_mbx_in *mbi;
1355 	int i;
1356 
1357 	/*
1358 	 * Set up the mailbox.  We always run the mailbox in round-robin.
1359 	 */
1360 	for (i = 0; i < sc->sc_mbox_count; i++) {
1361 		mbo = &sc->sc_mbo[i];
1362 		mbi = &sc->sc_mbi[i];
1363 
1364 		mbo->cmd = BHA_MBO_FREE;
1365 		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1366 
1367 		mbi->comp_stat = BHA_MBI_FREE;
1368 		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1369 	}
1370 
1371 	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
1372 	sc->sc_tmbi = &sc->sc_mbi[0];
1373 
1374 	sc->sc_mbofull = 0;
1375 
1376 	/*
1377 	 * If the board supports strict round-robin, enable that.
1378 	 */
1379 	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
1380 		toggle.cmd.opcode = BHA_ROUND_ROBIN;
1381 		toggle.cmd.enable = 1;
1382 		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1383 		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
1384 		    0, NULL);
1385 	}
1386 
1387 	/*
1388 	 * Give the mailbox to the board.
1389 	 */
1390 	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
1391 	mailbox.cmd.nmbx = sc->sc_mbox_count;
1392 	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
1393 	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1394 	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
1395 	    0, (u_char *)0);
1396 
1397 	return (0);
1398 }
1399 
1400 /*****************************************************************************
1401  * CCB execution engine
1402  *****************************************************************************/
1403 
1404 /*
1405  * bha_queue_ccb:
1406  *
1407  *	Queue a CCB to be sent to the controller, and send it if possible.
1408  */
1409 static void
1410 bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
1411 {
1412 
1413 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
1414 	bha_start_ccbs(sc);
1415 }
1416 
1417 /*
1418  * bha_start_ccbs:
1419  *
1420  *	Send as many CCBs as we have empty mailboxes for.
1421  */
1422 static void
1423 bha_start_ccbs(struct bha_softc *sc)
1424 {
1425 	bus_space_tag_t iot = sc->sc_iot;
1426 	bus_space_handle_t ioh = sc->sc_ioh;
1427 	struct bha_ccb_group *bcg;
1428 	struct bha_mbx_out *mbo;
1429 	struct bha_ccb *ccb;
1430 
1431 	mbo = sc->sc_tmbo;
1432 
1433 	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
1434 		if (sc->sc_mbofull >= sc->sc_mbox_count) {
1435 #ifdef DIAGNOSTIC
1436 			if (sc->sc_mbofull > sc->sc_mbox_count)
1437 				panic("bha_start_ccbs: mbofull > mbox_count");
1438 #endif
1439 			/*
1440 			 * No mailboxes available; attempt to collect ones
1441 			 * that have already been used.
1442 			 */
1443 			bha_collect_mbo(sc);
1444 			if (sc->sc_mbofull == sc->sc_mbox_count) {
1445 				/*
1446 				 * Still no more available; have the
1447 				 * controller interrupt us when it
1448 				 * frees one.
1449 				 */
1450 				struct bha_toggle toggle;
1451 
1452 				toggle.cmd.opcode = BHA_MBO_INTR_EN;
1453 				toggle.cmd.enable = 1;
1454 				bha_cmd(iot, ioh, device_xname(sc->sc_dev),
1455 				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
1456 				    0, (u_char *)0);
1457 				break;
1458 			}
1459 		}
1460 
1461 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
1462 #ifdef BHADIAG
1463 		ccb->flags |= CCB_SENDING;
1464 #endif
1465 
1466 		/*
1467 		 * Put the CCB in the mailbox.
1468 		 */
1469 		bcg = BHA_CCB_GROUP(ccb);
1470 		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
1471 		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
1472 		if (ccb->flags & CCB_ABORT)
1473 			mbo->cmd = BHA_MBO_ABORT;
1474 		else
1475 			mbo->cmd = BHA_MBO_START;
1476 
1477 		BHA_MBO_SYNC(sc, mbo,
1478 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1479 
1480 		/* Tell the card to poll immediately. */
1481 		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);
1482 
1483 		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
1484 			callout_reset(&ccb->xs->xs_callout,
1485 			    mstohz(ccb->timeout), bha_timeout, ccb);
1486 
1487 		++sc->sc_mbofull;
1488 		mbo = bha_nextmbo(sc, mbo);
1489 	}
1490 
1491 	sc->sc_tmbo = mbo;
1492 }
1493 
1494 /*
1495  * bha_finish_ccbs:
1496  *
1497  *	Finalize the execution of CCBs in our incoming mailbox.
1498  */
1499 static void
1500 bha_finish_ccbs(struct bha_softc *sc)
1501 {
1502 	struct bha_mbx_in *mbi;
1503 	struct bha_ccb *ccb;
1504 	int i;
1505 
1506 	mbi = sc->sc_tmbi;
1507 
1508 	BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1509 
1510 	if (mbi->comp_stat == BHA_MBI_FREE) {
1511 		for (i = 0; i < sc->sc_mbox_count; i++) {
1512 			if (mbi->comp_stat != BHA_MBI_FREE) {
1513 #ifdef BHADIAG
1514 				/*
1515 				 * This can happen in normal operation if
1516 				 * we use all mailbox slots.
1517 				 */
1518 				printf("%s: mbi not in round-robin order\n",
1519 				    device_xname(sc->sc_dev));
1520 #endif
1521 				goto again;
1522 			}
1523 			mbi = bha_nextmbi(sc, mbi);
1524 			BHA_MBI_SYNC(sc, mbi,
1525 			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1526 		}
1527 #ifdef BHADIAGnot
1528 		printf("%s: mbi interrupt with no full mailboxes\n",
1529 		    device_xname(sc->sc_dev));
1530 #endif
1531 		return;
1532 	}
1533 
1534  again:
1535 	do {
1536 		ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
1537 		if (ccb == NULL) {
1538 			aprint_error_dev(sc->sc_dev, "bad mbi ccb pointer 0x%08x; skipping\n",
1539 			    phystol(mbi->ccb_addr));
1540 			goto next;
1541 		}
1542 
1543 		BHA_CCB_SYNC(sc, ccb,
1544 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1545 
1546 #ifdef BHADEBUG
1547 		if (bha_debug) {
1548 			u_char *cp = ccb->scsi_cmd;
1549 			printf("op=%x %x %x %x %x %x\n",
1550 			    cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]);
1551 			printf("comp_stat %x for mbi addr = %p, ",
1552 			    mbi->comp_stat, mbi);
1553 			printf("ccb addr = %p\n", ccb);
1554 		}
1555 #endif /* BHADEBUG */
1556 
1557 		switch (mbi->comp_stat) {
1558 		case BHA_MBI_OK:
1559 		case BHA_MBI_ERROR:
1560 			if ((ccb->flags & CCB_ABORT) != 0) {
1561 				/*
1562 				 * If we already started an abort, wait for it
1563 				 * to complete before clearing the CCB.  We
1564 				 * could instead just clear CCB_SENDING, but
1565 				 * what if the mailbox was already received?
1566 				 * The worst that happens here is that we clear
1567 				 * the CCB a bit later than we need to.  BFD.
1568 				 */
1569 				goto next;
1570 			}
1571 			break;
1572 
1573 		case BHA_MBI_ABORT:
1574 		case BHA_MBI_UNKNOWN:
1575 			/*
1576 			 * Even if the CCB wasn't found, we clear it anyway.
1577 			 * See preceding comment.
1578 			 */
1579 			break;
1580 
1581 		default:
1582 			aprint_error_dev(sc->sc_dev, "bad mbi comp_stat %02x; skipping\n",
1583 			    mbi->comp_stat);
1584 			goto next;
1585 		}
1586 
1587 		callout_stop(&ccb->xs->xs_callout);
1588 		bha_done(sc, ccb);
1589 
1590 	next:
1591 		mbi->comp_stat = BHA_MBI_FREE;
1592 		BHA_CCB_SYNC(sc, ccb,
1593 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1594 
1595 		mbi = bha_nextmbi(sc, mbi);
1596 		BHA_MBI_SYNC(sc, mbi,
1597 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1598 	} while (mbi->comp_stat != BHA_MBI_FREE);
1599 
1600 	sc->sc_tmbi = mbi;
1601 }
1602 
1603 /*****************************************************************************
1604  * Mailbox management functions.
1605  *****************************************************************************/
1606 
1607 /*
1608  * bha_create_mailbox:
1609  *
1610  *	Create the mailbox structures.  Helper function for bha_attach().
1611  *
1612  *	NOTE: The Buslogic hardware only gets one DMA address for the
1613  *	mailbox!  It expects:
1614  *
1615  *		mailbox_out[mailbox_size]
1616  *		mailbox_in[mailbox_size]
1617  */
1618 static int
1619 bha_create_mailbox(struct bha_softc *sc)
1620 {
1621 	bus_dma_segment_t seg;
1622 	size_t size;
1623 	int error, rseg;
1624 
1625 	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
1626 	       (sizeof(struct bha_mbx_in)  * sc->sc_mbox_count);
1627 
1628 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
1629 	    1, &rseg, sc->sc_dmaflags);
1630 	if (error) {
1631 		aprint_error_dev(sc->sc_dev, "unable to allocate mailboxes, error = %d\n",
1632 		    error);
1633 		goto bad_0;
1634 	}
1635 
1636 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
1637 	    (void **)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
1638 	if (error) {
1639 		aprint_error_dev(sc->sc_dev, "unable to map mailboxes, error = %d\n",
1640 		    error);
1641 		goto bad_1;
1642 	}
1643 
1644 	memset(sc->sc_mbo, 0, size);
1645 
1646 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1647 	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
1648 	if (error) {
1649 		aprint_error_dev(sc->sc_dev,
1650 		    "unable to create mailbox DMA map, error = %d\n",
1651 		    error);
1652 		goto bad_2;
1653 	}
1654 
1655 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
1656 	    sc->sc_mbo, size, NULL, 0);
1657 	if (error) {
1658 		aprint_error_dev(sc->sc_dev, "unable to load mailbox DMA map, error = %d\n",
1659 		    error);
1660 		goto bad_3;
1661 	}
1662 
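	/* The incoming mailboxes immediately follow the outgoing ones. */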
1663 	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);
1664 
1665 	return (0);
1666 
1667  bad_3:
1668 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
1669  bad_2:
1670 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_mbo, size);
1671  bad_1:
1672 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1673  bad_0:
1674 	return (error);
1675 }
1676 
1677 /*
1678  * bha_collect_mbo:
1679  *
1680  *	Garbage collect mailboxes that are no longer in use.
1681  */
1682 static void
1683 bha_collect_mbo(struct bha_softc *sc)
1684 {
1685 	struct bha_mbx_out *mbo;
1686 #ifdef BHADIAG
1687 	struct bha_ccb *ccb;
1688 #endif
1689 
1690 	mbo = sc->sc_cmbo;
1691 
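	/*
	 * Walk forward from the oldest outstanding mailbox and reclaim
	 * slots until we hit one the controller has not yet marked free;
	 * sc_cmbo is left pointing at the oldest busy slot.
	 */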
1692 	while (sc->sc_mbofull > 0) {
1693 		BHA_MBO_SYNC(sc, mbo,
1694 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1695 		if (mbo->cmd != BHA_MBO_FREE)
1696 			break;
1697 
1698 #ifdef BHADIAG
1699 		ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
1700 		ccb->flags &= ~CCB_SENDING;
1701 #endif
1702 
1703 		--sc->sc_mbofull;
1704 		mbo = bha_nextmbo(sc, mbo);
1705 	}
1706 
1707 	sc->sc_cmbo = mbo;
1708 }
1709 
1710 /*****************************************************************************
1711  * CCB management functions
1712  *****************************************************************************/
1713 
1714 static inline void
1715 bha_reset_ccb(struct bha_ccb *ccb)
1716 {
1717 
1718 	ccb->flags = 0;
1719 }
1720 
1721 /*
1722  * bha_create_ccbs:
1723  *
1724  *	Create a set of CCBs.
1725  *
1726  *	We determine the target CCB count, and then keep creating them
1727  *	until we reach the target, or fail.  CCBs that are allocated
1728  *	but not "created" are left on the allocating list.
1729  *
1730  *	XXX AB_QUIET/AB_SILENT lossage here; this is called during
1731  *	boot as well as at run-time.
1732  */
1733 static void
1734 bha_create_ccbs(struct bha_softc *sc, int count)
1735 {
1736 	struct bha_ccb_group *bcg;
1737 	struct bha_ccb *ccb;
1738 	bus_dma_segment_t seg;
1739 	bus_dmamap_t ccbmap;
1740 	int target, i, error, rseg;
1741 
1742 	/*
1743 	 * If the current CCB count is already the max number we're
1744 	 * allowed to have, bail out now.
1745 	 */
1746 	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
1747 		return;
1748 
1749 	/*
1750 	 * Compute our target count, and clamp it down to the max
1751 	 * number we're allowed to have.
1752 	 */
1753 	target = sc->sc_cur_ccbs + count;
1754 	if (target > sc->sc_max_ccbs)
1755 		target = sc->sc_max_ccbs;
1756 
1757 	/*
1758 	 * If there are CCBs on the allocating list, don't allocate a
1759 	 * CCB group yet.
1760 	 */
1761 	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
1762 		goto have_allocating_ccbs;
1763 
1764  allocate_group:
1765 	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
1766 	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
1767 	if (error) {
1768 		aprint_error_dev(sc->sc_dev, "unable to allocate CCB group, error = %d\n",
1769 		    error);
1770 		goto bad_0;
1771 	}
1772 
1773 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
1774 	    (void *)&bcg,
1775 	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1776 	if (error) {
1777 		aprint_error_dev(sc->sc_dev, "unable to map CCB group, error = %d\n",
1778 		    error);
1779 		goto bad_1;
1780 	}
1781 
1782 	memset(bcg, 0, PAGE_SIZE);
1783 
1784 	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
1785 	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
1786 	if (error) {
1787 		aprint_error_dev(sc->sc_dev, "unable to create CCB group DMA map, error = %d\n",
1788 		    error);
1789 		goto bad_2;
1790 	}
1791 
1792 	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
1793 	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
1794 	if (error) {
1795 		aprint_error_dev(sc->sc_dev, "unable to load CCB group DMA map, error = %d\n",
1796 		    error);
1797 		goto bad_3;
1798 	}
1799 
1800 	bcg->bcg_dmamap = ccbmap;
1801 
1802 #ifdef DIAGNOSTIC
1803 	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
1804 	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
1805 		panic("bha_create_ccbs: CCB group size botch");
1806 #endif
1807 
1808 	/*
1809 	 * Add all of the CCBs in this group to the allocating list.
1810 	 */
1811 	for (i = 0; i < bha_ccbs_per_group; i++) {
1812 		ccb = &bcg->bcg_ccbs[i];
1813 		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
1814 	}
1815 
1816  have_allocating_ccbs:
1817 	/*
1818 	 * Loop over the allocating list until we reach our CCB target.
1819 	 * If we run out on the list, we'll allocate another group's
1820 	 * worth.
1821 	 */
1822 	while (sc->sc_cur_ccbs < target) {
1823 		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
1824 		if (ccb == NULL)
1825 			goto allocate_group;
1826 		if (bha_init_ccb(sc, ccb) != 0) {
1827 			/*
1828 			 * We were unable to initialize the CCB.
1829 			 * This is likely due to a resource shortage,
1830 			 * so bail out now.
1831 			 */
1832 			return;
1833 		}
1834 	}
1835 
1836 	/*
1837 	 * If we got here, we've reached our target!
1838 	 */
1839 	return;
1840 
1841  bad_3:
1842 	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
1843  bad_2:
1844 	bus_dmamem_unmap(sc->sc_dmat, (void *)bcg, PAGE_SIZE);
1845  bad_1:
1846 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1847  bad_0:
1848 	return;
1849 }
1850 
1851 /*
1852  * bha_init_ccb:
1853  *
1854  *	Initialize a CCB; helper function for bha_create_ccbs().
1855  */
1856 static int
1857 bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
1858 {
1859 	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
1860 	int hashnum, error;
1861 
1862 	/*
1863 	 * Create the DMA map for this CCB.
1864 	 *
1865 	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
1866 	 * XXX in the ISA case.  A better solution is needed.
1867 	 */
1868 	error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
1869 	    BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
1870 	    &ccb->dmamap_xfer);
1871 	if (error) {
1872 		aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
1873 		    error);
1874 		return (error);
1875 	}
1876 
1877 	TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);
1878 
1879 	/*
1880 	 * Put the CCB into the phystokv hash table.
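	 * The incoming mailbox reports only the CCB's bus address, so this
	 * hash is how bha_ccb_phys_kv() recovers the kernel-virtual CCB
	 * when the command completes.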
1881 	 */
1882 	ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
1883 	    BHA_CCB_OFFSET(ccb);
1884 	hashnum = CCB_HASH(ccb->hashkey);
1885 	ccb->nexthash = sc->sc_ccbhash[hashnum];
1886 	sc->sc_ccbhash[hashnum] = ccb;
1887 	bha_reset_ccb(ccb);
1888 
1889 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
1890 	sc->sc_cur_ccbs++;
1891 
1892 	return (0);
1893 }
1894 
1895 /*
1896  * bha_get_ccb:
1897  *
1898  *	Get a CCB for the SCSI operation.  If there are none left,
1899  *	Get a CCB for the SCSI operation.  Returns NULL if none
1900  *	are currently available.
1901 static struct bha_ccb *
1902 bha_get_ccb(struct bha_softc *sc)
1903 {
1904 	struct bha_ccb *ccb;
1905 	int s;
1906 
1907 	s = splbio();
1908 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1909 	if (ccb != NULL) {
1910 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
1911 		ccb->flags |= CCB_ALLOC;
1912 	}
1913 	splx(s);
1914 	return (ccb);
1915 }
1916 
1917 /*
1918  * bha_free_ccb:
1919  *
1920  *	Put a CCB back onto the free list.
1921  */
1922 static void
1923 bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
1924 {
1925 	int s;
1926 
1927 	s = splbio();
1928 	bha_reset_ccb(ccb);
1929 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
1930 	splx(s);
1931 }
1932 
1933 /*
1934  * bha_ccb_phys_kv:
1935  *
1936  *	Given a CCB DMA address, locate the CCB in kernel virtual space.
1937  */
1938 static struct bha_ccb *
1939 bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys)
1940 {
1941 	int hashnum = CCB_HASH(ccb_phys);
1942 	struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];
1943 
1944 	while (ccb) {
1945 		if (ccb->hashkey == ccb_phys)
1946 			break;
1947 		ccb = ccb->nexthash;
1948 	}
1949 	return (ccb);
1950 }
1951