/*	$NetBSD: aic79xx_osm.c,v 1.33 2016/07/14 04:00:45 msaitoh Exp $	*/

/*
 * Bus independent NetBSD shim for the aic79xx based Adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 * - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.33 2016/07/14 04:00:45 msaitoh Exp $");

#include <dev/ic/aic79xx_osm.h>
#include <dev/ic/aic79xx_inline.h>

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);
static void	ahd_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahd_poll(struct ahd_softc *ahd, int wait);
static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
			       struct scb *scb);

#if NOT_YET
static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
#endif

static bool	ahd_pmf_suspend(device_t, const pmf_qual_t *);
static bool	ahd_pmf_resume(device_t, const pmf_qual_t *);
static bool	ahd_pmf_shutdown(device_t, int);

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int	s;
	char	ahd_info[256];

	ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
	aprint_normal("%s: %s\n", ahd_name(ahd), ahd_info);

	ahd_lock(ahd, &s);

	ahd->sc_adapter.adapt_dev = ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;
	ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	ahd->sc_child = config_found(ahd->sc_dev, &ahd->sc_channel, scsiprint);

	ahd_intr_enable(ahd, TRUE);

	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	if (!pmf_device_register1(ahd->sc_dev,
	    ahd_pmf_suspend, ahd_pmf_resume, ahd_pmf_shutdown))
		aprint_error_dev(ahd->sc_dev,
		    "couldn't establish power handler\n");

	ahd_unlock(ahd, &s);

	return (1);
}

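/*
 * pmf(9) power management hooks.  Suspend and shutdown both quiesce the
 * controller via ahd_shutdown(); the ahd_suspend()/ahd_resume() paths are
 * compiled out for now.
 */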
static bool
ahd_pmf_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct ahd_softc *sc = device_private(dev);
#if 0
	return (ahd_suspend(sc) == 0);
#else
	ahd_shutdown(sc);
	return true;
#endif
}

static bool
ahd_pmf_resume(device_t dev, const pmf_qual_t *qual)
{
#if 0
	struct ahd_softc *sc = device_private(dev);

	return (ahd_resume(sc) == 0);
#else
	return true;
#endif
}

static bool
ahd_pmf_shutdown(device_t dev, int howto)
{
	struct ahd_softc *sc = device_private(dev);

	/* Disable all interrupt sources by resetting the controller */
	ahd_shutdown(sc);

	return true;
}

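/*
 * Adapter ioctl entry point.  Only SCBUSIORESET is supported; it forces
 * a reset of the selected channel.
 */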
static int
ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
	  void *addr, int flag, struct proc *p)
{
	struct ahd_softc *ahd;
	int s, ret = ENOTTY;

	ahd = device_private(channel->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct	ahd_softc *ahd;

	ahd = arg;

	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsipi_xfer	*xs;
	struct scsipi_periph	*periph;
	int			s;

	LIST_REMOVE(scb, pending_links);

	xs = scb->xs;
	periph = xs->xs_periph;

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
			struct scsipi_xfer	*txs = list_scb->xs;

			if (!(txs->xs_control & XS_CTL_POLL)) {
				callout_reset(&txs->xs_callout,
				    (txs->timeout > 1000000) ?
				    (txs->timeout / 1000) * hz :
				    (txs->timeout * hz) / 1000,
				    ahd_timeout, list_scb);
			}
		}

		if (ahd_get_transaction_status(scb) != XS_NOERROR)
			ahd_set_transaction_status(scb, XS_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahd_name(ahd), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((xs->status == SCSI_STATUS_BUSY) ||
		   (xs->status == SCSI_STATUS_QUEUE_FULL)) {
		ahd_set_transaction_status(scb, XS_BUSY);
		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
		       ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero the sense data before having the drive fill it.
		 * The SCSI spec mandates that any untransferred data
		 * be assumed zero.  Complete the 'bounce' of sense
		 * information through buffers accessible via bus space
		 * by copying it into the client's xfer.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
		       sizeof(struct scsi_sense_data));

		ahd_set_transaction_status(scb, XS_SENSE);
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;
#ifdef AHD_DEBUG
		int i;
#endif
		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(xs->sense.scsi_sense));
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
#ifdef AHD_DEBUG
		printf("Copied %d bytes of sense data offset %d:", sense_len,
		       SIU_SENSE_OFFSET(siu));
		for (i = 0; i < sense_len; i++)
			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
		printf("\n");
#endif
		ahd_set_transaction_status(scb, XS_SENSE);
	}

	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	if (scb->flags & SCB_REQUEUE)
		ahd_set_transaction_status(scb, XS_REQUEUE);

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	ahd_unlock(ahd, &s);

	scsipi_done(xs);
}

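/*
 * scsipi request dispatcher.  Handles command execution
 * (ADAPTER_REQ_RUN_XFER), on-demand SCB pool growth
 * (ADAPTER_REQ_GROW_RESOURCES) and transfer mode negotiation
 * (ADAPTER_REQ_SET_XFER_MODE).
 */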
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = device_private(chan->chan_adapter->adapt_dev);

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
					    target_id, &tstate);

		if (xs->xs_tag_type != 0 ||
		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			col_idx = AHD_NEVER_COL_IDX;
		else
			col_idx = AHD_BUILD_COL_IDX(target_id,
			    periph->periph_lun);

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	  }

	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
#endif
		chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
		if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;
		u_int ppr_options = 0, period, offset;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
		ahd_validate_offset(ahd, NULL, period, &offset,
		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahd_set_syncrate(ahd, &devinfo, period, offset,
		    ppr_options, AHD_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	    }
	}

	return;
}

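/*
 * Finish setting up an SCB once its data buffer (if any) has been mapped:
 * copy the DMA segments into the SG list, apply the per-target disconnect,
 * tag and packetized settings, start the command and, for polled requests,
 * spin until it completes.
 */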
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int  mask;
	int    s;

	scb = arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = device_private(
	    xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
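		/*
		 * Convert the midlayer timeout from milliseconds to
		 * ticks, dividing first for large values to avoid
		 * integer overflow.
		 */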
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
			      ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}

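/*
 * Busy-wait up to 'wait' milliseconds for the controller to post an
 * interrupt, then service it.  Used for polled (XS_CTL_POLL) requests.
 */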
static int
ahd_poll(struct ahd_softc *ahd, int wait)
{

	while (--wait) {
		DELAY(1000);
		if (ahd_inb(ahd, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahd_name(ahd));
		return (EIO);
	}

	ahd_intr(ahd);
	return (0);
}


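/*
 * Copy the CDB into the hardware SCB and, if the transfer moves data,
 * map the buffer for DMA before handing the SCB to ahd_execute_scb().
 */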
static void
ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > MAX_CDB_LEN) {
		int s;
		/*
		 * Should CAM start to support CDB sizes
		 * greater than 16 bytes, we could use
		 * the sense buffer to store the CDB.
		 */
		ahd_set_transaction_status(scb,
					   XS_DRIVER_STUFFUP);

		ahd_lock(ahd, &s);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		scsipi_done(xs);
		return;
	}
	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahd->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					((xs->xs_control & XS_CTL_NOSLEEP) ?
					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
					BUS_DMA_STREAMING |
					((xs->xs_control & XS_CTL_DATA_IN) ?
					 BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHD_DEBUG
			printf("%s: in ahd_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahd_name(ahd), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahd_execute_scb(scb,
				scb->dmamap->dm_segs,
				scb->dmamap->dm_nsegs);
	} else {
		ahd_execute_scb(scb, NULL, 0);
	}
}

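/*
 * Command timeout handler.  Pause the controller, dump its state and
 * recover by resetting the bus.
 */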
void
ahd_timeout(void *arg)
{
	struct	scb	  *scb;
	struct	ahd_softc *ahd;
	int		   s;

	scb = arg;
	ahd = scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	ahd_pause_and_flushwork(ahd);
	(void)ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
			  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}

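/*
 * Allocate and zero the platform-specific portion of the softc.
 */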
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
				    M_NOWAIT /*| M_ZERO*/);
	if (ahd->platform_data == NULL)
		return (ENOMEM);

	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));

	return (0);
}

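/*
 * Release the platform-specific state allocated above.
 */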
void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}

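/*
 * Detach the adapter: detach any child scsibus, deregister the power
 * hooks and release the controller's resources.
 */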
int
ahd_detach(struct ahd_softc *ahd, int flags)
{
	int rv = 0;

	if (ahd->sc_child != NULL)
		rv = config_detach(ahd->sc_child, flags);

	pmf_device_deregister(ahd->sc_dev);

	ahd_free(ahd);

	return rv;
}

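/*
 * Record whether tagged queuing is enabled for the target described by
 * 'devinfo'.
 */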
void
ahd_platform_set_tags(struct ahd_softc *ahd,
		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct ahd_tmode_tstate *tstate;

	ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	if (alg != AHD_QUEUE_NONE)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

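/*
 * Deliver asynchronous event notifications (transfer negotiation
 * results, bus resets) from the core driver to the scsipi layer.
 */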
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahd->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target,
			    &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
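		/* FALLTHROUGH */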
	case AC_SENT_BDR:
	default:
		break;
	}
}
885