1 /*	$NetBSD: aic79xx_osm.c,v 1.3 2003/04/21 20:05:26 fvdl Exp $	*/
2 
3 /*
4  * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5  *
6  * Copyright (c) 1994-2002 Justin T. Gibbs.
7  * Copyright (c) 2001-2002 Adaptec Inc.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * Alternatively, this software may be distributed under the terms of the
20  * GNU Public License ("GPL").
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35  *
36  * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.8 2003/02/27 23:23:16 gibbs Exp $
37  */
38 /*
39  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40  * - April 2003
41  */
42 
43 #include <dev/ic/aic79xx_osm.h>
44 #include <dev/ic/aic7xxx_cam.h>
45 #include <dev/ic/aic79xx_inline.h>
46 
47 #ifndef AHD_TMODE_ENABLE
48 #define AHD_TMODE_ENABLE 0
49 #endif
50 
51 static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
52 			  caddr_t addr, int flag, struct proc *p);
53 static void	ahd_action(struct scsipi_channel *chan,
54 			   scsipi_adapter_req_t req, void *arg);
55 static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
56 				int nsegments);
57 static int	ahd_poll(struct ahd_softc *ahd, int wait);
58 static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
59 			       struct scb *scb);
60 
61 #if NOT_YET
62 static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
63 #endif
64 
65 /*
66  * Attach all the sub-devices we can find
67  */
68 int
69 ahd_attach(struct ahd_softc *ahd)
70 {
71 	int 	s;
72 	char	ahd_info[256];
73 
74 	ahd_controller_info(ahd, ahd_info);
75         printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);
76 
77         ahd_lock(ahd, &s);
78 
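	/*
	 * Fill in the scsipi_adapter and scsipi_channel structures
	 * and attach the SCSI bus below us with config_found().
	 */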
79 	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
80 	ahd->sc_adapter.adapt_nchannels = 1;
81 
82 	ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
83 	ahd->sc_adapter.adapt_max_periph = 32;
84 
85 	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
86 	ahd->sc_adapter.adapt_minphys = ahd_minphys;
87 	ahd->sc_adapter.adapt_request = ahd_action;
88 
89 	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
90         ahd->sc_channel.chan_bustype = &scsi_bustype;
91         ahd->sc_channel.chan_channel = 0;
92         ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
93         ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
94         ahd->sc_channel.chan_id = ahd->our_id;
95 
96 	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);
97 
98 	ahd_intr_enable(ahd, TRUE);
99 
100 	if (ahd->flags & AHD_RESET_BUS_A)
101 		ahd_reset_channel(ahd, 'A', TRUE);
102 
103         ahd_unlock(ahd, &s);
104 
105 	return (1);
106 }
107 
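/*
 * Adapter ioctl entry point.  Only SCBUSIORESET (a SCSI bus reset)
 * is supported.
 */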
108 static int
109 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
110 	  caddr_t addr, int flag, struct proc *p)
111 {
112         struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
113         int s, ret = ENOTTY;
114 
115         switch (cmd) {
116         case SCBUSIORESET:
117                 s = splbio();
118                 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
119                 splx(s);
120                 ret = 0;
121                 break;
122         default:
123                 break;
124         }
125 
126         return ret;
127 }
128 
129 /*
130  * Catch an interrupt from the adapter
131  */
132 void
133 ahd_platform_intr(void *arg)
134 {
135 	struct	ahd_softc *ahd;
136 
137 	ahd = (struct ahd_softc *)arg;
138 
139 	printf("%s: ahd_platform_intr\n", ahd_name(ahd));
140 
141 	ahd_intr(ahd);
142 }
143 
144 /*
145  * We have an scb which has been processed by the
146  * adaptor, now we look to see how the operation went.
147  */
148 void
149 ahd_done(struct ahd_softc *ahd, struct scb *scb)
150 {
151 	struct scsipi_xfer	*xs;
152 	struct scsipi_periph	*periph;
153 	int			target;
154 	int			s;
155 
156 	LIST_REMOVE(scb, pending_links);
157 
158 	xs = scb->xs;
159 	periph = xs->xs_periph;
160 
161 	callout_stop(&scb->xs->xs_callout);
162 
163 	target = periph->periph_target;
164 
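	/*
	 * The transfer is complete; sync and unload the DMA map
	 * before looking at the results.
	 */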
165 	if (xs->datalen) {
166 		int op;
167 
168 		if (xs->xs_control & XS_CTL_DATA_IN)
169 		  op = BUS_DMASYNC_POSTREAD;
170 		else
171 		  op = BUS_DMASYNC_POSTWRITE;
172 
173 		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
174 				scb->dmamap->dm_mapsize, op);
175                 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
176         }
177 
178 	/*
179 	 * If the recovery SCB completes, we are no longer
180 	 * in timeout recovery.
181 	 */
182 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
183 		struct	scb *list_scb;
184 
185 		/*
186 		 * We were able to complete the command successfully,
187 		 * so reinstate the timeouts for all other pending
188 		 * commands.
189 		 */
190 		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
191 			struct scsipi_xfer	*txs = list_scb->xs;
192 
193 			if (!(txs->xs_control & XS_CTL_POLL)) {
194                                 callout_reset(&txs->xs_callout,
195                                     (txs->timeout > 1000000) ?
196                                     (txs->timeout / 1000) * hz :
197                                     (txs->timeout * hz) / 1000,
198                                     ahd_timeout, list_scb);
199                         }
200 		}
201 
202 		if (ahd_get_transaction_status(scb) != XS_NOERROR)
203 		  ahd_set_transaction_status(scb, XS_TIMEOUT);
204                 scsipi_printaddr(xs->xs_periph);
205 		printf("%s: no longer in timeout, status = %x\n",
206 		       ahd_name(ahd), xs->status);
207 	}
208 
209 	if (xs->error != XS_NOERROR) {
210                 /* Don't clobber any existing error state */
211 	} else if ((xs->status == SCSI_STATUS_BUSY) ||
212 		   (xs->status == SCSI_STATUS_QUEUE_FULL)) {
213 	  	ahd_set_transaction_status(scb, XS_BUSY);
214 		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
215 		       ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
216         } else if ((scb->flags & SCB_SENSE) != 0) {
217                 /*
218                  * We performed autosense retrieval.
219                  *
220                  * Zero the sense data before copying it in;
221                  * the SCSI spec mandates that any data not
222                  * actually transferred be assumed to be
223                  * zero.  Then complete the 'bounce' of the
224                  * sense information out of the controller's
225                  * buffer by copying it into the client's
226                  * scsipi_xfer.
227                  */
228                 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
229                 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
230 		       sizeof(struct scsipi_sense_data));
231 
232                 ahd_set_transaction_status(scb, XS_SENSE);
233         } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
234 		struct scsi_status_iu_header *siu;
235 		u_int sense_len;
236 		int i;
237 
238 		/*
239 		 * Copy only the sense data into the provided buffer.
240 		 */
241 		siu = (struct scsi_status_iu_header *)scb->sense_data;
242 		sense_len = MIN(scsi_4btoul(siu->sense_length),
243 				sizeof(xs->sense.scsi_sense));
244 		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
245 		memcpy(&xs->sense.scsi_sense,
246 		       scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
247 		printf("Copied %u bytes of sense data offset %d:", sense_len,
248 		       SIU_SENSE_OFFSET(siu));
249 		for (i = 0; i < sense_len; i++)
250 			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
251 		printf("\n");
252 
253                 ahd_set_transaction_status(scb, XS_SENSE);
254 	}
255 
256 	if (scb->flags & SCB_FREEZE_QUEUE) {
257 	        scsipi_periph_thaw(periph, 1);
258                 scb->flags &= ~SCB_FREEZE_QUEUE;
259         }
260 
261         if (scb->flags & SCB_REQUEUE)
262                 ahd_set_transaction_status(scb, XS_REQUEUE);
263 
264         ahd_lock(ahd, &s);
265         ahd_free_scb(ahd, scb);
266         ahd_unlock(ahd, &s);
267 
268         scsipi_done(xs);
269 }
270 
271 static void
272 ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
273 {
274         struct ahd_softc *ahd;
275 	struct ahd_initiator_tinfo *tinfo;
276 	struct ahd_tmode_tstate *tstate;
277 
278 	ahd = (void *)chan->chan_adapter->adapt_dev;
279 
280 	switch(req) {
281 
282 	case ADAPTER_REQ_RUN_XFER:
283 	  {
284 		struct scsipi_xfer *xs;
285         	struct scsipi_periph *periph;
286 	        struct scb *scb;
287         	struct hardware_scb *hscb;
288 		u_int target_id;
289 		u_int our_id;
290 		u_int col_idx;
291 		char channel;
292 		int s;
293 
294 	  	xs = arg;
295                 periph = xs->xs_periph;
296 
297                 SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));
298 
299 		target_id = periph->periph_target;
300                 our_id = ahd->our_id;
301                 channel = (chan->chan_channel == 1) ? 'B' : 'A';
302 
303                 /*
304 		 * get an scb to use.
305 		 */
306 		ahd_lock(ahd, &s);
307 		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
308 					    target_id, &tstate);
309 
310 		col_idx = AHD_NEVER_COL_IDX; /* ??? */
311 
312 		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
313 			xs->error = XS_RESOURCE_SHORTAGE;
314 			ahd_unlock(ahd, &s);
315 			scsipi_done(xs);
316 			return;
317 		}
318 		ahd_unlock(ahd, &s);
319 
320 		hscb = scb->hscb;
321 
322 		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
323 		scb->xs = xs;
324 
325 		/*
326 		 * Put all the arguments for the xfer in the scb
327 		 */
328 		hscb->control = 0;
329 		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
330 		hscb->lun = periph->periph_lun;
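		/*
		 * A device reset carries no CDB; it is sent as a
		 * LUN reset task management function instead.
		 */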
331 		if (xs->xs_control & XS_CTL_RESET) {
332 			hscb->cdb_len = 0;
333 			scb->flags |= SCB_DEVICE_RESET;
334 			hscb->control |= MK_MESSAGE;
335 			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
336 			ahd_execute_scb(scb, NULL, 0);
337 		} else {
338 			hscb->task_management = 0;
339 		}
340 
341 		ahd_setup_data(ahd, xs, scb);
342 		break;
343 	  }
344 
345 	case ADAPTER_REQ_GROW_RESOURCES:
346 		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
347 		break;
348 
349 	case ADAPTER_REQ_SET_XFER_MODE:
350 	    {
351 		struct scsipi_xfer_mode *xm = arg;
352 		struct ahd_devinfo devinfo;
353 		int target_id, our_id, first;
354 		u_int width;
355 		int s;
356 		char channel;
357 
358 		target_id = xm->xm_target;
359 		our_id = chan->chan_id;
360 		channel = 'A';
361 		s = splbio();
362 		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
363 		    &tstate);
364 		ahd_compile_devinfo(&devinfo, our_id, target_id,
365 		    0, channel, ROLE_INITIATOR);
366 
367 		/*
368 		 * XXX since the period and offset are not provided here,
369 		 * fake things by forcing a renegotiation using the user
370 		 * settings if this is called for the first time (i.e.
371 		 * during probe). Also, cap various values at the user
372 		 * values, assuming that the user set it up that way.
373 		 */
374 		if (ahd->inited_target[target_id] == 0) {
375 			tinfo->goal = tinfo->user;
376 			tstate->tagenable |=
377 			    (ahd->user_tagenable & devinfo.target_mask);
378 			tstate->discenable |=
379 			    (ahd->user_discenable & devinfo.target_mask);
380 			ahd->inited_target[target_id] = 1;
381 			first = 1;
382 		} else
383 			first = 0;
384 
385 		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
386 			width = MSG_EXT_WDTR_BUS_16_BIT;
387 		else
388 			width = MSG_EXT_WDTR_BUS_8_BIT;
389 
390 		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
391 		if (width > tinfo->user.width)
392 			width = tinfo->user.width;
393 		tinfo->goal.width = width;
394 
395 		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
396 			tinfo->goal.period = 0;
397 			tinfo->goal.offset = 0;
398 			tinfo->goal.ppr_options = 0;
399 		}
400 
401 		if ((xm->xm_mode & PERIPH_CAP_DT) &&
402 		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
403 			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
404 		else
405 			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;
406 
407 		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
408 		    (ahd->user_tagenable & devinfo.target_mask))
409 			tstate->tagenable |= devinfo.target_mask;
410 		else
411 			tstate->tagenable &= ~devinfo.target_mask;
412 
413 		/*
414 		 * If this is the first request, and no negotiation is
415 		 * needed, just confirm the state to the scsipi layer,
416 		 * so that it can print a message.
417 		 */
418 		if (!ahd_update_neg_request(ahd, &devinfo, tstate,
419 		    tinfo, AHD_NEG_IF_NON_ASYNC) && first)
420 			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
421 		splx(s);
422 	    }
423 	}
424 
425 	return;
426 }
427 
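/*
 * Finish setting up an SCB once its data (if any) has been mapped,
 * queue it to the controller and, if interrupts cannot be used,
 * poll for its completion.
 */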
428 static void
429 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
430 {
431 	struct scb *scb;
432 	struct scsipi_xfer *xs;
433         struct ahd_softc *ahd;
434 	struct ahd_initiator_tinfo *tinfo;
435 	struct ahd_tmode_tstate *tstate;
436 	u_int  mask;
437         int    s;
438 
439 	scb = (struct scb*)arg;
440 	xs = scb->xs;
441 	xs->error = 0;
442 	xs->status = 0;
443 	xs->xs_status = 0;
444 	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
445 
446 	scb->sg_count = 0;
447 	if (nsegments != 0) {
448 		void *sg;
449 		int op;
450 		u_int i;
451 
452 		ahd_setup_data_scb(ahd, scb);
453 
454 		/* Copy the segments into our SG list */
455 		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
456 
457 			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
458 					  dm_segs->ds_len,
459 					  /*last*/i == 1);
460 			dm_segs++;
461 		}
462 
463 		if (xs->xs_control & XS_CTL_DATA_IN)
464 			op = BUS_DMASYNC_PREREAD;
465 		else
466 			op = BUS_DMASYNC_PREWRITE;
467 
468 		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
469 				scb->dmamap->dm_mapsize, op);
470 	}
471 
472 	ahd_lock(ahd, &s);
473 
474 	/*
475 	 * This is our last chance to check whether this
476 	 * SCB needs to be aborted.
477 	 */
478 	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
479 		if (nsegments != 0)
480 			bus_dmamap_unload(ahd->parent_dmat,
481 					  scb->dmamap);
482 		ahd_free_scb(ahd, scb);
483 		ahd_unlock(ahd, &s);
484 		return;
485 	}
486 
487 	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
488 				    SCSIID_OUR_ID(scb->hscb->scsiid),
489 				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
490 				    &tstate);
491 
492 	mask = SCB_GET_TARGET_MASK(ahd, scb);
493 
494 	if ((tstate->discenable & mask) != 0)
495 		scb->hscb->control |= DISCENB;
496 
497 	if ((tstate->tagenable & mask) != 0)
498 		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;
499 
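	/*
	 * Packetized (information unit) transfers carry the task
	 * management function in the SCB itself, so MK_MESSAGE is
	 * not needed there.
	 */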
500 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
501 		scb->flags |= SCB_PACKETIZED;
502 		if (scb->hscb->task_management != 0)
503 			scb->hscb->control &= ~MK_MESSAGE;
504 	}
505 
506 	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
507 	    (tinfo->goal.width != 0
508 	     || tinfo->goal.period != 0
509 	     || tinfo->goal.ppr_options != 0)) {
510 		scb->flags |= SCB_NEGOTIATE;
511 		scb->hscb->control |= MK_MESSAGE;
512 	} else if ((tstate->auto_negotiate & mask) != 0) {
513 	  	scb->flags |= SCB_AUTO_NEGOTIATE;
514 		scb->hscb->control |= MK_MESSAGE;
515 	}
516 
517 	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
518 
519 	scb->flags |= SCB_ACTIVE;
520 
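	/*
	 * Arm the timeout callout, converting the timeout from
	 * milliseconds to ticks and ordering the arithmetic so
	 * large timeouts do not overflow.
	 */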
521 	if (!(xs->xs_control & XS_CTL_POLL)) {
522 		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
523 			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
524 			      ahd_timeout, scb);
525 	}
526 
527 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
528 		/* Define a mapping from our tag to the SCB. */
529 		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
530 		ahd_pause(ahd);
531 		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
532 		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
533 		ahd_unpause(ahd);
534 	} else {
535 		ahd_queue_scb(ahd, scb);
536 	}
537 
538 	if (!(xs->xs_control & XS_CTL_POLL)) {
539                 ahd_unlock(ahd, &s);
540                 return;
541         }
542         /*
543          * If we can't use interrupts, poll for completion
544          */
545         SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
546         do {
547                 if (ahd_poll(ahd, xs->timeout)) {
548                         if (!(xs->xs_control & XS_CTL_SILENT))
549                                 printf("cmd fail\n");
550                         ahd_timeout(scb);
551                         break;
552                 }
553         } while (!(xs->xs_status & XS_STS_DONE));
554 
555 	ahd_unlock(ahd, &s);
556 }
557 
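/*
 * Busy-wait, in one millisecond steps, for up to 'wait' milliseconds
 * for the controller to post an interrupt, then service it.
 */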
558 static int
559 ahd_poll(struct ahd_softc *ahd, int wait)
560 {
561 
562 	while (--wait) {
563                 DELAY(1000);
564                 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
565                         break;
566         }
567 
568         if (wait == 0) {
569                 printf("%s: board is not responding\n", ahd_name(ahd));
570                 return (EIO);
571         }
572 
573         ahd_intr((void *)ahd);
574         return (0);
575 }
576 
577 
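/*
 * Set up the CDB and map the data transfer (if any) for an SCB,
 * then hand it to ahd_execute_scb().
 */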
578 static void
579 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
580 	       struct scb *scb)
581 {
582 	struct hardware_scb *hscb;
583 
584 	hscb = scb->hscb;
585 	xs->resid = xs->status = 0;
586 
587 	hscb->cdb_len = xs->cmdlen;
588 	if (hscb->cdb_len > MAX_CDB_LEN) {
589 		int s;
590 		/*
591 		 * Should CAM start to support CDB sizes
592 		 * greater than 16 bytes, we could use
593 		 * the sense buffer to store the CDB.
594 		 */
595 		ahd_set_transaction_status(scb,
596 					   XS_DRIVER_STUFFUP);
597 
598 		ahd_lock(ahd, &s);
599 		ahd_free_scb(ahd, scb);
600 		ahd_unlock(ahd, &s);
601 		scsipi_done(xs);
		return;
602 	}
603 	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
604 
605 	/* Only use S/G if there is a transfer */
606         if (xs->datalen) {
607                 int error;
608 
609                 error = bus_dmamap_load(ahd->parent_dmat,
610 					scb->dmamap, xs->data,
611 					xs->datalen, NULL,
612 					((xs->xs_control & XS_CTL_NOSLEEP) ?
613 					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
614 					BUS_DMA_STREAMING |
615 					((xs->xs_control & XS_CTL_DATA_IN) ?
616 					 BUS_DMA_READ : BUS_DMA_WRITE));
617                 if (error) {
618 #ifdef AHD_DEBUG
619                         printf("%s: in ahc_setup_data(): bus_dmamap_load() "
620 			       "= %d\n",
621 			       ahd_name(ahd), error);
622 #endif
623                         xs->error = XS_RESOURCE_SHORTAGE;
624                         scsipi_done(xs);
625                         return;
626                 }
627                 ahd_execute_scb(scb,
628 				scb->dmamap->dm_segs,
629 				scb->dmamap->dm_nsegs);
630         } else {
631                 ahd_execute_scb(scb, NULL, 0);
632         }
633 }
634 
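/*
 * Callout handler invoked when a command exceeds its timeout.  Dump
 * the controller state and reset the bus to recover.
 */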
635 void
636 ahd_timeout(void *arg)
637 {
638 	struct	scb	  *scb;
639 	struct	ahd_softc *ahd;
640 	ahd_mode_state	   saved_modes;
641 	int		   s;
642 	int		   target;
643 	int		   lun;
644 	char		   channel;
645 
646 	scb = (struct scb *)arg;
647 	ahd = (struct ahd_softc *)scb->ahd_softc;
648 
649 	printf("%s: ahd_timeout\n", ahd_name(ahd));
650 
651 	ahd_lock(ahd, &s);
652 
653 	ahd_pause_and_flushwork(ahd);
654 	saved_modes = ahd_save_modes(ahd);
655 #if 0
656 	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
657 	ahd_outb(ahd, SCSISIGO, ACKO);
658 	printf("set ACK\n");
659 	ahd_outb(ahd, SCSISIGO, 0);
660 	printf("clearing Ack\n");
661 	ahd_restore_modes(ahd, saved_modes);
662 #endif
663 	if ((scb->flags & SCB_ACTIVE) == 0) {
664 		/* Previous timeout took care of me already */
665 		printf("%s: Timedout SCB already complete. "
666 		       "Interrupts may not be functioning.\n", ahd_name(ahd));
667 		ahd_unpause(ahd);
668 		ahd_unlock(ahd, &s);
669 		return;
670 	}
671 
672 	target = SCB_GET_TARGET(ahd, scb);
673 	channel = SCB_GET_CHANNEL(ahd, scb);
674 	lun = SCB_GET_LUN(scb);
675 
676 	ahd_print_path(ahd, scb);
677 	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
678 	ahd_dump_card_state(ahd);
679 	ahd_reset_channel(ahd, channel,
680 			  /*initiate reset*/TRUE);
681 	ahd_unlock(ahd, &s);
682 	return;
683 }
684 
685 int
686 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
687 {
688 	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
689 				    M_NOWAIT /*| M_ZERO*/);
690 	if (ahd->platform_data == NULL)
691 		return (ENOMEM);
692 
693 	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
694 
695 	return (0);
696 }
697 
698 void
699 ahd_platform_free(struct ahd_softc *ahd)
700 {
701 	free(ahd->platform_data, M_DEVBUF);
702 }
703 
704 int
705 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
706 {
707 	/* We don't sort softcs under NetBSD so report equal always */
708 	return (0);
709 }
710 
711 int
712 ahd_detach(struct device *self, int flags)
713 {
714 	int rv = 0;
715 
716 	struct ahd_softc *ahd = (struct ahd_softc*)self;
717 
718 	if (ahd->sc_child != NULL)
719 		rv = config_detach((void *)ahd->sc_child, flags);
720 
721 	shutdownhook_disestablish(ahd->shutdown_hook);
722 
723 	ahd_free(ahd);
724 
725 	return rv;
726 }
727 
728 void
729 ahd_platform_set_tags(struct ahd_softc *ahd,
730 		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
731 {
732 	struct ahd_initiator_tinfo *tinfo;
733         struct ahd_tmode_tstate *tstate;
734 
735         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
736                                     devinfo->target, &tstate);
737 
738         if (alg != AHD_QUEUE_NONE)
739                 tstate->tagenable |= devinfo->target_mask;
740 	else
741 	  	tstate->tagenable &= ~devinfo->target_mask;
742 }
743 
744 void
745 ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
746 	       ac_code code, void *opt_arg)
747 {
748 	struct ahd_tmode_tstate *tstate;
749 	struct ahd_initiator_tinfo *tinfo;
750 	struct ahd_devinfo devinfo;
751 	struct scsipi_channel *chan;
752 	struct scsipi_xfer_mode xm;
753 
754 #ifdef DIAGNOSTIC
755 	if (channel != 'A')
756 		panic("ahd_send_async: not channel A");
757 #endif
758 	chan = &ahc->sc_channel;
759 	switch (code) {
760 	case AC_TRANSFER_NEG:
761 		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
762 			    &tstate);
763 		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
764 		    channel, ROLE_UNKNOWN);
765 		/*
766 		 * Don't bother if negotiating. XXX?
767 		 */
768 		if (tinfo->curr.period != tinfo->goal.period
769 		    || tinfo->curr.width != tinfo->goal.width
770 		    || tinfo->curr.offset != tinfo->goal.offset
771 		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
772 			break;
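		/*
		 * Report the negotiated parameters to the scsipi
		 * layer as an xfer mode event.
		 */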
773 		xm.xm_target = target;
774 		xm.xm_mode = 0;
775 		xm.xm_period = tinfo->curr.period;
776 		xm.xm_offset = tinfo->curr.offset;
777 		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
778 			xm.xm_mode |= PERIPH_CAP_DT;
779 		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
780 			xm.xm_mode |= PERIPH_CAP_WIDE16;
781 		if (tinfo->curr.period)
782 			xm.xm_mode |= PERIPH_CAP_SYNC;
783 		if (tstate->tagenable & devinfo.target_mask)
784 			xm.xm_mode |= PERIPH_CAP_TQING;
785 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
786 		break;
787 	case AC_BUS_RESET:
788 		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
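		/* FALLTHROUGH */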
789 	case AC_SENT_BDR:
790 	default:
791 		break;
792 	}
793 }
794